diff --git a/.gitattributes b/.gitattributes index 1bb034ec22e214ced31a9efb9b937918bdff0809..7455a1f18c55630a1a17b20c8fe8792d3201562e 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1648,3 +1648,11 @@ data/2025/2503_14xxx/2503.14088/240a9a75-8db1-4b3f-b6a1-a38880c80455_origin.pdf data/2025/2503_14xxx/2503.14118/5585c5a4-326b-4f41-99b2-afa63d72d72a_origin.pdf filter=lfs diff=lfs merge=lfs -text data/2025/2503_16xxx/2503.16528/ee4ac0dd-8eff-4341-983b-5242b8162c88_origin.pdf filter=lfs diff=lfs merge=lfs -text data/2025/2503_17xxx/2503.17395/5d1453fc-cf43-46d6-9479-151164433999_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2503_13xxx/2503.13695/56a188b9-c8a3-4011-b41c-a815bc66d1a0_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2503_13xxx/2503.13709/a5bb8084-4bf7-4b72-9901-fbf46d3fc4b9_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2503_13xxx/2503.13751/2a994aa1-2c31-48af-8c61-ad007e40c304_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2503_13xxx/2503.13804/53558f89-5ff8-41c8-b6ca-fe406c0656ac_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2503_13xxx/2503.13861/24ebc1ba-af7b-4d3e-a5d9-ba11158e223d_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2503_13xxx/2503.13881/f8ea68e7-ed15-4c1e-a85c-7872bf8b0c7c_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2503_13xxx/2503.13933/abcf6c14-6474-4c8a-adec-45f736d3be15_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2503_14xxx/2503.14350/f51fef62-e3ca-4f33-b47c-8b3a779fe535_origin.pdf filter=lfs diff=lfs merge=lfs -text diff --git a/data/2025/2503_13xxx/2503.13695/56a188b9-c8a3-4011-b41c-a815bc66d1a0_content_list.json b/data/2025/2503_13xxx/2503.13695/56a188b9-c8a3-4011-b41c-a815bc66d1a0_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..29481b2d6fe99533c6398064cd16755c86311912 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/56a188b9-c8a3-4011-b41c-a815bc66d1a0_content_list.json @@ -0,0 +1,3638 @@ +[ + 
{ + "type": "text", + "text": "Mitigating Spectral Bias in Neural Operators via High-Frequency Scaling for Physical Systems", + "text_level": 1, + "bbox": [ + 117, + 80, + 878, + 130 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Siavash Khodakaramia, Vivek Oommenb, Aniruddha Boraa, George Em Karniadakisa,c,*", + "bbox": [ + 144, + 149, + 850, + 167 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{a}$ Division of Applied Mathematics, Brown University, Providence, RI, 02912, USA", + "bbox": [ + 218, + 178, + 774, + 192 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{b}$ School of Engineering, Brown University, Providence, RI, 02912, USA", + "bbox": [ + 257, + 192, + 739, + 206 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{c}$ Pacific Northwest National Laboratory, Richland, WA, 99354, USA", + "bbox": [ + 268, + 206, + 727, + 219 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 115, + 274, + 196, + 288 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Neural operators have emerged as powerful surrogates for modeling complex physical problems. However, they suffer from spectral bias making them oblivious to high-frequency modes, which are present in multiscale physical systems. Therefore, they tend to produce over-smoothed solutions, which is particularly problematic in modeling turbulence and for systems with intricate patterns and sharp gradients such as multi-phase flow systems. In this work, we introduce a new approach named high-frequency scaling (HFS) to mitigate spectral bias in convolutional-based neural operators. By integrating HFS with proper variants of UNet neural operators, we demonstrate a higher prediction accuracy by mitigating spectral bias in single and two-phase flow problems. Unlike Fourier-based techniques, HFS is directly applied to the latent space, thus eliminating the computational cost associated with the Fourier transform. 
Additionally, we investigate alternative spectral bias mitigation through diffusion models conditioned on neural operators. While the diffusion model integrated with the standard neural operator may still suffer from significant errors, these errors are substantially reduced when the diffusion model is integrated with a HFS-enhanced neural operator.", + "bbox": [ + 117, + 298, + 887, + 537 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Keywords: Neural operator, Spectral Bias, Two-phase flow, Boiling, Kolmogorov flow, Diffusion model", + "bbox": [ + 115, + 545, + 823, + 579 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 115, + 621, + 257, + 638 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Design and control problems in engineering often require repeated simulation of the underlying physical system, necessitating the solution of governing partial differential equations (PDEs) multiple times. For a wide range of applications from fluid dynamics to material science, classical discretization-based direct numerical simulation (DNS) [1, 2, 3, 4] has been the cornerstone of scientific computing. While the methods for DNS have matured significantly over the past several decades, their computational cost becomes prohibitive when performing repeated simulations over varying parametric conditions or configurations. This challenge has fueled a growing interest in developing computationally efficient surrogate models capable of approximating these simulations at only a fraction of the cost.", + "bbox": [ + 117, + 649, + 884, + 803 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In particular, the classical DNS can estimate the solution for a given set of conditions. If one of these conditions is modified, the solver has to be re-run, further aggravating the computational cost. 
To mitigate this issue, neural operators were developed to handle a plurality of", + "bbox": [ + 115, + 821, + 884, + 873 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2503.13695v1 [cs.LG] 17 Mar 2025", + "bbox": [ + 21, + 304, + 60, + 725 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Corresponding Author: george_karniadakis@brown.edu", + "bbox": [ + 139, + 898, + 527, + 913 + ], + "page_idx": 0 + }, + { + "type": "footer", + "text": "March 19, 2025", + "bbox": [ + 768, + 916, + 880, + 931 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "conditions and parametric settings [5, 6, 7, 8, 9, 10, 11]. Neural operators, which are based on the universal operator approximation theorem [12], are trained to learn the mapping between infinite-dimensional functional spaces. Although it is expensive to train such surrogates offline, a trained neural operator can efficiently estimate solutions of unseen conditions almost instantaneously during inference. Many studies have used neural operators as surrogates to learn physical problems in space and time. Various physical problems such as vortex-induced vibration [13], crack nucleation and propagation [14], Riemann problems [15], turbulence [16, 17], plasma modeling [18], and many more have been solved, at least under limited conditions, by neural operators. Furthermore, other studies [19, 20, 21] attempted to learn the temporal evolution of two-phase microstructures in diffusion-driven processes such as spinodal decomposition and dendritic growth. However, very few studies have investigated the application of neural operators for buoyancy-dominated or advection-dominated two-phase flow problems, such as those encountered in boiling and condensation [22].", + "bbox": [ + 117, + 85, + 884, + 310 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "1.1. 
Neural operators and applications in two-phase flow modeling", + "text_level": 1, + "bbox": [ + 117, + 340, + 665, + 357 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Modeling and predicting two-phase flow during boiling is one of the most challenging problems in computational fluid dynamics. These phenomena involve complex interface dynamics and phase transitions, resulting in high-frequency spatio-temporal variations that are both challenging and computationally expensive to capture. Analyzing the solutions of such a system reveals a slowly decaying energy spectrum, where even the high wavenumbers carry a nontrivial amount of energy that cannot be neglected. Effective modeling of a two-phase flow system requires the neural operators to accurately predict spatio-temporal evolution of both low and high wavenumber modes. Unfortunately, neural networks and neural operators suffer from spectral bias [23, 24, 25], which makes them oblivious to high wavenumber modes. Consequently, the neural operators can only offer an over-smoothed prediction that fails to capture the intricate features near the interfaces where the sharp gradients are commonly observed.", + "bbox": [ + 117, + 361, + 884, + 549 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Previous studies in boiling modeling with neural operators also confirm the spectral bias problem. [26] used DeepONet [5] to solve for the transient solution of a single bubble growth. Their findings demonstrate that DeepONet can effectively capture the mean component of the solution in the microscale regime, but it fails to accurately predict the stochastic fluctuations described by high-frequency components of the solution. A study by Jain et al. [27] on the prediction of multiphase flow through porous media with UNet [28] also showed that larger errors occurred near the interfaces. The Fourier neural operator (FNO) [6] also suffers from spectral bias [29]. 
The common practice of truncating high-frequency modes in FNOs leads to the loss of rich information, hindering the accurate modeling of chaotic systems in multi-phase heat transfer and turbulence. However, without truncation, training FNOs becomes unstable [9].", + "bbox": [ + 117, + 565, + 884, + 755 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "A recent study by Hassan et al.[30] collected a valuable boiling dataset based on Flash-X simulations [31] and developed neural operators based on different structures such as UNet, FNO, and group equivariant FNO (GFNO) for prediction in boiling problems. As shown in the results of our work, the previously best neural operator still struggles to capture high-frequency modes, which are prominently observed within the bubbles, along the interfaces, and in condensation traces in subcooled pool boiling. These over-smoothened solutions highlight the need for further advancements to mitigate spectral bias in modeling phase-change and multi-phase flow phenomena. Similarly, spectral bias of neural operators cannot be overlooked when mod-", + "bbox": [ + 117, + 772, + 884, + 910 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 492, + 916, + 505, + 929 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "eling other chaotic systems like turbulence [32], where small-scale, low-energy features play a crucial role.", + "bbox": [ + 117, + 87, + 878, + 118 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "1.2. Spectral bias mitigation strategies", + "text_level": 1, + "bbox": [ + 119, + 135, + 435, + 152 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Previous studies have proposed various methods to mitigate spectral bias and over-smoothing in deep neural networks (DNNs). Cai et al. [33] proposed a multi-scale DNN (MscaleDNN) to enhance approximations over a wide range of frequencies for the solution of PDEs. Tancik et al. 
[34] proposed Fourier feature mapping for coordinate-based multilayer perceptron (MLP) to tackle spectral bias in image regression tasks in low dimensional domains. Wang et al. [35] used Fourier feature mapping along with Physics-informed Neural Networks (PINNs) [36, 37] to enhance the multi-scale PDE solutions by mitigating the spectral bias compared to vanilla PINN. A better optimization of activation functions have been also shown to slightly reduce spectral bias of DNNs and PINNs [38, 39]. Phase shift DNN is another method converting high-frequency component of the data into low frequency spectrum, which can be learned and represented by a DNN. Subsequently, the learned representation is converted into the original high-frequency. However, phase shift DNN suffers from the curse of dimensionality [40].", + "bbox": [ + 117, + 155, + 885, + 360 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Efforts have also been made to mitigate the spectral bias encountered by neural operators trained to learn spatiotemporal systems. Lippe at al. [41] developed PDE-Refiner, which iteratively adds noise to perturb different scales of the system and trains the neural operator to correct these corrupted states. Zhang et al. [42] developed Hybrid Iterative Numerical Transferable Solver (HINTS) to exploit the spectral bias in solving large linear systems by blending neural operators and relaxation methods. Generative Artificial Intelligence (GenAI)-based algorithms are also emerging as effective methods to overcome the spectral bias barrier. Wu et al. [43] accurately reconstructed the small-scale structures accompanying turbulent boundary layers in wall turbulence using Super Resolution Generative Adversarial Networks (SRGANs). Wang et al. [44] developed a framework based on GANs to reconstruct high spatiotemporal resolution supersonic flow states from sparse measurements. Molinaro et al. 
[45] developed GenCFD using score-based diffusion models to learn three-dimensional turbulence in compressible and incompressible flows. Lockwood et al. [46] used denoising diffusion probabilistic models to refine the estimates of tropical cyclone wind intensities. Oommen et al. [47] addressed the spectral limitations of neural operators in modeling a series of turbulent systems by training a conditional score-based diffusion model conditioned on the neural operator as prior.", + "bbox": [ + 117, + 378, + 878, + 652 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this work, we first propose the use of UNet with residual blocks (ResUNet) to achieve more accurate two-phase flow predictions compared to the state-of-the-art neural operators. Subsequently, we present a new method named high-frequency scaling (HFS) to mitigate spectral bias in two-phase flow predictions. Our approach demonstrates higher accuracy and better alignment of energy spectra, with negligible additional memory requirements and only a small computational overhead on the neural operator. We applied HFS to different variants of ResUNet. Finally, we explore the dependency of diffusion models on the prior accuracies when integrated with neural operators. Specifically, we show that the integration of the diffusion model with neural operators equipped with HFS results in further mitigation of spectral bias without compromising prediction accuracy. We demonstrate the effectiveness of our methodology for both two-phase and single-phase flows.", + "bbox": [ + 117, + 670, + 878, + 857 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The manuscript is organized as follows. We start with providing an in-depth description about neural operators, HFS, and diffusion models in Section 2. 
We present the results of our", + "bbox": [ + 117, + 876, + 878, + 909 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 492, + 917, + 504, + 928 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "investigations in Section 3, followed by discussion and summary in Sections 4 and 5, respectively. In the Appendix, we include more technical details and additional results.", + "bbox": [ + 114, + 86, + 880, + 118 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2. Methods", + "text_level": 1, + "bbox": [ + 115, + 141, + 223, + 156 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.1. Neural Operators", + "text_level": 1, + "bbox": [ + 115, + 168, + 305, + 185 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The mathematical operator $\\mathcal{N}$ that governs the temporal evolution of a time-dependent system can be expressed as,", + "bbox": [ + 114, + 190, + 878, + 223 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {u} (\\boldsymbol {x}, t + \\Delta t) \\approx \\mathcal {N} (\\boldsymbol {u} (\\boldsymbol {x}, t)) (\\Delta t), \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 379, + 225, + 880, + 242 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathbf{u}$ is the representative state variable(s) of interest. The objective here is to train a neural operator $\\mathcal{F}_{\\theta}$ to learn the true underlying operator $(\\mathcal{N})$ by, typically, minimizing the mean of an error norm such as $\\| \\pmb {u}(\\pmb {x},t + \\Delta t) - \\mathcal{F}_{\\theta}(u(\\pmb {x},t))(\\Delta t)\\| _2$ .", + "bbox": [ + 114, + 249, + 880, + 300 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In this work, we focus on resolving solutions in pool boiling problems and single-phase turbulent flows. We start our analysis with pool boiling problems. Then, we investigate the application of our method on single-phase turbulent flows. 
There have been several efforts to use neural operators to learn temperature and flow dynamics in two-phase flow problems. Here, we demonstrate the advantage of using the ResUNet structure compared to previously developed neural operators such as UNet and FNO for two-phase flow problems with high-frequency features [48]. The models are trained to predict future temperatures based on temperature history and velocity information. The problem configuration is shown in Equation 2, where $x$ is the spatial mesh, $T$ is the temperature, $V$ is the velocity, $k$ specifies the prediction time interval length, and $\\mathcal{F}_{\\theta}$ is the trained neural operator.", + "bbox": [ + 114, + 318, + 884, + 489 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nT (\\boldsymbol {x}, t: t + k \\Delta t) = \\mathcal {F} _ {\\theta} (T (\\boldsymbol {x}, t - k \\Delta t: t), V (\\boldsymbol {x}, t - k \\Delta t: t + k \\Delta t)) \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 253, + 507, + 880, + 525 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "UNet with residual blocks (ResUNet) was first introduced for a semantic segmentation task by imposing skip connections between convolutional layers in a UNet-like structure [49]. We use the same idea to add skip connections in the form of residual blocks to both the encoder and decoder side of the UNet. The residual blocks have been shown to mitigate vanishing gradient problems by offering a smoother optimization landscape [50]. We also demonstrate that they help with the better flow of information in the network for complex datasets such as two-phase flows, which results in a better capture of localized features, possibly reducing the spectral bias towards low-frequency components. We also introduced several modifications, such as the GELU activation function and group normalization, that demonstrated superior prediction accuracy. 
We used the mean squared error (MSE) loss function in all prediction time steps (Equation 3) as the objective criterion to train the model, i.e.,", + "bbox": [ + 114, + 531, + 882, + 721 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nL (\\theta) = \\frac {1}{N _ {u} k} \\sum_ {i = 1} ^ {N _ {u}} \\sum_ {j = 1} ^ {k} \\| T ^ {i} (\\boldsymbol {x}, t + j \\Delta t) - \\mathcal {F} _ {\\theta} (T ^ {i} (\\boldsymbol {x}, t)) (j \\Delta t) \\| _ {2} ^ {2} \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 278, + 734, + 880, + 778 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We used the Lion optimizer [51] to perform the optimization as we observed superior performance with this optimizer compared to the conventional Adam optimizer [52]. More details about the ResUNet structure, the training hyperparameters, and comparison with UNet predictions are included in Appendix A.", + "bbox": [ + 114, + 799, + 880, + 866 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We evaluated our baseline neural operator on both saturated and subcooled pool boiling datasets from the BubbleML data repository, which is generated through Flash-X simulations", + "bbox": [ + 114, + 866, + 880, + 901 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 492, + 917, + 505, + 929 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "[53] and were collected in a previous study [48]. It should be noted that predictions in sub-cooled boiling is more difficult due to the vortices generated by condensation trails. Therefore, the errors are higher in subcooled boiling predictions, and the results look more over-smoothed compared to saturated boiling prediction results. A visualization of the subcooled boiling prediction results is shown in Appendix A. 
A comprehensive comparison of our baseline model with the previous best baseline model developed by [48] is included in Table 1 and Table 2 for saturated and subcooled pool boiling dataset, respectively. The ResUNet improves the resolution of high-frequency features, resulting in higher prediction accuracy. We note that given the possible differences in the testing dataset, the one-to-one comparison with the previously reported numbers may not be fair. Therefore, we trained and tested the previously reported best model (e.g., UNet) with our dataset configuration, which consists of a larger test dataset and smaller training dataset compared to the previous work.", + "bbox": [ + 112, + 85, + 882, + 292 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We evaluated our model using six different field metrics relevant to two-phase problems. These metrics include relative error (Rel. Error), root mean square error (RMSE), boundary RMSE (BRMSE) showing the error on the boundaries, bubble RMSE showing the error in the bubble areas and at the interfaces, mean maximum error $(\\mathrm{Max}_{\\mathrm{mean}})$ showing the mean of the maximum error for each prediction, and overall maximum error $(\\mathrm{Max}_{\\mathrm{max}})$ showing the maximum error over the test dataset. We also evaluated the predictions in three different frequency bands using spectral errors at low frequency $(F_{\\mathrm{low}})$ , medium frequency $(F_{\\mathrm{mid}})$ , and high frequency $(F_{\\mathrm{high}})$ . Exact definitions of BRMSE and bubble RMSE, as well as spectral errors are described in Appendix B. All the metrics are computed on the normalized dataset $(T^{i}(\\boldsymbol{x},t + j\\Delta t)\\in [-1,1]\\forall \\{i,j\\})$ . 
For all the results, the state of the temperature at five future time-steps are predicted based on five time-step previous temperature history and the velocity information in two spatial dimensions.", + "bbox": [ + 112, + 309, + 884, + 514 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/f37a91c2b7d01648a6b5163c046b04d6f2f03461a786f529a25ee22873ad0693.jpg", + "table_caption": [ + "Table 1: Saturated pool boiling temperature prediction errors. The training dataset consists of simulations from 11 different wall temperatures. The test dataset consists of simulations with two other wall temperatures $(70^{\\circ}\\mathrm{C},$ and $95^{\\circ}\\mathrm{C})$ not seen during training." + ], + "table_footnote": [], + "table_body": "
UNetResUNet
Rel. Error0.01910.0149
RMSE0.01890.0148
BRMSE0.05820.0364
Bubble RMSE0.1160.0726
Maxmean0.7050.553
Maxmax1.2041.154
Flow0.1050.0745
Fmid0.1130.0919
Fhigh0.02380.0182
Parameters [Millions]7.83.5
", + "bbox": [ + 305, + 583, + 690, + 782 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 492, + 916, + 505, + 929 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/26fae4e7f5d449cdb38456dce2702cfb869bd6bbd14b388e48a3f8928a80c39d.jpg", + "table_caption": [ + "Table 2: Subcooled pool boiling temperature prediction errors. The training dataset consists of simulations from eight different wall temperatures. The test dataset consists of simulations with two other wall temperatures (95°C, and 98°C) not seen during training." + ], + "table_footnote": [], + "table_body": "
UNetResUNet
Rel. Error0.05160.0295
RMSE0.05010.0288
BRMSE0.1390.0646
Bubble RMSE0.2690.127
Maxmean1.1410.837
Maxmax2.2791.433
Flow0.3460.157
Fmid0.3670.197
Fhigh0.05830.0370
Parameters [Millions]7.83.5
", + "bbox": [ + 305, + 136, + 692, + 335 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The results in Tables 1 and 2 demonstrate that all the metrics are improved by simply introducing residual blocks in the network, a better optimizer, and a better normalization. For example, there is approximately $21\\%$ and $42\\%$ reduction of RMSE in saturated and subcooled boiling, respectively. Interestingly, the ResUNet achieves better accuracies with less than half of the number of parameters in UNet. Most of the prediction errors occur within the bubble areas and at the condensation trails. This is due to the larger gradients in the bubble areas and around condensation trails resulting into more complex patterns that are more challenging to capture with the neural operator. This is expected as the neural operators are known to have spectral bias to low-frequency modes. The high-frequency content typically exists in regions with significant gradients such as interfaces and condensation trails. In subcooled pool boiling, departing bubbles may condense after departure, creating vortices that gradually dissipate over time. These vortices form complex structures containing higher energy at high frequencies. As a result, subcooled boiling presents greater prediction challenges compared to saturated boiling. For instance, prediction spectral errors $(F_{\\mathrm{low}}, F_{\\mathrm{mid}}, F_{\\mathrm{high}})$ are approximately two times higher in subcooled boiling, highlighting the increased complexity with the high-frequency content.", + "bbox": [ + 114, + 354, + 882, + 612 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "While the residual blocks improve the neural operator's ability to reduce field errors (e.g., RMSE) and over-smoothing of certain high-frequency contents, the results still suffer from significant over-smoothing (see Appendix A). 
Previous studies have also shown the oversmoothing issue of convolutional based neural operators for image generation tasks and scientific computing [54, 55]. Other studies demonstrated the frequency selectiveness of convolutional neural network (CNN) architectures resulting in different learning rates for low and high-frequency components [56, 57]. Wang et al. [58] demonstrated the spectral bias in vision transformers (ViT) through Fourier analysis. They showed that the problem arises by self-attention layers that act as low-pass filters and continuously reduce high-frequency information with the network depth. A feature scaling technique was proposed to decompose the attention output signal into direct and high-frequency components and scale them separately to adjust the proportion of different frequencies of the signal. We draw inspiration from this technique and propose a similar approach to separately scale low frequency and high-frequency components of the features in the latent space of the neural operator to mitigate spectral bias.", + "bbox": [ + 114, + 627, + 880, + 869 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 492, + 917, + 505, + 928 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "2.2. High-frequency scaling", + "text_level": 1, + "bbox": [ + 115, + 86, + 349, + 103 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "As discussed in Section 2.1, neural operators suffer from spectral bias. While residual blocks offer improvements up to some extent, they cannot effectively mitigate the spectral bias inherent in the neural operators. Hence, we propose the high-frequency scaling (HFS) approach to be applied to the output of convolutional layers. The latent feature map of each convolutional layer is first divided into non-overlapping patches, similar to the first step in vision transformers. 
This will break down the spatial dimensions into smaller regions, which empirically will allow for better localized processing. We consider the mean of the patches as the direct component (DC) of these signals. Then, the high-frequency component (HFC) for each patch can be defined as the difference of each patch with the DC. It should be noted that here the DC is calculated across the patches and not individually for each patch. Then, we introduce two parameter groups of $\\lambda_{\\mathrm{DC}}$ and $\\lambda_{\\mathrm{HFC}}$ to separately scale the DC and HFC for each patch. We then re-assemble the patches to the original latent feature size before the next operation.", + "bbox": [ + 112, + 107, + 882, + 311 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "A more rigorous description of the method is as follows: Let $X \\in \\mathbb{R}^{H \\times W \\times C}$ be the output feature map of a convolutional layer, where $H$ , $W$ , and $C$ are the height, width, and number of channels, respectively. We divide $X$ into $N$ non-overlapping patches of size $p \\times p$ denoted as $X^{(i)} \\in \\mathbb{R}^{p \\times p \\times C}$ , where $i \\in [0, N]$ . The DC is defined as the mean patch across all $N$ patches as shown in Equation (4). The HFC calculation for each patch and the scaling step are shown in Equations (5-6):", + "bbox": [ + 114, + 312, + 884, + 416 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\nD C (X) = \\frac {1}{N} \\sum_ {i = 1} ^ {N} X ^ {(i)}, \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 416, + 429, + 880, + 472 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\nH F C \\left(X ^ {(i)}\\right) = X ^ {(i)} - D C (X), \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 388, + 485, + 880, + 502 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {X} ^ {(i)} = X ^ {(i)} + \\lambda_ {D C} \\odot D C (X) + \\lambda_ {H F C} \\odot H F C (X ^ {(i)}). 
\\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 307, + 526, + 880, + 545 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The scaled feature map can be then reconstructed by re-assembling the $\\hat{X}^{(i)}\\mathrm{s}$", + "bbox": [ + 144, + 551, + 766, + 568 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The scaling parameters $\\lambda_{DC} \\in \\mathbb{R}^{1 \\times 1 \\times C}$ and $\\lambda_{HFC} \\in \\mathbb{R}^{1 \\times 1 \\times C}$ are left to be learnable parameters that are optimized using gradient descent simultaneously with the network optimization. Here, we initialized the parameters to be one and optimized them with the same learning rate used for network training. In ResUNet structure, HFS is applied to the output of both convolutional layers and the skip-connection paths with $1 \\times 1$ convolutions or identity skip-connections. In practice, HFS can be seen as a new module incorporated to each layer of the encoder and decoder, as shown in Fig. 1. Fig. 1 also depicts examples of the learned feature maps for models with and without HFS. The most similar feature maps between the models from the first encoder layers and the last decoder layers are depicted. The model with HFS learns features with more pronounced high-frequency content and reduced over-smoothing, which possibly enhances the capture of high-frequency components of the solution and mitigates spectral bias of the neural operator. A summary of the improvements in prediction accuracy achieved through HFS is provided in Appendix C.", + "bbox": [ + 112, + 569, + 882, + 791 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 492, + 916, + 505, + 929 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/cb76dc3b8182510948260c393fd0b1837e9635110ffd915e5f5771e0712f0739.jpg", + "image_caption": [ + "Figure 1: Structure of the HFS-enhanced NO. 
(a) Schematic of the HFS module (right) integrated with the residual block (left). (b) Structure of the ResUNet with the HFS modules (blocks in front of conv layers). (c) An example of a learned latent space feature from the first layer of the encoder trained with and without HFS. The most similar feature maps of the models in the first encoder level are shown. (d) An example of a learned latent space feature from the last layer of the decoder trained with and without HFS. The most similar feature maps of the two models at the last decoder level are shown. (e-f) Examples of temperature prediction with NO and HFS-enhanced NO at two different time-steps. A region with high-frequency features (top right corner) is zoomed in for better visualization." + ], + "image_footnote": [], + "bbox": [ + 122, + 89, + 357, + 373 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/686e2d01f5e6a349e72871fbccfb325e6cbbd5c81366e8d859267b2c0dfd9c41.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 366, + 89, + 537, + 378 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/0b11dca30d91ab7cc7f583b42ca3b48537df8a367e51724eab149a411f6447b1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 546, + 87, + 690, + 202 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/86cb337727b5fed499f55faf0f9b8ba65d56df1ad1aefcc124e2d92f7f7ac2c2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 522, + 202, + 673, + 376 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/4c7260a04619c0b1d7dbd6af86e9d60a152d47c4eda51b99716497509cce91f6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 682, + 87, + 843, + 202 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/5920e7c922da97a60a54c9bbdc9099dafa8582f64a270ac461208c70d6889c24.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 685, + 202, + 865, + 375 + ], + "page_idx": 7 + }, + { + 
"type": "text", + "text": "2.3. Diffusion Model", + "text_level": 1, + "bbox": [ + 115, + 531, + 294, + 548 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "As discussed earlier, the NO and the HFS-enhanced NO learn the solution by minimizing some variant of the Euclidean distance, such as MSE, RMSE, relative $L^2$ or relative $L^1$ norms of the errors, between the true and predicted states. Unfortunately, such a loss function effectively prioritizes the error at those wavenumbers that bear higher energy. The systems considered in this study exhibit a decaying energy spectrum, implying that the lower wavenumbers carrying higher energy will be over-represented, while the higher wavenumbers that bear lower energy will be ignored due to its minimal influence on the Euclidean distance-based loss function. The recent efforts aimed at improving the spectral bias of NO using GenAI algorithms, discussed in Section 1, motivated us to explore this route. Specifically, we investigate if diffusion models [59] can help further refine the predictions estimated by NO and HFS-enhanced NO.", + "bbox": [ + 114, + 552, + 882, + 724 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Diffusion models (DM) are generative frameworks capable of producing samples that align with the true underlying function distribution, $\\mathcal{T}$ , given a limited set of observations from $\\mathcal{T}$ . These models achieve sample generation by progressively refining a simple prior distribution, such as a standard normal distribution $(\\Gamma_0 = \\mathcal{N}(0,I))$ , into the desired complex distribution $(\\Gamma_N \\approx \\mathcal{T})$ over $N$ iterative steps.", + "bbox": [ + 114, + 740, + 880, + 827 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The diffusion process begins with an initial sample $\\mathbf{X_0}$ drawn from $\\Gamma_0$ and subsequently predicts $\\mathbf{X}_1$ . 
Since $\\Gamma_0 = \\mathcal{N}(0,I)$ , obtaining $\\mathbf{X}_0$ is straightforward. The model then iteratively refines the sample, estimating $\\mathbf{X}_{i + 1}$ from $\\mathbf{X}_i$ over $N$ steps. However, a key challenge arises on how to train the diffusion model to transition from $\\Gamma_0 = \\mathcal{N}(0,I)$ to $\\Gamma_N\\approx \\mathcal{T}$ when intermedi-", + "bbox": [ + 114, + 843, + 880, + 910 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 492, + 916, + 505, + 929 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "ate distributions $\\Gamma_{i}$ for $i = \\{1,2,3,\\ldots ,N - 1\\}$ are not explicitly available. This challenge is addressed using denoising score matching combined with Langevin dynamics [60]. The objective of a score-based diffusion model is to estimate the score function, which is defined as $s_{\\theta_D}(\\mathbf{X}) = \\nabla_X\\log p(\\mathbf{X})$ , where $\\theta_{D}$ represents the parameters of the diffusion model and $p$ is the probability density of $\\mathbf{X}$ , where $\\mathbf{X}$ corresponds to continuous realizations of $\\mathbf{X}_i\\sim \\Gamma_i$ . Since the exact data distribution is unknown and may reside on a lower-dimensional manifold, the score function can become ill-defined in regions lacking data. To mitigate this issue, Gaussian noise is added to perturb the data, ensuring a well-defined score function across the entire space by smoothing the distribution. The score function provides a directional gradient toward regions of higher probability. However, a direct mechanism to sample from the learned distribution is still absent.", + "bbox": [ + 112, + 85, + 882, + 274 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "This limitation is overcome using Langevin dynamics, as proposed in [61]. 
Langevin dynamics ensures that the generated samples converge to the true underlying distribution by balancing deterministic motion, driven by the gradient of the log probability, with stochastic exploration introduced by noise. In our approach, we condition the score function on the output of the pre-trained NO, $\\mathcal{F}_{\\theta}$ , leading to the modified score function:", + "bbox": [ + 112, + 292, + 884, + 380 + ], + "page_idx": 8 + }, + { + "type": "equation", + "text": "\n$$\ns _ {\\theta_ {D}} (\\mathbf {X}, \\sigma , \\mathcal {F} _ {\\theta}) = \\nabla_ {X} \\log p (\\mathbf {X} | \\mathcal {F} _ {\\theta}), \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 373, + 395, + 878, + 413 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "where $\\sigma$ represents the noise level. This conditioned score function guides the DM to sample from the posterior distribution of $\\mathbf{X}$ given $\\mathcal{F}_{\\theta}$ , ensuring that the generated samples are consistent with both the structures imposed by $\\mathcal{F}_{\\theta}$ and the true data distribution. The update rule for Langevin dynamics is given by:", + "bbox": [ + 112, + 420, + 882, + 491 + ], + "page_idx": 8 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {X} _ {j + 1} = \\mathbf {X} _ {j} + \\frac {\\varepsilon}{2} s _ {\\theta_ {D}} (\\mathbf {X} _ {j}, \\sigma_ {j}, \\mathcal {F} _ {\\theta}) + \\sqrt {\\varepsilon} z _ {j}, \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 346, + 500, + 880, + 530 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "where $\\varepsilon$ is the step size, $z_{j}$ is the noise component, and $\\sigma_{j}$ denotes the noise scale at iteration $j$ during the sampling process. 
The iterative denoising of the noised states by a diffusion model conditioned on the outputs of a pre-trained HFS-enhanced NO is illustrated in Fig 2.", + "bbox": [ + 112, + 533, + 882, + 585 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "During training, the diffusion model learns to denoise the state of the system perturbed by a noise with zero mean and $\\sigma$ standard deviation, where $\\ln \\sigma \\sim \\mathcal{N}(-1.2, 1.2^2)$ . When $\\sigma$ is small, the score function $s_{\\theta_D}$ increasingly focuses on reconstructing high-frequency details and vice versa. In this manner, the diffusion model learns to perturb and reconstruct the signal at multiple scales, unlike the NO whose scale is fixed throughout its training, and thereby learns the structure of the underlying system across all the scales. Our implementation of the DM conditioned on the NO and HFS-enhanced NO is based on [47] that adopts the training, network architecture, pre-conditioning, and sampling routine proposed in \"EDM\" [62].", + "bbox": [ + 112, + 586, + 880, + 722 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 492, + 916, + 505, + 929 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/faa78b851598a3a8e280e4c419776b1b8ff245583ac31e14424ca9a458edbe62.jpg", + "image_caption": [ + "Figure 2: Mitigating Spectral Bias with Diffusion Model. The states estimated by the NO exhibit oversmoothing. They serve as the prior that conditions the DM, which in turn reconstructs the missing frequencies iteratively through conditional sampling. The results are based on a NO with 2 million parameters." + ], + "image_footnote": [], + "bbox": [ + 122, + 85, + 868, + 609 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "3. Results", + "text_level": 1, + "bbox": [ + 115, + 697, + 211, + 714 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "3.1. 
HFS-enhanced NO for two-phase flow problems", + "text_level": 1, + "bbox": [ + 115, + 726, + 549, + 741 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "We first conduct several experiments with different variants of ResUNet to demonstrate the advantage of HFS in spectral bias mitigation for two-phase flow operator learning problem. Given the higher complexity of subcooled boiling data compared to saturated boiling data, we will focus on the subcooled boiling experiments. Examples showing the saturated boiling predictions are shown in Appendix D. Given the flexibility of our NO structure, we investigated different variants of ResUNet by varying the NO size by changing the number of parameters in the range of $\\sim 2$ to $\\sim 16$ million parameters. The number of parameters was changed by simply changing the number of latent feature maps at each level of the ResUNet structure. The number of downsamplings/upsamplings was kept at five steps for all the models to achieve spatially consistent resolutions at each level across all the NOs. The subcooled pool boiling dataset", + "bbox": [ + 114, + 746, + 882, + 917 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 487, + 916, + 510, + 929 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "consists of 10 different simulation trajectories, two of which were used for testing. Each simulation trajectory consists of 201 time-steps. However, similar to [48], the first 30 unsteady time-steps were not included in the training and testing of the models. Fig. 3 demonstrates the variation of RMSE, BRMSE, bubble RMSE, and $\\mathrm{Max}_{\\mathrm{mean}}$ metrics with NO size for results obtained from NO and HFS-enhanced NO. As expected, both NO and HFS-enhanced NO exhibit error-decreasing trends with the number of parameters. However, the HFS-enhanced NO always yields lower errors compared to NO in all metrics and irrespective of the NO size. 
The effect of HFS is more pronounced in the bubble RMSE due to larger high-frequency content at the bubble interface and within the bubbles. For example, HFS yields $8\\%$ improvement in RMSE for the 16 million NO. This improvement is $16\\%$ for the bubble RMSE metric. On average, HFS decreases the RMSE and bubble RMSE by $12.4\\%$ and $18.2\\%$ , respectively.", + "bbox": [ + 112, + 86, + 882, + 277 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/404739d4e1a7bce81a61b07954c246561d4c0b5d4e5df4224d919983d7e0719e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 119, + 290, + 500, + 481 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/ce9d9915b51cc6b01cef8d4fd9ad5004076ee8c64d31f8dc18cd381d62f2850c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 505, + 290, + 875, + 481 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/8b521be2ad6db402353a683c8e72a14a74947a986a7ba99f5da56787754d3d44.jpg", + "image_caption": [ + "Neural Operator Size (Millions)" + ], + "image_footnote": [], + "bbox": [ + 127, + 482, + 500, + 670 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/5bd552819042709e8514a471c06051655dec81d3d173212e98e78c9851f40c04.jpg", + "image_caption": [ + "Neural Operator Size (Millions)", + "Figure 3: Temperature prediction errors of NO and HFS-enhanced NO varying with NO size. (a) Root mean square error (RMSE), (b) Boundary RMSE (BRMSE), (c) Bubble RMSE, (d) Mean maximum error. All the errors are calculated over the 5 time-step temperature predictions. The legends in (a) are applicable to (b - d) as well. All the results are based on test dataset in subcooled pool boiling." + ], + "image_footnote": [], + "bbox": [ + 515, + 482, + 875, + 671 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "3.2. 
Spectral analysis of HFS-enhanced NO", + "text_level": 1, + "bbox": [ + 115, + 777, + 480, + 795 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "HFS reduces the over-smoothing effect, hence, the intricate features of vortices induced by condensation trails in subcooled boiling are better resolved. Moreover, HFS results in better alignment of the energy spectra to the ground truth signal, especially at high wave numbers attributed to the high frequency features. Fig. 4 depicts the enhancements obtained by adding HFS modules to NO. The prediction results of HFS-enhanced NO are improved compared to NO for all time-steps. However, the enhancement is more pronounced at later time-steps, where the NO predictions are significantly over-smoothed.", + "bbox": [ + 112, + 797, + 882, + 917 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 487, + 916, + 510, + 929 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/629dde0d1af87ae717d9f62eb3415d917ebe53718ff14baae00b6da5edc7a8fa.jpg", + "image_caption": [ + "Figure 4: Subcooled pool boiling transient temperature prediction. (a) Ground truth (GT) temperatures for 5 consecutive time steps (from left to right) $(\\Delta t = 8$ ms). (b) NO prediction results. (c) HFS-enhanced NO prediction results. (d) The corresponding energy spectra $(p(k))$ for each time step. For better visualization, the subplots in (d) show the energy spectra only for the high wavenumbers. The legends in first plot are applicable to other plots as well. All the results are based on a $\\sim 3.5$ M parameter NO." + ], + "image_footnote": [], + "bbox": [ + 127, + 89, + 873, + 539 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "The average energy for the high-frequency component of the latent features (e.g., excluding the first $12.5\\%$ frequencies at the full resolution) is generally higher for HFS-enhanced NO. 
This behavior is specifically seen in all the encoder layers and the last three layers of the decoder for a five-layer decoder and five-layer encoder (e.g., five downsampling and five upsampling steps). The first two layers after the bottleneck are at very low spatial resolutions and may not represent any useful spectral information. However, more high-frequency component is generated in the later stages of the decoder that are closer to the output. The NO decoder mean feature maps at each layer show low-contrast regions at both left and right side of the maps, starting from layer two to the end. However, these regions are diminished when HFS is used, showing that a more diverse set of features is generated in the decoder (see Appendix E). However, the same behavior does not necessarily exist for the encoder mean latent features, suggesting that the mean feature map may not be a good representative of the high-frequency component. Instead, analysis of individual feature maps appears to be a more appropriate approach in this case.", + "bbox": [ + 114, + 643, + 884, + 885 + ], + "page_idx": 11 + }, + { + "type": "footer", + "text": "Individual latent space features exhibit improved preservation and propagation of high-12", + "bbox": [ + 144, + 900, + 878, + 929 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "frequency components when HFS is integrated in the NO structure. Fig. 5 depicts examples of latent features from the first layer of the encoder and the last layer of decoder. These layers are specifically chosen due to their proximity to the input and output layers, making the visualizations more understandable. When comparing similar latent feature maps, HFS reduces the excessive smoothing and increases the high-frequency component within the features in the latent space. The energy spectra plots in Fig. 5 demonstrate similar trends for both NO and HFS-enhanced NO with the latter having larger spectral energy at the mid and high wave numbers (e.g. 
$k > 20$ ). For a more robust spectral analysis of latent features, we compared the individual latent features in the NO and HFS-enhanced NO with both $\\sim 3.5$ and $\\sim 16$ million parameter models. The HFS-enhanced NO decreases the over-smoothing in latent features when compared with a similar feature map from NO. The normalized energy spectra of these latent features exhibit larger high-frequency component with HFS-enhanced NO. This is evident in Fig. 5(b, d, f, and h), where the HFS-enhanced NO curves surpass the NO curves after a certain wave number.", + "bbox": [ + 117, + 85, + 880, + 324 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Comparison of the ratio of high-frequency component energy when calculated separately for each latent feature and then averaged over all the features at each layer in the encoder also shows consistently higher values when HFS is used. The same trend is also observed in the last three layers of the decoder. These results are shown in Fig. 5i and 5j. We observed similar trends for other samples where the ratio of high-frequency component energy to total energy in the latent space is higher when HFS is integrated with the NO. However, this advantage may not be noticeable using the mean latent feature visualization at each layer. Note that for the analysis presented in Fig. 5i and 5j, we progressively increased the threshold (from $12.5\\%$ to $50\\%$ ) for separating the low and high-frequency bands as the spatial dimension in the latent space decreases. This result is based on a random sample from the test dataset. Similar results were obtained with other samples. 
It should be noted that a one-to-one comparison of similar feature maps may provide a more reliable assessment, as not all feature maps carry equally significant information and some might be irrelevant for our analysis.", + "bbox": [ + 117, + 343, + 880, + 565 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "In general, the HFS-enhanced NO contains more high-frequency component in the latent space, which can help with the propagation of high-frequency information to the output, helping with the better capture of high-frequency features. The enhancement in high-frequency component is achieved without any degradation in the low-frequency components. Therefore, both field errors such as RMSE, and the spectral errors are improved (see Appendix C).", + "bbox": [ + 117, + 583, + 880, + 669 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 489, + 916, + 509, + 928 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/bd897e21140790da8c2beed7842475c14e96f323cdd2ec392c15e182b1ce4a42.jpg", + "image_caption": [ + "(a)", + "NO" + ], + "image_footnote": [], + "bbox": [ + 147, + 91, + 275, + 192 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/26821bc824c2e55cf6de7c68946bbee89a3839676996ae1a9d5b3a48a7ec2804.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 147, + 192, + 273, + 288 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/05fba3cc6e4ff3f30a68095ffe33390bd08dc560864db76ec9a2afb800f5746a.jpg", + "image_caption": [ + "NO + HFS" + ], + "image_footnote": [], + "bbox": [ + 297, + 99, + 426, + 192 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/f60206ede39643fc5c044f9f4610b8380888747ad4c4e5778e0eb56aee2fce8c.jpg", + "image_caption": [ + "(b)", + "(d)" + ], + "image_footnote": [], + "bbox": [ + 453, + 99, + 598, + 192 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": 
"images/32ecc7c14abb8460e4d6c4033cf8cd415b55ec2ac589b266e28f285a8d99e10a.jpg", + "image_caption": [ + "(e)", + "(g)" + ], + "image_footnote": [], + "bbox": [ + 146, + 291, + 275, + 388 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/675a8f9336f44185e5a4a8f988d3a39818f20adc98510f75efb9bd2996a69884.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 295, + 197, + 426, + 288 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/e7f016f46bca5309b8f0172ae1f4e4919c83884da81073d94df649bce739af4e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 428, + 197, + 598, + 288 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/7ad97d2813f0aaf6bea9c40b37d06895f3bb84041a8c318c716f437749e17e13.jpg", + "image_caption": [ + "(i)" + ], + "image_footnote": [], + "bbox": [ + 638, + 96, + 875, + 282 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/3c80f7d8bae04c690074b0bd039bbc2ccb94c86c53fb41a6d536ea842e012cfb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 147, + 387, + 275, + 485 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/c56982feee0b6b0776494c62e84144cadf9d1c1c5ea6d8927a0b1794c6c97899.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 297, + 296, + 426, + 388 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/b862fe7585e9b671c2dd19d6695b0f98522b4600cbfca9788e317af5b5c1ff45.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 297, + 394, + 426, + 485 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/f761d30cc71f1613eca2ac1939256b2e144c0b60cd211db5ea28ffe6a928d863.jpg", + "image_caption": [ + "(f)" + ], + "image_footnote": [], + "bbox": [ + 428, + 291, + 598, + 384 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/5f217779f4ebeb8edefda5e739dab23bb283949bec47b55a0e34af427e3e5413.jpg", + "image_caption": [ + "(h)", + "Figure 5: 
Latent space features in HFS-enhanced NO. (a, b) Example of latent space feature in the first layer of encoder and the corresponding normalized energy spectra $(p(k))$ in the $\\sim 3.5$ million parameter models. (c, d) Example of latent feature in the last layer of decoder and the corresponding normalized energy spectra for the model with $\\sim 3.5$ million parameters. (e) Example of latent feature in the first layer of encoder and the corresponding normalized energy spectra in the $\\sim 16$ million parameter models. (g) Example of latent feature in the last layer of decoder and the corresponding normalized energy spectra in the $\\sim 16$ million parameter models. (i-j) Average ratio of high-frequency energy to total energy at each layer in encoder (i) and decoder (j). Note that the low-frequency cutoff is set to the first $12.5\\%$ , $18.75\\%$ , $25\\%$ , $37.5\\%$ , and $50\\%$ of the wavenumbers, from highest to lowest spatial resolutions (384 to 24 pixels), respectively" + ], + "image_footnote": [], + "bbox": [ + 428, + 387, + 598, + 488 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/8d4ce1826ae5537dfd8426399bd05bca2a0010646e57a820e373e7035e1a5285.jpg", + "image_caption": [ + "(i)" + ], + "image_footnote": [], + "bbox": [ + 638, + 285, + 875, + 482 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Given the advantage of HFS in the mitigation of spectral bias towards low-frequency components, it is natural to calculate the prediction errors at different wavenumbers. Following the terminology proposed in [48], we divided the frequencies to three components including only low-frequency component (low $F$ ), mid-frequency component (mid $F$ ), and high-frequency component (high $F$ ). For all the NOs with varying number of parameters, the errors in the mid $F$ and high $F$ components are always lower for HFS-enhanced NO. 
The RMSE for the low $F$ component is lower for HFS-enhanced NO with one exception in the NO with $\\sim 3.5$ million parameters. We attribute this to the larger enhancement observed in mid $F$ and high $F$ of the 3.5 million parameter HFS-enhanced NO, causing the operator showing larger error in the low $F$ as it fails to reduce the errors in all three components simultaneously. Visualization of each frequency component and the average spectral errors in each component are shown in Fig. 6.", + "bbox": [ + 114, + 649, + 880, + 835 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 487, + 916, + 510, + 928 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/62f709bfa82bfda8d899c4456f8d18cb4aaae24a3a3354dc86d148ef45498e67.jpg", + "image_caption": [ + "Figure 6: Impact of HFS on spectral errors at different frequency bands (a-b) Examples showing the input, low, mid, and high-frequency contents of the input. (c-e) Spectral error $(F$ . Error) of low, mid, and high-frequency bands over the test dataset. For these results, the low-frequency cutoff is set to the first $2\\%$ of the frequencies. The mid frequency band includes the first $6.2\\%$ of the frequencies excluding the first $2\\%$ . The high-frequency band includes the last $93.8\\%$ of the frequencies." + ], + "image_footnote": [], + "bbox": [ + 131, + 93, + 880, + 524 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "3.3. HFS parameter history", + "text_level": 1, + "bbox": [ + 115, + 624, + 349, + 640 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "The DC and HFC of the signals are scaled using two learnable parameters, $\\lambda_{DC} \\in \\mathbb{R}^{1 \\times 1 \\times C}$ and $\\lambda_{HFC} \\in \\mathbb{R}^{1 \\times 1 \\times C}$ . These parameters remain consistent across all patches in each latent space feature map, and also across all batches of the dataset. 
Therefore, the parameters are optimized based on all the samples in the training dataset. However, they are allowed to vary freely across the feature channels at each layer. This design enables the model to adaptively scale each channel based on its content. For instance, a feature channel with a larger high-frequency component can be scaled differently than a smoother feature channel. This flexibility enhances the effectiveness of HFS while minimizing the computational costs and reducing the optimization burden by maintaining fixed parameters across patches and samples. To better understand the learning process of $\\lambda_{DC}$ and $\\lambda_{HFC}$ , the histories of these parameters during the training phase in each of the encoder and decoder layers are shown in Fig. 7. The results in Fig. 7 show the mean $\\lambda_{DC}$ and $\\lambda_{HFC}$ across all latent features at each layer. The mean $\\lambda_{HFC}$ is always larger than the mean $\\lambda_{DC}$ , demonstrating that the model is learning to scale HFC with larger weights, enhancing the representation of the HFC. Also, the optimized mean $\\lambda_{HFC}$ is higher in the deeper layers of the encoder. However, no such behavior is observed in the decoder. Another interesting observation is that the abrupt change in the slope of the $\\lambda_{DC}$ history curves", + "bbox": [ + 114, + 642, + 882, + 917 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 489, + 916, + 510, + 929 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "( $\\sim$ iteration $160 \\times 10^{3}$ ) aligns well with the iteration when overfitting starts. After this iteration, the error over training dataset keeps decreasing but the error over validation dataset increases, leading to larger generalization gap. The dashed lines in Fig. 
7 specify the iteration at which the validation dataset error is minimum.", + "bbox": [ + 112, + 85, + 882, + 156 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "It should be noted that $\\lambda_{DC}$ and $\\lambda_{HFC}$ are both free of any constraints and are automatically learned during the model optimization. However, comparing the final values of these parameters align well with the heuristic viewpoint proposed in our work. The larger values of $\\lambda_{HFC}$ imply that the HFC of the signals are better preserved and propagated through layers with HFS. This could explain why the HFS-enhanced NO results resolve high-frequency features better, and why the spectral bias of the NO is mitigated.", + "bbox": [ + 112, + 171, + 884, + 277 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/836bc2f95d5c77926d61af7c14d4b1113300a5ed667f5ba49106606f9e5c07d1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 124, + 294, + 499, + 489 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/dda5aca73cc52ff6e026a688772971bb0db28707f5dbb8771a941a8018016d77.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 505, + 294, + 875, + 489 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/eac42b74e203c3984876f446ef93ec5dd5fca48a451c96c024fa9dd7291c3fc6.jpg", + "image_caption": [ + "Figure 7: $\\lambda_{DC}$ and $\\lambda_{HFC}$ histories during training phase of the HFS-enhanced NO. (a, b) $\\lambda_{DC}$ and $\\lambda_{HFC}$ training history in all 5 layers of encoder. Note that layer 1 and layer 5 are defined as layers at highest and lowest spatial resolution, respectively, in the encoder. (c, d) $\\lambda_{DC}$ and $\\lambda_{HFC}$ training history in all 5 layers of decoder. Note that layer 1 and layer 5 are defined as layers at lowest and highest spatial resolution, respectively, in the decoder, which is the opposite terminology used in encoder. 
The dashed lines specify the iteration from which overfitting on the training dataset starts. The results are based on the training of a model with $\\sim 1.7$ million parameters and $\\lambda_{DC}$ and $\\lambda_{HFC}$ were initialized at 0.85 and 1.15, respectively." + ], + "image_footnote": [], + "bbox": [ + 124, + 492, + 497, + 697 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/1a0ef49c31cd391865df11f05db9cb02c6029f97bb662e0074c4d4064dacdf5c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 505, + 492, + 877, + 696 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "3.4. Kolmogorov flow", + "text_level": 1, + "bbox": [ + 115, + 829, + 302, + 847 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "To evaluate the effectiveness of HFS in mitigating spectral bias in a more chaotic system, we applied it on the prediction of a standard benchmark, namely the 2D Kolmogorov flow problem. This problem is governed by the unsteady and incompressible Navier-Stokes equations for a viscous fluid subject to a forcing term. The vorticity form of the problem is defined in", + "bbox": [ + 112, + 848, + 882, + 917 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 487, + 917, + 510, + 929 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Appendix H. We generated the dataset [63] using a publicly available pseudo-spectral solver [6]. The dataset consisted of 1000 samples with $80\\%$ , $10\\%$ , and $10\\%$ of them being used for training, validation, and testing respectively. We trained the NO with and without HFS to learn the mapping $\\omega(x,y,t)\\big|_{t\\in[0,10]} \\to \\omega(x,y,t)\\big|_{t\\in[10,t_{final}]}$ , where $\\omega$ is the vorticity. Here, we used $t_{final} = 12.5$ s, and a NO with $\\sim 1.7$ million parameters as the benchmark. 
We optimized the hyperparameters based on the NO performance without HFS and then used the same hyperparameters for training the NO with HFS. This ensured that any improvement achieved with HFS was solely attributed to its effect and not simply due to differences in optimization strategies or hyperparameters. Although not specifically designed for turbulent problems, the HFS-enhanced NO demonstrated improvements over the NO for the 2D Kolmogorov problem, reducing the relative error from $5.3\\%$ to $4.7\\%$ . Comparison of the energy spectra of the HFS-enhanced NO predictions also demonstrated better alignment with the ground truth solutions at high wavenumbers. The prediction results for snapshots chosen through random sampling from the test dataset are shown in Fig. 8. High-frequency features are more accurately captured and the energy spectra alignment at high wavenumbers is enhanced with the HFS-enhanced NO. We should acknowledge that HFS was effective for this problem only when the NO already provided reasonably accurate predictions. If the NO produced extremely over-smoothed predictions, integrating HFS offered little to no improvement. More detailed results showing the temporal predictions are shown in Appendix H.", + "bbox": [ + 117, + 85, + 884, + 413 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "The improvements in predicting Kolmogorov flow are less pronounced compared to the two-phase flow problem. This is due to the different underlying structures of the solution maps. The HFS approach operates by decomposing the feature maps into low-frequency and high-frequency components through observing the patches as different signals. This approach is most effective for the data with localized features, making the DC and HFC of the signals significantly different. For example, this is true for the subcooled pool boiling dataset with localized features at the bubble interface and condensation trails. 
For the data with similar features across all regions, the distinction between DC and HFC diminishes, thus reducing the impact of HFS.", + "bbox": [ + 117, + 429, + 884, + 583 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 489, + 916, + 509, + 928 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/90f68fd7a6962a77f49e5e72d7250151fc08ad8098feb7692cd4ae7c3d8d8efd.jpg", + "image_caption": [ + "Figure 8: HFS-enhanced Kolmogorov flow predictions. (a-c) denote different samples chosen randomly from the test dataset. Each example shows the ground truth (GT), NO and HFS-enhanced NO predictions along with the energy spectra $(p(k))$ for each prediction." + ], + "image_footnote": [], + "bbox": [ + 129, + 87, + 877, + 615 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "3.5. Diffusion Model Results", + "text_level": 1, + "bbox": [ + 115, + 690, + 356, + 707 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "We investigated further mitigation of spectral bias using the score-based diffusion model (DM) with HFS-enhanced NO predictions as the prior. Specifically, we first conducted a systematic study to investigate the effect of NO prediction accuracy, obtained by varying the number of parameters in the NO, on the diffusion model performance. Second, we demonstrated that using HFS-enhanced NO can further help the diffusion model to match the correct energy spectra of the solutions without degrading the mean prediction errors. Since the NO predictions are used as priors to diffusion model, the accuracy of diffusion model predictions is strongly influenced by the reliability of these priors. 
For example, if the prior information is significantly erroneous or over-smoothed, then the diffusion model struggles to accurately recover the missing frequencies without compromising the accuracy of the mean predictions.", + "bbox": [ + 114, + 711, + 882, + 883 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Fig. 9 shows the subcooled pool boiling prediction results of DM conditioned on NO and", + "bbox": [ + 144, + 898, + 880, + 916 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 489, + 916, + 510, + 929 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "HFS-enhanced NO predictions. Other prediction examples with DM integrated with NO and HFS-enhanced NO are visualized in Appendix F. When the NO predictions have significant errors, the DM can barely mitigate those errors. However, when HFS is integrated with the NO, the significant errors at large structures are reduced, and high-frequency components of the solutions are captured more accurately compared to $\\mathrm{NO} + \\mathrm{DM}$ predictions. In addition, when the DM is integrated with the HFS-enhanced NO predictions, the DM is able to more accurately reconstruct intricate features that are already enhanced through more accurate predictions provided by HFS-enhanced NO. Therefore, less over-smoothing is observed in the $\\mathrm{NO} + \\mathrm{HFS} + \\mathrm{DM}$ predictions and spectral bias is further reduced. It can be seen that both HFS and DM are helping with the capture of high-frequency features. DM cannot fix significant errors caused by NO predictions at large scale features (e.g., bubble interfaces). However, HFS reduces the errors around large scale features in addition to enhancing the smaller scale features. When DM is integrated with HFS-enhanced NO, it further enhances the small scale features. The quantitative metrics are shown in Fig. 10. 
It should be noted that the models are trained with a different set of hyperparameters for the results shown in Fig. 10 compared to the previous results (Fig. 3). However, HFS enhanced the prediction results of NO, irrespective of hyperparameters (either optimal or non-optimal hyperparameters), as long as the same hyperparameters are used for training both NO and HFS-enhanced NO.", + "bbox": [ + 117, + 85, + 884, + 395 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 487, + 916, + 510, + 929 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/f77d7cde018d6c74bb0d627654ab043227cdb2f775c121acb59c5727d34539a7.jpg", + "image_caption": [ + "(a)" + ], + "image_footnote": [], + "bbox": [ + 147, + 89, + 277, + 192 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/14d2c43c9f26237111d97d362572890aad6cd1ac9400cb8eb6973553a8a9c8d1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 285, + 89, + 418, + 192 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/824288cda52c2891f4d10fcc048556f5025e12a9a08916a114c2879b54a5a4e0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 426, + 89, + 557, + 192 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/d434f1115c71df8fc03f6ff24ee680bd8baf845b45ea59fc5cacb1faa46bfbf1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 566, + 89, + 699, + 192 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/cfcb1f30c856f0902c80a6ca75eb09c6526ab9c4fc6d870998c725e3266546de.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 707, + 89, + 863, + 192 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/4b5053f22f5d1045d4bc1f94ad7a3d9e743281a60a70f0fe826b8afd6b118513.jpg", + "image_caption": [ + "(b)" + ], + "image_footnote": [], + "bbox": [ + 146, + 195, + 277, + 325 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": 
"images/4077f969a3d5c28f84a171a7ac2a8f647114a1140ff1cab20e790c3e6be946c6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 287, + 193, + 416, + 325 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/bfd8e918449ad4e2c5b9c9505fa08483327fc17c132d390ea6522a9ebfd8e7c7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 426, + 193, + 557, + 325 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/2e662c6924e4dc916926501fd33cf6fb7658ebfdf77a643b1876c42b62c9c1aa.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 566, + 193, + 697, + 325 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/e9182f9b9d24aea5d10f9e8d93f5432f03b021167555925e5f3ae7c3a8199e16.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 707, + 195, + 838, + 325 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/b5e173640877cb0cef234d0b4ae167124b4fc270eef3d2f99ffc99eb772ed691.jpg", + "image_caption": [ + "(c)", + "(d)" + ], + "image_footnote": [], + "bbox": [ + 147, + 340, + 277, + 432 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/9dd2b8fb0cc2df44a4b3dffb169d3d12295ff795a4bc3448676f90a768ee5d25.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 289, + 341, + 416, + 432 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/21d6dea1825b413f89957217e3a9148548072fa1907281353e79782273e0fc08.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 428, + 341, + 557, + 432 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/5724b436a86300be2784fa68ea7aec5bd1eb7e60f905356e9a61d6ce2dea4070.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 568, + 341, + 695, + 432 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/7acbf978c26e0e5745e06313089bfbd821791e207c3cd601e0ff50804ab6dc4b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 709, + 
340, + 863, + 432 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/f0284ad1958098bbf58a3a6ea0491e38ff5e9e5a2a1d05a08a833dda237df2d2.jpg", + "image_caption": [ + "Figure 9: Visualization of the prediction by DM integrated with NO and HFS-enhanced NO. (a) Example showing ground truth (GT) solution and predictions by NO, NO + DM, NO + HFS, and NO + HFS + DM. (b) Zoomed-in visualization of (a) focusing on the high-frequency contents. (c) Predictions of another randomly selected sample. (d) Zoomed-in visualization of (c) focusing on high-frequency contents." + ], + "image_footnote": [], + "bbox": [ + 147, + 435, + 277, + 539 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/8f424c4056077457e9ba4f82ff71b2dceebe8e63582d2e9e2910553e7c5a91a2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 290, + 435, + 416, + 539 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/ba25d5e98b4b75fd94526f7be742d92551619db9606fb8a8bb8ed6f2f35060f3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 428, + 435, + 557, + 539 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/6acce8a74d6789197fed4cf6e66d8f7ce554c25121db815c611f22a624e7aa8c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 566, + 435, + 695, + 539 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/9baf9f6b18eb233208ff948902b5fec1c3ee7677059013bbd5eea3505b509fce.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 707, + 435, + 836, + 539 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "The results presented in Fig. 9 and Fig. 10 illustrate the following key points:", + "bbox": [ + 144, + 631, + 773, + 648 + ], + "page_idx": 19 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- HFS reduces the prediction errors in both physical and spectral domains, irrespective of NO size. 
On average, the relative errors (e.g., RMSE) and energy spectrum errors $(\mathcal{E}_F)$ (see Appendix B) are reduced by $23.5\\%$ and $15.2\\%$ , respectively, with HFS-enhanced NOs.", + "- Generally, DM does not change the prediction field errors (Fig. 10a). However, DM reduces the energy spectrum error, showing better energy spectra alignment with the correct solutions. On average, $\mathrm{NO} + \mathrm{DM}$ has $27.8\\%$ lower relative $\varepsilon_{F}$ compared to NO. The only exception is the NO with 16 million parameters. On average, $\mathrm{NO} + \mathrm{HFS} + \mathrm{DM}$ has $23.2\\%$ lower relative $\varepsilon_{F}$ compared to $\mathrm{NO} + \mathrm{HFS}$ (Fig. 10b).", + "- HFS reduces the energy spectrum errors at all different frequency bands $(\mathcal{E}_{F_{\mathrm{low}}}, \mathcal{E}_{F_{\mathrm{mid}}},$ and $\mathcal{E}_{F_{\mathrm{high}}})$ , containing only the low, mid, and high-frequency components of the solutions, respectively. We refer to Fig. 6 for visualization of solutions at these frequency bands. However, DM does not enhance the results at $\mathcal{E}_{F_{\mathrm{low}}}$ and $\mathcal{E}_{F_{\mathrm{mid}}}$ when integrated with HFS-enhanced NO. Indeed, the results at these two frequency bands are sometimes the best" + ], + "bbox": [ + 144, + 656, + 880, + 916 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 485, + 917, + 510, + 929 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "for HFS-enhanced NO without DM, depending on the NO size. However, the advantage of DM is taken into action at $\mathcal{E}_{F_{\mathrm{high}}}$ (Fig. 10e) with improved results compared to NO and HFS-enhanced NO. 
This explains the role of DM in further mitigation of spectral bias.", + "bbox": [ + 163, + 85, + 882, + 137 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/adcc8ac420e67795e1500fd44cec9e0c040709c3a1c3f3f3c6afdf73a317f59a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 151, + 170, + 478, + 391 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/c61ee8c84c709ba417474b42e09eec116bd2299f768602183e70caadd447b32c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 487, + 170, + 821, + 390 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/829637f19e543333f93c6632ec44ddd9bda78de8bfca3bc1bff0bc0e7512c61c.jpg", + "image_caption": [ + "Figure 10: Diffusion model prediction results. (a) Relative errors (Rel. Error) of prediction by NO, NO + DM, NO + HFS, and NO + HFS + DM. (b) Relative energy spectrum errors (Rel. $\\mathcal{E}_F$ ). (c) Relative energy spectrum errors in the low frequency band (Rel. $\\mathcal{E}_{F_{\\mathrm{low}}}$ ). (d) Relative energy spectrum errors in the mid frequency band (Rel. $\\mathcal{E}_{F_{\\mathrm{mid}}}$ ). (e) Relative energy spectrum errors in the high-frequency band (Rel. $\\mathcal{E}_{F_{\\mathrm{high}}}$ ). Low, mid, and high-frequency thresholds are set to the first $2\\%$ , the first $6.2\\%$ excluding the first $2\\%$ , and the last $93.8\\%$ of the wavenumbers." 
+ ], + "image_footnote": [], + "bbox": [ + 119, + 395, + 366, + 558 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/24e9419cdc031bbe8ee845630b852d0f762ff0f707d7ec8c2a0be986d2140e2a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 366, + 395, + 610, + 558 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/fb0cd2d796a34b11d747483d3064e7f18b5a3e9c93c722d66bc954154eeb9ef0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 611, + 397, + 863, + 558 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "4. Discussion", + "text_level": 1, + "bbox": [ + 115, + 669, + 236, + 684 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "HFS works by preserving more high-frequency components in the latent space after each convolutional layer in the NO. The flexibility of learning to scale the DC and HFC of the signals allows the model to enhance the predictions in mid and high-frequency contents without any degradation in the low frequency content of the solutions. As a result, both field metrics such as RMSE and bubble RMSE, and the spectral errors are reduced in two-phase flow predictions. The enhancements observed in HFS-enhanced NO prediction results are more pronounced in areas with larger high-frequency features such as within the bubbles and at condensation trails seen in subcooled boiling solutions. This emphasizes the role of HFS in spectral bias mitigation, which helps with better capture of intricate features and sharp gradients. 
Similarly, both the relative errors and spectral errors are reduced, and high-frequency features are enhanced in the Kolmogorov flow predictions.", + "bbox": [ + 114, + 697, + 882, + 885 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 485, + 916, + 507, + 929 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "The scaling parameters $\\lambda_{DC}$ and $\\lambda_{HFC}$ in the HFS method are optimized during the network training. Notably, the optimized values for $\\lambda_{HFC}$ are consistently larger than $\\lambda_{DC}$ , indicating that the model is trying to pay more attention to the HFC in the latent space. This biased attention helps mitigating the persistent challenge of spectral bias in the NO. To reduce the optimization burden, the scaling parameters were consistent across all the patches but were allowed to vary across different feature maps. This flexibility enables the model to automatically adjust the scaling of the HFC of the feature map depending on its content and significance. The learned $\\lambda_{DC}$ and $\\lambda_{HFC}$ for each of the latent feature maps in the HFS-enhanced NO with $\\sim 1.7$ million parameters are shown in Appendix G. In our work, all the scaling parameters were initialized at one and they were optimized using gradient descent with the same learning rate used for training the NO ( $\\sim 8\\times 10^{-4}$ ). It would be interesting to explore faster convergence by using different initializations and optimization frameworks for the scaling parameters in future work.", + "bbox": [ + 114, + 86, + 882, + 292 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Another method for spectral bias mitigation is through diffusion models conditioned on NO predictions as prior information. However, using diffusion models has two drawbacks. First, the diffusion model predictions are strongly dependent on the prior information. 
Therefore, it can only reduce over-smoothing from reasonably accurate NO predictions. If the NO predictions are not sufficiently accurate, then the diffusion model cannot perform well. Second, training diffusion models requires extensive computational cost as each training iteration involves $n(= 32)$ auto-regressive denoising steps to estimate the state of the solution at each time-step. In our experiments, the diffusion model training cost is approximately 2 to 4 times higher than the NO training itself. On the other hand, the HFS method requires only a small additional computational cost and negligible additional memory for training along the NO. The number of parameters added by HFS modules varies depending on the underlying NO size. However, it is generally less than $0.1\\%$ of the number of parameters in the NO. In our experimentation, the HFS module parameters vary between $0.018\\%$ to $0.045\\%$ of the number of parameters in NO, depending on the underlying NO size. Based on our experiments, the computational time for each training iteration is within $10\\%$ to $30\\%$ higher, depending on the NO size and the computational resource.", + "bbox": [ + 114, + 309, + 884, + 583 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "In addition to the enhancements observed in field metrics such as RMSE and bubble RMSE, our investigation revealed that HFS also helps with reducing the spectral errors. We demonstrated that matching the correct energy spectra at mid and high wavenumbers is directly correlated with capturing the complex features in the solutions. We would like to emphasize the importance of considering both field errors and correct energy spectra alignment in scientific machine learning problems. The field analysis demonstrates the average performance of the predictions. 
However, the energy spectra analysis reveals useful information about the prediction accuracies at different frequencies and thereby explaining the possible spectral bias and loss of useful information near interfaces, vortices, and sharp gradient areas in two-phase flow and turbulence problems. It should be noted that the predictions with enhanced energy spectra alignment is beneficial when accompanied by improved mean field predictions (e.g., RMSE) and HFS-enhanced NO results satisfy this requirement.", + "bbox": [ + 114, + 600, + 882, + 806 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "When aiming to scale the different frequency bands of the signals, a logical alternative would be to perform the scaling directly in the frequency domain rather than the physical domain. As a comparison, we implemented and compared scaling in the frequency domain with our proposed method (HFS). In this regard, let $\\mathbf{X}^{(l)}\\in \\mathbb{R}^{H\\times W\\times C}$ be the output of $l$ -th convolutional layer, then the feature maps can be transferred to frequency domain using a 2D Fourier", + "bbox": [ + 114, + 824, + 880, + 910 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 485, + 916, + 510, + 929 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "transform $(\\mathcal{F})$", + "bbox": [ + 115, + 86, + 240, + 102 + ], + "page_idx": 22 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {\\mathbf {X}} ^ {(l)} (:,:, c) = \\mathcal {F} \\left(\\mathbf {X} ^ {(l)} (:,: c)\\right), \\quad c = 1, 2, \\dots , C, \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 309, + 117, + 880, + 140 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "where $\\hat{\\mathbf{X}}^{(l)}\\in \\mathbb{C}^{H\\times W\\times C}$ includes the Fourier-transformed feature maps. 
The low frequency and high-frequency components of feature maps can be generated by truncating $\hat{\mathbf{X}}^{(l)}$ at a frequency threshold of $\tau$ . We name these components as $\hat{\mathbf{X}}_{\mathrm{low}}^{(l)}$ and $\hat{\mathbf{X}}_{\mathrm{high}}^{(l)}$ . Each Fourier-transformed feature will be scaled separately:", + "bbox": [ + 114, + 147, + 880, + 217 + ], + "page_idx": 22 + }, + { + "type": "equation", + "text": "\n$$\n\hat {\mathbf {X}} _ {\text {s c a l e d}} ^ {(l)} = \lambda_ {\text {l o w}} \odot \hat {\mathbf {X}} _ {\text {l o w}} ^ {(l)} + \lambda_ {\text {h i g h}} \odot \hat {\mathbf {X}} _ {\text {h i g h}} ^ {(l)}, \tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 351, + 228, + 880, + 252 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "where $\lambda_{\mathrm{low}} \in \mathbb{R}^{1 \times 1 \times C}$ and $\lambda_{\mathrm{high}} \in \mathbb{R}^{1 \times 1 \times C}$ are learnable parameters that are optimized simultaneously with the network training. Finally, the scaled feature map is reconstructed using the inverse Fourier transform:", + "bbox": [ + 114, + 263, + 880, + 315 + ], + "page_idx": 22 + }, + { + "type": "equation", + "text": "\n$$\n\mathbf {X} _ {\text {s c a l e d}} ^ {(l)} (:, :, c) = \mathcal {F} ^ {- 1} \left(\hat {\mathbf {X}} _ {\text {s c a l e d}} ^ {(l)} (:, :, c)\right), \quad c = 1, 2, \dots , C. \tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 280, + 330, + 880, + 353 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Our preliminary results demonstrate that scaling in the frequency domain also improves the two-phase flow prediction results, thus helping with the spectral bias mitigation. However, the enhancements are lower than the proposed HFS method, while the computational cost is significantly higher. This is due to the Fourier and Fourier inverse transforms required in this method. 
Consequently, we did not proceed with the second method. However, it may worth investigating this method in future work. There is one hyperparameter for each of these scaling methods. For the proposed HFS method, the patch size is the hyperparameter, and for scaling in the frequency domain the truncation frequency is the hyperparameter. A comparison of the prediction errors and computation costs of the two methods with a NO with $\\sim 1.7$ million parameters is shown in Table 3.", + "bbox": [ + 114, + 357, + 880, + 529 + ], + "page_idx": 22 + }, + { + "type": "table", + "img_path": "images/54e291267e3c4266a59c83d920c861ff4f608dfa734ee3f6ab5844d262da96c2.jpg", + "table_caption": [ + "Table 3: Comparison of the proposed HFS (Method 1) and scaling in frequency domain (Method 2)." + ], + "table_footnote": [], + "table_body": "
NONO + Method 1NO + Method 2
Rel. Error0.0440.0330.034
RMSE0.0430.0330.034
BRMSE0.1160.0720.076
Maxmean1.140.890.92
Parameters [Millions]1.7111.7121.712
Iteration time (s)31.434.552.6
", + "bbox": [ + 203, + 583, + 794, + 712 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "4.1. Effectiveness Criteria", + "text_level": 1, + "bbox": [ + 115, + 731, + 337, + 747 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "The HFS approach operates by spatially decomposing the features into several patches and scaling the common DC and individual HFC of the patches separately. Our investigation showed that HFS is mostly effective on datasets with localized features such as those in subcooled pool boiling dataset. For extremely chaotic systems with globally small-scale features, the DC and HFC cannot be directly separated from spatial patching as all the patches may contain similar frequency components. To better quantify this limitation, we directly applied HFS to the samples from three different case studies with inherently different features. The samples were chosen from the subcooled pool boiling, Kolmogorov flow, and a turbulent jet problem. We found that HFS is effective for the first two problems (with the effect being less", + "bbox": [ + 114, + 751, + 880, + 906 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 485, + 916, + 510, + 929 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "pronounced on the later one), but is not effective for the third case.", + "bbox": [ + 115, + 86, + 655, + 102 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "The turbulent jet data is from the experimental Schlieren velocimetry of turbulent helium jet in air. More details about the dataset is available in the previous work [64]. We directly used the publicly available Schlieren velocimetry dataset [64] in the raw .tif format. All the regions in the turbulent jet have similar small-scale features (see Fig. 11), which are different from the more localized features in the subcooled pool boiling and less localized features in the Kolmogorov flow. 
We directly applied HFS to these datasets and visualized the gradient magnitude in a region with high-frequency features. Additionally, we visualized the ratio of the gradient strength on a high frequency region with and without HFS, as defined by $\\frac{|\\nabla x_{\\mathrm{HFS}}|}{|\\nabla x_{\\mathrm{baseline}}|}$ where $x$ is the chosen region, $\\nabla$ is the gradient operator, and baseline refers to the case without HFS. This ratio compares the selectiveness in scaling the gradient of the features. The HFS approach is effective for cases where it can selectively scale the gradients across the localized features. In contrast, HFS may not be effective if it results in a uniform gradient scaling, as it can be seen in the sample from the turbulent jet dataset.", + "bbox": [ + 117, + 120, + 884, + 344 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Specifically, as shown in Fig. 11, the HFS approach successfully increases the gradient strength at high frequency regions in subcooled pool boiling and Kolmogorov flow. However, it scales the gradient uniformly in the turbulent jet case. Therefore, the ratio of the gradient strength with HFS to the baseline shows a less uniform solution on the subcooled pool boiling sample, followed by the Kolmogorov flow sample. However, this ratio is almost uniform for the turbulent jet case. Selective enhancement of the gradient near the edges and high-frequency features helps with the better representation of these local regions which helps the NO to better capture the high-frequency details. 
Since HFS is applied in the latent space, the artifacts caused by patching are mitigated and ultimately discarded in the deeper levels of the NO.", + "bbox": [ + 114, + 360, + 884, + 514 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 487, + 916, + 510, + 929 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/a3920278fe9862529af92facf4befe880ca3cdb6ffd2727b6fd4c8c5dd3e3403.jpg", + "image_caption": [ + "(a)" + ], + "image_footnote": [], + "bbox": [ + 161, + 89, + 344, + 225 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/b20b61142a3bce86ad36ea7d9f2a959ee5be79e20456608ea2474ed5fc37e15f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 366, + 96, + 519, + 215 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/3af287cf23e6f53219a3589e04c57a037bb16dd30b3685c7cc69a05bbec05bb1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 527, + 96, + 678, + 215 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/06a9908dbf997c468a6ff901a52a01cdf5091d706ec7f0c0dae199bce729c2d8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 685, + 83, + 865, + 215 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/e6da86530a472579fba0dd641c2521881c22ad246da13baba0f7a778d0d158dd.jpg", + "image_caption": [ + "(b)" + ], + "image_footnote": [], + "bbox": [ + 159, + 229, + 337, + 367 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/02e29bc84d1768810e1282a65f1138753dd896b1b55969f5a4838f82670d53f6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 381, + 250, + 505, + 359 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/a2d0d2a7c6fc7b04c17becb19f33300d8aa8cf147d4405358f82dc9231fef45b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 539, + 249, + 663, + 357 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": 
"images/bde31bbafc0dd86d2cacce81eadabd84b6642b05c7685ef1db226bcddcc21013.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 700, + 249, + 852, + 359 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/b8520abce2c4b102c3587cdadf9a75faceafd934ce44f67cb0d3a2e2c69cb107.jpg", + "image_caption": [ + "(c)", + "Turbulent jet (Schlieren velocimetry)", + "Figure 11: HFS impact on gradient magnitude for different problems. (a) Subcooled pool boiling. (b) Kolmogorov flow. (c) Schlieren velocimetry of turbulent jet. For each case, the first column shows the sample and the chosen region with high frequency features (dashed boxes), the second column shows the gradient magnitude, the third column shows the gradient magnitude after applying HFS to the sample, and the fourth column shows the ratio of the HFS-enhanced gradient magnitude to the baseline gradient magnitude." + ], + "image_footnote": [], + "bbox": [ + 159, + 385, + 389, + 464 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/b6f0c5dd6c47a2958767bffd09c994b0edb2e6ba4a6e8bf3fdcb4e17cb43447e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 398, + 388, + 487, + 460 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/54d45f249993056243a6c7a732dd99069452cb3aadac4e89970dbe83138eb984.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 557, + 388, + 647, + 460 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/cd911c74dd595db620775568094d4217a9e0788b4a95e0be4c48f5928145f1f8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 719, + 387, + 836, + 460 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "5. Summary", + "text_level": 1, + "bbox": [ + 115, + 577, + 230, + 594 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "In this work, we proposed a new method named high-frequency scaling (HFS) to mitigate the spectral bias in convolutional-based neural operators. 
We demonstrated that integrating HFS with feature maps in the latent space of the neural operator reduces the prediction errors in two-phase flow problems and the Kolmogorov flow problem. Through spectral bias mitigation, HFS helps to better capture intricate features and sharp gradients commonly seen within the bubbles and induced vortices in subcooled pool boiling problem, and the small-scale features in the Kolmogorov flow. These high-frequency features are prone to over-smoothing when predicted with neural operators without HFS. HFS-enhanced neural operators can improve neural operator performance irrespective of the neural operator size. We showed that for different variants of ResUNet with number of parameters varying within $\\sim 2$ to $\\sim 16$ millions, HFS consistently reduces the prediction errors. Furthermore, a better energy spectra alignment is observed for the results of the neural operator with HFS. Additionally, we showed that the diffusion model predictions are strongly dependent on the quality of the prior neural operator predictions. Therefore, it is important to improve the neural operator prediction accuracy using HFS so that the diffusion model can further recover the missing high-frequencies in the solutions. Otherwise, the diffusion model can barely improve the erroneous large features or significantly over-smoothed predictions of the neural operator. The advantages of HFS are obtained with a negligible memory requirement and a small computational cost trade-off.", + "bbox": [ + 114, + 605, + 882, + 913 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 485, + 916, + 509, + 929 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Finally, we investigated the effectiveness criteria for HFS approach by visualizing the gradient magnitudes of high-frequency regions of three different problems. 
We showed that HFS works the best on the subcooled pool boiling dataset due to the more localized features, which result in a selective gradient enhancement near the edges and high-frequency features. The HFS approach effectiveness decreases in the Kolmogorov flow problem, and is negligible in the turbulent jet problem. The gradient magnitude is scaled more uniformly in the Kolmogorov flow data and almost completely uniform in the turbulent jet problem, hence explaining why HFS is ineffective for this problem.", + "bbox": [ + 112, + 103, + 882, + 240 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "CRediT authorship contribution statement", + "text_level": 1, + "bbox": [ + 115, + 260, + 490, + 278 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Siavash Khodakarami: Writing - review & editing, Writing - original draft, Visualization, Validation, Software, Methodology, Investigation, Formal analysis, Data Curation, Conceptualization. Vivek Oommen: Writing - review & editing, Writing - original draft, Visualization, Validation, Methodology, Investigation, Formal analysis, Data curation. Aniruddha Bora: Writing - review & editing, Writing - original draft, Validation, Methodology, Investigation. 
"George Em Karniadakis: Writing - review & editing, Writing - original draft, Supervision, Funding acquisition, Conceptualization.", + "bbox": [ + 112, + 288, + 882, + 409 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Declaration of competing interest", + "text_level": 1, + "bbox": [ + 115, + 429, + 410, + 445 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "The authors declare that they have no known competing financial interests or personal relationships that could have appeared to influence the work reported in this paper.", + "bbox": [ + 114, + 456, + 880, + 492 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Acknowledgments", + "text_level": 1, + "bbox": [ + 115, + 510, + 282, + 527 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "We would like to acknowledge funding from the Office of Naval Research as part of MURI-METHODS project with grant number N00014242545. The authors would like to acknowledge the computational resources and services at the Center for Computation and Visualization (CCV), Brown University. 
The experiments were also partly conducted using the Delta AI computational resources at the National Center for Supercomputing Applications at the University of Illinois Urbana-Champaign through allocation CIS240932 from the Advanced Cyberinfrastructure Coordination Ecosystem: Services & Support (ACCESS) program, which is supported by the National Science Foundation.", + "bbox": [ + 112, + 537, + 882, + 677 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Data Availability", + "text_level": 1, + "bbox": [ + 115, + 695, + 268, + 714 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "All codes and datasets will be made publicly available at https://github.com/SiaK4/HFS_ResUNet.git upon publication.", + "bbox": [ + 114, + 722, + 880, + 758 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 115, + 777, + 216, + 793 + ], + "page_idx": 25 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] S. K. Godunov, I. Bohachevsky, Finite difference method for numerical computation of discontinuous solutions of the equations of fluid dynamics, Matematicheskij Sbornik 47 (1959) 271-306.", + "[2] R. Eymard, T. Gallouet, R. Herbin, Finite volume methods, Handbook of Numerical Analysis 7 (2000) 713-1018.", + "[3] G. Karniadakis, S. J. Sherwin, Spectral/hp element methods for computational fluid dynamics, Oxford University Press, USA, 2005.", + "[4] T. J. Hughes, The Finite Element Method: Linear Static and Dynamic Finite Element Analysis, Courier Corporation, 2012." + ], + "bbox": [ + 124, + 802, + 880, + 916 + ], + "page_idx": 25 + }, + { + "type": "page_number", + "text": "26", + "bbox": [ + 487, + 917, + 510, + 929 + ], + "page_idx": 25 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[5] L. Lu, P. Jin, G. Pang, Z. Zhang, G. E. 
Karniadakis, Learning nonlinear operators via deeponet based on the universal approximation theorem of operators, Nature Machine Intelligence 3 (2021) 218-229. URL: https://doi.org/10.1038/s42256-021-00302-5. doi:10.1038/s42256-021-00302-5.", + "[6] Z. Li, N. Kovachki, K. Azizzadenesheli, B. Liu, K. Bhattacharya, A. Stuart, A. Anandkumar, Fourier neural operator for parametric partial differential equations, arXiv preprint arXiv:2010.08895 (2020).", + "[7] Q. Cao, S. Goswami, G. E. Karniadakis, Laplace neural operator for solving differential equations, Nature Machine Intelligence 6 (2024) 631-640.", + "[8] T. Tripura, S. Chakraborty, Wavelet neural operator for solving parametric partial differential equations in computational mechanics problems, Computer Methods in Applied Mechanics and Engineering 404 (2023) 115783.", + "[9] O. Ovadia, V. Oommen, A. Kahana, A. Peyvan, E. Turkel, G. E. Karniadakis, Real-time inference and extrapolation via a diffusion-inspired temporal transformer operator (ditto), arXiv preprint arXiv:2307.09072 (2023).", + "[10] Z. Li, K. Meidani, A. B. Farimani, Transformer for partial differential equations' operator learning, arXiv preprint arXiv:2205.13671 (2022).", + "[11] A. Sharma, S. Singh, S. Ratna, Graph neural network operators: a review, Multimedia Tools and Applications 83 (2024) 23413-23436.", + "[12] T. Chen, H. Chen, Universal approximation to nonlinear operators by neural networks with arbitrary activation functions and its application to dynamical systems, IEEE Transactions on Neural Networks 6 (1995) 911-917. doi:10.1109/72.392253.", + "[13] R. Wan, E. Kharazmi, M. S. Triantafyllou, G. E. Karniadakis, Deepvivonet: Using deep neural operators to optimize sensor locations with application to vortex-induced vibrations, arXiv preprint arXiv:2501.04105 (2025).", + "[14] E. Kiyani, M. Manav, N. Kadivar, L. De Lorenzis, G. E. 
Karniadakis, Predicting crack nucleation and propagation in brittle materials using deep operator networks with diverse trunk architectures, arXiv preprint arXiv:2501.00016 (2024).", + "[15] A. Peyvan, V. Oommen, A. D. Jagtap, G. E. Karniadakis, Riemannonets: Interpretable neural operators for riemann problems, Computer Methods in Applied Mechanics and Engineering 426 (2024) 116996.", + "[16] Z. Li, W. Peng, Z. Yuan, J. Wang, Long-term predictions of turbulence by implicit u-net enhanced fourier neural operator, Physics of Fluids 35 (2023).", + "[17] Y. Jiang, Z. Li, Y. Wang, H. Yang, J. Wang, An implicit adaptive fourier neural operator for long-term predictions of three-dimensional turbulence, arXiv preprint arXiv:2501.12740 (2025).", + "[18] V. Gopakumar, S. Pamela, L. Zanisi, Z. Li, A. Anandkumar, M. Team, Fourier neural operator for plasma modelling, arXiv preprint arXiv:2302.06542 (2023).", + "[19] D. Montes de Oca Zapiain, J. A. Stewart, R. Dingreville, Accelerating phase-field-based microstructure evolution predictions via surrogate models trained by machine learning methods, npj Computational Materials 7 (2021) 3.", + "[20] V. Oommen, K. Shukla, S. Goswami, R. Dingreville, G. E. Karniadakis, Learning two-phase microstructure evolution using neural operators and autoencoder architectures, npj Computational Materials 8 (2022) 190.", + "[21] V. Oommen, K. Shukla, S. Desai, R. Dingreville, G. E. Karniadakis, Rethinking materials simulations: Blending direct numerical simulations with neural operators, npj Computational Materials 10 (2024) 145.", + "[22] S. Khodakarami, Y. Suh, Y. Won, N. Miljkovic, An intelligent strategy for phase change heat and mass transfer: Application of machine learning, in: Advances in Heat Transfer, volume 56, Elsevier, 2023, pp. 113-168.", + "[23] N. Rahaman, A. Baratin, D. Arpit, F. Draxler, M. Lin, F. Hamprecht, Y. Bengio, A. Courville, On the spectral bias of neural networks, in: K. Chaudhuri, R. 
Salakhutdinov (Eds.), Proceedings of the 36th International Conference on Machine Learning, volume 97 of Proceedings of Machine Learning Research, PMLR, 2019, pp. 5301-5310. URL: https://proceedings.mlr.press/v97/rahaman19a.html.", + "[24] Z.-Q. J. Xu, Y. Zhang, T. Luo, Y. Xiao, Z. Ma, Frequency principle: Fourier analysis sheds light on deep neural networks, arXiv preprint arXiv:1901.06523 (2019).", + "[25] Z.-Q. J. Xu, L. Zhang, W. Cai, On understanding and overcoming spectral biases of deep neural network learning methods for solving pdes, arXiv preprint arXiv:2501.09987 (2025).", + "[26] C. Lin, Z. Li, L. Lu, S. Cai, M. Maxey, G. E. Karniadakis, Operator learning for predicting multiscale bubble growth dynamics, The Journal of Chemical Physics 154 (2021).", + "[27] N. Jain, S. Roy, H. Kodamana, P. Nair, Scaling the predictions of multiphase flow through porous media using operator learning, Chemical Engineering Journal 503 (2025) 157671.", + "[28] O. Ronneberger, P. Fischer, T. Brox, U-net: Convolutional networks for biomedical image segmentation, in: Medical image computing and computer-assisted intervention-MICCAI 2015: 18th international confer" + ], + "bbox": [ + 117, + 87, + 880, + 912 + ], + "page_idx": 26 + }, + { + "type": "page_number", + "text": "27", + "bbox": [ + 487, + 916, + 509, + 928 + ], + "page_idx": 26 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "ence, Munich, Germany, October 5-9, 2015, proceedings, part III 18, Springer, 2015, pp. 234-241.", + "[29] S. Qin, F. Lyu, W. Peng, D. Geng, J. Wang, N. Gao, X. Liu, L. L. Wang, Toward a better understanding of fourier neural operators: Analysis and improvement from a spectral perspective, arXiv preprint arXiv:2404.07200 (2024).", + "[30] S. M. S. Hassan, A. Feeney, A. Dhruv, J. Kim, Y. Suh, J. Ryu, Y. Won, A. 
Chandramowlishwaran, Bubbleml: A multiphase multiphysics dataset and benchmarks for machine learning, Advances in Neural Information Processing Systems 36 (2024).", + "[31] A. Dubey, K. Weide, J. O'Neal, A. Dhruv, S. Couch, J. A. Harris, T. Klosterman, R. Jain, J. Rudi, B. Messer, et al., Flash-x: A multiphysics simulation software instrument, SoftwareX 19 (2022) 101168.", + "[32] X. Liu, B. Xu, S. Cao, L. Zhang, Mitigating spectral bias for the multiscale operator learning, Journal of Computational Physics 506 (2024) 112944.", + "[33] W. Cai, Z.-Q. J. Xu, Multi-scale deep neural networks for solving high dimensional pdes, arXiv preprint arXiv:1910.11710 (2019).", + "[34] M. Tancik, P. Srinivasan, B. Mildenhall, S. Fridovich-Keil, N. Raghavan, U. Singhal, R. Ramamoorthi, J. Barron, R. Ng, Fourier features let networks learn high frequency functions in low dimensional domains, Advances in neural information processing systems 33 (2020) 7537-7547.", + "[35] S. Wang, H. Wang, P. Perdikaris, On the eigenvector bias of fourier feature networks: From regression to solving multi-scale pdes with physics-informed neural networks, Computer Methods in Applied Mechanics and Engineering 384 (2021) 113938.", + "[36] M. Raissi, P. Perdikaris, G. E. Karniadakis, Physics-informed neural networks: A deep learning framework for solving forward and inverse problems involving nonlinear partial differential equations, Journal of Computational physics 378 (2019) 686-707.", + "[37] J. D. Toscano, V. Oommen, A. J. Varghese, Z. Zou, N. A. Daryakenari, C. Wu, G. E. Karniadakis, From pinns to pikans: Recent advances in physics-informed machine learning, arXiv preprint arXiv:2410.13228 (2024).", + "[38] S. Liang, L. Lyu, C. Wang, H. Yang, Reproducing activation function for deep learning, arXiv preprint arXiv:2101.04844 (2021).", + "[39] A. D. Jagtap, K. Kawaguchi, G. E. 
Karniadakis, Adaptive activation functions accelerate convergence in deep and physics-informed neural networks, Journal of Computational Physics 404 (2020) 109136.", + "[40] W. Cai, X. Li, L. Liu, A phase shift deep neural network for high frequency approximation and wave problems, SIAM Journal on Scientific Computing 42 (2020) A3285-A3312.", + "[41] P. Lippe, B. Veeling, P. Perdikaris, R. Turner, J. Brandstetter, Pde-refiner: Achieving accurate long rollouts with neural pde solvers, Advances in Neural Information Processing Systems 36 (2023) 67398-67433.", + "[42] E. Zhang, A. Kahana, A. Kopaničáková, E. Turkel, R. Ranade, J. Pathak, G. E. Karniadakis, Blending neural operators and relaxation methods in pde numerical solvers, Nature Machine Intelligence (2024) 1-11.", + "[43] H. Wu, K. Zhang, D. Zhou, W.-L. Chen, Z. Han, Y. Cao, High-flexibility reconstruction of small-scale motions in wall turbulence using a generalized zero-shot learning, Journal of Fluid Mechanics 990 (2024) R1.", + "[44] Z. Wang, X. Li, L. Liu, X. Wu, P. Hao, X. Zhang, F. He, Deep-learning-based super-resolution reconstruction of high-speed imaging in fluids, Physics of Fluids 34 (2022).", + "[45] R. Molinaro, S. Lanthaler, B. Raonic, T. Rohner, V. Armegioiu, Z. Y. Wan, F. Sha, S. Mishra, L. Zepeda-Nuñez, Generative ai for fast and accurate statistical computation of fluids, arXiv preprint arXiv:2409.18359 (2024).", + "[46] J. W. Lockwood, A. Gori, P. Gentine, A generative super-resolution model for enhancing tropical cyclone wind field intensity and resolution, Journal of Geophysical Research: Machine Learning and Computation 1 (2024) e2024JH000375.", + "[47] V. Oommen, A. Bora, Z. Zhang, G. E. Karniadakis, Integrating neural operators with diffusion models improves spectral representation in turbulence modeling, arXiv preprint arXiv:2409.08477 (2024).", + "[48] S. M. S. Hassan, A. Feeney, A. Dhruv, J. Kim, Y. Suh, J. Ryu, Y. Won, A. 
Chandramowlishwaran, Bubbleml: a multi-physics dataset and benchmarks for machine learning, arXiv preprint arXiv:2307.14623 (2023).", + "[49] F. I. Diakogiannis, F. Waldner, P. Caccetta, C. Wu, Resunet-a: A deep learning framework for semantic segmentation of remotely sensed data, ISPRS Journal of Photogrammetry and Remote Sensing 162 (2020) 94-114.", + "[50] H. Li, Z. Xu, G. Taylor, C. Studer, T. Goldstein, Visualizing the loss landscape of neural nets, Advances in neural information processing systems 31 (2018).", + "[51] X. Chen, C. Liang, D. Huang, E. Real, K. Wang, H. Pham, X. Dong, T. Luong, C.-J. Hsieh, Y. Lu, et al., Symbolic discovery of optimization algorithms, Advances in neural information processing systems 36 (2024)." + ], + "bbox": [ + 117, + 87, + 880, + 910 + ], + "page_idx": 27 + }, + { + "type": "page_number", + "text": "28", + "bbox": [ + 487, + 916, + 510, + 928 + ], + "page_idx": 27 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[52] D. P. Kingma, Adam: A method for stochastic optimization, arXiv preprint arXiv:1412.6980 (2014).", + "[53] A. Dubey, K. Weide, J. O'Neal, A. Dhruv, S. Couch, J. A. Harris, T. Klosterman, R. Jain, J. Rudi, B. Messer, et al., Flash-x: A multiphysics simulation software instrument, SoftwareX 19 (2022) 101168.", + "[54] M. Wei, X. Zhang, Super-resolution neural operator, in: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2023, pp. 18247-18256.", + "[55] R. Wang, K. Kashinath, M. Mustafa, A. Albert, R. Yu, Towards physics-informed deep learning for turbulent flow prediction, in: Proceedings of the 26th ACM SIGKDD international conference on knowledge discovery & data mining, 2020, pp. 1457-1466.", + "[56] P. Chakrabarty, S. Maji, The spectral bias of the deep image prior, arXiv preprint arXiv:1912.08905 (2019).", + "[57] A. M. Saxe, P. W. Koh, Z. Chen, M. Bhand, B. Suresh, A. Y. 
Ng, On random weights and unsupervised feature learning., in: Icml, volume 2, 2011, p. 6.", + "[58] P. Wang, W. Zheng, T. Chen, Z. Wang, Anti-oversmoothing in deep vision transformers via the fourier domain analysis: From theory to practice, arXiv preprint arXiv:2203.05962 (2022).", + "[59] J. Ho, A. Jain, P. Abbeel, Denoising diffusion probabilistic models, Advances in neural information processing systems 33 (2020) 6840-6851.", + "[60] Y. Song, J. Sohl-Dickstein, D. P. Kingma, A. Kumar, S. Ermon, B. Poole, Score-based generative modeling through stochastic differential equations, arXiv preprint arXiv:2011.13456 (2020).", + "[61] Y. Song, S. Ermon, Generative modeling by estimating gradients of the data distribution, Advances in neural information processing systems 32 (2019).", + "[62] T. Karras, M. Aittala, T. Aila, S. Laine, Elucidating the design space of diffusion-based generative models, Advances in neural information processing systems 35 (2022) 26565-26577.", + "[63] V. Oommen, A. Bora, Z. Zhang, G. E. Karniadakis, Data for \"integrating neural operators with diffusion models improves spectral representation in turbulence modeling\" (kolmogorov flow case), 2025. URL: https://doi.org/10.6084/m9.figshare.28250960.v1. doi:10.6084/m9.figshare.28250960.v1.", + "[64] G. S. Settles, A. Liberzon, Schlieren and bos velocimetry of a round turbulent helium jet in air, Optics and Lasers in Engineering 156 (2022) 107104." + ], + "bbox": [ + 115, + 87, + 878, + 470 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Appendix A. Training strategies and ResUNet prediction results", + "text_level": 1, + "bbox": [ + 115, + 493, + 680, + 511 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "All the models were trained for $\\sim 1000$ epochs (convergence typically happened earlier). The initial learning rate was set to $8\\times 10^{-4}$ and it was reduced after the first 700 epochs using a linear step scheduler. 
We used GELU activation function and group normalization after convolutional layers. Lion optimizer with weight decay of 0.02 to 0.1 were used, depending on the neural operator size. Batch size of 4 or 8 was used, depending on the neural operator size. We found that gradient clipping at maximum gradient norm of 0.4 to 1 (depending on neural operator size) helps with the optimization. Our preliminary findings showed better results with the Lion optimizer compared to Adam and AdamW optimizers. Therefore, all the trainings for this work were conducted with the Lion optimizer. For all the neural operators, the number of layers in the encoder and decoder were kept constant and the number of parameters at each layer was modified to change the neural operator size.", + "bbox": [ + 114, + 521, + 880, + 712 + ], + "page_idx": 28 + }, + { + "type": "page_number", + "text": "29", + "bbox": [ + 487, + 916, + 510, + 929 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/216db5d1060828f55173fc75cc3f6bdd527f94ae0d29cc7ad38e6bb2fa212e64.jpg", + "image_caption": [ + "5", + "(b)" + ], + "image_footnote": [], + "bbox": [ + 157, + 97, + 273, + 179 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/f9f5641b6c82f11306de0d3689768b2b13e8457b6f1cd3662c1b66e60e3ed261.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 297, + 97, + 413, + 179 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/d3c67e83e17b5fd46d7760afc8200d37ecb7166597966030724c7e43b7db60ed.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 438, + 97, + 554, + 179 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/96e9fe952ba04a71427afb1dec43bde98adb15cd8c90862e8d75dc70147f8e70.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 576, + 97, + 694, + 179 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/f986092ac9f6ad1661f7ee50cf594bcb849746ce965b8ad4798f4b49976d2df6.jpg", + 
"image_caption": [], + "image_footnote": [], + "bbox": [ + 717, + 96, + 853, + 179 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/9c5bc0ee49ccb11782a786fdf420a16404e092d08ea9b8b167f0def97ae4f912.jpg", + "image_caption": [ + "(c)" + ], + "image_footnote": [], + "bbox": [ + 157, + 186, + 273, + 268 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/32f06be7849e92231d12edb1f8aab5d30df58ae3d8707e22c1fe4771b3eb8a5e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 297, + 186, + 413, + 268 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/cd6f9024ad18056f68340ec20642f220bc4cfe1c04907395459421d74c229cb8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 438, + 186, + 552, + 268 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/24a5a142fba9df36a266ff532a6dd6f10514925e5ceb3121f29d52b03610d45f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 576, + 186, + 694, + 268 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/59f496ffcbc52b4a9b0fa7ec0bf05bdff02cdd881eb5c24ff6271a98046d3169.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 717, + 186, + 853, + 268 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/6af72eec91e07a50d37a4a08fa6586c52969c4291b266bc7426bfb1b0c1c374a.jpg", + "image_caption": [ + "(d)" + ], + "image_footnote": [], + "bbox": [ + 159, + 275, + 273, + 357 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/68dddade02c2590e94e546d373570e8a15684914e477cd49190c23707cd2bc18.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 297, + 275, + 413, + 357 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/6c7b0c2e0c10c5a798fdcfa928b49ce73045efbd3b1cd8453688f7c99d17cd8a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 438, + 275, + 552, + 357 + ], + "page_idx": 29 + }, + { + "type": "image", + 
"img_path": "images/3379c693ec231bdecec176e4940ec55fb41c91a3584394e5c718fa44b432e3e9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 576, + 275, + 694, + 357 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/58537f9f0b24681dc7cbeaeca1b1a9b091ce3452f0c6cb59fec6e162e5eb03b3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 717, + 275, + 853, + 357 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/7afd174a61dc3d1de47273ea402d57ec42d4b4855f6e00716ed25610f3bf2e99.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 159, + 363, + 273, + 445 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/bf303478b135b4702e050d5bc329eafb8ed3bee1cea9c893e56c2e3acfcdfe30.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 297, + 363, + 413, + 445 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/46f47561b47a1d8e82a51bf46d73c89d671b14142e75b748b48a9f23c4bc18e5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 438, + 363, + 552, + 445 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/fe38d8420ef1f2b61297d2ff15a9cd6a35da167a630d84b91c2c48f27b959981.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 576, + 363, + 694, + 445 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/0a2207b92d4a876252be664d76541511a159339b9f752eac681e2c9f2f8745fa.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 717, + 363, + 853, + 445 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/296cbe932f78eae1ce2bd146deed18f92446c6a21cc9c53a07e445e9df2d2011.jpg", + "image_caption": [ + "(e)" + ], + "image_footnote": [], + "bbox": [ + 159, + 451, + 273, + 533 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/a758c825f19ab19d8b155d31e8b085f5d408ecd312bcde014754a5ce38a102a6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 297, 
+ 451, + 413, + 533 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/c33f2ead32bb0936798071bc0c00592261094bdf3e45f08d526a0fbf6f153427.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 438, + 451, + 552, + 533 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/0d8bcff3f5dad1dfd457e9f5dd4d81f799dc0b6bcee229617b3874b126d9c772.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 576, + 451, + 694, + 533 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/0255dc7ff44b63e60640617dbe202f47d5e43c9354551d69843c1a39ce725f0f.jpg", + "image_caption": [ + "Figure A.12: Example of subcooled pool boiling temperature prediction results by neural operators. (a) Ground truth results, (b) UNet prediction results, (c) ResUNet prediction results, (d) UNet prediction errors, (e) ResUNet prediction errors. The results show five time-step predictions from left to right." + ], + "image_footnote": [], + "bbox": [ + 717, + 451, + 853, + 535 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Appendix B. Boundary RMSE, Bubble RMSE, and Spectral Errors", + "text_level": 1, + "bbox": [ + 115, + 625, + 645, + 658 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Boundary RMSE (BRMSE) for a single sample and time-step is defined by calculating the errors only at the boundaries of the domain:", + "bbox": [ + 114, + 667, + 880, + 702 + ], + "page_idx": 29 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {B R M S E} = \\sqrt {\\frac {1}{| \\partial \\Omega |} \\sum_ {\\mathbf {x} _ {i} \\in \\partial \\Omega} \\left(\\hat {T} _ {i} - T _ {i}\\right) ^ {2}}, \\tag {B.1}\n$$\n", + "text_format": "latex", + "bbox": [ + 357, + 707, + 878, + 751 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "where $\\mathbf{x}_i\\in \\partial \\Omega$ specifies the points at the boundaries, $\\hat{T}_i$ is the predicted temperature, and $T_{i}$ is the actual temperature. 
Similarly, bubble RMSE is defined by calculating the errors only within the bubble areas. These areas are specified through a level-set function in the simulations.", + "bbox": [ + 114, + 759, + 878, + 810 + ], + "page_idx": 29 + }, + { + "type": "equation", + "text": "\n$$\n\\text {B u b b l e} = \\sqrt {\\frac {1}{| \\Omega_ {\\text {b u b b l e}} \\cup \\partial \\Omega_ {\\text {b u b b l e}} |} \\sum_ {\\mathbf {x} _ {i} \\in \\Omega_ {\\text {b u b b l e}} \\cup \\partial \\Omega_ {\\text {b u b b l e}}} (\\hat {y} _ {i} - y _ {i}) ^ {2}}, \\tag {B.2}\n$$\n", + "text_format": "latex", + "bbox": [ + 240, + 816, + 878, + 860 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "where $\\mathbf{x}_i\\in \\Omega_{\\mathrm{bubble}}$ and $\\partial \\Omega_{\\mathrm{bubble}}$ specify the points inside the bubble areas and at the interfaces, respectively.", + "bbox": [ + 114, + 866, + 878, + 901 + ], + "page_idx": 29 + }, + { + "type": "page_number", + "text": "30", + "bbox": [ + 487, + 916, + 510, + 929 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "The spectral errors in each of the low, mid, and high-frequency bands are defined as follows:", + "bbox": [ + 144, + 86, + 878, + 104 + ], + "page_idx": 30 + }, + { + "type": "equation", + "text": "\n$$\nF _ {\\text {b a n d}} = \\sqrt {\\frac {1}{N _ {\\text {b a n d}}} \\sum_ {k \\in \\text {b a n d}} \\left| \\mathcal {F} (T) (\\mathbf {k}) - \\mathcal {F} (\\hat {T}) (\\mathbf {k}) \\right| ^ {2}}, \\quad \\text {b a n d} \\in \\{\\text {l o w}, \\text {m i d}, \\text {h i g h} \\}, \\tag {B.3}\n$$\n", + "text_format": "latex", + "bbox": [ + 200, + 116, + 880, + 161 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "where $k$ is the spatial frequency component of the Fourier transformed solutions, $\\mathcal{F}$ denotes the Fourier transform, and $N_{\\mathrm{band}}$ specifies the number of components at each frequency band. 
The low, mid, and high bands may be defined differently based on the underlying dataset and the amount of high-frequency components. In this work, these bands were set to the first 2%, the first 6.2% excluding the low band components, and the last 93.8% of the components.", + "bbox": [ + 114, + 173, + 880, + 260 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Similarly, the energy spectrum error, showing the energy spectra misalignment at each frequency band is defined as follows:", + "bbox": [ + 114, + 277, + 880, + 311 + ], + "page_idx": 30 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {E} _ {F _ {\\mathrm {b a n d}}} = \\sqrt {\\frac {1}{N _ {\\mathrm {b a n d}}} \\sum_ {k \\in \\mathrm {b a n d}} \\left(\\left| \\mathcal {F} (T) (\\mathbf {k}) \\right| ^ {2} - \\left| \\mathcal {F} (\\hat {T}) (\\mathbf {k}) \\right| ^ {2}\\right) ^ {2}}, \\quad \\mathrm {b a n d} \\in \\{\\text {l o w , m i d , h i g h} \\}, \\tag {B.4}\n$$\n", + "text_format": "latex", + "bbox": [ + 154, + 323, + 880, + 370 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "where $\\mathcal{E}$ denotes the energy spectrum error.", + "bbox": [ + 144, + 381, + 500, + 398 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Appendix C. Summary of subcooled pool boiling prediction results with HFS-enhanced NO", + "text_level": 1, + "bbox": [ + 115, + 419, + 880, + 453 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "In this work, we tested different variants of ResUNet by varying number of parameters in the range of $\\sim 2$ millions to $\\sim 16$ millions. In the following table, we summarized the results of the two of the models (smallest and largest models), trained with optimal hyperparameters. Note that the same hyperparameters were used for training a neural operator with and without HFS. The parameters were first optimized for the NO without HFS and the same set of parameters were used for training the HFS-enhanced NO. 
The results of the other models are not included in this table for easier comparison and interpretation. We refer the reader to Figure 4 for observing the effect of HFS on all the tested models. Similar to the rest of the paper, the results are based on five time-step predictions.", + "bbox": [ + 114, + 464, + 882, + 620 + ], + "page_idx": 30 + }, + { + "type": "table", + "img_path": "images/bf8c0afdc7ba0107544fd83710ce308cb2759d8c05cdf5f7306cec86bee95cb2.jpg", + "table_caption": [ + "Table C.4: Subcooled pool boiling temperature prediction errors with neural operator (NO) with and without high-frequency scaling (HFS) The columns correspond to the metrics, NO with $\\sim 1.7$ millions parameters, HFS-enhanced NO with $\\sim 1.7$ millions parameters, NO with $\\sim 16.2$ millions parameters, and HFS-enhanced NO with $\\sim 16.2$ millions parameters." + ], + "table_footnote": [], + "table_body": "
NO, 1.7 MNO+HFS, 1.7 MNO, 16.2 MNO+HFS, 16.2 M
Rel. Error0.04140.03330.02510.0238
RMSE0.04030.03240.02440.0232
BRMSE0.09730.07290.05620.0505
Bubble RMSE0.19240.1430.1090.0985
Maxmean1.0190.8970.6850.656
Flow0.3230.2370.2120.141
Fmid0.2820.2180.1850.148
Fhigh0.04760.04000.03920.0296
Parameters [Millions]1.7111.71216.26316.268
", + "bbox": [ + 117, + 701, + 880, + 879 + ], + "page_idx": 30 + }, + { + "type": "page_number", + "text": "31", + "bbox": [ + 487, + 916, + 507, + 929 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Appendix D. Saturated pool boiling prediction results", + "text_level": 1, + "bbox": [ + 115, + 86, + 591, + 103 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Saturated pool boiling dataset involves less complexity due to lower high-frequency components and small scale features. Therefore, a well-optimized NO without HFS can successfully resolve the solutions. However, HFS still enhances the prediction accuracies, especially at bubble areas. The following figure demonstrates an example of predictions using NO and HFS-enhanced NO for saturated pool boiling dataset. Generally, the errors are much smaller than subcooled pool boiling predictions. However, it can be seen that the errors in the regions with departed bubbles are reduced with HFS-enhanced NO.", + "bbox": [ + 114, + 112, + 882, + 233 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/c875edc340d8e0893926ec11727820b8909097c64ebe04e21e7e0b12c6ba14a6.jpg", + "image_caption": [ + "Figure D.13: Examples of saturated pool boiling temperature prediction results by NO and HFS-enhanced NO (a) Ground truth (GT) results. (b) NO predictions. (c) $\\mathrm{NO} + \\mathrm{HFS}$ predictions. (d) Absolute prediction errors of NO $(E_{\\mathrm{NO}})$ . (e) Absolute prediction errors of $\\mathrm{NO} + \\mathrm{HFS}$ $(E_{\\mathrm{NO} + \\mathrm{HFS}})$ . The results are shown for five time-step predictions from left to right. The departed bubbles areas are circled (dashed red circles) in error maps for easier interpretation and comparison. The results are based on a NO with $\\sim 3.5$ millions parameter." 
+ ], + "image_footnote": [], + "bbox": [ + 127, + 262, + 868, + 712 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "To further investigate if HFS can enhance the predictions with smaller NO on this simpler dataset, we trained another NO with the same structure (ResUNet) but with only $\\sim 0.6$ millions parameters with and without HFS. Consistent with previous results, HFS enhanced the predictions by reducing the field errors such as RMSE and bubble RMSE as well the spectral errors. The prediction results of saturated pool boiling dataset using two different NOs with", + "bbox": [ + 112, + 821, + 884, + 906 + ], + "page_idx": 31 + }, + { + "type": "page_number", + "text": "32", + "bbox": [ + 487, + 916, + 510, + 929 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "and without HFS are summarized in the following table. Similar to the rest of the paper, the results are based on five time-step predictions.", + "bbox": [ + 114, + 86, + 880, + 122 + ], + "page_idx": 32 + }, + { + "type": "table", + "img_path": "images/7336432f3f14053949b40efa42bad4fd0c39bbab6cbcf2075dc6987d42cba825.jpg", + "table_caption": [ + "Table D.5: Saturated pool boiling temperature prediction errors of NO with and without HFS. The columns correspond to the metrics, NO with $\\sim 0.6$ millions parameters, HFS-enhanced NO with $\\sim 0.6$ millions parameters, NO with $\\sim 3.5$ millions parameters, and HFS-enhanced NO with $\\sim 3.5$ millions parameters." + ], + "table_footnote": [], + "table_body": "
NO, 0.6 MNO+HFS, 0.6 MNO, 3.5 MNO+HFS, 3.5 M
Rel. Error0.01730.01650.01490.0145
RMSE0.01710.01640.01480.0144
BRMSE0.04620.04500.03640.0355
Bubble RMSE0.09180.08980.07260.0692
Maxmean0.5920.5950.5530.544
Flow0.09640.08350.07450.0736
Fmid0.10860.09980.09190.0855
Fhigh0.02090.02080.01820.0180
Parameters [Millions]0.6140.6153.4803.481
", + "bbox": [ + 115, + 187, + 882, + 373 + ], + "page_idx": 32 + }, + { + "type": "page_number", + "text": "33", + "bbox": [ + 487, + 916, + 510, + 929 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/109eabf1c2090326d7c86275a3cb013ee490da972d1d8047ce97ccfd130ab144.jpg", + "image_caption": [ + "(a)", + "Figure E.14: Effect of HFS on the latent space mean features. (a) Mean latent feature maps in decoder (downsampling) with five layers. (b) Mean latent feature maps in decoder (upsampling) with five layers. The results are based on a NO with $\\sim 16$ millions parameters." + ], + "image_footnote": [], + "bbox": [ + 152, + 172, + 502, + 694 + ], + "page_idx": 33 + }, + { + "type": "image", + "img_path": "images/f7993c093c7192f5cb3cbfb825bab433b9db09b4062eae2dea9b86fd1e4c6d5b.jpg", + "image_caption": [ + "(b)" + ], + "image_footnote": [], + "bbox": [ + 512, + 170, + 858, + 694 + ], + "page_idx": 33 + }, + { + "type": "header", + "text": "Appendix E. Visualization of latent space", + "bbox": [ + 115, + 86, + 484, + 103 + ], + "page_idx": 33 + }, + { + "type": "page_number", + "text": "34", + "bbox": [ + 487, + 916, + 510, + 929 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Appendix F. Additional visualizations of the subcooled pool boiling predictions", + "text_level": 1, + "bbox": [ + 115, + 85, + 803, + 104 + ], + "page_idx": 34 + }, + { + "type": "image", + "img_path": "images/818538e3c98e7ef1cb7ca02c7839b776ab683c489ef3ad9ddcfc9a7d6277cc8e.jpg", + "image_caption": [ + "Figure F.15: Examples of subcooled pool boiling prediction results by DM integrated with NO and HFS-enhanced NO. (a) Ground truth (GT) results. (b) NO predictions. (c) NO + DM predictions. (d) NO + HFS predictions. (e) NO + HFS + DM predictions. The results are shown for five time-step predictions from left to right." + ], + "image_footnote": [], + "bbox": [ + 127, + 133, + 865, + 621 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Appendix G. 
Optimized scaling parameters, $\\lambda_{DC}$ and $\\lambda_{HFC}$", + "text_level": 1, + "bbox": [ + 115, + 715, + 628, + 734 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "The following figure demonstrates the learned $\\lambda_{DC}$ and $\\lambda_{HFC}$ across all the feature maps in the latent space of the encoder and decoder. The results are based on the training of a HFS-enhanced NO with $\\sim 1.7$ million parameters for the subcooled pool boiling problem.", + "bbox": [ + 114, + 744, + 882, + 797 + ], + "page_idx": 34 + }, + { + "type": "page_number", + "text": "35", + "bbox": [ + 487, + 916, + 509, + 929 + ], + "page_idx": 34 + }, + { + "type": "image", + "img_path": "images/b32764dafa3d505c255f1f64f1257658a5fa838bf50794dd76059aea8719ee2a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 171, + 104, + 831, + 384 + ], + "page_idx": 35 + }, + { + "type": "image", + "img_path": "images/a0784824075ff041ec54bce51f4a9f920414699bc99c6bfb71782ab60b664316.jpg", + "image_caption": [ + "Figure G.16: (a) learned values of $\\lambda_{DC}$ and $\\lambda_{HFC}$ in the encoder of the NO. (b) learned values of $\\lambda_{DC}$ and $\\lambda_{HFC}$ in the decoder of NO. Layers start from highest spatial resolution to the lowest in the encoder and vice versa for the decoder." + ], + "image_footnote": [], + "bbox": [ + 169, + 376, + 831, + 667 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Appendix H. 
Kolmogorov flow prediction results", + "text_level": 1, + "bbox": [ + 115, + 749, + 549, + 766 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "The vorticity formulation of the unsteady 2D incompressible Navier-Stokes equation for a viscous and incompressible fluid with the Kolmogorov forcing term is given as follows, where $\\omega$ is the vorticity, $u$ is the velocity vector, and $\\nu$ is the kinematic viscosity.", + "bbox": [ + 114, + 775, + 882, + 828 + ], + "page_idx": 35 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\{ \\begin{array}{l l} \\partial_ {t} \\omega + \\mathbf {u} \\cdot \\nabla \\omega = \\nu \\Delta \\omega + f (x, y), & (x, y) \\in (0, 2 \\pi) ^ {2}, t \\in (0, t _ {\\text {f i n a l}} ] \\\\ f (x, y) = \\chi (\\sin (2 \\pi (x + y)) + \\cos (2 \\pi (x + y))), & (x, y) \\in (0, 2 \\pi) ^ {2} \\\\ \\nabla \\cdot \\mathbf {u} = 0, & (x, y) \\in (0, 2 \\pi) ^ {2}, t \\in (0, t _ {\\text {f i n a l}} ] \\\\ \\omega (x, y, 0) = \\omega_ {0}, & (x, y) \\in (0, 2 \\pi) ^ {2} \\end{array} \\right. \\tag {H.1}\n$$\n", + "text_format": "latex", + "bbox": [ + 159, + 848, + 880, + 928 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "In this study, we used $\\chi = 0.1$ , $\\nu = 10^{-5}$ , and periodic boundary conditions. The vorticity initial condition was sampled from a Gaussian random field according to the distribution $\\mathcal{N}(0,14^{0.5}(-\\Delta +196I)^{-1.5})$ . 
The following figure demonstrate an example of the prediction results of the neural operator with and without HFS.", + "bbox": [ + 114, + 85, + 882, + 156 + ], + "page_idx": 36 + }, + { + "type": "image", + "img_path": "images/1cfcf41b9fcb16a03911d047cd5dfb8c48cb6eb5ef8522a0e365588ab7ffadcb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 127, + 174, + 875, + 404 + ], + "page_idx": 36 + }, + { + "type": "image", + "img_path": "images/f2e000dac344778458cfa4d7d28377618422c7e3854b30636a302d1144e056ab.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 127, + 404, + 868, + 536 + ], + "page_idx": 36 + }, + { + "type": "image", + "img_path": "images/dfc42718a5b15fa76af837f1cf09d44379ea5e84068c7dfccc29da3080f16166.jpg", + "image_caption": [ + "Figure H.17: 2D Kolmogorov flow prediction results. (a) Ground truth solutions. (b) NO predictions. (c) HFS-enhanced NO predictions. (d) The corresponding energy spectra $((p(k))$ for predictions at each time-step. (e) Zoomed-in view of energy spectra showing only the high wavenumbers for better visualization of the differences. The legends in (d) are applicable to (e) as well." 
+ ], + "image_footnote": [], + "bbox": [ + 127, + 539, + 868, + 712 + ], + "page_idx": 36 + }, + { + "type": "page_number", + "text": "37", + "bbox": [ + 487, + 916, + 509, + 928 + ], + "page_idx": 36 + } +] \ No newline at end of file diff --git a/data/2025/2503_13xxx/2503.13695/56a188b9-c8a3-4011-b41c-a815bc66d1a0_model.json b/data/2025/2503_13xxx/2503.13695/56a188b9-c8a3-4011-b41c-a815bc66d1a0_model.json new file mode 100644 index 0000000000000000000000000000000000000000..5057373ce284155a26caac0fe89580f87466b67c --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/56a188b9-c8a3-4011-b41c-a815bc66d1a0_model.json @@ -0,0 +1,4575 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.305, + 0.061, + 0.726 + ], + "angle": 270, + "content": "arXiv:2503.13695v1 [cs.LG] 17 Mar 2025" + }, + { + "type": "title", + "bbox": [ + 0.119, + 0.081, + 0.88, + 0.131 + ], + "angle": 0, + "content": "Mitigating Spectral Bias in Neural Operators via High-Frequency Scaling for Physical Systems" + }, + { + "type": "text", + "bbox": [ + 0.145, + 0.15, + 0.852, + 0.168 + ], + "angle": 0, + "content": "Siavash Khodakaramia, Vivek Oommenb, Aniruddha Boraa, George Em Karniadakisa,c,*" + }, + { + "type": "text", + "bbox": [ + 0.22, + 0.179, + 0.776, + 0.193 + ], + "angle": 0, + "content": "\\(^{a}\\)Division of Applied Mathematics, Brown University, Providence, RI, 02912, USA" + }, + { + "type": "text", + "bbox": [ + 0.258, + 0.193, + 0.74, + 0.208 + ], + "angle": 0, + "content": "\\(^{b}\\)School of Engineering, Brown University, Providence, RI, 02912, USA" + }, + { + "type": "text", + "bbox": [ + 0.269, + 0.208, + 0.729, + 0.221 + ], + "angle": 0, + "content": "\\(^{c}\\)Pacific Northwest National Laboratory, Richland, WA, 99354, USA" + }, + { + "type": "title", + "bbox": [ + 0.117, + 0.275, + 0.198, + 0.29 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.118, + 0.299, + 0.889, + 0.538 + ], + "angle": 0, + "content": "Neural operators have 
emerged as powerful surrogates for modeling complex physical problems. However, they suffer from spectral bias making them oblivious to high-frequency modes, which are present in multiscale physical systems. Therefore, they tend to produce over-smoothed solutions, which is particularly problematic in modeling turbulence and for systems with intricate patterns and sharp gradients such as multi-phase flow systems. In this work, we introduce a new approach named high-frequency scaling (HFS) to mitigate spectral bias in convolutional-based neural operators. By integrating HFS with proper variants of UNet neural operators, we demonstrate a higher prediction accuracy by mitigating spectral bias in single and two-phase flow problems. Unlike Fourier-based techniques, HFS is directly applied to the latent space, thus eliminating the computational cost associated with the Fourier transform. Additionally, we investigate alternative spectral bias mitigation through diffusion models conditioned on neural operators. While the diffusion model integrated with the standard neural operator may still suffer from significant errors, these errors are substantially reduced when the diffusion model is integrated with a HFS-enhanced neural operator." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.546, + 0.825, + 0.58 + ], + "angle": 0, + "content": "Keywords: Neural operator, Spectral Bias, Two-phase flow, Boiling, Kolmogorov flow, Diffusion model" + }, + { + "type": "title", + "bbox": [ + 0.117, + 0.623, + 0.258, + 0.639 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.118, + 0.65, + 0.885, + 0.804 + ], + "angle": 0, + "content": "Design and control problems in engineering often require repeated simulation of the underlying physical system, necessitating the solution of governing partial differential equations (PDEs) multiple times. 
For a wide range of applications from fluid dynamics to material science, classical discretization-based direct numerical simulation (DNS) [1, 2, 3, 4] has been the cornerstone of scientific computing. While the methods for DNS have matured significantly over the past several decades, their computational cost becomes prohibitive when performing repeated simulations over varying parametric conditions or configurations. This challenge has fueled a growing interest in developing computationally efficient surrogate models capable of approximating these simulations at only a fraction of the cost." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.822, + 0.885, + 0.874 + ], + "angle": 0, + "content": "In particular, the classical DNS can estimate the solution for a given set of conditions. If one of these conditions is modified, the solver has to be re-run, further aggravating the computational cost. To mitigate this issue, neural operators were developed to handle a plurality of" + }, + { + "type": "page_footnote", + "bbox": [ + 0.14, + 0.899, + 0.529, + 0.914 + ], + "angle": 0, + "content": "*Corresponding Author: george_karniadakis@brown.edu" + }, + { + "type": "footer", + "bbox": [ + 0.769, + 0.917, + 0.881, + 0.932 + ], + "angle": 0, + "content": "March 19, 2025" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.118, + 0.086, + 0.885, + 0.311 + ], + "angle": 0, + "content": "conditions and parametric settings [5, 6, 7, 8, 9, 10, 11]. Neural operators, which are based on the universal operator approximation theorem [12], are trained to learn the mapping between infinite-dimensional functional spaces. Although it is expensive to train such surrogates offline, a trained neural operator can efficiently estimate solutions of unseen conditions almost instantaneously during inference. Many studies have used neural operators as surrogates to learn physical problems in space and time. 
Various physical problems such as vortex-induced vibration [13], crack nucleation and propagation [14], Riemann problems [15], turbulence [16, 17], plasma modeling [18], and many more have been solved, at least under limited conditions, by neural operators. Furthermore, other studies [19, 20, 21] attempted to learn the temporal evolution of two-phase microstructures in diffusion-driven processes such as spinodal decomposition and dendritic growth. However, very few studies have investigated the application of neural operators for buoyancy-dominated or advection-dominated two-phase flow problems, such as those encountered in boiling and condensation [22]." + }, + { + "type": "title", + "bbox": [ + 0.118, + 0.341, + 0.666, + 0.358 + ], + "angle": 0, + "content": "1.1. Neural operators and applications in two-phase flow modeling" + }, + { + "type": "text", + "bbox": [ + 0.118, + 0.362, + 0.885, + 0.55 + ], + "angle": 0, + "content": "Modeling and predicting two-phase flow during boiling is one of the most challenging problems in computational fluid dynamics. These phenomena involve complex interface dynamics and phase transitions, resulting in high-frequency spatio-temporal variations that are both challenging and computationally expensive to capture. Analyzing the solutions of such a system reveals a slowly decaying energy spectrum, where even the high wavenumbers carry a nontrivial amount of energy that cannot be neglected. Effective modeling of a two-phase flow system requires the neural operators to accurately predict spatio-temporal evolution of both low and high wavenumber modes. Unfortunately, neural networks and neural operators suffer from spectral bias [23, 24, 25], which makes them oblivious to high wavenumber modes. Consequently, the neural operators can only offer an over-smoothed prediction that fails to capture the intricate features near the interfaces where the sharp gradients are commonly observed." 
+ }, + { + "type": "text", + "bbox": [ + 0.118, + 0.567, + 0.885, + 0.756 + ], + "angle": 0, + "content": "Previous studies in boiling modeling with neural operators also confirm the spectral bias problem. [26] used DeepONet [5] to solve for the transient solution of a single bubble growth. Their findings demonstrate that DeepONet can effectively capture the mean component of the solution in the microscale regime, but it fails to accurately predict the stochastic fluctuations described by high-frequency components of the solution. A study by Jain et al. [27] on the prediction of multiphase flow through porous media with UNet [28] also showed that larger errors occurred near the interfaces. The Fourier neural operator (FNO) [6] also suffers from spectral bias [29]. The common practice of truncating high-frequency modes in FNOs leads to the loss of rich information, hindering the accurate modeling of chaotic systems in multi-phase heat transfer and turbulence. However, without truncation, training FNOs becomes unstable [9]." + }, + { + "type": "text", + "bbox": [ + 0.118, + 0.773, + 0.885, + 0.911 + ], + "angle": 0, + "content": "A recent study by Hassan et al.[30] collected a valuable boiling dataset based on Flash-X simulations [31] and developed neural operators based on different structures such as UNet, FNO, and group equivariant FNO (GFNO) for prediction in boiling problems. As shown in the results of our work, the previously best neural operator still struggles to capture high-frequency modes, which are prominently observed within the bubbles, along the interfaces, and in condensation traces in subcooled pool boiling. These over-smoothened solutions highlight the need for further advancements to mitigate spectral bias in modeling phase-change and multi-phase flow phenomena. 
Similarly, spectral bias of neural operators cannot be overlooked when mod-" + }, + { + "type": "page_number", + "bbox": [ + 0.493, + 0.917, + 0.506, + 0.93 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.119, + 0.088, + 0.88, + 0.119 + ], + "angle": 0, + "content": "eling other chaotic systems like turbulence [32], where small-scale, low-energy features play a crucial role." + }, + { + "type": "title", + "bbox": [ + 0.12, + 0.136, + 0.436, + 0.153 + ], + "angle": 0, + "content": "1.2. Spectral bias mitigation strategies" + }, + { + "type": "text", + "bbox": [ + 0.119, + 0.156, + 0.886, + 0.361 + ], + "angle": 0, + "content": "Previous studies have proposed various methods to mitigate spectral bias and over-smoothing in deep neural networks (DNNs). Cai et al. [33] proposed a multi-scale DNN (MscaleDNN) to enhance approximations over a wide range of frequencies for the solution of PDEs. Tancik et al. [34] proposed Fourier feature mapping for coordinate-based multilayer perceptron (MLP) to tackle spectral bias in image regression tasks in low dimensional domains. Wang et al. [35] used Fourier feature mapping along with Physics-informed Neural Networks (PINNs) [36, 37] to enhance the multi-scale PDE solutions by mitigating the spectral bias compared to vanilla PINN. A better optimization of activation functions have been also shown to slightly reduce spectral bias of DNNs and PINNs [38, 39]. Phase shift DNN is another method converting high-frequency component of the data into low frequency spectrum, which can be learned and represented by a DNN. Subsequently, the learned representation is converted into the original high-frequency. However, phase shift DNN suffers from the curse of dimensionality [40]." 
+ }, + { + "type": "text", + "bbox": [ + 0.119, + 0.379, + 0.88, + 0.653 + ], + "angle": 0, + "content": "Efforts have also been made to mitigate the spectral bias encountered by neural operators trained to learn spatiotemporal systems. Lippe at al. [41] developed PDE-Refiner, which iteratively adds noise to perturb different scales of the system and trains the neural operator to correct these corrupted states. Zhang et al. [42] developed Hybrid Iterative Numerical Transferable Solver (HINTS) to exploit the spectral bias in solving large linear systems by blending neural operators and relaxation methods. Generative Artificial Intelligence (GenAI)-based algorithms are also emerging as effective methods to overcome the spectral bias barrier. Wu et al. [43] accurately reconstructed the small-scale structures accompanying turbulent boundary layers in wall turbulence using Super Resolution Generative Adversarial Networks (SRGANs). Wang et al. [44] developed a framework based on GANs to reconstruct high spatiotemporal resolution supersonic flow states from sparse measurements. Molinaro et al. [45] developed GenCFD using score-based diffusion models to learn three-dimensional turbulence in compressible and incompressible flows. Lockwood et al. [46] used denoising diffusion probabilistic models to refine the estimates of tropical cyclone wind intensities. Oommen et al. [47] addressed the spectral limitations of neural operators in modeling a series of turbulent systems by training a conditional score-based diffusion model conditioned on the neural operator as prior." + }, + { + "type": "text", + "bbox": [ + 0.119, + 0.671, + 0.88, + 0.858 + ], + "angle": 0, + "content": "In this work, we first propose the use of UNet with residual blocks (ResUNet) to achieve more accurate two-phase flow predictions compared to the state-of-the-art neural operators. 
Subsequently, we present a new method named high-frequency scaling (HFS) to mitigate spectral bias in two-phase flow predictions. Our approach demonstrates higher accuracy and better alignment of energy spectra, with negligible additional memory requirements and only a small computational overhead on the neural operator. We applied HFS to different variants of ResUNet. Finally, we explore the dependency of diffusion models on the prior accuracies when integrated with neural operators. Specifically, we show that the integration of the diffusion model with neural operators equipped with HFS results in further mitigation of spectral bias without compromising prediction accuracy. We demonstrate the effectiveness of our methodology for both two-phase and single-phase flows." + }, + { + "type": "text", + "bbox": [ + 0.119, + 0.877, + 0.88, + 0.91 + ], + "angle": 0, + "content": "The manuscript is organized as follows. We start with providing an in-depth description about neural operators, HFS, and diffusion models in Section 2. We present the results of our" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.918, + 0.505, + 0.929 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.115, + 0.087, + 0.881, + 0.12 + ], + "angle": 0, + "content": "investigations in Section 3, followed by discussion and summary in Sections 4 and 5, respectively. In the Appendix, we include more technical details and additional results." + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.142, + 0.224, + 0.158 + ], + "angle": 0, + "content": "2. Methods" + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.17, + 0.307, + 0.186 + ], + "angle": 0, + "content": "2.1. 
Neural Operators" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.191, + 0.88, + 0.224 + ], + "angle": 0, + "content": "The mathematical operator \\(\\mathcal{N}\\) that governs the temporal evolution of a time-dependent system can be expressed as," + }, + { + "type": "equation", + "bbox": [ + 0.381, + 0.226, + 0.881, + 0.243 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {u} (\\boldsymbol {x}, t + \\Delta t) \\approx \\mathcal {N} (\\boldsymbol {u} (\\boldsymbol {x}, t)) (\\Delta t), \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.25, + 0.881, + 0.302 + ], + "angle": 0, + "content": "where \\(\\mathbf{u}\\) is the representative state variable(s) of interest. The objective here is to train a neural operator \\(\\mathcal{F}_{\\theta}\\) to learn the true underlying operator \\((\\mathcal{N})\\) by, typically, minimizing the mean of an error norm such as \\(\\| \\pmb {u}(\\pmb {x},t + \\Delta t) - \\mathcal{F}_{\\theta}(u(\\pmb {x},t))(\\Delta t)\\| _2\\)." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.319, + 0.885, + 0.491 + ], + "angle": 0, + "content": "In this work, we focus on resolving solutions in pool boiling problems and single-phase turbulent flows. We start our analysis with pool boiling problems. Then, we investigate the application of our method on single-phase turbulent flows. There have been several efforts to use neural operators to learn temperature and flow dynamics in two-phase flow problems. Here, we demonstrate the advantage of using the ResUNet structure compared to previously developed neural operators such as UNet and FNO for two-phase flow problems with high-frequency features [48]. The models are trained to predict future temperatures based on temperature history and velocity information. 
The problem configuration is shown in Equation 2, where \\( x \\) is the spatial mesh, \\( T \\) is the temperature, \\( V \\) is the velocity, \\( k \\) specifies the prediction time interval length, and \\( \\mathcal{F}_{\\theta} \\) is the trained neural operator." + }, + { + "type": "equation", + "bbox": [ + 0.255, + 0.508, + 0.881, + 0.526 + ], + "angle": 0, + "content": "\\[\nT (\\boldsymbol {x}, t: t + k \\Delta t) = \\mathcal {F} _ {\\theta} (T (\\boldsymbol {x}, t - k \\Delta t: t), V (\\boldsymbol {x}, t - k \\Delta t: t + k \\Delta t)) \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.532, + 0.883, + 0.722 + ], + "angle": 0, + "content": "UNet with residual blocks (ResUNet) was first introduced for a semantic segmentation task by imposing skip connections between convolutional layers in a UNet-like structure [49]. We use the same idea to add skip connections in the form of residual blocks to both the encoder and decoder side of the UNet. The residual blocks have been shown to mitigate vanishing gradient problems by offering a smoother optimization landscape [50]. We also demonstrate that they help with the better flow of information in the network for complex datasets such as two-phase flows, which results in a better capture of localized features, possibly reducing the spectral bias towards low-frequency components. We also introduced several modifications, such as the GELU activation function and group normalization, that demonstrated superior prediction accuracy. 
We used the mean squared error (MSE) loss function in all prediction time steps (Equation 3) as the objective criterion to train the model, i.e.," + }, + { + "type": "equation", + "bbox": [ + 0.279, + 0.735, + 0.881, + 0.78 + ], + "angle": 0, + "content": "\\[\nL (\\theta) = \\frac {1}{N _ {u} k} \\sum_ {i = 1} ^ {N _ {u}} \\sum_ {j = 1} ^ {k} \\| T ^ {i} (\\boldsymbol {x}, t + j \\Delta t) - \\mathcal {F} _ {\\theta} (T ^ {i} (\\boldsymbol {x}, t)) (j \\Delta t) \\| _ {2} ^ {2} \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.8, + 0.881, + 0.868 + ], + "angle": 0, + "content": "We used the Lion optimizer [51] to perform the optimization as we observed superior performance with this optimizer compared to the conventional Adam optimizer [52]. More details about the ResUNet structure, the training hyperparameters, and comparison with UNet predictions are included in Appendix A." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.868, + 0.881, + 0.902 + ], + "angle": 0, + "content": "We evaluated our baseline neural operator on both saturated and subcooled pool boiling datasets from the BubbleML data repository, which is generated through Flash-X simulations" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.918, + 0.506, + 0.93 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.114, + 0.086, + 0.883, + 0.293 + ], + "angle": 0, + "content": "[53] and were collected in a previous study [48]. It should be noted that predictions in sub-cooled boiling is more difficult due to the vortices generated by condensation trails. Therefore, the errors are higher in subcooled boiling predictions, and the results look more over-smoothed compared to saturated boiling prediction results. A visualization of the subcooled boiling prediction results is shown in Appendix A. 
A comprehensive comparison of our baseline model with the previous best baseline model developed by [48] is included in Table 1 and Table 2 for saturated and subcooled pool boiling dataset, respectively. The ResUNet improves the resolution of high-frequency features, resulting in higher prediction accuracy. We note that given the possible differences in the testing dataset, the one-to-one comparison with the previously reported numbers may not be fair. Therefore, we trained and tested the previously reported best model (e.g., UNet) with our dataset configuration, which consists of a larger test dataset and smaller training dataset compared to the previous work." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.31, + 0.885, + 0.515 + ], + "angle": 0, + "content": "We evaluated our model using six different field metrics relevant to two-phase problems. These metrics include relative error (Rel. Error), root mean square error (RMSE), boundary RMSE (BRMSE) showing the error on the boundaries, bubble RMSE showing the error in the bubble areas and at the interfaces, mean maximum error \\((\\mathrm{Max}_{\\mathrm{mean}})\\) showing the mean of the maximum error for each prediction, and overall maximum error \\((\\mathrm{Max}_{\\mathrm{max}})\\) showing the maximum error over the test dataset. We also evaluated the predictions in three different frequency bands using spectral errors at low frequency \\((F_{\\mathrm{low}})\\), medium frequency \\((F_{\\mathrm{mid}})\\), and high frequency \\((F_{\\mathrm{high}})\\). Exact definitions of BRMSE and bubble RMSE, as well as spectral errors are described in Appendix B. All the metrics are computed on the normalized dataset \\((T^{i}(\\boldsymbol{x},t + j\\Delta t)\\in [-1,1]\\forall \\{i,j\\})\\). For all the results, the state of the temperature at five future time-steps are predicted based on five time-step previous temperature history and the velocity information in two spatial dimensions." 
+ }, + { + "type": "table_caption", + "bbox": [ + 0.114, + 0.531, + 0.883, + 0.575 + ], + "angle": 0, + "content": "Table 1: Saturated pool boiling temperature prediction errors. The training dataset consists of simulations from 11 different wall temperatures. The test dataset consists of simulations with two other wall temperatures \\((70^{\\circ}\\mathrm{C},\\) and \\(95^{\\circ}\\mathrm{C})\\) not seen during training." + }, + { + "type": "table", + "bbox": [ + 0.307, + 0.585, + 0.692, + 0.783 + ], + "angle": 0, + "content": "
UNetResUNet
Rel. Error0.01910.0149
RMSE0.01890.0148
BRMSE0.05820.0364
Bubble RMSE0.1160.0726
Max<sub>mean</sub>0.7050.553
Max<sub>max</sub>1.2041.154
F<sub>low</sub>0.1050.0745
F<sub>mid</sub>0.1130.0919
F<sub>high</sub>0.02380.0182
Parameters [Millions]7.83.5
" + }, + { + "type": "page_number", + "bbox": [ + 0.493, + 0.917, + 0.506, + 0.93 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.115, + 0.084, + 0.882, + 0.128 + ], + "angle": 0, + "content": "Table 2: Subcooled pool boiling temperature prediction errors. The training dataset consists of simulations from eight different wall temperatures. The test dataset consists of simulations with two other wall temperatures (95°C, and 98°C) not seen during training." + }, + { + "type": "table", + "bbox": [ + 0.307, + 0.137, + 0.693, + 0.336 + ], + "angle": 0, + "content": "
UNetResUNet
Rel. Error0.05160.0295
RMSE0.05010.0288
BRMSE0.1390.0646
Bubble RMSE0.2690.127
Max<sub>mean</sub>1.1410.837
Max<sub>max</sub>2.2791.433
F<sub>low</sub>0.3460.157
F<sub>mid</sub>0.3670.197
F<sub>high</sub>0.05830.0370
Parameters [Millions]7.83.5
" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.355, + 0.883, + 0.613 + ], + "angle": 0, + "content": "The results in Tables 1 and 2 demonstrate that all the metrics are improved by simply introducing residual blocks in the network, a better optimizer, and a better normalization. For example, there is approximately \\(21\\%\\) and \\(42\\%\\) reduction of RMSE in saturated and subcooled boiling, respectively. Interestingly, the ResUNet achieves better accuracies with less than half of the number of parameters in UNet. Most of the prediction errors occur within the bubble areas and at the condensation trails. This is due to the larger gradients in the bubble areas and around condensation trails resulting into more complex patterns that are more challenging to capture with the neural operator. This is expected as the neural operators are known to have spectral bias to low-frequency modes. The high-frequency content typically exists in regions with significant gradients such as interfaces and condensation trails. In subcooled pool boiling, departing bubbles may condense after departure, creating vortices that gradually dissipate over time. These vortices form complex structures containing higher energy at high frequencies. As a result, subcooled boiling presents greater prediction challenges compared to saturated boiling. For instance, prediction spectral errors \\((F_{\\mathrm{low}}, F_{\\mathrm{mid}}, F_{\\mathrm{high}})\\) are approximately two times higher in subcooled boiling, highlighting the increased complexity with the high-frequency content." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.629, + 0.882, + 0.87 + ], + "angle": 0, + "content": "While the residual blocks improve the neural operator's ability to reduce field errors (e.g., RMSE) and over-smoothing of certain high-frequency contents, the results still suffer from significant over-smoothing (see Appendix A). 
Previous studies have also shown the oversmoothing issue of convolutional based neural operators for image generation tasks and scientific computing [54, 55]. Other studies demonstrated the frequency selectiveness of convolutional neural network (CNN) architectures resulting in different learning rates for low and high-frequency components [56, 57]. Wang et al. [58] demonstrated the spectral bias in vision transformers (ViT) through Fourier analysis. They showed that the problem arises by self-attention layers that act as low-pass filters and continuously reduce high-frequency information with the network depth. A feature scaling technique was proposed to decompose the attention output signal into direct and high-frequency components and scale them separately to adjust the proportion of different frequencies of the signal. We draw inspiration from this technique and propose a similar approach to separately scale low frequency and high-frequency components of the features in the latent space of the neural operator to mitigate spectral bias." + }, + { + "type": "page_number", + "bbox": [ + 0.493, + 0.918, + 0.506, + 0.929 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.116, + 0.087, + 0.351, + 0.104 + ], + "angle": 0, + "content": "2.2. High-frequency scaling" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.108, + 0.883, + 0.312 + ], + "angle": 0, + "content": "As discussed in Section 2.1, neural operators suffer from spectral bias. While residual blocks offer improvements up to some extent, they cannot effectively mitigate the spectral bias inherent in the neural operators. Hence, we propose the high-frequency scaling (HFS) approach to be applied to the output of convolutional layers. The latent feature map of each convolutional layer is first divided into non-overlapping patches, similar to the first step in vision transformers. 
This will break down the spatial dimensions into smaller regions, which empirically will allow for better localized processing. We consider the mean of the patches as the direct component (DC) of these signals. Then, the high-frequency component (HFC) for each patch can be defined as the difference of each patch with the DC. It should be noted that here the DC is calculated across the patches and not individually for each patch. Then, we introduce two parameter groups of \\(\\lambda_{\\mathrm{DC}}\\) and \\(\\lambda_{\\mathrm{HFC}}\\) to separately scale the DC and HFC for each patch. We then re-assemble the patches to the original latent feature size before the next operation." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.313, + 0.885, + 0.417 + ], + "angle": 0, + "content": "A more rigorous description of the method is as follows: Let \\( X \\in \\mathbb{R}^{H \\times W \\times C} \\) be the output feature map of a convolutional layer, where \\( H \\), \\( W \\), and \\( C \\) are the height, width, and number of channels, respectively. We divide \\( X \\) into \\( N \\) non-overlapping patches of size \\( p \\times p \\) denoted as \\( X^{(i)} \\in \\mathbb{R}^{p \\times p \\times C} \\), where \\( i \\in [0, N] \\). The DC is defined as the mean patch across all \\( N \\) patches as shown in Equation (4). 
The HFC calculation for each patch and the scaling step are shown in Equations (5-6):" + }, + { + "type": "equation", + "bbox": [ + 0.417, + 0.43, + 0.881, + 0.473 + ], + "angle": 0, + "content": "\\[\nD C (X) = \\frac {1}{N} \\sum_ {i = 1} ^ {N} X ^ {(i)}, \\tag {4}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.389, + 0.486, + 0.881, + 0.503 + ], + "angle": 0, + "content": "\\[\nH F C \\left(X ^ {(i)}\\right) = X ^ {(i)} - D C (X), \\tag {5}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.308, + 0.527, + 0.881, + 0.546 + ], + "angle": 0, + "content": "\\[\n\\hat {X} ^ {(i)} = X ^ {(i)} + \\lambda_ {D C} \\odot D C (X) + \\lambda_ {H F C} \\odot H F C (X ^ {(i)}). \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.145, + 0.552, + 0.767, + 0.569 + ], + "angle": 0, + "content": "The scaled feature map can be then reconstructed by re-assembling the \\(\\hat{X}^{(i)}\\mathrm{s}\\)" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.57, + 0.883, + 0.793 + ], + "angle": 0, + "content": "The scaling parameters \\(\\lambda_{DC} \\in \\mathbb{R}^{1 \\times 1 \\times C}\\) and \\(\\lambda_{HFC} \\in \\mathbb{R}^{1 \\times 1 \\times C}\\) are left to be learnable parameters that are optimized using gradient descent simultaneously with the network optimization. Here, we initialized the parameters to be one and optimized them with the same learning rate used for network training. In ResUNet structure, HFS is applied to the output of both convolutional layers and the skip-connection paths with \\(1 \\times 1\\) convolutions or identity skip-connections. In practice, HFS can be seen as a new module incorporated to each layer of the encoder and decoder, as shown in Fig. 1. Fig. 1 also depicts examples of the learned feature maps for models with and without HFS. The most similar feature maps between the models from the first encoder layers and the last decoder layers are depicted. 
The model with HFS learns features with more pronounced high-frequency content and reduced over-smoothing, which possibly enhances the capture of high-frequency components of the solution and mitigates spectral bias of the neural operator. A summary of the improvements in prediction accuracy achieved through HFS is provided in Appendix C." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.917, + 0.506, + 0.93 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.123, + 0.09, + 0.359, + 0.374 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.367, + 0.09, + 0.538, + 0.379 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.547, + 0.089, + 0.692, + 0.203 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.523, + 0.203, + 0.675, + 0.377 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.684, + 0.089, + 0.845, + 0.203 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.687, + 0.203, + 0.866, + 0.376 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.115, + 0.396, + 0.883, + 0.51 + ], + "angle": 0, + "content": "Figure 1: Structure of the HFS-enhanced NO. (a) Schematic of the HFS module (right) integrated with the residual block (left). (b) Structure of the ResUNet with the HFS modules (blocks in front of conv layers). (c) An example of a learned latent space feature from the first layer of the encoder trained with and without HFS. The most similar feature maps of the models in the first encoder level are shown. (d) An example of a learned latent space feature from the last layer of the decoder trained with and without HFS. The most similar feature maps of the two models at the last decoder level are shown. (e-f) Examples of temperature prediction with NO and HFS-enhanced NO at two different time-steps. 
A region with high-frequency features (top right corner) is zoomed in for better visualization." + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.532, + 0.295, + 0.549 + ], + "angle": 0, + "content": "2.3. Diffusion Model" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.553, + 0.883, + 0.725 + ], + "angle": 0, + "content": "As discussed earlier, the NO and the HFS-enhanced NO learn the solution by minimizing some variant of the Euclidean distance, such as MSE, RMSE, relative \\( L^2 \\) or relative \\( L^1 \\) norms of the errors, between the true and predicted states. Unfortunately, such a loss function effectively prioritizes the error at those wavenumbers that bear higher energy. The systems considered in this study exhibit a decaying energy spectrum, implying that the lower wavenumbers carrying higher energy will be over-represented, while the higher wavenumbers that bear lower energy will be ignored due to its minimal influence on the Euclidean distance-based loss function. The recent efforts aimed at improving the spectral bias of NO using GenAI algorithms, discussed in Section 1, motivated us to explore this route. Specifically, we investigate if diffusion models [59] can help further refine the predictions estimated by NO and HFS-enhanced NO." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.741, + 0.882, + 0.828 + ], + "angle": 0, + "content": "Diffusion models (DM) are generative frameworks capable of producing samples that align with the true underlying function distribution, \\(\\mathcal{T}\\), given a limited set of observations from \\(\\mathcal{T}\\). These models achieve sample generation by progressively refining a simple prior distribution, such as a standard normal distribution \\((\\Gamma_0 = \\mathcal{N}(0,I))\\), into the desired complex distribution \\((\\Gamma_N \\approx \\mathcal{T})\\) over \\(N\\) iterative steps." 
+ }, + { + "type": "text", + "bbox": [ + 0.115, + 0.844, + 0.882, + 0.912 + ], + "angle": 0, + "content": "The diffusion process begins with an initial sample \\(\\mathbf{X_0}\\) drawn from \\(\\Gamma_0\\) and subsequently predicts \\(\\mathbf{X}_1\\). Since \\(\\Gamma_0 = \\mathcal{N}(0,I)\\), obtaining \\(\\mathbf{X}_0\\) is straightforward. The model then iteratively refines the sample, estimating \\(\\mathbf{X}_{i + 1}\\) from \\(\\mathbf{X}_i\\) over \\(N\\) steps. However, a key challenge arises on how to train the diffusion model to transition from \\(\\Gamma_0 = \\mathcal{N}(0,I)\\) to \\(\\Gamma_N\\approx \\mathcal{T}\\) when intermedi-" + }, + { + "type": "page_number", + "bbox": [ + 0.493, + 0.917, + 0.506, + 0.93 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.114, + 0.086, + 0.883, + 0.275 + ], + "angle": 0, + "content": "ate distributions \\(\\Gamma_{i}\\) for \\(i = \\{1,2,3,\\ldots ,N - 1\\}\\) are not explicitly available. This challenge is addressed using denoising score matching combined with Langevin dynamics [60]. The objective of a score-based diffusion model is to estimate the score function, which is defined as \\(s_{\\theta_D}(\\mathbf{X}) = \\nabla_X\\log p(\\mathbf{X})\\) , where \\(\\theta_{D}\\) represents the parameters of the diffusion model and \\(p\\) is the probability density of \\(\\mathbf{X}\\) , where \\(\\mathbf{X}\\) corresponds to continuous realizations of \\(\\mathbf{X}_i\\sim \\Gamma_i\\) . Since the exact data distribution is unknown and may reside on a lower-dimensional manifold, the score function can become ill-defined in regions lacking data. To mitigate this issue, Gaussian noise is added to perturb the data, ensuring a well-defined score function across the entire space by smoothing the distribution. The score function provides a directional gradient toward regions of higher probability. However, a direct mechanism to sample from the learned distribution is still absent." 
+ }, + { + "type": "text", + "bbox": [ + 0.114, + 0.293, + 0.885, + 0.381 + ], + "angle": 0, + "content": "This limitation is overcome using Langevin dynamics, as proposed in [61]. Langevin dynamics ensures that the generated samples converge to the true underlying distribution by balancing deterministic motion, driven by the gradient of the log probability, with stochastic exploration introduced by noise. In our approach, we condition the score function on the output of the pre-trained NO, \\(\\mathcal{F}_{\\theta}\\), leading to the modified score function:" + }, + { + "type": "equation", + "bbox": [ + 0.374, + 0.396, + 0.88, + 0.414 + ], + "angle": 0, + "content": "\\[\ns _ {\\theta_ {D}} (\\mathbf {X}, \\sigma , \\mathcal {F} _ {\\theta}) = \\nabla_ {X} \\log p (\\mathbf {X} | \\mathcal {F} _ {\\theta}), \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.421, + 0.883, + 0.492 + ], + "angle": 0, + "content": "where \\(\\sigma\\) represents the noise level. This conditioned score function guides the DM to sample from the posterior distribution of \\(\\mathbf{X}\\) given \\(\\mathcal{F}_{\\theta}\\), ensuring that the generated samples are consistent with both the structures imposed by \\(\\mathcal{F}_{\\theta}\\) and the true data distribution. The update rule for Langevin dynamics is given by:" + }, + { + "type": "equation", + "bbox": [ + 0.347, + 0.501, + 0.881, + 0.531 + ], + "angle": 0, + "content": "\\[\n\\mathbf {X} _ {j + 1} = \\mathbf {X} _ {j} + \\frac {\\varepsilon}{2} s _ {\\theta_ {D}} (\\mathbf {X} _ {j}, \\sigma_ {j}, \\mathcal {F} _ {\\theta}) + \\sqrt {\\varepsilon} z _ {j}, \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.535, + 0.883, + 0.586 + ], + "angle": 0, + "content": "where \\(\\varepsilon\\) is the step size, \\(z_{j}\\) is the noise component, and \\(\\sigma_{j}\\) denotes the noise scale at iteration \\(j\\) during the sampling process. 
The iterative denoising of the noised states by a diffusion model conditioned on the outputs of a pre-trained HFS-enhanced NO is illustrated in Fig 2." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.587, + 0.882, + 0.724 + ], + "angle": 0, + "content": "During training, the diffusion model learns to denoise the state of the system perturbed by a noise with zero mean and \\(\\sigma\\) standard deviation, where \\(\\ln \\sigma \\sim \\mathcal{N}(-1.2, 1.2^2)\\). When \\(\\sigma\\) is small, the score function \\(s_{\\theta_D}\\) increasingly focuses on reconstructing high-frequency details and vice versa. In this manner, the diffusion model learns to perturb and reconstruct the signal at multiple scales, unlike the NO whose scale is fixed throughout its training, and thereby learns the structure of the underlying system across all the scales. Our implementation of the DM conditioned on the NO and HFS-enhanced NO is based on [47] that adopts the training, network architecture, pre-conditioning, and sampling routine proposed in \"EDM\" [62]." + }, + { + "type": "page_number", + "bbox": [ + 0.493, + 0.917, + 0.507, + 0.93 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.123, + 0.086, + 0.87, + 0.611 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.115, + 0.629, + 0.883, + 0.677 + ], + "angle": 0, + "content": "Figure 2: Mitigating Spectral Bias with Diffusion Model. The states estimated by the NO exhibit oversmoothing. They serve as the prior that conditions the DM, which in turn reconstructs the missing frequencies iteratively through conditional sampling. The results are based on a NO with 2 million parameters." + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.699, + 0.212, + 0.715 + ], + "angle": 0, + "content": "3. Results" + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.727, + 0.551, + 0.743 + ], + "angle": 0, + "content": "3.1. 
HFS-enhanced NO for two-phase flow problems" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.747, + 0.883, + 0.918 + ], + "angle": 0, + "content": "We first conduct several experiments with different variants of ResUNet to demonstrate the advantage of HFS in spectral bias mitigation for two-phase flow operator learning problem. Given the higher complexity of subcooled boiling data compared to saturated boiling data, we will focus on the subcooled boiling experiments. Examples showing the saturated boiling predictions are shown in Appendix D. Given the flexibility of our NO structure, we investigated different variants of ResUNet by varying the NO size by changing the number of parameters in the range of \\(\\sim 2\\) to \\(\\sim 16\\) million parameters. The number of parameters was changed by simply changing the number of latent feature maps at each level of the ResUNet structure. The number of downsamplings/upsamplings was kept at five steps for all the models to achieve spatially consistent resolutions at each level across all the NOs. The subcooled pool boiling dataset" + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.917, + 0.511, + 0.93 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.114, + 0.087, + 0.883, + 0.278 + ], + "angle": 0, + "content": "consists of 10 different simulation trajectories, two of which were used for testing. Each simulation trajectory consists of 201 time-steps. However, similar to [48], the first 30 unsteady time-steps were not included in the training and testing of the models. Fig. 3 demonstrates the variation of RMSE, BRMSE, bubble RMSE, and \\(\\mathrm{Max}_{\\mathrm{mean}}\\) metrics with NO size for results obtained from NO and HFS-enhanced NO. As expected, both NO and HFS-enhanced NO exhibit error-decreasing trends with the number of parameters. However, the HFS-enhanced NO always yields lower errors compared to NO in all metrics and irrespective of the NO size. 
The effect of HFS is more pronounced in the bubble RMSE due to larger high-frequency content at the bubble interface and within the bubbles. For example, HFS yields \\(8\\%\\) improvement in RMSE for the 16 million NO. This improvement is \\(16\\%\\) for the bubble RMSE metric. On average, HFS decreases the RMSE and bubble RMSE by \\(12.4\\%\\) and \\(18.2\\%\\), respectively." + }, + { + "type": "image", + "bbox": [ + 0.121, + 0.291, + 0.502, + 0.482 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.291, + 0.876, + 0.482 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.128, + 0.483, + 0.501, + 0.671 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.224, + 0.673, + 0.47, + 0.686 + ], + "angle": 0, + "content": "Neural Operator Size (Millions)" + }, + { + "type": "image", + "bbox": [ + 0.517, + 0.483, + 0.876, + 0.672 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.599, + 0.673, + 0.844, + 0.686 + ], + "angle": 0, + "content": "Neural Operator Size (Millions)" + }, + { + "type": "image_caption", + "bbox": [ + 0.115, + 0.699, + 0.883, + 0.759 + ], + "angle": 0, + "content": "Figure 3: Temperature prediction errors of NO and HFS-enhanced NO varying with NO size. (a) Root mean square error (RMSE), (b) Boundary RMSE (BRMSE), (c) Bubble RMSE, (d) Mean maximum error. All the errors are calculated over the 5 time-step temperature predictions. The legends in (a) are applicable to (b - d) as well. All the results are based on test dataset in subcooled pool boiling." + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.778, + 0.482, + 0.796 + ], + "angle": 0, + "content": "3.2. 
Spectral analysis of HFS-enhanced NO" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.798, + 0.883, + 0.918 + ], + "angle": 0, + "content": "HFS reduces the over-smoothing effect, hence, the intricate features of vortices induced by condensation trails in subcooled boiling are better resolved. Moreover, HFS results in better alignment of the energy spectra to the ground truth signal, especially at high wave numbers attributed to the high frequency features. Fig. 4 depicts the enhancements obtained by adding HFS modules to NO. The prediction results of HFS-enhanced NO are improved compared to NO for all time-steps. However, the enhancement is more pronounced at later time-steps, where the NO predictions are significantly over-smoothed." + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.917, + 0.511, + 0.93 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.128, + 0.09, + 0.874, + 0.541 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.115, + 0.553, + 0.883, + 0.626 + ], + "angle": 0, + "content": "Figure 4: Subcooled pool boiling transient temperature prediction. (a) Ground truth (GT) temperatures for 5 consecutive time steps (from left to right) \\((\\Delta t = 8\\) ms). (b) NO prediction results. (c) HFS-enhanced NO prediction results. (d) The corresponding energy spectra \\((p(k))\\) for each time step. For better visualization, the subplots in (d) show the energy spectra only for the high wavenumbers. The legends in first plot are applicable to other plots as well. All the results are based on a \\(\\sim 3.5\\) M parameter NO." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.644, + 0.885, + 0.886 + ], + "angle": 0, + "content": "The average energy for the high-frequency component of the latent features (e.g., excluding the first \\(12.5\\%\\) frequencies at the full resolution) is generally higher for HFS-enhanced NO. 
This behavior is specifically seen in all the encoder layers and the last three layers of the decoder for a five-layer decoder and five-layer encoder (e.g., five downsampling and five upsampling steps). The first two layers after the bottleneck are at very low spatial resolutions and may not represent any useful spectral information. However, more high-frequency component is generated in the later stages of the decoder that are closer to the output. The NO decoder mean feature maps at each layer show low-contrast regions at both left and right side of the maps, starting from layer two to the end. However, these regions are diminished when HFS is used, showing that a more diverse set of features is generated in the decoder (see Appendix E). However, the same behavior does not necessarily exist for the encoder mean latent features, suggesting that the mean feature map may not be a good representative of the high-frequency component. Instead, analysis of individual feature maps appears to be a more appropriate approach in this case." + }, + { + "type": "footer", + "bbox": [ + 0.145, + 0.901, + 0.88, + 0.93 + ], + "angle": 0, + "content": "Individual latent space features exhibit improved preservation and propagation of high-12" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.118, + 0.086, + 0.881, + 0.325 + ], + "angle": 0, + "content": "frequency components when HFS is integrated in the NO structure. Fig. 5 depicts examples of latent features from the first layer of the encoder and the last layer of decoder. These layers are specifically chosen due to their proximity to the input and output layers, making the visualizations more understandable. When comparing similar latent feature maps, HFS reduces the excessive smoothing and increases the high-frequency component within the features in the latent space. The energy spectra plots in Fig. 
5 demonstrate similar trends for both NO and HFS-enhanced NO with the later having larger spectral energy at the mid and high wave numbers (e.g. \\( k > 20 \\)). For a more robust spectral analysis of latent features, we compared the individual latent features in the NO and HFS-enhanced NO with both \\(\\sim 3.5\\) and \\(\\sim 16\\) million parameter models. The HFS-enhanced NO decreases the over-smoothing in latent features when compared with a similar feature map from NO. The normalized energy spectra of these latent features exhibit larger high-frequency component with HFS-enhanced NO. This is evident in Fig. 5(b, d, f, and h), where the HFS-enhanced NO curves surpass the NO curves after a certain wave number." + }, + { + "type": "text", + "bbox": [ + 0.118, + 0.344, + 0.881, + 0.566 + ], + "angle": 0, + "content": "Comparison of the ratio of high-frequency component energy when calculated separately for each latent feature and then averaged over all the features at each layer in the encoder also shows consistently higher values when HFS is used. The same trend is also observed in the last three layers of the decoder. These results are shown in Fig. 5i and 5j. We observed similar trends for other samples where the ratio of high-frequency component energy to total energy in the latent space is higher when HFS is integrated with the NO. However, this advantage may not be noticeable using the mean latent feature visualization at each layer. Note that for the analysis presented in Fig. 5i and 5j, we progressively increased the threshold (from \\(12.5\\%\\) to \\(50\\%\\)) for separating the low and high-frequency bands as the spatial dimension in the latent space decreases. This result is based on a random sample from the test dataset. Similar results were obtained with other samples. 
It should be noted that a one-to-one comparison of similar feature maps may provide a more reliable assessment, as not all feature maps carry equally significant information and some might be irrelevant for our analysis." + }, + { + "type": "text", + "bbox": [ + 0.118, + 0.584, + 0.881, + 0.67 + ], + "angle": 0, + "content": "In general, the HFS-enhanced NO contains more high-frequency component in the latent space, which can help with the propagation of high-frequency information to the output, helping with the better capture of high-frequency features. The enhancement in high-frequency component is achieved without any degradation in the low-frequency components. Therefore, both field errors such as RMSE, and the spectral errors are improved (see Appendix C)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.917, + 0.51, + 0.929 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.129, + 0.092, + 0.147, + 0.105 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.148, + 0.092, + 0.276, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.2, + 0.092, + 0.219, + 0.102 + ], + "angle": 0, + "content": "NO" + }, + { + "type": "image", + "bbox": [ + 0.148, + 0.193, + 0.275, + 0.29 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.335, + 0.092, + 0.385, + 0.101 + ], + "angle": 0, + "content": "NO + HFS" + }, + { + "type": "image", + "bbox": [ + 0.298, + 0.101, + 0.428, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.456, + 0.095, + 0.473, + 0.105 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image", + "bbox": [ + 0.454, + 0.101, + 0.599, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.129, + 0.292, + 0.145, + 0.302 + ], + "angle": 0, + "content": "(e)" + }, + { + "type": "image", + "bbox": [ + 0.147, 
+ 0.292, + 0.276, + 0.39 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.297, + 0.198, + 0.427, + 0.29 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.456, + 0.193, + 0.473, + 0.204 + ], + "angle": 0, + "content": "(d)" + }, + { + "type": "image", + "bbox": [ + 0.429, + 0.198, + 0.599, + 0.289 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.624, + 0.096, + 0.639, + 0.107 + ], + "angle": 0, + "content": "(i)" + }, + { + "type": "image", + "bbox": [ + 0.639, + 0.097, + 0.876, + 0.283 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.129, + 0.388, + 0.145, + 0.398 + ], + "angle": 0, + "content": "(g)" + }, + { + "type": "image", + "bbox": [ + 0.148, + 0.388, + 0.276, + 0.486 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.299, + 0.297, + 0.428, + 0.389 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.298, + 0.395, + 0.427, + 0.486 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.456, + 0.292, + 0.472, + 0.303 + ], + "angle": 0, + "content": "(f)" + }, + { + "type": "image", + "bbox": [ + 0.429, + 0.292, + 0.599, + 0.385 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.456, + 0.388, + 0.472, + 0.398 + ], + "angle": 0, + "content": "(h)" + }, + { + "type": "image", + "bbox": [ + 0.429, + 0.388, + 0.599, + 0.489 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.625, + 0.285, + 0.639, + 0.296 + ], + "angle": 0, + "content": "(i)" + }, + { + "type": "image", + "bbox": [ + 0.639, + 0.286, + 0.876, + 0.483 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.115, + 0.499, + 0.882, + 0.628 + ], + "angle": 0, + "content": "Figure 5: Latent space features in HFS-enhanced NO. 
(a, b) Example of latent space feature in the first layer of encoder and the corresponding normalized energy spectra \\((p(k))\\) in the \\(\\sim 3.5\\) million parameter models. (c, d) Example of latent feature in the last layer of decoder and the corresponding normalized energy spectra for the model with \\(\\sim 3.5\\) million parameters. (e) Example of latent feature in the first layer of encoder and the corresponding normalized energy spectra in the \\(\\sim 16\\) million parameter models. (g) Example of latent feature in the last layer of decoder and the corresponding normalized energy spectra in the \\(\\sim 16\\) million parameter models. (i-j) Average ratio of high-frequency energy to total energy at each layer in encoder (i) and decoder (j). Note that the low-frequency cutoff is set to the first \\(12.5\\%\\), \\(18.75\\%\\), \\(25\\%\\), \\(37.5\\%\\), and \\(50\\%\\) of the wavenumbers, from highest to lowest spatial resolutions (384 to 24 pixels), respectively" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.65, + 0.882, + 0.837 + ], + "angle": 0, + "content": "Given the advantage of HFS in the mitigation of spectral bias towards low-frequency components, it is natural to calculate the prediction errors at different wavenumbers. Following the terminology proposed in [48], we divided the frequencies to three components including only low-frequency component (low \\( F \\)), mid-frequency component (mid \\( F \\)), and high-frequency component (high \\( F \\)). For all the NOs with varying number of parameters, the errors in the mid \\( F \\) and high \\( F \\) components are always lower for HFS-enhanced NO. The RMSE for the low \\( F \\) component is lower for HFS-enhanced NO with one exception in the NO with \\( \\sim 3.5 \\) million parameters. 
We attribute this to the larger enhancement observed in mid \\( F \\) and high \\( F \\) of the 3.5 million parameter HFS-enhanced NO, causing the operator showing larger error in the low \\( F \\) as it fails to reduce the errors in all three components simultaneously. Visualization of each frequency component and the average spectral errors in each component are shown in Fig. 6." + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.917, + 0.511, + 0.929 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.132, + 0.094, + 0.882, + 0.525 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.115, + 0.535, + 0.883, + 0.607 + ], + "angle": 0, + "content": "Figure 6: Impact of HFS on spectral errors at different frequency bands (a-b) Examples showing the input, low, mid, and high-frequency contents of the input. (c-e) Spectral error \\((F\\). Error) of low, mid, and high-frequency bands over the test dataset. For these results, the low-frequency cutoff is set to the first \\(2\\%\\) of the frequencies. The mid frequency band includes the first \\(6.2\\%\\) of the frequencies excluding the first \\(2\\%\\). The high-frequency band includes the last \\(93.8\\%\\) of the frequencies." + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.625, + 0.35, + 0.642 + ], + "angle": 0, + "content": "3.3. HFS parameter history" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.643, + 0.883, + 0.918 + ], + "angle": 0, + "content": "The DC and HFC of the signals are scaled using two learnable parameters, \\(\\lambda_{DC} \\in \\mathbb{R}^{1 \\times 1 \\times C}\\) and \\(\\lambda_{HFC} \\in \\mathbb{R}^{1 \\times 1 \\times C}\\). These parameters remain consistent across all patches in each latent space feature map, and also across all batches of the dataset. Therefore, the parameters are optimized based on all the samples in the training dataset. 
However, they are allowed to vary freely across the feature channels at each layer. This design enables the model to adaptively scale each channel based on its content. For instance, a feature channel with a larger high-frequency component can be scaled differently than a smoother feature channel. This flexibility enhances the effectiveness of HFS while minimizing the computational costs and reducing the optimization burden by maintaining fixed parameters across patches and samples. To better understand the learning process of \\(\\lambda_{DC}\\) and \\(\\lambda_{HFC}\\), the histories of these parameters during the training phase in each of the encoder and decoder layers are shown in Fig. 7. The results in Fig. 7 show the mean \\(\\lambda_{DC}\\) and \\(\\lambda_{HFC}\\) across all latent features at each layer. The mean \\(\\lambda_{HFC}\\) is always larger than the mean \\(\\lambda_{DC}\\), demonstrating that the model is learning to scale HFC with larger weights, enhancing the representation of the HFC. Also, the optimized mean \\(\\lambda_{HFC}\\) is higher in the deeper layers of the encoder. However, no such behavior is observed in the decoder. Another interesting observation is that the abrupt change in the slope of the \\(\\lambda_{DC}\\) history curves" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.917, + 0.511, + 0.93 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.114, + 0.086, + 0.883, + 0.157 + ], + "angle": 0, + "content": "(\\(\\sim\\) iteration \\(160 \\times 10^{3}\\)) aligns well with the iteration when overfitting starts. After this iteration, the error over training dataset keeps decreasing but the error over validation dataset increases, leading to larger generalization gap. The dashed lines in Fig. 7 specify the iteration at which the validation dataset error is minimum." 
+ }, + { + "type": "text", + "bbox": [ + 0.114, + 0.172, + 0.885, + 0.278 + ], + "angle": 0, + "content": "It should be noted that \\(\\lambda_{DC}\\) and \\(\\lambda_{HFC}\\) are both free of any constraints and are automatically learned during the model optimization. However, comparing the final values of these parameters align well with the heuristic viewpoint proposed in our work. The larger values of \\(\\lambda_{HFC}\\) imply that the HFC of the signals are better preserved and propagated through layers with HFS. This could explain why the HFS-enhanced NO results resolve high-frequency features better, and why the spectral bias of the NO is mitigated." + }, + { + "type": "image", + "bbox": [ + 0.125, + 0.295, + 0.5, + 0.491 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.295, + 0.876, + 0.491 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.126, + 0.493, + 0.499, + 0.698 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.493, + 0.878, + 0.697 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.114, + 0.709, + 0.883, + 0.81 + ], + "angle": 0, + "content": "Figure 7: \\(\\lambda_{DC}\\) and \\(\\lambda_{HFC}\\) histories during training phase of the HFS-enhanced NO. (a, b) \\(\\lambda_{DC}\\) and \\(\\lambda_{HFC}\\) training history in all 5 layers of encoder. Note that layer 1 and layer 5 are defined as layers at highest and lowest spatial resolution, respectively, in the encoder. (c, d) \\(\\lambda_{DC}\\) and \\(\\lambda_{HFC}\\) training history in all 5 layers of decoder. Note that layer 1 and layer 5 are defined as layers at lowest and highest spatial resolution, respectively, in the decoder, which is the opposite terminology used in encoder. The dashed lines specify the iteration from which overfitting on the training dataset starts. 
The results are based on the training of a model with \\(\\sim 1.7\\) million parameters and \\(\\lambda_{DC}\\) and \\(\\lambda_{HFC}\\) were initialized at 0.85 and 1.15, respectively." + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.83, + 0.303, + 0.848 + ], + "angle": 0, + "content": "3.4. Kolmogorov flow" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.85, + 0.883, + 0.918 + ], + "angle": 0, + "content": "To evaluate the effectiveness of HFS in mitigating spectral bias in a more chaotic system, we applied it on the prediction of a standard benchmark, namely the 2D Kolmogorov flow problem. This problem is governed by the unsteady and incompressible Navier-Stokes equations for a viscous fluid subject to a forcing term. The vorticity form of the problem is defined in" + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.918, + 0.511, + 0.93 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.118, + 0.086, + 0.885, + 0.414 + ], + "angle": 0, + "content": "Appendix H. We generated the dataset [63] using a publicly available pseudo-spectral solver [6]. The dataset consisted of 1000 samples with \\(80\\%\\), \\(10\\%\\), and \\(10\\%\\) of them being used for training, validation, and testing respectively. We trained the NO with and without HFS to learn the mapping \\(\\omega(x,y,t)\\big|_{t\\in[0,10]} \\to \\omega(x,y,t)\\big|_{t\\in[10,t_{final}]}\\), where \\(\\omega\\) is the vorticity. Here, we used \\(t_{final} = 12.5\\) s, and a NO with \\(\\sim 1.7\\) million parameters as the benchmark. We optimized the hyperparameters based on the NO performance without HFS and then used the same hyperparameters for training the NO with HFS. This ensured that any improvement achieved with HFS was solely attributed to its effect and not simply due to differences in optimization strategies or hyperparameters. 
Although not specifically designed for turbulent problems, the HFS-enhanced NO demonstrated improvements over the NO for the 2D Kolmogorov problem, reducing the relative error from \\(5.3\\%\\) to \\(4.7\\%\\). Comparison of the energy spectra of the HFS-enhanced NO predictions also demonstrated better alignment with the ground truth solutions at high wavenumbers. The prediction results for snapshots chosen through random sampling from the test dataset are shown in Fig. 8. High-frequency features are more accurately captured and the energy spectra alignment at high wavenumbers is enhanced with the HFS-enhanced NO. We should acknowledge that HFS was effective for this problem only when the NO already provided reasonably accurate predictions. If the NO produced extremely over-smoothed predictions, integrating HFS offered little to no improvement. More detailed results showing the temporal predictions are shown in Appendix H." + }, + { + "type": "text", + "bbox": [ + 0.118, + 0.43, + 0.885, + 0.584 + ], + "angle": 0, + "content": "The improvements in predicting Komogorov flow are less pronounced compared to the two-phase flow problem. This is due to the different underlying structures of the solution maps. The HFS approach operates by decomposing the feature maps into low-frequency and high-frequency components through observing the patches as different signals. This approach is most effective for the data with localized features, making the DC and HFC of the signals significantly different. For example, this is true for the subcooled pool boiling dataset with localized features at the bubble interface and condensation trails. For the data with similar features across all regions, the distinction between DC and HFC diminishes, thus reducing the impact of HFS." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.917, + 0.51, + 0.929 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.13, + 0.089, + 0.878, + 0.616 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.115, + 0.626, + 0.883, + 0.673 + ], + "angle": 0, + "content": "Figure 8: HFS-enhanced Kolmogorov flow predictions. (a-c) denote different samples chosen randomly from the test dataset. Each example shows the ground truth (GT), NO and HFS-enhanced NO predictions along with the energy spectra \\((p(k))\\) for each prediction." + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.691, + 0.357, + 0.708 + ], + "angle": 0, + "content": "3.5. Diffusion Model Results" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.712, + 0.883, + 0.884 + ], + "angle": 0, + "content": "We investigated further mitigation of spectral bias using the score-based diffusion model (DM) with HFS-enhanced NO predictions as the prior. Specifically, we first conducted a systematic study to investigate the effect of NO prediction accuracy, obtained by varying the number of parameters in the NO, on the diffusion model performance. Second, we demonstrated that using HFS-enhanced NO can further help the diffusion model to match the correct energy spectra of the solutions without degrading the mean prediction errors. Since the NO predictions are used as priors to diffusion model, the accuracy of diffusion model predictions is strongly influenced by the reliability of these priors. For example, if the prior information is significantly erroneous or over-smoothed, then the diffusion model struggles to accurately recover the missing frequencies without compromising the accuracy of the mean predictions." + }, + { + "type": "text", + "bbox": [ + 0.145, + 0.9, + 0.882, + 0.917 + ], + "angle": 0, + "content": "Fig. 
9 shows the subcooled pool boiling prediction results of DM conditioned on NO and" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.917, + 0.511, + 0.93 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.118, + 0.086, + 0.885, + 0.396 + ], + "angle": 0, + "content": "HFS-enhanced NO predictions. Other prediction examples with DM integrated with NO and HFS-enhanced NO are visualized in Appendix F. When the NO predictions have significant errors, the DM can barely mitigate those errors. However, when HFS is integrated with the NO, the significant errors at large structures are reduced, and high-frequency components of the solutions are captured more accurately compared to \\(\\mathrm{NO} + \\mathrm{DM}\\) predictions. In addition, when the DM is integrated with the HFS-enhanced NO predictions, the DM is able to more accurately reconstruct intricate features that are already enhanced through more accurate predictions provided by HFS-enhanced NO. Therefore, less over-smoothing is observed in the \\(\\mathrm{NO} + \\mathrm{HFS} + \\mathrm{DM}\\) predictions and spectral bias is further reduced. It can be seen that both HFS and DM are helping with the capture of high-frequency features. DM cannot fix significant errors caused by NO predictions at large scale features (e.g., bubble interfaces). However, HFS reduces the errors around large scale features in addition to enhancing the smaller scale features. When DM is integrated with HFS-enhanced NO, it further enhances the small scale features. The quantitative metrics are shown in Fig. 10. It should be noted that the models are trained with a different set of hyperparameters for the results shown in Fig. 10 compared to the previous results (Fig. 3). However, HFS enhanced the prediction results of NO, irrespective of hyperparameters (either optimal or non-optimal hyperparameters), as long as the same hyperparameters are used for training both NO and HFS-enhanced NO." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.917, + 0.511, + 0.93 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.129, + 0.09, + 0.149, + 0.103 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.149, + 0.09, + 0.278, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.286, + 0.09, + 0.419, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.428, + 0.09, + 0.558, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.568, + 0.09, + 0.7, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.708, + 0.09, + 0.865, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.129, + 0.19, + 0.147, + 0.202 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image", + "bbox": [ + 0.147, + 0.196, + 0.278, + 0.326 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.288, + 0.195, + 0.418, + 0.326 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.427, + 0.195, + 0.558, + 0.326 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.568, + 0.195, + 0.699, + 0.326 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.708, + 0.196, + 0.84, + 0.326 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.128, + 0.337, + 0.147, + 0.349 + ], + "angle": 0, + "content": "(c)" + }, + { + "type": "image", + "bbox": [ + 0.149, + 0.341, + 0.278, + 0.434 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.29, + 0.342, + 0.417, + 0.433 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.43, + 0.342, + 0.558, + 0.433 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.569, + 0.342, + 0.697, 
+ 0.433 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.71, + 0.341, + 0.864, + 0.434 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.129, + 0.427, + 0.147, + 0.439 + ], + "angle": 0, + "content": "(d)" + }, + { + "type": "image", + "bbox": [ + 0.149, + 0.436, + 0.278, + 0.54 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.291, + 0.436, + 0.418, + 0.54 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.429, + 0.436, + 0.558, + 0.54 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.568, + 0.436, + 0.697, + 0.54 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.709, + 0.436, + 0.838, + 0.541 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.115, + 0.555, + 0.882, + 0.613 + ], + "angle": 0, + "content": "Figure 9: Visualization of the prediction by DM integrated with NO and HFS-enhanced NO. (a) Example showing ground truth (GT) solution and predictions by NO, NO + DM, NO + HFS, and NO + HFS + DM. (b) Zoomed-in visualization of (a) focusing on the high-frequency contents. (c) Predictions of another randomly selected sample. (d) Zoomed-in visualization of (c) focusing on high-frequency contents." + }, + { + "type": "text", + "bbox": [ + 0.145, + 0.632, + 0.774, + 0.649 + ], + "angle": 0, + "content": "The results presented in Fig. 9 and Fig. 10 illustrate the following key points:" + }, + { + "type": "text", + "bbox": [ + 0.145, + 0.657, + 0.882, + 0.725 + ], + "angle": 0, + "content": "- HFS reduces the prediction errors in both physical and spectral domains, irrespective of NO size. On average, the relative errors (e.g., RMSE) and energy spectrum errors \\((\\mathcal{E}_F)\\) (see Appendix B) are reduced by \\(23.5\\%\\) and \\(15.2\\%\\), respectively, with HFS-enhanced NOs." 
+ }, + { + "type": "text", + "bbox": [ + 0.145, + 0.736, + 0.881, + 0.822 + ], + "angle": 0, + "content": "- Generally, DM does not change the prediction field errors (Fig. 10a). However, DM reduces the energy spectrum error, showing better energy spectra alignment with the correct solutions. On average, \\(\\mathrm{NO} + \\mathrm{DM}\\) has \\(27.8\\%\\) lower relative \\(\\varepsilon_{F}\\) compared to NO. The only exception is the NO with 16 millions parameters. On average, \\(\\mathrm{NO} + \\mathrm{HFS} + \\mathrm{DM}\\) has \\(23.2\\%\\) lower relative \\(\\varepsilon_{F}\\) compared to \\(\\mathrm{NO} + \\mathrm{HFS}\\) (Fig. 10b)." + }, + { + "type": "text", + "bbox": [ + 0.145, + 0.833, + 0.882, + 0.917 + ], + "angle": 0, + "content": "- HFS reduces the energy spectrum errors at all different frequency bands \\((\\mathcal{E}_{F_{\\mathrm{low}}}, \\mathcal{E}_{F_{\\mathrm{mid}}},\\) and \\(\\mathcal{E}_{F_{\\mathrm{high}}})\\), containing only the low, mid, and high-frequency components of the solutions, respectively. We refer to Fig. 6 for visualization of solutions at these frequency bands. However, DM does not enhance the results at \\(\\mathcal{E}_{F_{\\mathrm{low}}}\\) and \\(\\mathcal{E}_{F_{\\mathrm{mid}}}\\) when integrated with HFS-enhanced NO. Indeed, the results at these two frequency bands are sometimes the best" + }, + { + "type": "list", + "bbox": [ + 0.145, + 0.657, + 0.882, + 0.917 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.487, + 0.918, + 0.511, + 0.93 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.164, + 0.086, + 0.883, + 0.139 + ], + "angle": 0, + "content": "for HFS-enhanced NO without DM, depending on the NO size. However, the advantage of DM is taken into action at \\(\\mathcal{E}_{F_{\\mathrm{high}}}\\) (Fig. 10e) with improved results compared to NO and HFS-enhanced NO. This explains the role of DM in further mitigation of spectral bias." 
+ }, + { + "type": "image", + "bbox": [ + 0.152, + 0.171, + 0.48, + 0.392 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.489, + 0.171, + 0.822, + 0.391 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.12, + 0.397, + 0.368, + 0.56 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.368, + 0.397, + 0.611, + 0.559 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.612, + 0.398, + 0.864, + 0.56 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.115, + 0.572, + 0.883, + 0.645 + ], + "angle": 0, + "content": "Figure 10: Diffusion model prediction results. (a) Relative errors (Rel. Error) of prediction by NO, NO + DM, NO + HFS, and NO + HFS + DM. (b) Relative energy spectrum errors (Rel. \\(\\mathcal{E}_F\\)). (c) Relative energy spectrum errors in the low frequency band (Rel. \\(\\mathcal{E}_{F_{\\mathrm{low}}}\\)). (d) Relative energy spectrum errors in the mid frequency band (Rel. \\(\\mathcal{E}_{F_{\\mathrm{mid}}}\\)). (e) Relative energy spectrum errors in the high-frequency band (Rel. \\(\\mathcal{E}_{F_{\\mathrm{high}}}\\)). Low, mid, and high-frequency thresholds are set to the first \\(2\\%\\), the first \\(6.2\\%\\) excluding the first \\(2\\%\\), and the last \\(93.8\\%\\) of the wavenumbers." + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.67, + 0.238, + 0.685 + ], + "angle": 0, + "content": "4. Discussion" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.698, + 0.883, + 0.887 + ], + "angle": 0, + "content": "HFS works by preserving more high-frequency components in the latent space after each convolutional layer in the NO. The flexibility of learning to scale the DC and HFC of the signals allows the model to enhance the predictions in mid and high-frequency contents without any degradation in the low frequency content of the solutions. 
As a result, both field metrics suh as RMSE and bubble RMSE, and the spectral errors are reduced in two-phase flow predictions. The enhancements observed in HFS-enhanced NO prediction results are more pronounced in areas with larger high-frequency features such as within the bubbles and at condensation trails seen in subcooled boiling solutions. This emphasizes the role of HFS in spectral bias mitigation, which helps with better capture of intricate features and sharp gradients. Similarly, both the relative errors and spectral errors are reduced, and high-frequency features are enhanced in the Kolmogorov flow predictions." + }, + { + "type": "page_number", + "bbox": [ + 0.487, + 0.917, + 0.509, + 0.93 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.115, + 0.087, + 0.883, + 0.293 + ], + "angle": 0, + "content": "The scaling parameters \\(\\lambda_{DC}\\) and \\(\\lambda_{HFC}\\) in the HFS method are optimized during the network training. Notably, the optimized values for \\(\\lambda_{HFC}\\) are consistently larger than \\(\\lambda_{DC}\\), indicating that the model is trying to pay more attention to the HFC in the latent space. This biased attention helps mitigating the persistent challenge of spectral bias in the NO. To reduce the optimization burden, the scaling parameters were consistent across all the patches but were allowed to vary across different feature maps. This flexibility enables the model to automatically adjust the scaling of the HFC of the feature map depending on its content and significance. The learned \\(\\lambda_{DC}\\) and \\(\\lambda_{HFC}\\) for each of the latent feature maps in the HFS-enhanced NO with \\(\\sim 1.7\\) million parameters are shown in Appendix G. In our work, all the scaling parameters were initialized at one and they were optimized using gradient descent with the same learning rate used for training the NO (\\(\\sim 8\\times 10^{-4}\\)). 
It would be interesting to explore faster convergence by using different initializations and optimization frameworks for the scaling parameters in future work." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.31, + 0.885, + 0.584 + ], + "angle": 0, + "content": "Another method for spectral bias mitigation is through diffusion models conditioned on NO predictions as prior information. However, using diffusion models has two drawbacks. First, the diffusion model predictions are strongly dependent on the prior information. Therefore, it can only reduce over-smoothing from reasonably accurate NO predictions. If the NO predictions are not sufficiently accurate, then the diffusion model cannot perform well. Second, training diffusion models requires extensive computational cost as each training iteration involves \\( n(= 32) \\) auto-regressive denoising steps to estimate the state of the solution at each time-step. In our experiments, the diffusion model training cost is approximately 2 to 4 times higher than the NO training itself. On the other hand, the HFS method requires only a small additional computational cost and negligible additional memory for training along the NO. The number of parameters added by HFS modules varies depending on the underlying NO size. However, it is generally less than \\( 0.1\\% \\) of the number of parameters in the NO. In our experimentation, the HFS module parameters vary between \\( 0.018\\% \\) to \\( 0.045\\% \\) of the number of parameters in NO, depending on the underlying NO size. Based on our experiments, the computational time for each training iteration is within \\( 10\\% \\) to \\( 30\\% \\) higher, depending on the NO size and the computational resource." 
+ }, + { + "type": "text", + "bbox": [ + 0.115, + 0.601, + 0.883, + 0.807 + ], + "angle": 0, + "content": "In addition to the enhancements observed in field metrics such as RMSE and bubble RMSE, our investigation revealed that HFS also helps with reducing the spectral errors. We demonstrated that matching the correct energy spectra at mid and high wavenumbers is directly correlated with capturing the complex features in the solutions. We would like to emphasize the importance of considering both field errors and correct energy spectra alignment in scientific machine learning problems. The field analysis demonstrates the average performance of the predictions. However, the energy spectra analysis reveals useful information about the prediction accuracies at different frequencies and thereby explaining the possible spectral bias and loss of useful information near interfaces, vortices, and sharp gradient areas in two-phase flow and turbulence problems. It should be noted that the predictions with enhanced energy spectra alignment is beneficial when accompanied by improved mean field predictions (e.g., RMSE) and HFS-enhanced NO results satisfy this requirement." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.825, + 0.882, + 0.911 + ], + "angle": 0, + "content": "When aiming to scale the different frequency bands of the signals, a logical alternative would be to perform the scaling directly in the frequency domain rather than the physical domain. As a comparison, we implemented and compared scaling in the frequency domain with our proposed method (HFS). 
In this regard, let \\(\\mathbf{X}^{(l)}\\in \\mathbb{R}^{H\\times W\\times C}\\) be the output of \\(l\\)-th convolutional layer, then the feature maps can be transferred to frequency domain using a 2D Fourier" + }, + { + "type": "page_number", + "bbox": [ + 0.487, + 0.917, + 0.511, + 0.93 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.116, + 0.087, + 0.241, + 0.103 + ], + "angle": 0, + "content": "transform \\((\\mathcal{F})\\)" + }, + { + "type": "equation", + "bbox": [ + 0.31, + 0.118, + 0.881, + 0.141 + ], + "angle": 0, + "content": "\\[\n\\hat {\\mathbf {X}} ^ {(l)} (:,:, c) = \\mathcal {F} \\left(\\mathbf {X} ^ {(l)} (:,: c)\\right), \\quad c = 1, 2, \\dots , C, \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.148, + 0.881, + 0.218 + ], + "angle": 0, + "content": "where \\(\\hat{\\mathbf{X}}^{(l)}\\in \\mathbb{C}^{H\\times W\\times C}\\) includes the Fourier-transformed feature maps. The low frequency and high-frequency component of features maps can be generated by truncating \\(\\hat{\\mathbf{X}}^{(l)}\\) at a frequency threshold of \\(\\tau\\). We name these components as \\(\\hat{\\mathbf{X}}_{\\mathrm{low}}^{(l)}\\) and \\(\\hat{\\mathbf{X}}_{\\mathrm{high}}^{(l)}\\). 
Each Fourier-transformed feature will be scaled separately:" + }, + { + "type": "equation", + "bbox": [ + 0.352, + 0.229, + 0.881, + 0.253 + ], + "angle": 0, + "content": "\\[\n\\hat {\\mathbf {X}} _ {\\text {s c a l e d}} ^ {(l)} = \\lambda_ {\\text {l o w}} \\odot \\hat {\\mathbf {X}} _ {\\text {l o w}} ^ {(l)} + \\lambda_ {\\text {h i g h}} \\odot \\hat {\\mathbf {X}} _ {\\text {h i g h}} ^ {(l)}, \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.265, + 0.881, + 0.316 + ], + "angle": 0, + "content": "where \\(\\lambda_{\\mathrm{low}} \\in \\mathbb{R}^{1 \\times 1 \\times C}\\) and \\(\\lambda_{\\mathrm{high}} \\in \\mathbb{R}^{1 \\times 1 \\times C}\\) are learnable parameters that are optimized simultaneously with the network training. Finally, the scaled feature map is reconstructed using the inverse Fourier transform:" + }, + { + "type": "equation", + "bbox": [ + 0.281, + 0.331, + 0.881, + 0.354 + ], + "angle": 0, + "content": "\\[\n\\mathbf {X} _ {\\text {s c a l e d}} ^ {(l)} (:, :, c) = \\mathcal {F} ^ {- 1} \\left(\\hat {\\mathbf {X}} _ {\\text {s c a l e d}} ^ {(l)} (:, :, c)\\right), \\quad c = 1, 2, \\dots , C. \\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.359, + 0.881, + 0.53 + ], + "angle": 0, + "content": "Our preliminary results demonstrate that scaling in the frequency domain also improves the two-phase flow prediction results, thus helping with the spectral bias mitigation. However, the enhancements are lower than the proposed HFS method, while the computational cost is significantly higher. This is due to the Fourier and Fourier inverse transforms required in this method. Consequently, we did not proceed with the second method. However, it may worth investigating this method in future work. There is one hyperparameter for each of these scaling methods. For the proposed HFS method, the patch size is the hyperparameter, and for scaling in the frequency domain the truncation frequency is the hyperparameter. 
A comparison of the prediction errors and computation costs of the two methods with a NO with \\(\\sim 1.7\\) million parameters is shown in Table 3." + }, + { + "type": "table_caption", + "bbox": [ + 0.158, + 0.56, + 0.838, + 0.575 + ], + "angle": 0, + "content": "Table 3: Comparison of the proposed HFS (Method 1) and scaling in frequency domain (Method 2)." + }, + { + "type": "table", + "bbox": [ + 0.204, + 0.585, + 0.795, + 0.713 + ], + "angle": 0, + "content": "
NONO + Method 1NO + Method 2
Rel. Error0.0440.0330.034
RMSE0.0430.0330.034
BRMSE0.1160.0720.076
Maxmean1.140.890.92
Parameters [Millions]1.7111.7121.712
Iteration time (s)31.434.552.6
" + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.732, + 0.338, + 0.749 + ], + "angle": 0, + "content": "4.1. Effectiveness Criteria" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.752, + 0.881, + 0.907 + ], + "angle": 0, + "content": "The HFS approach operates by spatially decomposing the features into several patches and scaling the common DC and individual HFC of the patches separately. Our investigation showed that HFS is mostly effective on datasets with localized features such as those in subcooled pool boiling dataset. For extremely chaotic systems with globally small-scale features, the DC and HFC cannot be directly separated from spatial patching as all the patches may contain similar frequency components. To better quantify this limitation, we directly applied HFS to the samples from three different case studies with inherently different features. The samples were chosen from the subcooled pool boiling, Kolmogorov flow, and a turbulent jet problem. We found that HFS is effective for the first two problems (with the effect being less" + }, + { + "type": "page_number", + "bbox": [ + 0.487, + 0.917, + 0.511, + 0.93 + ], + "angle": 0, + "content": "23" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.116, + 0.087, + 0.656, + 0.103 + ], + "angle": 0, + "content": "pronounced on the later one), but is not effective for the third case." + }, + { + "type": "text", + "bbox": [ + 0.118, + 0.121, + 0.885, + 0.345 + ], + "angle": 0, + "content": "The turbulent jet data is from the experimental Schlieren velocimetry of turbulent helium jet in air. More details about the dataset is available in the previous work [64]. We directly used the publicly available Schlieren velocimetry dataset [64] in the raw .tif format. All the regions in the turbulent jet have similar small-scale features (see Fig. 11), which are different from the more localized features in the subcooled pool boiling and less localized features in the Kolmogorov flow. 
We directly applied HFS to these datasets and visualized the gradient magnitude in a region with high-frequency features. Additionally, we visualized the ratio of the gradient strength on a high frequency region with and without HFS, as defined by \\(\\frac{|\\nabla x_{\\mathrm{HFS}}|}{|\\nabla x_{\\mathrm{baseline}}|}\\) where \\(x\\) is the chosen region, \\(\\nabla\\) is the gradient operator, and baseline refers to the case without HFS. This ratio compares the selectiveness in scaling the gradient of the features. The HFS approach is effective for cases where it can selectively scale the gradients across the localized features. In contrast, HFS may not be effective if it results in a uniform gradient scaling, as it can be seen in the sample from the turbulent jet dataset." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.361, + 0.885, + 0.516 + ], + "angle": 0, + "content": "Specifically, as shown in Fig. 11, the HFS approach successfully increases the gradient strength at high frequency regions in subcooled pool boiling and Kolmogorov flow. However, it scales the gradient uniformly in the turbulent jet case. Therefore, the ratio of the gradient strength with HFS to the baseline shows a less uniform solution on the subcooled pool boiling sample, followed by the Kolmogorov flow sample. However, this ratio is almost uniform for the turbulent jet case. Selective enhancement of the gradient near the edges and high-frequency features helps with the better representation of these local regions which helps the NO to better capture the high-frequency details. Since HFS is applied in the latent space, the artifacts caused by patching are mitigated and ultimately discarded in the deeper levels of the NO." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.488, + 0.917, + 0.511, + 0.93 + ], + "angle": 0, + "content": "24" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.134, + 0.097, + 0.161, + 0.114 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.163, + 0.09, + 0.345, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.368, + 0.097, + 0.521, + 0.216 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.528, + 0.097, + 0.679, + 0.216 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.687, + 0.084, + 0.866, + 0.216 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.132, + 0.233, + 0.159, + 0.252 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image", + "bbox": [ + 0.161, + 0.23, + 0.338, + 0.368 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.382, + 0.251, + 0.506, + 0.36 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.541, + 0.25, + 0.665, + 0.359 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.702, + 0.25, + 0.853, + 0.36 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.132, + 0.373, + 0.158, + 0.394 + ], + "angle": 0, + "content": "(c)" + }, + { + "type": "image_caption", + "bbox": [ + 0.161, + 0.372, + 0.447, + 0.386 + ], + "angle": 0, + "content": "Turbulent jet (Schlieren velocimetry)" + }, + { + "type": "image", + "bbox": [ + 0.161, + 0.386, + 0.39, + 0.466 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.399, + 0.39, + 0.489, + 0.461 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.559, + 0.389, + 0.648, + 0.461 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.72, + 0.388, + 0.837, + 0.461 + ], + "angle": 0, + "content": null + }, + { + "type": 
"image_caption", + "bbox": [ + 0.115, + 0.48, + 0.882, + 0.553 + ], + "angle": 0, + "content": "Figure 11: HFS impact on gradient magnitude for different problems. (a) Subcooled pool boiling. (b) Kolmogorov flow. (c) Schlieren velocimetry of turbulent jet. For each case, the first column shows the sample and the chosen region with high frequency features (dashed boxes), the second column shows the gradient magnitude, the third column shows the gradient magnitude after applying HFS to the sample, and the fourth column shows the ratio of the HFS-enhanced gradient magnitude to the baseline gradient magnitude." + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.579, + 0.231, + 0.595 + ], + "angle": 0, + "content": "5. Summary" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.606, + 0.883, + 0.914 + ], + "angle": 0, + "content": "In this work, we proposed a new method named high-frequency scaling (HFS) to mitigate the spectral bias in convolutional-based neural operators. We demonstrated that integrating HFS with feature maps in the latent space of the neural operator reduces the prediction errors in two-phase flow problems and the Kolmogorov flow problem. Through spectral bias mitigation, HFS helps to better capture intricate features and sharp gradients commonly seen within the bubbles and induced vortices in subcooled pool boiling problem, and the small-scale features in the Kolmogorov flow. These high-frequency features are prone to over-smoothing when predicted with neural operators without HFS. HFS-enhanced neural operators can improve neural operator performance irrespective of the neural operator size. We showed that for different variants of ResUNet with number of parameters varying within \\(\\sim 2\\) to \\(\\sim 16\\) millions, HFS consistently reduces the prediction errors. Furthermore, a better energy spectra alignment is observed for the results of the neural operator with HFS. 
Additionally, we showed that the diffusion model predictions are strongly dependent on the quality of the prior neural operator predictions. Therefore, it is important to improve the neural operator prediction accuracy using HFS so that the diffusion model can further recover the missing high-frequencies in the solutions. Otherwise, the diffusion model can barely improve the erroneous large features or significantly over-smoothed predictions of the neural operator. The advantages of HFS are obtained with a negligible memory requirement and a small computational cost trade-off." + }, + { + "type": "page_number", + "bbox": [ + 0.487, + 0.917, + 0.51, + 0.93 + ], + "angle": 0, + "content": "25" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.114, + 0.104, + 0.883, + 0.241 + ], + "angle": 0, + "content": "Finally, we investigated the effectiveness criteria for HFS approach by visualizing the gradient magnitudes of high-frequency regions of three different problems. We showed that HFS works the best on the subcooled pool boiling dataset due to the more localized features, which result in a selective gradient enhancement near the edges and high-frequency features. The HFS approach effectiveness decreases in the Kolmogorov flow problem, and is negligible in the turbulent jet problem. The gradient magnitude is scaled more uniformly in the Kolmogorov flow data and almost completely uniform in the turbulent jet problem, hence explaining why HFS is ineffective for this problem." + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.261, + 0.492, + 0.279 + ], + "angle": 0, + "content": "CRediT authorship contribution statement" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.289, + 0.883, + 0.41 + ], + "angle": 0, + "content": "Siavash Khodakarami: Writing - review & editing, Writing - original draft, Visualization, Validation, Software, Methodology, Investigation, Formal analysis, Data Curation, Conceptualization. 
Vivek Oommen: Writing - review & editing, Writing - original draft, Visualization, Validation, Methodology, Investigation, Formal analysis, Data curation. Aniruddha Bora: Writing - review & editing, Writing - original draft, Validation, Methodology, Investigation. George Em Karniadakis: Writing - review & editing, Writing - original draft, Supervision, Funding acquisition, Conceptualization." + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.43, + 0.411, + 0.447 + ], + "angle": 0, + "content": "Declaration of competing interest" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.457, + 0.882, + 0.493 + ], + "angle": 0, + "content": "The authors declare that they have no known competing financial interests or personal relationships that could have appeared to influence the work reported in this paper." + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.511, + 0.283, + 0.528 + ], + "angle": 0, + "content": "Acknowledgments" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.538, + 0.883, + 0.678 + ], + "angle": 0, + "content": "We would like to acknowledge funding from the Office of Naval Research as part of MURI-METHODS project with grant number N00014242545. The authors would like to acknowledge the computational resources and services at the Center for Computation and Visualization (CCV), Brown University. The experiments were also partly conducted using the Delta AI computational resources at the National Center for Supercomputing Applications at the University of Illinois Urbana-Champaign through allocation CIS240932 from the Advanced Cyberinfrastructure Coordination Ecosystem: Services & Support (ACCESS) program, which is supported by the National Science Foundation." 
+ }, + { + "type": "title", + "bbox": [ + 0.116, + 0.696, + 0.27, + 0.715 + ], + "angle": 0, + "content": "Data Availability" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.723, + 0.882, + 0.759 + ], + "angle": 0, + "content": "All codes and datasets will be made publicly available at https://github.com/SiaK4/HFS_ResUNet.git upon publication." + }, + { + "type": "title", + "bbox": [ + 0.117, + 0.778, + 0.218, + 0.794 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.125, + 0.803, + 0.882, + 0.832 + ], + "angle": 0, + "content": "[1] S. K. Godunov, I. Bohachevsky, Finite difference method for numerical computation of discontinuous solutions of the equations of fluid dynamics, Matematicheskij Sbornik 47 (1959) 271-306." + }, + { + "type": "ref_text", + "bbox": [ + 0.126, + 0.833, + 0.882, + 0.859 + ], + "angle": 0, + "content": "[2] R. Eymard, T. Gallouet, R. Herbin, Finite volume methods, Handbook of Numerical Analysis 7 (2000) 713-1018." + }, + { + "type": "ref_text", + "bbox": [ + 0.126, + 0.861, + 0.881, + 0.888 + ], + "angle": 0, + "content": "[3] G. Karniadakis, S. J. Sherwin, Spectral/hp element methods for computational fluid dynamics, Oxford University Press, USA, 2005." + }, + { + "type": "ref_text", + "bbox": [ + 0.126, + 0.89, + 0.881, + 0.917 + ], + "angle": 0, + "content": "[4] T. J. Hughes, The Finite Element Method: Linear Static and Dynamic Finite Element Analysis, Courier Corporation, 2012." + }, + { + "type": "list", + "bbox": [ + 0.125, + 0.803, + 0.882, + 0.917 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.488, + 0.918, + 0.511, + 0.93 + ], + "angle": 0, + "content": "26" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.126, + 0.088, + 0.882, + 0.131 + ], + "angle": 0, + "content": "[5] L. Lu, P. Jin, G. Pang, Z. Zhang, G. E. 
Karniadakis, Learning nonlinear operators via deeponet based on the universal approximation theorem of operators, Nature Machine Intelligence 3 (2021) 218-229. URL: https://doi.org/10.1038/s42256-021-00302-5. doi:10.1038/s42256-021-00302-5." + }, + { + "type": "ref_text", + "bbox": [ + 0.126, + 0.132, + 0.881, + 0.16 + ], + "angle": 0, + "content": "[6] Z. Li, N. Kovachki, K. Azizzadenesheli, B. Liu, K. Bhattacharya, A. Stuart, A. Anandkumar, Fourier neural operator for parametric partial differential equations, arXiv preprint arXiv:2010.08895 (2020)." + }, + { + "type": "ref_text", + "bbox": [ + 0.127, + 0.16, + 0.881, + 0.188 + ], + "angle": 0, + "content": "[7] Q. Cao, S. Goswami, G. E. Karniadakis, Laplace neural operator for solving differential equations, Nature Machine Intelligence 6 (2024) 631-640." + }, + { + "type": "ref_text", + "bbox": [ + 0.126, + 0.188, + 0.881, + 0.229 + ], + "angle": 0, + "content": "[8] T. Tripura, S. Chakraborty, Wavelet neural operator for solving parametric partial differential equations in computational mechanics problems, Computer Methods in Applied Mechanics and Engineering 404 (2023) 115783." + }, + { + "type": "ref_text", + "bbox": [ + 0.126, + 0.23, + 0.881, + 0.273 + ], + "angle": 0, + "content": "[9] O. Ovadia, V. Oommen, A. Kahana, A. Peyvan, E. Turkel, G. E. Karniadakis, Real-time inference and extrapolation via a diffusion-inspired temporal transformer operator (ditto), arXiv preprint arXiv:2307.09072 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.273, + 0.881, + 0.302 + ], + "angle": 0, + "content": "[10] Z. Li, K. Meidani, A. B. Farimani, Transformer for partial differential equations' operator learning, arXiv preprint arXiv:2205.13671 (2022)." + }, + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.302, + 0.881, + 0.33 + ], + "angle": 0, + "content": "[11] A. Sharma, S. Singh, S. Ratna, Graph neural network operators: a review, Multimedia Tools and Applications 83 (2024) 23413-23436." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.33, + 0.881, + 0.372 + ], + "angle": 0, + "content": "[12] T. Chen, H. Chen, Universal approximation to nonlinear operators by neural networks with arbitrary activation functions and its application to dynamical systems, IEEE Transactions on Neural Networks 6 (1995) 911-917. doi:10.1109/72.392253." + }, + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.372, + 0.881, + 0.415 + ], + "angle": 0, + "content": "[13] R. Wan, E. Kharazmi, M. S. Triantafyllou, G. E. Karniadakis, Deepvivonet: Using deep neural operators to optimize sensor locations with application to vortex-induced vibrations, arXiv preprint arXiv:2501.04105 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.415, + 0.881, + 0.458 + ], + "angle": 0, + "content": "[14] E. Kiyani, M. Manav, N. Kadivar, L. De Lorenzis, G. E. Karniadakis, Predicting crack nucleation and propagation in brittle materials using deep operator networks with diverse trunk architectures, arXiv preprint arXiv:2501.00016 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.458, + 0.881, + 0.486 + ], + "angle": 0, + "content": "[15] A. Peyvan, V. Oommen, A. D. Jagtap, G. E. Karniadakis, Riemannonets: Interpretable neural operators for riemann problems, Computer Methods in Applied Mechanics and Engineering 426 (2024) 116996." + }, + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.486, + 0.881, + 0.514 + ], + "angle": 0, + "content": "[16] Z. Li, W. Peng, Z. Yuan, J. Wang, Long-term predictions of turbulence by implicit u-net enhanced fourier neural operator, Physics of Fluids 35 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.514, + 0.881, + 0.543 + ], + "angle": 0, + "content": "[17] Y. Jiang, Z. Li, Y. Wang, H. Yang, J. Wang, An implicit adaptive fourier neural operator for long-term predictions of three-dimensional turbulence, arXiv preprint arXiv:2501.12740 (2025)." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.543, + 0.881, + 0.572 + ], + "angle": 0, + "content": "[18] V. Gopakumar, S. Pamela, L. Zanisi, Z. Li, A. Anandkumar, M. Team, Fourier neural operator for plasma modelling, arXiv preprint arXiv:2302.06542 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.572, + 0.881, + 0.613 + ], + "angle": 0, + "content": "[19] D. Montes de Oca Zapiain, J. A. Stewart, R. Dingreville, Accelerating phase-field-based microstructure evolution predictions via surrogate models trained by machine learning methods, npj Computational Materials 7 (2021) 3." + }, + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.613, + 0.881, + 0.643 + ], + "angle": 0, + "content": "[20] V. Oommen, K. Shukla, S. Goswami, R. Dingreville, G. E. Karniadakis, Learning two-phase microstructure evolution using neural operators and autoencoder architectures, npj Computational Materials 8 (2022) 190." + }, + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.643, + 0.881, + 0.672 + ], + "angle": 0, + "content": "[21] V. Oommen, K. Shukla, S. Desai, R. Dingreville, G. E. Karniadakis, Rethinking materials simulations: Blending direct numerical simulations with neural operators, npj Computational Materials 10 (2024) 145." + }, + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.672, + 0.881, + 0.712 + ], + "angle": 0, + "content": "[22] S. Khodakarami, Y. Suh, Y. Won, N. Miljkovic, An intelligent strategy for phase change heat and mass transfer: Application of machine learning, in: Advances in Heat Transfer, volume 56, Elsevier, 2023, pp. 113-168." + }, + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.713, + 0.881, + 0.77 + ], + "angle": 0, + "content": "[23] N. Rahaman, A. Baratin, D. Arpit, F. Draxler, M. Lin, F. Hamprecht, Y. Bengio, A. Courville, On the spectral bias of neural networks, in: K. Chaudhuri, R. 
Salakhutdinov (Eds.), Proceedings of the 36th International Conference on Machine Learning, volume 97 of Proceedings of Machine Learning Research, PMLR, 2019, pp. 5301-5310. URL: https://proceedings.mlr.press/v97/rahaman19a.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.77, + 0.881, + 0.799 + ], + "angle": 0, + "content": "[24] Z.-Q. J. Xu, Y. Zhang, T. Luo, Y. Xiao, Z. Ma, Frequency principle: Fourier analysis sheds light on deep neural networks, arXiv preprint arXiv:1901.06523 (2019)." + }, + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.799, + 0.881, + 0.828 + ], + "angle": 0, + "content": "[25] Z.-Q. J. Xu, L. Zhang, W. Cai, On understanding and overcoming spectral biases of deep neural network learning methods for solving pdes, arXiv preprint arXiv:2501.09987 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.828, + 0.881, + 0.856 + ], + "angle": 0, + "content": "[26] C. Lin, Z. Li, L. Lu, S. Cai, M. Maxey, G. E. Karniadakis, Operator learning for predicting multiscale bubble growth dynamics, The Journal of Chemical Physics 154 (2021)." + }, + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.856, + 0.881, + 0.884 + ], + "angle": 0, + "content": "[27] N. Jain, S. Roy, H. Kodamana, P. Nair, Scaling the predictions of multiphase flow through porous media using operator learning, Chemical Engineering Journal 503 (2025) 157671." + }, + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.884, + 0.881, + 0.913 + ], + "angle": 0, + "content": "[28] O. Ronneberger, P. Fischer, T. 
Brox, U-net: Convolutional networks for biomedical image segmentation, in: Medical image computing and computer-assisted intervention-MICCAI 2015: 18th international confer" + }, + { + "type": "list", + "bbox": [ + 0.119, + 0.088, + 0.882, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.488, + 0.917, + 0.51, + 0.929 + ], + "angle": 0, + "content": "27" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.155, + 0.088, + 0.819, + 0.102 + ], + "angle": 0, + "content": "ence, Munich, Germany, October 5-9, 2015, proceedings, part III 18, Springer, 2015, pp. 234-241." + }, + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.103, + 0.88, + 0.145 + ], + "angle": 0, + "content": "[29] S. Qin, F. Lyu, W. Peng, D. Geng, J. Wang, N. Gao, X. Liu, L. L. Wang, Toward a better understanding of fourier neural operators: Analysis and improvement from a spectral perspective, arXiv preprint arXiv:2404.07200 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.146, + 0.881, + 0.188 + ], + "angle": 0, + "content": "[30] S. M. S. Hassan, A. Feeney, A. Dhruv, J. Kim, Y. Suh, J. Ryu, Y. Won, A. Chandramowlishwaran, Bubbleml: A multiphase multiphysics dataset and benchmarks for machine learning, Advances in Neural Information Processing Systems 36 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.189, + 0.881, + 0.216 + ], + "angle": 0, + "content": "[31] A. Dubey, K. Weide, J. O'Neal, A. Dhruv, S. Couch, J. A. Harris, T. Klosterman, R. Jain, J. Rudi, B. Messer, et al., Flash-x: A multiphysics simulation software instrument, SoftwareX 19 (2022) 101168." + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.217, + 0.881, + 0.244 + ], + "angle": 0, + "content": "[32] X. Liu, B. Xu, S. Cao, L. Zhang, Mitigating spectral bias for the multiscale operator learning, Journal of Computational Physics 506 (2024) 112944." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.245, + 0.881, + 0.273 + ], + "angle": 0, + "content": "[33] W. Cai, Z.-Q. J. Xu, Multi-scale deep neural networks for solving high dimensional pdes, arXiv preprint arXiv:1910.11710 (2019)." + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.274, + 0.881, + 0.316 + ], + "angle": 0, + "content": "[34] M. Tancik, P. Srinivasan, B. Mildenhall, S. Fridovich-Keil, N. Raghavan, U. Singhal, R. Ramamoorthi, J. Barron, R. Ng, Fourier features let networks learn high frequency functions in low dimensional domains, Advances in neural information processing systems 33 (2020) 7537-7547." + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.317, + 0.881, + 0.358 + ], + "angle": 0, + "content": "[35] S. Wang, H. Wang, P. Perdikaris, On the eigenvector bias of fourier feature networks: From regression to solving multi-scale pdes with physics-informed neural networks, Computer Methods in Applied Mechanics and Engineering 384 (2021) 113938." + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.359, + 0.881, + 0.401 + ], + "angle": 0, + "content": "[36] M. Raissi, P. Perdikaris, G. E. Karniadakis, Physics-informed neural networks: A deep learning framework for solving forward and inverse problems involving nonlinear partial differential equations, Journal of Computational physics 378 (2019) 686-707." + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.402, + 0.881, + 0.442 + ], + "angle": 0, + "content": "[37] J. D. Toscano, V. Oommen, A. J. Varghese, Z. Zou, N. A. Daryakenari, C. Wu, G. E. Karniadakis, From pinns to pikans: Recent advances in physics-informed machine learning, arXiv preprint arXiv:2410.13228 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.444, + 0.881, + 0.471 + ], + "angle": 0, + "content": "[38] S. Liang, L. Lyu, C. Wang, H. Yang, Reproducing activation function for deep learning, arXiv preprint arXiv:2101.04844 (2021)." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.473, + 0.881, + 0.5 + ], + "angle": 0, + "content": "[39] A. D. Jagtap, K. Kawaguchi, G. E. Karniadakis, Adaptive activation functions accelerate convergence in deep and physics-informed neural networks, Journal of Computational Physics 404 (2020) 109136." + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.501, + 0.881, + 0.529 + ], + "angle": 0, + "content": "[40] W. Cai, X. Li, L. Liu, A phase shift deep neural network for high frequency approximation and wave problems, SIAM Journal on Scientific Computing 42 (2020) A3285-A3312." + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.53, + 0.881, + 0.557 + ], + "angle": 0, + "content": "[41] P. Lippe, B. Veeling, P. Perdikaris, R. Turner, J. Brandstetter, Pde-refiner: Achieving accurate long rollouts with neural pde solvers, Advances in Neural Information Processing Systems 36 (2023) 67398-67433." + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.558, + 0.881, + 0.586 + ], + "angle": 0, + "content": "[42] E. Zhang, A. Kahana, A. Kopaničáková, E. Turkel, R. Ranade, J. Pathak, G. E. Karniadakis, Blending neural operators and relaxation methods in pde numerical solvers, Nature Machine Intelligence (2024) 1-11." + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.587, + 0.881, + 0.627 + ], + "angle": 0, + "content": "[43] H. Wu, K. Zhang, D. Zhou, W.-L. Chen, Z. Han, Y. Cao, High-flexibility reconstruction of small-scale motions in wall turbulence using a generalized zero-shot learning, Journal of Fluid Mechanics 990 (2024) R1." + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.628, + 0.881, + 0.657 + ], + "angle": 0, + "content": "[44] Z. Wang, X. Li, L. Liu, X. Wu, P. Hao, X. Zhang, F. He, Deep-learning-based super-resolution reconstruction of high-speed imaging in fluids, Physics of Fluids 34 (2022)." + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.658, + 0.881, + 0.699 + ], + "angle": 0, + "content": "[45] R. Molinaro, S. 
Lanthaler, B. Raonic, T. Rohner, V. Armegioiu, Z. Y. Wan, F. Sha, S. Mishra, L. Zepeda-Nuñez, Generative ai for fast and accurate statistical computation of fluids, arXiv preprint arXiv:2409.18359 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.7, + 0.881, + 0.741 + ], + "angle": 0, + "content": "[46] J. W. Lockwood, A. Gori, P. Gentine, A generative super-resolution model for enhancing tropical cyclone wind field intensity and resolution, Journal of Geophysical Research: Machine Learning and Computation 1 (2024) e2024JH000375." + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.742, + 0.881, + 0.77 + ], + "angle": 0, + "content": "[47] V. Oommen, A. Bora, Z. Zhang, G. E. Karniadakis, Integrating neural operators with diffusion models improves spectral representation in turbulence modeling, arXiv preprint arXiv:2409.08477 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.771, + 0.881, + 0.799 + ], + "angle": 0, + "content": "[48] S. M. S. Hassan, A. Feeney, A. Dhruv, J. Kim, Y. Suh, J. Ryu, Y. Won, A. Chandramowlishwaran, Bubbleml: a multi-physics dataset and benchmarks for machine learning, arXiv preprint arXiv:2307.14623 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.8, + 0.881, + 0.84 + ], + "angle": 0, + "content": "[49] F. I. Diakogiannis, F. Waldner, P. Caccetta, C. Wu, Resunet-a: A deep learning framework for semantic segmentation of remotely sensed data, ISPRS Journal of Photogrammetry and Remote Sensing 162 (2020) 94-114." + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.842, + 0.881, + 0.87 + ], + "angle": 0, + "content": "[50] H. Li, Z. Xu, G. Taylor, C. Studer, T. Goldstein, Visualizing the loss landscape of neural nets, Advances in neural information processing systems 31 (2018)." + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.871, + 0.881, + 0.912 + ], + "angle": 0, + "content": "[51] X. Chen, C. Liang, D. Huang, E. Real, K. Wang, H. Pham, X. Dong, T. Luong, C.-J. Hsieh, Y. 
Lu, et al., Symbolic discovery of optimization algorithms, Advances in neural information processing systems 36 (2024)." + }, + { + "type": "list", + "bbox": [ + 0.118, + 0.088, + 0.881, + 0.912 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.488, + 0.917, + 0.511, + 0.929 + ], + "angle": 0, + "content": "28" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.088, + 0.836, + 0.102 + ], + "angle": 0, + "content": "[52] D. P. Kingma, Adam: A method for stochastic optimization, arXiv preprint arXiv:1412.6980 (2014)." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.103, + 0.879, + 0.131 + ], + "angle": 0, + "content": "[53] A. Dubey, K. Weide, J. O'Neal, A. Dhruv, S. Couch, J. A. Harris, T. Klosterman, R. Jain, J. Rudi, B. Messer, et al., Flash-x: A multiphysics simulation software instrument, SoftwareX 19 (2022) 101168." + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.132, + 0.88, + 0.159 + ], + "angle": 0, + "content": "[54] M. Wei, X. Zhang, Super-resolution neural operator, in: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2023, pp. 18247-18256." + }, + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.16, + 0.88, + 0.202 + ], + "angle": 0, + "content": "[55] R. Wang, K. Kashinath, M. Mustafa, A. Albert, R. Yu, Towards physics-informed deep learning for turbulent flow prediction, in: Proceedings of the 26th ACM SIGKDD international conference on knowledge discovery & data mining, 2020, pp. 1457-1466." + }, + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.203, + 0.879, + 0.216 + ], + "angle": 0, + "content": "[56] P. Chakrabarty, S. Maji, The spectral bias of the deep image prior, arXiv preprint arXiv:1912.08905 (2019)." + }, + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.217, + 0.88, + 0.244 + ], + "angle": 0, + "content": "[57] A. M. Saxe, P. W. Koh, Z. Chen, M. Bhand, B. Suresh, A. Y. 
Ng, On random weights and unsupervised feature learning., in: Icml, volume 2, 2011, p. 6." + }, + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.245, + 0.88, + 0.273 + ], + "angle": 0, + "content": "[58] P. Wang, W. Zheng, T. Chen, Z. Wang, Anti-oversmoothing in deep vision transformers via the fourier domain analysis: From theory to practice, arXiv preprint arXiv:2203.05962 (2022)." + }, + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.274, + 0.88, + 0.3 + ], + "angle": 0, + "content": "[59] J. Ho, A. Jain, P. Abbeel, Denoising diffusion probabilistic models, Advances in neural information processing systems 33 (2020) 6840-6851." + }, + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.302, + 0.88, + 0.33 + ], + "angle": 0, + "content": "[60] Y. Song, J. Sohl-Dickstein, D. P. Kingma, A. Kumar, S. Ermon, B. Poole, Score-based generative modeling through stochastic differential equations, arXiv preprint arXiv:2011.13456 (2020)." + }, + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.331, + 0.88, + 0.358 + ], + "angle": 0, + "content": "[61] Y. Song, S. Ermon, Generative modeling by estimating gradients of the data distribution, Advances in neural information processing systems 32 (2019)." + }, + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.359, + 0.88, + 0.387 + ], + "angle": 0, + "content": "[62] T. Karras, M. Aittala, T. Aila, S. Laine, Elucidating the design space of diffusion-based generative models, Advances in neural information processing systems 35 (2022) 26565-26577." + }, + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.388, + 0.88, + 0.442 + ], + "angle": 0, + "content": "[63] V. Oommen, A. Bora, Z. Zhang, G. E. Karniadakis, Data for \"integrating neural operators with diffusion models improves spectral representation in turbulence modeling\" (kolmogorov flow case), 2025. URL: https://doi.org/10.6084/m9.figshare.28250960.v1. doi:10.6084/m9.figshare.28250960.v1." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.119, + 0.444, + 0.88, + 0.472 + ], + "angle": 0, + "content": "[64] G. S. Settles, A. Liberzon, Schlieren and bos velocimetry of a round turbulent helium jet in air, Optics and Lasers in Engineering 156 (2022) 107104." + }, + { + "type": "list", + "bbox": [ + 0.117, + 0.088, + 0.88, + 0.472 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.494, + 0.681, + 0.512 + ], + "angle": 0, + "content": "Appendix A. Training strategies and ResUNet prediction results" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.522, + 0.882, + 0.713 + ], + "angle": 0, + "content": "All the models were trained for \\(\\sim 1000\\) epochs (convergence typically happened earlier). The initial learning rate was set to \\(8\\times 10^{-4}\\) and it was reduced after the first 700 epochs using a linear step scheduler. We used GELU activation function and group normalization after convolutional layers. Lion optimizer with weight decay of 0.02 to 0.1 were used, depending on the neural operator size. Batch size of 4 or 8 was used, depending on the neural operator size. We found that gradient clipping at maximum gradient norm of 0.4 to 1 (depending on neural operator size) helps with the optimization. Our preliminary findings showed better results with the Lion optimizer compared to Adam and AdamW optimizers. Therefore, all the trainings for this work were conducted with the Lion optimizer. For all the neural operators, the number of layers in the encoder and decoder were kept constant and the number of parameters at each layer was modified to change the neural operator size." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.488, + 0.917, + 0.511, + 0.93 + ], + "angle": 0, + "content": "29" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.138, + 0.131, + 0.159, + 0.15 + ], + "angle": 0, + "content": "5" + }, + { + "type": "image", + "bbox": [ + 0.159, + 0.098, + 0.275, + 0.18 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.298, + 0.098, + 0.414, + 0.18 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.439, + 0.098, + 0.555, + 0.18 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.578, + 0.098, + 0.695, + 0.18 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.718, + 0.097, + 0.855, + 0.18 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.135, + 0.178, + 0.157, + 0.193 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image", + "bbox": [ + 0.159, + 0.187, + 0.275, + 0.269 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.298, + 0.187, + 0.414, + 0.269 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.439, + 0.187, + 0.554, + 0.269 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.578, + 0.187, + 0.695, + 0.269 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.719, + 0.187, + 0.855, + 0.269 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.134, + 0.268, + 0.156, + 0.283 + ], + "angle": 0, + "content": "(c)" + }, + { + "type": "image", + "bbox": [ + 0.161, + 0.277, + 0.274, + 0.358 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.298, + 0.277, + 0.414, + 0.358 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.439, + 0.277, + 0.554, + 0.358 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.578, + 0.277, + 
0.695, + 0.358 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.719, + 0.276, + 0.855, + 0.358 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.135, + 0.359, + 0.157, + 0.375 + ], + "angle": 0, + "content": "(d)" + }, + { + "type": "image", + "bbox": [ + 0.161, + 0.365, + 0.275, + 0.446 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.298, + 0.365, + 0.414, + 0.446 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.439, + 0.365, + 0.554, + 0.446 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.578, + 0.365, + 0.695, + 0.446 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.719, + 0.365, + 0.855, + 0.447 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.136, + 0.448, + 0.157, + 0.463 + ], + "angle": 0, + "content": "(e)" + }, + { + "type": "image", + "bbox": [ + 0.161, + 0.452, + 0.275, + 0.535 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.298, + 0.453, + 0.414, + 0.535 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.439, + 0.453, + 0.554, + 0.535 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.578, + 0.453, + 0.695, + 0.535 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.719, + 0.453, + 0.855, + 0.536 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.115, + 0.56, + 0.882, + 0.605 + ], + "angle": 0, + "content": "Figure A.12: Example of subcooled pool boiling temperature prediction results by neural operators. (a) Ground truth results, (b) UNet prediction results, (c) ResUNet prediction results, (d) UNet prediction errors, (e) ResUNet prediction errors. The results show five time-step predictions from left to right." 
+ }, + { + "type": "title", + "bbox": [ + 0.116, + 0.626, + 0.646, + 0.659 + ], + "angle": 0, + "content": "Appendix B. Boundary RMSE, Bubble RMSE, and Spectral Errors" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.668, + 0.882, + 0.703 + ], + "angle": 0, + "content": "Boundary RMSE (BRMSE) for a single sample and time-step is defined by calculating the errors only at the boundaries of the domain:" + }, + { + "type": "equation", + "bbox": [ + 0.358, + 0.708, + 0.88, + 0.752 + ], + "angle": 0, + "content": "\\[\n\\operatorname {B R M S E} = \\sqrt {\\frac {1}{| \\partial \\Omega |} \\sum_ {\\mathbf {x} _ {i} \\in \\partial \\Omega} \\left(\\hat {T} _ {i} - T _ {i}\\right) ^ {2}}, \\tag {B.1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.76, + 0.88, + 0.811 + ], + "angle": 0, + "content": "where \\(\\mathbf{x}_i\\in \\partial \\Omega\\) specifies the points at the boundaries, \\(\\hat{T}_i\\) is the predicted temperature, and \\(T_{i}\\) is the actual temperature. Similarly, bubble RMSE is defined by calculating the errors only within the bubble areas. These areas are specified through a level-set function in the simulations." + }, + { + "type": "equation", + "bbox": [ + 0.241, + 0.818, + 0.88, + 0.862 + ], + "angle": 0, + "content": "\\[\n\\text {B u b b l e} = \\sqrt {\\frac {1}{| \\Omega_ {\\text {b u b b l e}} \\cup \\partial \\Omega_ {\\text {b u b b l e}} |} \\sum_ {\\mathbf {x} _ {i} \\in \\Omega_ {\\text {b u b b l e}} \\cup \\partial \\Omega_ {\\text {b u b b l e}}} (\\hat {y} _ {i} - y _ {i}) ^ {2}}, \\tag {B.2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.867, + 0.88, + 0.902 + ], + "angle": 0, + "content": "where \\(\\mathbf{x}_i\\in \\Omega_{\\mathrm{bubble}}\\) and \\(\\partial \\Omega_{\\mathrm{bubble}}\\) specify the points inside the bubble areas and at the interfaces, respectively." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.488, + 0.917, + 0.511, + 0.93 + ], + "angle": 0, + "content": "30" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.145, + 0.087, + 0.88, + 0.105 + ], + "angle": 0, + "content": "The spectral errors in each of the low, mid, and high-frequency bands are defined as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.2, + 0.117, + 0.881, + 0.162 + ], + "angle": 0, + "content": "\\[\nF _ {\\text {b a n d}} = \\sqrt {\\frac {1}{N _ {\\text {b a n d}}} \\sum_ {k \\in \\text {b a n d}} \\left| \\mathcal {F} (T) (\\mathbf {k}) - \\mathcal {F} (\\hat {T}) (\\mathbf {k}) \\right| ^ {2}}, \\quad \\text {b a n d} \\in \\{\\text {l o w}, \\text {m i d}, \\text {h i g h} \\}, \\tag {B.3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.174, + 0.882, + 0.261 + ], + "angle": 0, + "content": "where \\( k \\) is the spatial frequency component of the Fourier transformed solutions, \\( \\mathcal{F} \\) denotes the Fourier transform, and \\( N_{\\mathrm{band}} \\) specifies the number of components at each frequency band. The low, mid, and high bands may be defined differently based on the underlying dataset and the amount of high-frequency components. In this work, these bands were set to the first 2%, the first 6.2% excluding the low band components, and the last 93.8% of the components." 
+ }, + { + "type": "text", + "bbox": [ + 0.115, + 0.278, + 0.882, + 0.312 + ], + "angle": 0, + "content": "Similarly, the energy spectrum error, showing the energy spectra misalignment at each frequency band is defined as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.156, + 0.324, + 0.881, + 0.372 + ], + "angle": 0, + "content": "\\[\n\\mathcal {E} _ {F _ {\\mathrm {b a n d}}} = \\sqrt {\\frac {1}{N _ {\\mathrm {b a n d}}} \\sum_ {k \\in \\mathrm {b a n d}} \\left(\\left| \\mathcal {F} (T) (\\mathbf {k}) \\right| ^ {2} - \\left| \\mathcal {F} (\\hat {T}) (\\mathbf {k}) \\right| ^ {2}\\right) ^ {2}}, \\quad \\mathrm {b a n d} \\in \\{\\text {l o w , m i d , h i g h} \\}, \\tag {B.4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.146, + 0.382, + 0.501, + 0.399 + ], + "angle": 0, + "content": "where \\(\\mathcal{E}\\) denotes the energy spectrum error." + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.42, + 0.882, + 0.454 + ], + "angle": 0, + "content": "Appendix C. Summary of subcooled pool boiling prediction results with HFS-enhanced NO" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.465, + 0.883, + 0.621 + ], + "angle": 0, + "content": "In this work, we tested different variants of ResUNet by varying number of parameters in the range of \\(\\sim 2\\) millions to \\(\\sim 16\\) millions. In the following table, we summarized the results of the two of the models (smallest and largest models), trained with optimal hyperparameters. Note that the same hyperparameters were used for training a neural operator with and without HFS. The parameters were first optimized for the NO without HFS and the same set of parameters were used for training the HFS-enhanced NO. The results of the other models are not included in this table for easier comparison and interpretation. We refer the reader to Figure 4 for observing the effect of HFS on all the tested models. Similar to the rest of the paper, the results are based on five time-step predictions." 
+ }, + { + "type": "table_caption", + "bbox": [ + 0.115, + 0.634, + 0.882, + 0.692 + ], + "angle": 0, + "content": "Table C.4: Subcooled pool boiling temperature prediction errors with neural operator (NO) with and without high-frequency scaling (HFS) The columns correspond to the metrics, NO with \\(\\sim 1.7\\) millions parameters, HFS-enhanced NO with \\(\\sim 1.7\\) millions parameters, NO with \\(\\sim 16.2\\) millions parameters, and HFS-enhanced NO with \\(\\sim 16.2\\) millions parameters." + }, + { + "type": "table", + "bbox": [ + 0.118, + 0.702, + 0.882, + 0.88 + ], + "angle": 0, + "content": "
NO, 1.7 MNO+HFS, 1.7 MNO, 16.2 MNO+HFS, 16.2 M
Rel. Error0.04140.03330.02510.0238
RMSE0.04030.03240.02440.0232
BRMSE0.09730.07290.05620.0505
Bubble RMSE0.19240.1430.1090.0985
Maxmean1.0190.8970.6850.656
Flow0.3230.2370.2120.141
Fmid0.2820.2180.1850.148
Fhigh0.04760.04000.03920.0296
Parameters [Millions]1.7111.71216.26316.268
" + }, + { + "type": "page_number", + "bbox": [ + 0.488, + 0.917, + 0.509, + 0.93 + ], + "angle": 0, + "content": "31" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.116, + 0.087, + 0.592, + 0.104 + ], + "angle": 0, + "content": "Appendix D. Saturated pool boiling prediction results" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.114, + 0.883, + 0.234 + ], + "angle": 0, + "content": "Saturated pool boiling dataset involves less complexity due to lower high-frequency components and small scale features. Therefore, a well-optimized NO without HFS can successfully resolve the solutions. However, HFS still enhances the prediction accuracies, especially at bubble areas. The following figure demonstrates an example of predictions using NO and HFS-enhanced NO for saturated pool boiling dataset. Generally, the errors are much smaller than subcooled pool boiling predictions. However, it can be seen that the errors in the regions with departed bubbles are reduced with HFS-enhanced NO." + }, + { + "type": "image", + "bbox": [ + 0.128, + 0.263, + 0.87, + 0.713 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.115, + 0.728, + 0.885, + 0.801 + ], + "angle": 0, + "content": "Figure D.13: Examples of saturated pool boiling temperature prediction results by NO and HFS-enhanced NO (a) Ground truth (GT) results. (b) NO predictions. (c) \\(\\mathrm{NO} + \\mathrm{HFS}\\) predictions. (d) Absolute prediction errors of NO \\((E_{\\mathrm{NO}})\\). (e) Absolute prediction errors of \\(\\mathrm{NO} + \\mathrm{HFS}\\) \\((E_{\\mathrm{NO} + \\mathrm{HFS}})\\). The results are shown for five time-step predictions from left to right. The departed bubbles areas are circled (dashed red circles) in error maps for easier interpretation and comparison. The results are based on a NO with \\(\\sim 3.5\\) millions parameter." 
+ }, + { + "type": "text", + "bbox": [ + 0.114, + 0.822, + 0.885, + 0.907 + ], + "angle": 0, + "content": "To further investigate if HFS can enhance the predictions with smaller NO on this simpler dataset, we trained another NO with the same structure (ResUNet) but with only \\(\\sim 0.6\\) millions parameters with and without HFS. Consistent with previous results, HFS enhanced the predictions by reducing the field errors such as RMSE and bubble RMSE as well the spectral errors. The prediction results of saturated pool boiling dataset using two different NOs with" + }, + { + "type": "page_number", + "bbox": [ + 0.488, + 0.917, + 0.511, + 0.93 + ], + "angle": 0, + "content": "32" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.115, + 0.087, + 0.882, + 0.123 + ], + "angle": 0, + "content": "and without HFS are summarized in the following table. Similar to the rest of the paper, the results are based on five time-step predictions." + }, + { + "type": "table_caption", + "bbox": [ + 0.115, + 0.136, + 0.883, + 0.181 + ], + "angle": 0, + "content": "Table D.5: Saturated pool boiling temperature prediction errors of NO with and without HFS. The columns correspond to the metrics, NO with \\(\\sim 0.6\\) millions parameters, HFS-enhanced NO with \\(\\sim 0.6\\) millions parameters, NO with \\(\\sim 3.5\\) millions parameters, and HFS-enhanced NO with \\(\\sim 3.5\\) millions parameters." + }, + { + "type": "table", + "bbox": [ + 0.117, + 0.189, + 0.884, + 0.374 + ], + "angle": 0, + "content": "
NO, 0.6 MNO+HFS, 0.6 MNO, 3.5 MNO+HFS, 3.5 M
Rel. Error0.01730.01650.01490.0145
RMSE0.01710.01640.01480.0144
BRMSE0.04620.04500.03640.0355
Bubble RMSE0.09180.08980.07260.0692
Maxmean0.5920.5950.5530.544
Flow0.09640.08350.07450.0736
Fmid0.10860.09980.09190.0855
Fhigh0.02090.02080.01820.0180
Parameters [Millions]0.6140.6153.4803.481
" + }, + { + "type": "page_number", + "bbox": [ + 0.488, + 0.917, + 0.511, + 0.93 + ], + "angle": 0, + "content": "33" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.117, + 0.087, + 0.485, + 0.104 + ], + "angle": 0, + "content": "Appendix E. Visualization of latent space" + }, + { + "type": "image_caption", + "bbox": [ + 0.152, + 0.154, + 0.184, + 0.171 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.154, + 0.173, + 0.504, + 0.695 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.153, + 0.544, + 0.173 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image", + "bbox": [ + 0.513, + 0.171, + 0.86, + 0.695 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.115, + 0.719, + 0.883, + 0.763 + ], + "angle": 0, + "content": "Figure E.14: Effect of HFS on the latent space mean features. (a) Mean latent feature maps in decoder (downsampling) with five layers. (b) Mean latent feature maps in decoder (upsampling) with five layers. The results are based on a NO with \\(\\sim 16\\) millions parameters." + }, + { + "type": "page_number", + "bbox": [ + 0.488, + 0.917, + 0.511, + 0.93 + ], + "angle": 0, + "content": "34" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.117, + 0.086, + 0.804, + 0.105 + ], + "angle": 0, + "content": "Appendix F. Additional visualizations of the subcooled pool boiling predictions" + }, + { + "type": "image", + "bbox": [ + 0.128, + 0.134, + 0.866, + 0.622 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.115, + 0.638, + 0.883, + 0.697 + ], + "angle": 0, + "content": "Figure F.15: Examples of subcooled pool boiling prediction results by DM integrated with NO and HFS-enhanced NO. (a) Ground truth (GT) results. (b) NO predictions. (c) NO + DM predictions. (d) NO + HFS predictions. (e) NO + HFS + DM predictions. The results are shown for five time-step predictions from left to right." 
+ }, + { + "type": "title", + "bbox": [ + 0.116, + 0.717, + 0.63, + 0.736 + ], + "angle": 0, + "content": "Appendix G. Optimized scaling parameters, \\(\\lambda_{DC}\\) and \\(\\lambda_{HFC}\\)" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.745, + 0.883, + 0.798 + ], + "angle": 0, + "content": "The following figure demonstrates the learned \\(\\lambda_{DC}\\) and \\(\\lambda_{HFC}\\) across all the feature maps in the latent space of the encoder and decoder. The results are based on the training of a HFS-enhanced NO with \\(\\sim 1.7\\) million parameters for the subcooled pool boiling problem." + }, + { + "type": "page_number", + "bbox": [ + 0.488, + 0.917, + 0.51, + 0.93 + ], + "angle": 0, + "content": "35" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.172, + 0.105, + 0.832, + 0.385 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.171, + 0.377, + 0.832, + 0.668 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.114, + 0.681, + 0.883, + 0.725 + ], + "angle": 0, + "content": "Figure G.16: (a) learned values of \\(\\lambda_{DC}\\) and \\(\\lambda_{HFC}\\) in the encoder of the NO. (b) learned values of \\(\\lambda_{DC}\\) and \\(\\lambda_{HFC}\\) in the decoder of NO. Layers start from highest spatial resolution to the lowest in the encoder and vice versa for the decoder." + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.75, + 0.55, + 0.768 + ], + "angle": 0, + "content": "Appendix H. Kolmogorov flow prediction results" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.776, + 0.883, + 0.829 + ], + "angle": 0, + "content": "The vorticity formulation of the unsteady 2D incompressible Navier-Stokes equation for a viscous and incompressible fluid with the Kolmogorov forcing term is given as follows, where \\(\\omega\\) is the vorticity, \\(u\\) is the velocity vector, and \\(\\nu\\) is the kinematic viscosity." 
+ }, + { + "type": "equation", + "bbox": [ + 0.161, + 0.85, + 0.881, + 0.929 + ], + "angle": 0, + "content": "\\[\n\\left\\{ \\begin{array}{l l} \\partial_ {t} \\omega + \\mathbf {u} \\cdot \\nabla \\omega = \\nu \\Delta \\omega + f (x, y), & (x, y) \\in (0, 2 \\pi) ^ {2}, t \\in (0, t _ {\\text {f i n a l}} ] \\\\ f (x, y) = \\chi (\\sin (2 \\pi (x + y)) + \\cos (2 \\pi (x + y))), & (x, y) \\in (0, 2 \\pi) ^ {2} \\\\ \\nabla \\cdot \\mathbf {u} = 0, & (x, y) \\in (0, 2 \\pi) ^ {2}, t \\in (0, t _ {\\text {f i n a l}} ] \\\\ \\omega (x, y, 0) = \\omega_ {0}, & (x, y) \\in (0, 2 \\pi) ^ {2} \\end{array} \\right. \\tag {H.1}\n\\]" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.115, + 0.086, + 0.883, + 0.157 + ], + "angle": 0, + "content": "In this study, we used \\(\\chi = 0.1\\), \\(\\nu = 10^{-5}\\), and periodic boundary conditions. The vorticity initial condition was sampled from a Gaussian random field according to the distribution \\(\\mathcal{N}(0,14^{0.5}(-\\Delta +196I)^{-1.5})\\). The following figure demonstrate an example of the prediction results of the neural operator with and without HFS." + }, + { + "type": "image", + "bbox": [ + 0.129, + 0.175, + 0.877, + 0.405 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.129, + 0.405, + 0.87, + 0.537 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.129, + 0.54, + 0.869, + 0.713 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.115, + 0.725, + 0.883, + 0.784 + ], + "angle": 0, + "content": "Figure H.17: 2D Kolmogorov flow prediction results. (a) Ground truth solutions. (b) NO predictions. (c) HFS-enhanced NO predictions. (d) The corresponding energy spectra \\(((p(k))\\) for predictions at each time-step. (e) Zoomed-in view of energy spectra showing only the high wavenumbers for better visualization of the differences. The legends in (d) are applicable to (e) as well." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.488, + 0.917, + 0.51, + 0.929 + ], + "angle": 0, + "content": "37" + } + ] +] \ No newline at end of file diff --git a/data/2025/2503_13xxx/2503.13695/56a188b9-c8a3-4011-b41c-a815bc66d1a0_origin.pdf b/data/2025/2503_13xxx/2503.13695/56a188b9-c8a3-4011-b41c-a815bc66d1a0_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..115a0fe1fa4db8da29bca86f0c6af7b25aa8f7b8 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/56a188b9-c8a3-4011-b41c-a815bc66d1a0_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:12ae1ecf56c0846dbadb932ba64920c60494df9dd20e0138520ff345d7a7d0db +size 11442464 diff --git a/data/2025/2503_13xxx/2503.13695/full.md b/data/2025/2503_13xxx/2503.13695/full.md new file mode 100644 index 0000000000000000000000000000000000000000..37591f2321d432dd1b5f136b57afedaf490552ae --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/full.md @@ -0,0 +1,653 @@ +# Mitigating Spectral Bias in Neural Operators via High-Frequency Scaling for Physical Systems + +Siavash Khodakaramia, Vivek Oommenb, Aniruddha Boraa, George Em Karniadakisa,c,* + +$^{a}$ Division of Applied Mathematics, Brown University, Providence, RI, 02912, USA + +$^{b}$ School of Engineering, Brown University, Providence, RI, 02912, USA + +$^{c}$ Pacific Northwest National Laboratory, Richland, WA, 99354, USA + +# Abstract + +Neural operators have emerged as powerful surrogates for modeling complex physical problems. However, they suffer from spectral bias making them oblivious to high-frequency modes, which are present in multiscale physical systems. Therefore, they tend to produce over-smoothed solutions, which is particularly problematic in modeling turbulence and for systems with intricate patterns and sharp gradients such as multi-phase flow systems. In this work, we introduce a new approach named high-frequency scaling (HFS) to mitigate spectral bias in convolutional-based neural operators. 
By integrating HFS with proper variants of UNet neural operators, we demonstrate a higher prediction accuracy by mitigating spectral bias in single and two-phase flow problems. Unlike Fourier-based techniques, HFS is directly applied to the latent space, thus eliminating the computational cost associated with the Fourier transform. Additionally, we investigate alternative spectral bias mitigation through diffusion models conditioned on neural operators. While the diffusion model integrated with the standard neural operator may still suffer from significant errors, these errors are substantially reduced when the diffusion model is integrated with a HFS-enhanced neural operator. + +Keywords: Neural operator, Spectral Bias, Two-phase flow, Boiling, Kolmogorov flow, Diffusion model + +# 1. Introduction + +Design and control problems in engineering often require repeated simulation of the underlying physical system, necessitating the solution of governing partial differential equations (PDEs) multiple times. For a wide range of applications from fluid dynamics to material science, classical discretization-based direct numerical simulation (DNS) [1, 2, 3, 4] has been the cornerstone of scientific computing. While the methods for DNS have matured significantly over the past several decades, their computational cost becomes prohibitive when performing repeated simulations over varying parametric conditions or configurations. This challenge has fueled a growing interest in developing computationally efficient surrogate models capable of approximating these simulations at only a fraction of the cost. + +In particular, the classical DNS can estimate the solution for a given set of conditions. If one of these conditions is modified, the solver has to be re-run, further aggravating the computational cost. To mitigate this issue, neural operators were developed to handle a plurality of + +conditions and parametric settings [5, 6, 7, 8, 9, 10, 11]. 
Neural operators, which are based on the universal operator approximation theorem [12], are trained to learn the mapping between infinite-dimensional functional spaces. Although it is expensive to train such surrogates offline, a trained neural operator can efficiently estimate solutions of unseen conditions almost instantaneously during inference. Many studies have used neural operators as surrogates to learn physical problems in space and time. Various physical problems such as vortex-induced vibration [13], crack nucleation and propagation [14], Riemann problems [15], turbulence [16, 17], plasma modeling [18], and many more have been solved, at least under limited conditions, by neural operators. Furthermore, other studies [19, 20, 21] attempted to learn the temporal evolution of two-phase microstructures in diffusion-driven processes such as spinodal decomposition and dendritic growth. However, very few studies have investigated the application of neural operators for buoyancy-dominated or advection-dominated two-phase flow problems, such as those encountered in boiling and condensation [22]. + +# 1.1. Neural operators and applications in two-phase flow modeling + +Modeling and predicting two-phase flow during boiling is one of the most challenging problems in computational fluid dynamics. These phenomena involve complex interface dynamics and phase transitions, resulting in high-frequency spatio-temporal variations that are both challenging and computationally expensive to capture. Analyzing the solutions of such a system reveals a slowly decaying energy spectrum, where even the high wavenumbers carry a nontrivial amount of energy that cannot be neglected. Effective modeling of a two-phase flow system requires the neural operators to accurately predict spatio-temporal evolution of both low and high wavenumber modes. Unfortunately, neural networks and neural operators suffer from spectral bias [23, 24, 25], which makes them oblivious to high wavenumber modes. 
Consequently, the neural operators can only offer an over-smoothed prediction that fails to capture the intricate features near the interfaces where the sharp gradients are commonly observed. + +Previous studies in boiling modeling with neural operators also confirm the spectral bias problem. [26] used DeepONet [5] to solve for the transient solution of a single bubble growth. Their findings demonstrate that DeepONet can effectively capture the mean component of the solution in the microscale regime, but it fails to accurately predict the stochastic fluctuations described by high-frequency components of the solution. A study by Jain et al. [27] on the prediction of multiphase flow through porous media with UNet [28] also showed that larger errors occurred near the interfaces. The Fourier neural operator (FNO) [6] also suffers from spectral bias [29]. The common practice of truncating high-frequency modes in FNOs leads to the loss of rich information, hindering the accurate modeling of chaotic systems in multi-phase heat transfer and turbulence. However, without truncation, training FNOs becomes unstable [9]. + +A recent study by Hassan et al.[30] collected a valuable boiling dataset based on Flash-X simulations [31] and developed neural operators based on different structures such as UNet, FNO, and group equivariant FNO (GFNO) for prediction in boiling problems. As shown in the results of our work, the previously best neural operator still struggles to capture high-frequency modes, which are prominently observed within the bubbles, along the interfaces, and in condensation traces in subcooled pool boiling. These over-smoothened solutions highlight the need for further advancements to mitigate spectral bias in modeling phase-change and multi-phase flow phenomena. Similarly, spectral bias of neural operators cannot be overlooked when mod- + +eling other chaotic systems like turbulence [32], where small-scale, low-energy features play a crucial role. + +# 1.2. 
Spectral bias mitigation strategies + +Previous studies have proposed various methods to mitigate spectral bias and over-smoothing in deep neural networks (DNNs). Cai et al. [33] proposed a multi-scale DNN (MscaleDNN) to enhance approximations over a wide range of frequencies for the solution of PDEs. Tancik et al. [34] proposed Fourier feature mapping for coordinate-based multilayer perceptron (MLP) to tackle spectral bias in image regression tasks in low dimensional domains. Wang et al. [35] used Fourier feature mapping along with Physics-informed Neural Networks (PINNs) [36, 37] to enhance the multi-scale PDE solutions by mitigating the spectral bias compared to vanilla PINN. A better optimization of activation functions have been also shown to slightly reduce spectral bias of DNNs and PINNs [38, 39]. Phase shift DNN is another method converting high-frequency component of the data into low frequency spectrum, which can be learned and represented by a DNN. Subsequently, the learned representation is converted into the original high-frequency. However, phase shift DNN suffers from the curse of dimensionality [40]. + +Efforts have also been made to mitigate the spectral bias encountered by neural operators trained to learn spatiotemporal systems. Lippe at al. [41] developed PDE-Refiner, which iteratively adds noise to perturb different scales of the system and trains the neural operator to correct these corrupted states. Zhang et al. [42] developed Hybrid Iterative Numerical Transferable Solver (HINTS) to exploit the spectral bias in solving large linear systems by blending neural operators and relaxation methods. Generative Artificial Intelligence (GenAI)-based algorithms are also emerging as effective methods to overcome the spectral bias barrier. Wu et al. [43] accurately reconstructed the small-scale structures accompanying turbulent boundary layers in wall turbulence using Super Resolution Generative Adversarial Networks (SRGANs). Wang et al. 
[44] developed a framework based on GANs to reconstruct high spatiotemporal resolution supersonic flow states from sparse measurements. Molinaro et al. [45] developed GenCFD using score-based diffusion models to learn three-dimensional turbulence in compressible and incompressible flows. Lockwood et al. [46] used denoising diffusion probabilistic models to refine the estimates of tropical cyclone wind intensities. Oommen et al. [47] addressed the spectral limitations of neural operators in modeling a series of turbulent systems by training a conditional score-based diffusion model conditioned on the neural operator as prior. + +In this work, we first propose the use of UNet with residual blocks (ResUNet) to achieve more accurate two-phase flow predictions compared to the state-of-the-art neural operators. Subsequently, we present a new method named high-frequency scaling (HFS) to mitigate spectral bias in two-phase flow predictions. Our approach demonstrates higher accuracy and better alignment of energy spectra, with negligible additional memory requirements and only a small computational overhead on the neural operator. We applied HFS to different variants of ResUNet. Finally, we explore the dependency of diffusion models on the prior accuracies when integrated with neural operators. Specifically, we show that the integration of the diffusion model with neural operators equipped with HFS results in further mitigation of spectral bias without compromising prediction accuracy. We demonstrate the effectiveness of our methodology for both two-phase and single-phase flows. + +The manuscript is organized as follows. We start with providing an in-depth description about neural operators, HFS, and diffusion models in Section 2. We present the results of our + +investigations in Section 3, followed by discussion and summary in Sections 4 and 5, respectively. In the Appendix, we include more technical details and additional results. + +# 2. Methods + +# 2.1. 
Neural Operators + +The mathematical operator $\mathcal{N}$ that governs the temporal evolution of a time-dependent system can be expressed as, + +$$ +\boldsymbol {u} (\boldsymbol {x}, t + \Delta t) \approx \mathcal {N} (\boldsymbol {u} (\boldsymbol {x}, t)) (\Delta t), \tag {1} +$$ + +where $\mathbf{u}$ is the representative state variable(s) of interest. The objective here is to train a neural operator $\mathcal{F}_{\theta}$ to learn the true underlying operator $(\mathcal{N})$ by, typically, minimizing the mean of an error norm such as $\| \pmb {u}(\pmb {x},t + \Delta t) - \mathcal{F}_{\theta}(u(\pmb {x},t))(\Delta t)\| _2$ . + +In this work, we focus on resolving solutions in pool boiling problems and single-phase turbulent flows. We start our analysis with pool boiling problems. Then, we investigate the application of our method on single-phase turbulent flows. There have been several efforts to use neural operators to learn temperature and flow dynamics in two-phase flow problems. Here, we demonstrate the advantage of using the ResUNet structure compared to previously developed neural operators such as UNet and FNO for two-phase flow problems with high-frequency features [48]. The models are trained to predict future temperatures based on temperature history and velocity information. The problem configuration is shown in Equation 2, where $x$ is the spatial mesh, $T$ is the temperature, $V$ is the velocity, $k$ specifies the prediction time interval length, and $\mathcal{F}_{\theta}$ is the trained neural operator. + +$$ +T (\boldsymbol {x}, t: t + k \Delta t) = \mathcal {F} _ {\theta} (T (\boldsymbol {x}, t - k \Delta t: t), V (\boldsymbol {x}, t - k \Delta t: t + k \Delta t)) \tag {2} +$$ + +UNet with residual blocks (ResUNet) was first introduced for a semantic segmentation task by imposing skip connections between convolutional layers in a UNet-like structure [49]. 
We use the same idea to add skip connections in the form of residual blocks to both the encoder and decoder side of the UNet. The residual blocks have been shown to mitigate vanishing gradient problems by offering a smoother optimization landscape [50]. We also demonstrate that they help with the better flow of information in the network for complex datasets such as two-phase flows, which results in a better capture of localized features, possibly reducing the spectral bias towards low-frequency components. We also introduced several modifications, such as the GELU activation function and group normalization, that demonstrated superior prediction accuracy. We used the mean squared error (MSE) loss function in all prediction time steps (Equation 3) as the objective criterion to train the model, i.e., + +$$ +L (\theta) = \frac {1}{N _ {u} k} \sum_ {i = 1} ^ {N _ {u}} \sum_ {j = 1} ^ {k} \| T ^ {i} (\boldsymbol {x}, t + j \Delta t) - \mathcal {F} _ {\theta} (T ^ {i} (\boldsymbol {x}, t)) (j \Delta t) \| _ {2} ^ {2} \tag {3} +$$ + +We used the Lion optimizer [51] to perform the optimization as we observed superior performance with this optimizer compared to the conventional Adam optimizer [52]. More details about the ResUNet structure, the training hyperparameters, and comparison with UNet predictions are included in Appendix A. + +We evaluated our baseline neural operator on both saturated and subcooled pool boiling datasets from the BubbleML data repository, which is generated through Flash-X simulations + +[53] and were collected in a previous study [48]. It should be noted that predictions in sub-cooled boiling is more difficult due to the vortices generated by condensation trails. Therefore, the errors are higher in subcooled boiling predictions, and the results look more over-smoothed compared to saturated boiling prediction results. A visualization of the subcooled boiling prediction results is shown in Appendix A. 
A comprehensive comparison of our baseline model with the previous best baseline model developed by [48] is included in Table 1 and Table 2 for saturated and subcooled pool boiling dataset, respectively. The ResUNet improves the resolution of high-frequency features, resulting in higher prediction accuracy. We note that given the possible differences in the testing dataset, the one-to-one comparison with the previously reported numbers may not be fair. Therefore, we trained and tested the previously reported best model (e.g., UNet) with our dataset configuration, which consists of a larger test dataset and smaller training dataset compared to the previous work. + +We evaluated our model using six different field metrics relevant to two-phase problems. These metrics include relative error (Rel. Error), root mean square error (RMSE), boundary RMSE (BRMSE) showing the error on the boundaries, bubble RMSE showing the error in the bubble areas and at the interfaces, mean maximum error $(\mathrm{Max}_{\mathrm{mean}})$ showing the mean of the maximum error for each prediction, and overall maximum error $(\mathrm{Max}_{\mathrm{max}})$ showing the maximum error over the test dataset. We also evaluated the predictions in three different frequency bands using spectral errors at low frequency $(F_{\mathrm{low}})$ , medium frequency $(F_{\mathrm{mid}})$ , and high frequency $(F_{\mathrm{high}})$ . Exact definitions of BRMSE and bubble RMSE, as well as spectral errors are described in Appendix B. All the metrics are computed on the normalized dataset $(T^{i}(\boldsymbol{x},t + j\Delta t)\in [-1,1]\forall \{i,j\})$ . For all the results, the state of the temperature at five future time-steps are predicted based on five time-step previous temperature history and the velocity information in two spatial dimensions. + +Table 1: Saturated pool boiling temperature prediction errors. The training dataset consists of simulations from 11 different wall temperatures. 
The test dataset consists of simulations with two other wall temperatures $(70^{\circ}\mathrm{C},$ and $95^{\circ}\mathrm{C})$ not seen during training. + +
UNetResUNet
Rel. Error0.01910.0149
RMSE0.01890.0148
BRMSE0.05820.0364
Bubble RMSE0.1160.0726
Maxmean0.7050.553
Maxmax1.2041.154
Flow0.1050.0745
Fmid0.1130.0919
Fhigh0.02380.0182
Parameters [Millions]7.83.5
+ +Table 2: Subcooled pool boiling temperature prediction errors. The training dataset consists of simulations from eight different wall temperatures. The test dataset consists of simulations with two other wall temperatures (95°C, and 98°C) not seen during training. + +
UNetResUNet
Rel. Error0.05160.0295
RMSE0.05010.0288
BRMSE0.1390.0646
Bubble RMSE0.2690.127
Maxmean1.1410.837
Maxmax2.2791.433
Flow0.3460.157
Fmid0.3670.197
Fhigh0.05830.0370
Parameters [Millions]7.83.5
+ +The results in Tables 1 and 2 demonstrate that all the metrics are improved by simply introducing residual blocks in the network, a better optimizer, and a better normalization. For example, there is approximately $21\%$ and $42\%$ reduction of RMSE in saturated and subcooled boiling, respectively. Interestingly, the ResUNet achieves better accuracies with less than half of the number of parameters in UNet. Most of the prediction errors occur within the bubble areas and at the condensation trails. This is due to the larger gradients in the bubble areas and around condensation trails resulting into more complex patterns that are more challenging to capture with the neural operator. This is expected as the neural operators are known to have spectral bias to low-frequency modes. The high-frequency content typically exists in regions with significant gradients such as interfaces and condensation trails. In subcooled pool boiling, departing bubbles may condense after departure, creating vortices that gradually dissipate over time. These vortices form complex structures containing higher energy at high frequencies. As a result, subcooled boiling presents greater prediction challenges compared to saturated boiling. For instance, prediction spectral errors $(F_{\mathrm{low}}, F_{\mathrm{mid}}, F_{\mathrm{high}})$ are approximately two times higher in subcooled boiling, highlighting the increased complexity with the high-frequency content. + +While the residual blocks improve the neural operator's ability to reduce field errors (e.g., RMSE) and over-smoothing of certain high-frequency contents, the results still suffer from significant over-smoothing (see Appendix A). Previous studies have also shown the oversmoothing issue of convolutional based neural operators for image generation tasks and scientific computing [54, 55]. 
Other studies demonstrated the frequency selectiveness of convolutional neural network (CNN) architectures resulting in different learning rates for low and high-frequency components [56, 57]. Wang et al. [58] demonstrated the spectral bias in vision transformers (ViT) through Fourier analysis. They showed that the problem arises by self-attention layers that act as low-pass filters and continuously reduce high-frequency information with the network depth. A feature scaling technique was proposed to decompose the attention output signal into direct and high-frequency components and scale them separately to adjust the proportion of different frequencies of the signal. We draw inspiration from this technique and propose a similar approach to separately scale low frequency and high-frequency components of the features in the latent space of the neural operator to mitigate spectral bias. + +# 2.2. High-frequency scaling + +As discussed in Section 2.1, neural operators suffer from spectral bias. While residual blocks offer improvements up to some extent, they cannot effectively mitigate the spectral bias inherent in the neural operators. Hence, we propose the high-frequency scaling (HFS) approach to be applied to the output of convolutional layers. The latent feature map of each convolutional layer is first divided into non-overlapping patches, similar to the first step in vision transformers. This will break down the spatial dimensions into smaller regions, which empirically will allow for better localized processing. We consider the mean of the patches as the direct component (DC) of these signals. Then, the high-frequency component (HFC) for each patch can be defined as the difference of each patch with the DC. It should be noted that here the DC is calculated across the patches and not individually for each patch. Then, we introduce two parameter groups of $\lambda_{\mathrm{DC}}$ and $\lambda_{\mathrm{HFC}}$ to separately scale the DC and HFC for each patch. 
We then re-assemble the patches to the original latent feature size before the next operation. + +A more rigorous description of the method is as follows: Let $X \in \mathbb{R}^{H \times W \times C}$ be the output feature map of a convolutional layer, where $H$ , $W$ , and $C$ are the height, width, and number of channels, respectively. We divide $X$ into $N$ non-overlapping patches of size $p \times p$ denoted as $X^{(i)} \in \mathbb{R}^{p \times p \times C}$ , where $i \in \{1, \dots, N\}$ . The DC is defined as the mean patch across all $N$ patches as shown in Equation (4). The HFC calculation for each patch and the scaling step are shown in Equations (5-6): + +$$ +D C (X) = \frac {1}{N} \sum_ {i = 1} ^ {N} X ^ {(i)}, \tag {4} +$$ + +$$ +H F C \left(X ^ {(i)}\right) = X ^ {(i)} - D C (X), \tag {5} +$$ + +$$ +\hat {X} ^ {(i)} = X ^ {(i)} + \lambda_ {D C} \odot D C (X) + \lambda_ {H F C} \odot H F C (X ^ {(i)}). \tag {6} +$$ + +The scaled feature map can then be reconstructed by re-assembling the $\hat{X}^{(i)}\mathrm{s}$ . + +The scaling parameters $\lambda_{DC} \in \mathbb{R}^{1 \times 1 \times C}$ and $\lambda_{HFC} \in \mathbb{R}^{1 \times 1 \times C}$ are left to be learnable parameters that are optimized using gradient descent simultaneously with the network optimization. Here, we initialized the parameters to be one and optimized them with the same learning rate used for network training. In ResUNet structure, HFS is applied to the output of both convolutional layers and the skip-connection paths with $1 \times 1$ convolutions or identity skip-connections. In practice, HFS can be seen as a new module incorporated to each layer of the encoder and decoder, as shown in Fig. 1. Fig. 1 also depicts examples of the learned feature maps for models with and without HFS. The most similar feature maps between the models from the first encoder layers and the last decoder layers are depicted. 
The model with HFS learns features with more pronounced high-frequency content and reduced over-smoothing, which possibly enhances the capture of high-frequency components of the solution and mitigates spectral bias of the neural operator. A summary of the improvements in prediction accuracy achieved through HFS is provided in Appendix C. + +![](images/cb76dc3b8182510948260c393fd0b1837e9635110ffd915e5f5771e0712f0739.jpg) +Figure 1: Structure of the HFS-enhanced NO. (a) Schematic of the HFS module (right) integrated with the residual block (left). (b) Structure of the ResUNet with the HFS modules (blocks in front of conv layers). (c) An example of a learned latent space feature from the first layer of the encoder trained with and without HFS. The most similar feature maps of the models in the first encoder level are shown. (d) An example of a learned latent space feature from the last layer of the decoder trained with and without HFS. The most similar feature maps of the two models at the last decoder level are shown. (e-f) Examples of temperature prediction with NO and HFS-enhanced NO at two different time-steps. A region with high-frequency features (top right corner) is zoomed in for better visualization. + +![](images/686e2d01f5e6a349e72871fbccfb325e6cbbd5c81366e8d859267b2c0dfd9c41.jpg) + +![](images/0b11dca30d91ab7cc7f583b42ca3b48537df8a367e51724eab149a411f6447b1.jpg) + +![](images/86cb337727b5fed499f55faf0f9b8ba65d56df1ad1aefcc124e2d92f7f7ac2c2.jpg) + +![](images/4c7260a04619c0b1d7dbd6af86e9d60a152d47c4eda51b99716497509cce91f6.jpg) + +![](images/5920e7c922da97a60a54c9bbdc9099dafa8582f64a270ac461208c70d6889c24.jpg) + +# 2.3. Diffusion Model + +As discussed earlier, the NO and the HFS-enhanced NO learn the solution by minimizing some variant of the Euclidean distance, such as MSE, RMSE, relative $L^2$ or relative $L^1$ norms of the errors, between the true and predicted states. 
Unfortunately, such a loss function effectively prioritizes the error at those wavenumbers that bear higher energy. The systems considered in this study exhibit a decaying energy spectrum, implying that the lower wavenumbers carrying higher energy will be over-represented, while the higher wavenumbers that bear lower energy will be ignored due to its minimal influence on the Euclidean distance-based loss function. The recent efforts aimed at improving the spectral bias of NO using GenAI algorithms, discussed in Section 1, motivated us to explore this route. Specifically, we investigate if diffusion models [59] can help further refine the predictions estimated by NO and HFS-enhanced NO. + +Diffusion models (DM) are generative frameworks capable of producing samples that align with the true underlying function distribution, $\mathcal{T}$ , given a limited set of observations from $\mathcal{T}$ . These models achieve sample generation by progressively refining a simple prior distribution, such as a standard normal distribution $(\Gamma_0 = \mathcal{N}(0,I))$ , into the desired complex distribution $(\Gamma_N \approx \mathcal{T})$ over $N$ iterative steps. + +The diffusion process begins with an initial sample $\mathbf{X_0}$ drawn from $\Gamma_0$ and subsequently predicts $\mathbf{X}_1$ . Since $\Gamma_0 = \mathcal{N}(0,I)$ , obtaining $\mathbf{X}_0$ is straightforward. The model then iteratively refines the sample, estimating $\mathbf{X}_{i + 1}$ from $\mathbf{X}_i$ over $N$ steps. However, a key challenge arises on how to train the diffusion model to transition from $\Gamma_0 = \mathcal{N}(0,I)$ to $\Gamma_N\approx \mathcal{T}$ when intermedi- + +ate distributions $\Gamma_{i}$ for $i = \{1,2,3,\ldots ,N - 1\}$ are not explicitly available. This challenge is addressed using denoising score matching combined with Langevin dynamics [60]. 
The objective of a score-based diffusion model is to estimate the score function, which is defined as $s_{\theta_D}(\mathbf{X}) = \nabla_X\log p(\mathbf{X})$ , where $\theta_{D}$ represents the parameters of the diffusion model and $p$ is the probability density of $\mathbf{X}$ , where $\mathbf{X}$ corresponds to continuous realizations of $\mathbf{X}_i\sim \Gamma_i$ . Since the exact data distribution is unknown and may reside on a lower-dimensional manifold, the score function can become ill-defined in regions lacking data. To mitigate this issue, Gaussian noise is added to perturb the data, ensuring a well-defined score function across the entire space by smoothing the distribution. The score function provides a directional gradient toward regions of higher probability. However, a direct mechanism to sample from the learned distribution is still absent. + +This limitation is overcome using Langevin dynamics, as proposed in [61]. Langevin dynamics ensures that the generated samples converge to the true underlying distribution by balancing deterministic motion, driven by the gradient of the log probability, with stochastic exploration introduced by noise. In our approach, we condition the score function on the output of the pre-trained NO, $\mathcal{F}_{\theta}$ , leading to the modified score function: + +$$ +s _ {\theta_ {D}} (\mathbf {X}, \sigma , \mathcal {F} _ {\theta}) = \nabla_ {X} \log p (\mathbf {X} | \mathcal {F} _ {\theta}), \tag {7} +$$ + +where $\sigma$ represents the noise level. This conditioned score function guides the DM to sample from the posterior distribution of $\mathbf{X}$ given $\mathcal{F}_{\theta}$ , ensuring that the generated samples are consistent with both the structures imposed by $\mathcal{F}_{\theta}$ and the true data distribution. 
The update rule for Langevin dynamics is given by: + +$$ +\mathbf {X} _ {j + 1} = \mathbf {X} _ {j} + \frac {\varepsilon}{2} s _ {\theta_ {D}} (\mathbf {X} _ {j}, \sigma_ {j}, \mathcal {F} _ {\theta}) + \sqrt {\varepsilon} z _ {j}, \tag {8} +$$ + +where $\varepsilon$ is the step size, $z_{j}$ is the noise component, and $\sigma_{j}$ denotes the noise scale at iteration $j$ during the sampling process. The iterative denoising of the noised states by a diffusion model conditioned on the outputs of a pre-trained HFS-enhanced NO is illustrated in Fig 2. + +During training, the diffusion model learns to denoise the state of the system perturbed by a noise with zero mean and $\sigma$ standard deviation, where $\ln \sigma \sim \mathcal{N}(-1.2, 1.2^2)$ . When $\sigma$ is small, the score function $s_{\theta_D}$ increasingly focuses on reconstructing high-frequency details and vice versa. In this manner, the diffusion model learns to perturb and reconstruct the signal at multiple scales, unlike the NO whose scale is fixed throughout its training, and thereby learns the structure of the underlying system across all the scales. Our implementation of the DM conditioned on the NO and HFS-enhanced NO is based on [47] that adopts the training, network architecture, pre-conditioning, and sampling routine proposed in "EDM" [62]. + +![](images/faa78b851598a3a8e280e4c419776b1b8ff245583ac31e14424ca9a458edbe62.jpg) +Figure 2: Mitigating Spectral Bias with Diffusion Model. The states estimated by the NO exhibit oversmoothing. They serve as the prior that conditions the DM, which in turn reconstructs the missing frequencies iteratively through conditional sampling. The results are based on a NO with 2 million parameters. + +# 3. Results + +# 3.1. HFS-enhanced NO for two-phase flow problems + +We first conduct several experiments with different variants of ResUNet to demonstrate the advantage of HFS in spectral bias mitigation for two-phase flow operator learning problem. 
Given the higher complexity of subcooled boiling data compared to saturated boiling data, we will focus on the subcooled boiling experiments. Examples showing the saturated boiling predictions are shown in Appendix D. Given the flexibility of our NO structure, we investigated different variants of ResUNet by varying the NO size by changing the number of parameters in the range of $\sim 2$ to $\sim 16$ million parameters. The number of parameters was changed by simply changing the number of latent feature maps at each level of the ResUNet structure. The number of downsamplings/upsamplings was kept at five steps for all the models to achieve spatially consistent resolutions at each level across all the NOs. The subcooled pool boiling dataset + +consists of 10 different simulation trajectories, two of which were used for testing. Each simulation trajectory consists of 201 time-steps. However, similar to [48], the first 30 unsteady time-steps were not included in the training and testing of the models. Fig. 3 demonstrates the variation of RMSE, BRMSE, bubble RMSE, and $\mathrm{Max}_{\mathrm{mean}}$ metrics with NO size for results obtained from NO and HFS-enhanced NO. As expected, both NO and HFS-enhanced NO exhibit error-decreasing trends with the number of parameters. However, the HFS-enhanced NO always yields lower errors compared to NO in all metrics and irrespective of the NO size. The effect of HFS is more pronounced in the bubble RMSE due to larger high-frequency content at the bubble interface and within the bubbles. For example, HFS yields $8\%$ improvement in RMSE for the 16 million NO. This improvement is $16\%$ for the bubble RMSE metric. On average, HFS decreases the RMSE and bubble RMSE by $12.4\%$ and $18.2\%$ , respectively. 
+ +![](images/404739d4e1a7bce81a61b07954c246561d4c0b5d4e5df4224d919983d7e0719e.jpg) + +![](images/ce9d9915b51cc6b01cef8d4fd9ad5004076ee8c64d31f8dc18cd381d62f2850c.jpg) + +![](images/8b521be2ad6db402353a683c8e72a14a74947a986a7ba99f5da56787754d3d44.jpg) +Neural Operator Size (Millions) + +![](images/5bd552819042709e8514a471c06051655dec81d3d173212e98e78c9851f40c04.jpg) +Neural Operator Size (Millions) +Figure 3: Temperature prediction errors of NO and HFS-enhanced NO varying with NO size. (a) Root mean square error (RMSE), (b) Boundary RMSE (BRMSE), (c) Bubble RMSE, (d) Mean maximum error. All the errors are calculated over the 5 time-step temperature predictions. The legends in (a) are applicable to (b - d) as well. All the results are based on test dataset in subcooled pool boiling. + +# 3.2. Spectral analysis of HFS-enhanced NO + +HFS reduces the over-smoothing effect, hence, the intricate features of vortices induced by condensation trails in subcooled boiling are better resolved. Moreover, HFS results in better alignment of the energy spectra to the ground truth signal, especially at high wave numbers attributed to the high frequency features. Fig. 4 depicts the enhancements obtained by adding HFS modules to NO. The prediction results of HFS-enhanced NO are improved compared to NO for all time-steps. However, the enhancement is more pronounced at later time-steps, where the NO predictions are significantly over-smoothed. + +![](images/629dde0d1af87ae717d9f62eb3415d917ebe53718ff14baae00b6da5edc7a8fa.jpg) +Figure 4: Subcooled pool boiling transient temperature prediction. (a) Ground truth (GT) temperatures for 5 consecutive time steps (from left to right) $(\Delta t = 8$ ms). (b) NO prediction results. (c) HFS-enhanced NO prediction results. (d) The corresponding energy spectra $(p(k))$ for each time step. For better visualization, the subplots in (d) show the energy spectra only for the high wavenumbers. 
The legends in the first plot are applicable to other plots as well. All the results are based on a $\sim 3.5$ M parameter NO. + +The average energy for the high-frequency component of the latent features (e.g., excluding the first $12.5\%$ frequencies at the full resolution) is generally higher for HFS-enhanced NO. This behavior is specifically seen in all the encoder layers and the last three layers of the decoder for a five-layer decoder and five-layer encoder (e.g., five downsampling and five upsampling steps). The first two layers after the bottleneck are at very low spatial resolutions and may not represent any useful spectral information. However, more high-frequency component is generated in the later stages of the decoder that are closer to the output. The NO decoder mean feature maps at each layer show low-contrast regions at both left and right side of the maps, starting from layer two to the end. However, these regions are diminished when HFS is used, showing that a more diverse set of features is generated in the decoder (see Appendix E). However, the same behavior does not necessarily exist for the encoder mean latent features, suggesting that the mean feature map may not be a good representative of the high-frequency component. Instead, analysis of individual feature maps appears to be a more appropriate approach in this case. + +Analysis of individual latent feature maps shows more pronounced high-frequency components when HFS is integrated in the NO structure. Fig. 5 depicts examples of latent features from the first layer of the encoder and the last layer of decoder. These layers are specifically chosen due to their proximity to the input and output layers, making the visualizations more understandable. When comparing similar latent feature maps, HFS reduces the excessive smoothing and increases the high-frequency component within the features in the latent space. The energy spectra plots in Fig. 
5 demonstrate similar trends for both NO and HFS-enhanced NO with the latter having larger spectral energy at the mid and high wave numbers (e.g. $k > 20$ ). For a more robust spectral analysis of latent features, we compared the individual latent features in the NO and HFS-enhanced NO with both $\sim 3.5$ and $\sim 16$ million parameter models. The HFS-enhanced NO decreases the over-smoothing in latent features when compared with a similar feature map from NO. The normalized energy spectra of these latent features exhibit larger high-frequency component with HFS-enhanced NO. This is evident in Fig. 5(b, d, f, and h), where the HFS-enhanced NO curves surpass the NO curves after a certain wave number. + +Comparison of the ratio of high-frequency component energy when calculated separately for each latent feature and then averaged over all the features at each layer in the encoder also shows consistently higher values when HFS is used. The same trend is also observed in the last three layers of the decoder. These results are shown in Fig. 5i and 5j. We observed similar trends for other samples where the ratio of high-frequency component energy to total energy in the latent space is higher when HFS is integrated with the NO. However, this advantage may not be noticeable using the mean latent feature visualization at each layer. Note that for the analysis presented in Fig. 5i and 5j, we progressively increased the threshold (from $12.5\%$ to $50\%$ ) for separating the low and high-frequency bands as the spatial dimension in the latent space decreases. This result is based on a random sample from the test dataset. Similar results were obtained with other samples. It should be noted that a one-to-one comparison of similar feature maps may provide a more reliable assessment, as not all feature maps carry equally significant information and some might be irrelevant for our analysis. 
+ +In general, the HFS-enhanced NO contains more high-frequency component in the latent space, which can help with the propagation of high-frequency information to the output, helping with the better capture of high-frequency features. The enhancement in high-frequency component is achieved without any degradation in the low-frequency components. Therefore, both field errors such as RMSE, and the spectral errors are improved (see Appendix C). + +![](images/bd897e21140790da8c2beed7842475c14e96f323cdd2ec392c15e182b1ce4a42.jpg) +(a) +NO + +![](images/26821bc824c2e55cf6de7c68946bbee89a3839676996ae1a9d5b3a48a7ec2804.jpg) + +![](images/05fba3cc6e4ff3f30a68095ffe33390bd08dc560864db76ec9a2afb800f5746a.jpg) +NO + HFS + +![](images/f60206ede39643fc5c044f9f4610b8380888747ad4c4e5778e0eb56aee2fce8c.jpg) +(b) +(d) + +![](images/32ecc7c14abb8460e4d6c4033cf8cd415b55ec2ac589b266e28f285a8d99e10a.jpg) +(e) +(g) + +![](images/675a8f9336f44185e5a4a8f988d3a39818f20adc98510f75efb9bd2996a69884.jpg) + +![](images/e7f016f46bca5309b8f0172ae1f4e4919c83884da81073d94df649bce739af4e.jpg) + +![](images/7ad97d2813f0aaf6bea9c40b37d06895f3bb84041a8c318c716f437749e17e13.jpg) +(i) + +![](images/3c80f7d8bae04c690074b0bd039bbc2ccb94c86c53fb41a6d536ea842e012cfb.jpg) + +![](images/c56982feee0b6b0776494c62e84144cadf9d1c1c5ea6d8927a0b1794c6c97899.jpg) + +![](images/b862fe7585e9b671c2dd19d6695b0f98522b4600cbfca9788e317af5b5c1ff45.jpg) + +![](images/f761d30cc71f1613eca2ac1939256b2e144c0b60cd211db5ea28ffe6a928d863.jpg) +(f) + +![](images/5f217779f4ebeb8edefda5e739dab23bb283949bec47b55a0e34af427e3e5413.jpg) +(h) +Figure 5: Latent space features in HFS-enhanced NO. (a, b) Example of latent space feature in the first layer of encoder and the corresponding normalized energy spectra $(p(k))$ in the $\sim 3.5$ million parameter models. (c, d) Example of latent feature in the last layer of decoder and the corresponding normalized energy spectra for the model with $\sim 3.5$ million parameters. 
(e) Example of latent feature in the first layer of encoder and the corresponding normalized energy spectra in the $\sim 16$ million parameter models. (g) Example of latent feature in the last layer of decoder and the corresponding normalized energy spectra in the $\sim 16$ million parameter models. (i-j) Average ratio of high-frequency energy to total energy at each layer in encoder (i) and decoder (j). Note that the low-frequency cutoff is set to the first $12.5\%$ , $18.75\%$ , $25\%$ , $37.5\%$ , and $50\%$ of the wavenumbers, from highest to lowest spatial resolutions (384 to 24 pixels), respectively + +![](images/8d4ce1826ae5537dfd8426399bd05bca2a0010646e57a820e373e7035e1a5285.jpg) +(i) + +Given the advantage of HFS in the mitigation of spectral bias towards low-frequency components, it is natural to calculate the prediction errors at different wavenumbers. Following the terminology proposed in [48], we divided the frequencies to three components including only low-frequency component (low $F$ ), mid-frequency component (mid $F$ ), and high-frequency component (high $F$ ). For all the NOs with varying number of parameters, the errors in the mid $F$ and high $F$ components are always lower for HFS-enhanced NO. The RMSE for the low $F$ component is lower for HFS-enhanced NO with one exception in the NO with $\sim 3.5$ million parameters. We attribute this to the larger enhancement observed in mid $F$ and high $F$ of the 3.5 million parameter HFS-enhanced NO, causing the operator showing larger error in the low $F$ as it fails to reduce the errors in all three components simultaneously. Visualization of each frequency component and the average spectral errors in each component are shown in Fig. 6. + +![](images/62f709bfa82bfda8d899c4456f8d18cb4aaae24a3a3354dc86d148ef45498e67.jpg) +Figure 6: Impact of HFS on spectral errors at different frequency bands (a-b) Examples showing the input, low, mid, and high-frequency contents of the input. 
(c-e) Spectral error ($F$. Error) of low, mid, and high-frequency bands over the test dataset. For these results, the low-frequency cutoff is set to the first $2\%$ of the frequencies. The mid frequency band includes the first $6.2\%$ of the frequencies excluding the first $2\%$ . The high-frequency band includes the last $93.8\%$ of the frequencies. + +# 3.3. HFS parameter history + +The DC and HFC of the signals are scaled using two learnable parameters, $\lambda_{DC} \in \mathbb{R}^{1 \times 1 \times C}$ and $\lambda_{HFC} \in \mathbb{R}^{1 \times 1 \times C}$ . These parameters remain consistent across all patches in each latent space feature map, and also across all batches of the dataset. Therefore, the parameters are optimized based on all the samples in the training dataset. However, they are allowed to vary freely across the feature channels at each layer. This design enables the model to adaptively scale each channel based on its content. For instance, a feature channel with a larger high-frequency component can be scaled differently than a smoother feature channel. This flexibility enhances the effectiveness of HFS while minimizing the computational costs and reducing the optimization burden by maintaining fixed parameters across patches and samples. To better understand the learning process of $\lambda_{DC}$ and $\lambda_{HFC}$ , the histories of these parameters during the training phase in each of the encoder and decoder layers are shown in Fig. 7. The results in Fig. 7 show the mean $\lambda_{DC}$ and $\lambda_{HFC}$ across all latent features at each layer. The mean $\lambda_{HFC}$ is always larger than the mean $\lambda_{DC}$ , demonstrating that the model is learning to scale HFC with larger weights, enhancing the representation of the HFC. Also, the optimized mean $\lambda_{HFC}$ is higher in the deeper layers of the encoder. However, no such behavior is observed in the decoder. 
Another interesting observation is that the abrupt change in the slope of the $\lambda_{DC}$ history curves + +( $\sim$ iteration $160 \times 10^{3}$ ) aligns well with the iteration when overfitting starts. After this iteration, the error over training dataset keeps decreasing but the error over validation dataset increases, leading to larger generalization gap. The dashed lines in Fig. 7 specify the iteration at which the validation dataset error is minimum. + +It should be noted that $\lambda_{DC}$ and $\lambda_{HFC}$ are both free of any constraints and are automatically learned during the model optimization. However, comparing the final values of these parameters align well with the heuristic viewpoint proposed in our work. The larger values of $\lambda_{HFC}$ imply that the HFC of the signals are better preserved and propagated through layers with HFS. This could explain why the HFS-enhanced NO results resolve high-frequency features better, and why the spectral bias of the NO is mitigated. + +![](images/836bc2f95d5c77926d61af7c14d4b1113300a5ed667f5ba49106606f9e5c07d1.jpg) + +![](images/dda5aca73cc52ff6e026a688772971bb0db28707f5dbb8771a941a8018016d77.jpg) + +![](images/eac42b74e203c3984876f446ef93ec5dd5fca48a451c96c024fa9dd7291c3fc6.jpg) +Figure 7: $\lambda_{DC}$ and $\lambda_{HFC}$ histories during training phase of the HFS-enhanced NO. (a, b) $\lambda_{DC}$ and $\lambda_{HFC}$ training history in all 5 layers of encoder. Note that layer 1 and layer 5 are defined as layers at highest and lowest spatial resolution, respectively, in the encoder. (c, d) $\lambda_{DC}$ and $\lambda_{HFC}$ training history in all 5 layers of decoder. Note that layer 1 and layer 5 are defined as layers at lowest and highest spatial resolution, respectively, in the decoder, which is the opposite terminology used in encoder. The dashed lines specify the iteration from which overfitting on the training dataset starts. 
The results are based on the training of a model with $\sim 1.7$ million parameters and $\lambda_{DC}$ and $\lambda_{HFC}$ were initialized at 0.85 and 1.15, respectively. + +![](images/1a0ef49c31cd391865df11f05db9cb02c6029f97bb662e0074c4d4064dacdf5c.jpg) + +# 3.4. Kolmogorov flow + +To evaluate the effectiveness of HFS in mitigating spectral bias in a more chaotic system, we applied it on the prediction of a standard benchmark, namely the 2D Kolmogorov flow problem. This problem is governed by the unsteady and incompressible Navier-Stokes equations for a viscous fluid subject to a forcing term. The vorticity form of the problem is defined in + +Appendix H. We generated the dataset [63] using a publicly available pseudo-spectral solver [6]. The dataset consisted of 1000 samples with $80\%$ , $10\%$ , and $10\%$ of them being used for training, validation, and testing respectively. We trained the NO with and without HFS to learn the mapping $\omega(x,y,t)\big|_{t\in[0,10]} \to \omega(x,y,t)\big|_{t\in[10,t_{final}]}$ , where $\omega$ is the vorticity. Here, we used $t_{final} = 12.5$ s, and a NO with $\sim 1.7$ million parameters as the benchmark. We optimized the hyperparameters based on the NO performance without HFS and then used the same hyperparameters for training the NO with HFS. This ensured that any improvement achieved with HFS was solely attributed to its effect and not simply due to differences in optimization strategies or hyperparameters. Although not specifically designed for turbulent problems, the HFS-enhanced NO demonstrated improvements over the NO for the 2D Kolmogorov problem, reducing the relative error from $5.3\%$ to $4.7\%$ . Comparison of the energy spectra of the HFS-enhanced NO predictions also demonstrated better alignment with the ground truth solutions at high wavenumbers. The prediction results for snapshots chosen through random sampling from the test dataset are shown in Fig. 8. 
High-frequency features are more accurately captured and the energy spectra alignment at high wavenumbers is enhanced with the HFS-enhanced NO. We should acknowledge that HFS was effective for this problem only when the NO already provided reasonably accurate predictions. If the NO produced extremely over-smoothed predictions, integrating HFS offered little to no improvement. More detailed results showing the temporal predictions are shown in Appendix H. + +The improvements in predicting Kolmogorov flow are less pronounced compared to the two-phase flow problem. This is due to the different underlying structures of the solution maps. The HFS approach operates by decomposing the feature maps into low-frequency and high-frequency components through observing the patches as different signals. This approach is most effective for the data with localized features, making the DC and HFC of the signals significantly different. For example, this is true for the subcooled pool boiling dataset with localized features at the bubble interface and condensation trails. For the data with similar features across all regions, the distinction between DC and HFC diminishes, thus reducing the impact of HFS. + +![](images/90f68fd7a6962a77f49e5e72d7250151fc08ad8098feb7692cd4ae7c3d8d8efd.jpg) +Figure 8: HFS-enhanced Kolmogorov flow predictions. (a-c) denote different samples chosen randomly from the test dataset. Each example shows the ground truth (GT), NO and HFS-enhanced NO predictions along with the energy spectra $(p(k))$ for each prediction. + +# 3.5. Diffusion Model Results + +We investigated further mitigation of spectral bias using the score-based diffusion model (DM) with HFS-enhanced NO predictions as the prior. Specifically, we first conducted a systematic study to investigate the effect of NO prediction accuracy, obtained by varying the number of parameters in the NO, on the diffusion model performance. 
Second, we demonstrated that using HFS-enhanced NO can further help the diffusion model to match the correct energy spectra of the solutions without degrading the mean prediction errors. Since the NO predictions are used as priors to diffusion model, the accuracy of diffusion model predictions is strongly influenced by the reliability of these priors. For example, if the prior information is significantly erroneous or over-smoothed, then the diffusion model struggles to accurately recover the missing frequencies without compromising the accuracy of the mean predictions. + +Fig. 9 shows the subcooled pool boiling prediction results of DM conditioned on NO and + +HFS-enhanced NO predictions. Other prediction examples with DM integrated with NO and HFS-enhanced NO are visualized in Appendix F. When the NO predictions have significant errors, the DM can barely mitigate those errors. However, when HFS is integrated with the NO, the significant errors at large structures are reduced, and high-frequency components of the solutions are captured more accurately compared to $\mathrm{NO} + \mathrm{DM}$ predictions. In addition, when the DM is integrated with the HFS-enhanced NO predictions, the DM is able to more accurately reconstruct intricate features that are already enhanced through more accurate predictions provided by HFS-enhanced NO. Therefore, less over-smoothing is observed in the $\mathrm{NO} + \mathrm{HFS} + \mathrm{DM}$ predictions and spectral bias is further reduced. It can be seen that both HFS and DM are helping with the capture of high-frequency features. DM cannot fix significant errors caused by NO predictions at large scale features (e.g., bubble interfaces). However, HFS reduces the errors around large scale features in addition to enhancing the smaller scale features. When DM is integrated with HFS-enhanced NO, it further enhances the small scale features. The quantitative metrics are shown in Fig. 10. 
It should be noted that the models are trained with a different set of hyperparameters for the results shown in Fig. 10 compared to the previous results (Fig. 3). However, HFS enhanced the prediction results of NO, irrespective of hyperparameters (either optimal or non-optimal hyperparameters), as long as the same hyperparameters are used for training both NO and HFS-enhanced NO. + +![](images/f77d7cde018d6c74bb0d627654ab043227cdb2f775c121acb59c5727d34539a7.jpg) +(a) + +![](images/14d2c43c9f26237111d97d362572890aad6cd1ac9400cb8eb6973553a8a9c8d1.jpg) + +![](images/824288cda52c2891f4d10fcc048556f5025e12a9a08916a114c2879b54a5a4e0.jpg) + +![](images/d434f1115c71df8fc03f6ff24ee680bd8baf845b45ea59fc5cacb1faa46bfbf1.jpg) + +![](images/cfcb1f30c856f0902c80a6ca75eb09c6526ab9c4fc6d870998c725e3266546de.jpg) + +![](images/4b5053f22f5d1045d4bc1f94ad7a3d9e743281a60a70f0fe826b8afd6b118513.jpg) +(b) + +![](images/4077f969a3d5c28f84a171a7ac2a8f647114a1140ff1cab20e790c3e6be946c6.jpg) + +![](images/bfd8e918449ad4e2c5b9c9505fa08483327fc17c132d390ea6522a9ebfd8e7c7.jpg) + +![](images/2e662c6924e4dc916926501fd33cf6fb7658ebfdf77a643b1876c42b62c9c1aa.jpg) + +![](images/e9182f9b9d24aea5d10f9e8d93f5432f03b021167555925e5f3ae7c3a8199e16.jpg) + +![](images/b5e173640877cb0cef234d0b4ae167124b4fc270eef3d2f99ffc99eb772ed691.jpg) +(c) +(d) + +![](images/9dd2b8fb0cc2df44a4b3dffb169d3d12295ff795a4bc3448676f90a768ee5d25.jpg) + +![](images/21d6dea1825b413f89957217e3a9148548072fa1907281353e79782273e0fc08.jpg) + +![](images/5724b436a86300be2784fa68ea7aec5bd1eb7e60f905356e9a61d6ce2dea4070.jpg) + +![](images/7acbf978c26e0e5745e06313089bfbd821791e207c3cd601e0ff50804ab6dc4b.jpg) + +![](images/f0284ad1958098bbf58a3a6ea0491e38ff5e9e5a2a1d05a08a833dda237df2d2.jpg) +Figure 9: Visualization of the prediction by DM integrated with NO and HFS-enhanced NO. (a) Example showing ground truth (GT) solution and predictions by NO, NO + DM, NO + HFS, and NO + HFS + DM. 
(b) Zoomed-in visualization of (a) focusing on the high-frequency contents. (c) Predictions of another randomly selected sample. (d) Zoomed-in visualization of (c) focusing on high-frequency contents. + +![](images/8f424c4056077457e9ba4f82ff71b2dceebe8e63582d2e9e2910553e7c5a91a2.jpg) + +![](images/ba25d5e98b4b75fd94526f7be742d92551619db9606fb8a8bb8ed6f2f35060f3.jpg) + +![](images/6acce8a74d6789197fed4cf6e66d8f7ce554c25121db815c611f22a624e7aa8c.jpg) + +![](images/9baf9f6b18eb233208ff948902b5fec1c3ee7677059013bbd5eea3505b509fce.jpg) + +The results presented in Fig. 9 and Fig. 10 illustrate the following key points: + +- HFS reduces the prediction errors in both physical and spectral domains, irrespective of NO size. On average, the relative errors (e.g., RMSE) and energy spectrum errors $(\mathcal{E}_F)$ (see Appendix B) are reduced by $23.5\%$ and $15.2\%$ , respectively, with HFS-enhanced NOs. +- Generally, DM does not change the prediction field errors (Fig. 10a). However, DM reduces the energy spectrum error, showing better energy spectra alignment with the correct solutions. On average, $\mathrm{NO} + \mathrm{DM}$ has $27.8\%$ lower relative $\varepsilon_{F}$ compared to NO. The only exception is the NO with 16 million parameters. On average, $\mathrm{NO} + \mathrm{HFS} + \mathrm{DM}$ has $23.2\%$ lower relative $\varepsilon_{F}$ compared to $\mathrm{NO} + \mathrm{HFS}$ (Fig. 10b). +- HFS reduces the energy spectrum errors at all different frequency bands $(\mathcal{E}_{F_{\mathrm{low}}}, \mathcal{E}_{F_{\mathrm{mid}}},$ and $\mathcal{E}_{F_{\mathrm{high}}})$ , containing only the low, mid, and high-frequency components of the solutions, respectively. We refer to Fig. 6 for visualization of solutions at these frequency bands. However, DM does not enhance the results at $\mathcal{E}_{F_{\mathrm{low}}}$ and $\mathcal{E}_{F_{\mathrm{mid}}}$ when integrated with HFS-enhanced NO. 
Indeed, the results at these two frequency bands are sometimes the best + +for HFS-enhanced NO without DM, depending on the NO size. However, the advantage of DM is taken into action at $\mathcal{E}_{F_{\mathrm{high}}}$ (Fig. 10e) with improved results compared to NO and HFS-enhanced NO. This explains the role of DM in further mitigation of spectral bias. + +![](images/adcc8ac420e67795e1500fd44cec9e0c040709c3a1c3f3f3c6afdf73a317f59a.jpg) + +![](images/c61ee8c84c709ba417474b42e09eec116bd2299f768602183e70caadd447b32c.jpg) + +![](images/829637f19e543333f93c6632ec44ddd9bda78de8bfca3bc1bff0bc0e7512c61c.jpg) +Figure 10: Diffusion model prediction results. (a) Relative errors (Rel. Error) of prediction by NO, NO + DM, NO + HFS, and NO + HFS + DM. (b) Relative energy spectrum errors (Rel. $\mathcal{E}_F$ ). (c) Relative energy spectrum errors in the low frequency band (Rel. $\mathcal{E}_{F_{\mathrm{low}}}$ ). (d) Relative energy spectrum errors in the mid frequency band (Rel. $\mathcal{E}_{F_{\mathrm{mid}}}$ ). (e) Relative energy spectrum errors in the high-frequency band (Rel. $\mathcal{E}_{F_{\mathrm{high}}}$ ). Low, mid, and high-frequency thresholds are set to the first $2\%$ , the first $6.2\%$ excluding the first $2\%$ , and the last $93.8\%$ of the wavenumbers. + +![](images/24e9419cdc031bbe8ee845630b852d0f762ff0f707d7ec8c2a0be986d2140e2a.jpg) + +![](images/fb0cd2d796a34b11d747483d3064e7f18b5a3e9c93c722d66bc954154eeb9ef0.jpg) + +# 4. Discussion + +HFS works by preserving more high-frequency components in the latent space after each convolutional layer in the NO. The flexibility of learning to scale the DC and HFC of the signals allows the model to enhance the predictions in mid and high-frequency contents without any degradation in the low frequency content of the solutions. As a result, both field metrics such as RMSE and bubble RMSE, and the spectral errors are reduced in two-phase flow predictions. 
The enhancements observed in HFS-enhanced NO prediction results are more pronounced in areas with larger high-frequency features such as within the bubbles and at condensation trails seen in subcooled boiling solutions. This emphasizes the role of HFS in spectral bias mitigation, which helps with better capture of intricate features and sharp gradients. Similarly, both the relative errors and spectral errors are reduced, and high-frequency features are enhanced in the Kolmogorov flow predictions. + +The scaling parameters $\lambda_{DC}$ and $\lambda_{HFC}$ in the HFS method are optimized during the network training. Notably, the optimized values for $\lambda_{HFC}$ are consistently larger than $\lambda_{DC}$ , indicating that the model is trying to pay more attention to the HFC in the latent space. This biased attention helps mitigating the persistent challenge of spectral bias in the NO. To reduce the optimization burden, the scaling parameters were consistent across all the patches but were allowed to vary across different feature maps. This flexibility enables the model to automatically adjust the scaling of the HFC of the feature map depending on its content and significance. The learned $\lambda_{DC}$ and $\lambda_{HFC}$ for each of the latent feature maps in the HFS-enhanced NO with $\sim 1.7$ million parameters are shown in Appendix G. In our work, all the scaling parameters were initialized at one and they were optimized using gradient descent with the same learning rate used for training the NO ( $\sim 8\times 10^{-4}$ ). It would be interesting to explore faster convergence by using different initializations and optimization frameworks for the scaling parameters in future work. + +Another method for spectral bias mitigation is through diffusion models conditioned on NO predictions as prior information. However, using diffusion models has two drawbacks. First, the diffusion model predictions are strongly dependent on the prior information. 
Therefore, it can only reduce over-smoothing from reasonably accurate NO predictions. If the NO predictions are not sufficiently accurate, then the diffusion model cannot perform well. Second, training diffusion models requires extensive computational cost as each training iteration involves $n(= 32)$ auto-regressive denoising steps to estimate the state of the solution at each time-step. In our experiments, the diffusion model training cost is approximately 2 to 4 times higher than the NO training itself. On the other hand, the HFS method requires only a small additional computational cost and negligible additional memory for training along the NO. The number of parameters added by HFS modules varies depending on the underlying NO size. However, it is generally less than $0.1\%$ of the number of parameters in the NO. In our experimentation, the HFS module parameters vary between $0.018\%$ to $0.045\%$ of the number of parameters in NO, depending on the underlying NO size. Based on our experiments, the computational time for each training iteration is within $10\%$ to $30\%$ higher, depending on the NO size and the computational resource. + +In addition to the enhancements observed in field metrics such as RMSE and bubble RMSE, our investigation revealed that HFS also helps with reducing the spectral errors. We demonstrated that matching the correct energy spectra at mid and high wavenumbers is directly correlated with capturing the complex features in the solutions. We would like to emphasize the importance of considering both field errors and correct energy spectra alignment in scientific machine learning problems. The field analysis demonstrates the average performance of the predictions. 
However, the energy spectra analysis reveals useful information about the prediction accuracies at different frequencies and thereby explaining the possible spectral bias and loss of useful information near interfaces, vortices, and sharp gradient areas in two-phase flow and turbulence problems. It should be noted that the predictions with enhanced energy spectra alignment are beneficial when accompanied by improved mean field predictions (e.g., RMSE) and HFS-enhanced NO results satisfy this requirement. + +When aiming to scale the different frequency bands of the signals, a logical alternative would be to perform the scaling directly in the frequency domain rather than the physical domain. As a comparison, we implemented and compared scaling in the frequency domain with our proposed method (HFS). In this regard, let $\mathbf{X}^{(l)}\in \mathbb{R}^{H\times W\times C}$ be the output of the $l$ -th convolutional layer, then the feature maps can be transferred to frequency domain using a 2D Fourier + +transform $(\mathcal{F})$ + +$$
\hat {\mathbf {X}} ^ {(l)} (:,:, c) = \mathcal {F} \left(\mathbf {X} ^ {(l)} (:,:, c)\right), \quad c = 1, 2, \dots , C, \tag {9}
$$ + +where $\hat{\mathbf{X}}^{(l)}\in \mathbb{C}^{H\times W\times C}$ includes the Fourier-transformed feature maps. The low frequency and high-frequency components of feature maps can be generated by truncating $\hat{\mathbf{X}}^{(l)}$ at a frequency threshold of $\tau$ . We name these components as $\hat{\mathbf{X}}_{\mathrm{low}}^{(l)}$ and $\hat{\mathbf{X}}_{\mathrm{high}}^{(l)}$ . 
Each Fourier-transformed feature will be scaled separately: + +$$
\hat {\mathbf {X}} _ {\text {scaled}} ^ {(l)} = \lambda_ {\text {low}} \odot \hat {\mathbf {X}} _ {\text {low}} ^ {(l)} + \lambda_ {\text {high}} \odot \hat {\mathbf {X}} _ {\text {high}} ^ {(l)}, \tag {10}
$$ + +where $\lambda_{\mathrm{low}} \in \mathbb{R}^{1 \times 1 \times C}$ and $\lambda_{\mathrm{high}} \in \mathbb{R}^{1 \times 1 \times C}$ are learnable parameters that are optimized simultaneously with the network training. Finally, the scaled feature map is reconstructed using the inverse Fourier transform: + +$$
\mathbf {X} _ {\text {scaled}} ^ {(l)} (:, :, c) = \mathcal {F} ^ {- 1} \left(\hat {\mathbf {X}} _ {\text {scaled}} ^ {(l)} (:, :, c)\right), \quad c = 1, 2, \dots , C. \tag {11}
$$ + +Our preliminary results demonstrate that scaling in the frequency domain also improves the two-phase flow prediction results, thus helping with the spectral bias mitigation. However, the enhancements are lower than the proposed HFS method, while the computational cost is significantly higher. This is due to the Fourier and Fourier inverse transforms required in this method. Consequently, we did not proceed with the second method. However, it may be worth investigating this method in future work. There is one hyperparameter for each of these scaling methods. For the proposed HFS method, the patch size is the hyperparameter, and for scaling in the frequency domain the truncation frequency is the hyperparameter. A comparison of the prediction errors and computation costs of the two methods with a NO with $\sim 1.7$ million parameters is shown in Table 3. + +Table 3: Comparison of the proposed HFS (Method 1) and scaling in frequency domain (Method 2). + +
NONO + Method 1NO + Method 2
Rel. Error0.0440.0330.034
RMSE0.0430.0330.034
BRMSE0.1160.0720.076
Maxmean1.140.890.92
Parameters [Millions]1.7111.7121.712
Iteration time (s)31.434.552.6
+ +# 4.1. Effectiveness Criteria + +The HFS approach operates by spatially decomposing the features into several patches and scaling the common DC and individual HFC of the patches separately. Our investigation showed that HFS is mostly effective on datasets with localized features such as those in subcooled pool boiling dataset. For extremely chaotic systems with globally small-scale features, the DC and HFC cannot be directly separated from spatial patching as all the patches may contain similar frequency components. To better quantify this limitation, we directly applied HFS to the samples from three different case studies with inherently different features. The samples were chosen from the subcooled pool boiling, Kolmogorov flow, and a turbulent jet problem. We found that HFS is effective for the first two problems (with the effect being less + +pronounced on the later one), but is not effective for the third case. + +The turbulent jet data is from the experimental Schlieren velocimetry of turbulent helium jet in air. More details about the dataset is available in the previous work [64]. We directly used the publicly available Schlieren velocimetry dataset [64] in the raw .tif format. All the regions in the turbulent jet have similar small-scale features (see Fig. 11), which are different from the more localized features in the subcooled pool boiling and less localized features in the Kolmogorov flow. We directly applied HFS to these datasets and visualized the gradient magnitude in a region with high-frequency features. Additionally, we visualized the ratio of the gradient strength on a high frequency region with and without HFS, as defined by $\frac{|\nabla x_{\mathrm{HFS}}|}{|\nabla x_{\mathrm{baseline}}|}$ where $x$ is the chosen region, $\nabla$ is the gradient operator, and baseline refers to the case without HFS. This ratio compares the selectiveness in scaling the gradient of the features. 
The HFS approach is effective for cases where it can selectively scale the gradients across the localized features. In contrast, HFS may not be effective if it results in a uniform gradient scaling, as it can be seen in the sample from the turbulent jet dataset. + +Specifically, as shown in Fig. 11, the HFS approach successfully increases the gradient strength at high frequency regions in subcooled pool boiling and Kolmogorov flow. However, it scales the gradient uniformly in the turbulent jet case. Therefore, the ratio of the gradient strength with HFS to the baseline shows a less uniform solution on the subcooled pool boiling sample, followed by the Kolmogorov flow sample. However, this ratio is almost uniform for the turbulent jet case. Selective enhancement of the gradient near the edges and high-frequency features helps with the better representation of these local regions which helps the NO to better capture the high-frequency details. Since HFS is applied in the latent space, the artifacts caused by patching are mitigated and ultimately discarded in the deeper levels of the NO. + +![](images/a3920278fe9862529af92facf4befe880ca3cdb6ffd2727b6fd4c8c5dd3e3403.jpg) +(a) + +![](images/b20b61142a3bce86ad36ea7d9f2a959ee5be79e20456608ea2474ed5fc37e15f.jpg) + +![](images/3af287cf23e6f53219a3589e04c57a037bb16dd30b3685c7cc69a05bbec05bb1.jpg) + +![](images/06a9908dbf997c468a6ff901a52a01cdf5091d706ec7f0c0dae199bce729c2d8.jpg) + +![](images/e6da86530a472579fba0dd641c2521881c22ad246da13baba0f7a778d0d158dd.jpg) +(b) + +![](images/02e29bc84d1768810e1282a65f1138753dd896b1b55969f5a4838f82670d53f6.jpg) + +![](images/a2d0d2a7c6fc7b04c17becb19f33300d8aa8cf147d4405358f82dc9231fef45b.jpg) + +![](images/bde31bbafc0dd86d2cacce81eadabd84b6642b05c7685ef1db226bcddcc21013.jpg) + +![](images/b8520abce2c4b102c3587cdadf9a75faceafd934ce44f67cb0d3a2e2c69cb107.jpg) +(c) +Turbulent jet (Schlieren velocimetry) +Figure 11: HFS impact on gradient magnitude for different problems. 
(a) Subcooled pool boiling. (b) Kolmogorov flow. (c) Schlieren velocimetry of turbulent jet. For each case, the first column shows the sample and the chosen region with high frequency features (dashed boxes), the second column shows the gradient magnitude, the third column shows the gradient magnitude after applying HFS to the sample, and the fourth column shows the ratio of the HFS-enhanced gradient magnitude to the baseline gradient magnitude. + +![](images/b6f0c5dd6c47a2958767bffd09c994b0edb2e6ba4a6e8bf3fdcb4e17cb43447e.jpg) + +![](images/54d45f249993056243a6c7a732dd99069452cb3aadac4e89970dbe83138eb984.jpg) + +![](images/cd911c74dd595db620775568094d4217a9e0788b4a95e0be4c48f5928145f1f8.jpg) + +# 5. Summary + +In this work, we proposed a new method named high-frequency scaling (HFS) to mitigate the spectral bias in convolutional-based neural operators. We demonstrated that integrating HFS with feature maps in the latent space of the neural operator reduces the prediction errors in two-phase flow problems and the Kolmogorov flow problem. Through spectral bias mitigation, HFS helps to better capture intricate features and sharp gradients commonly seen within the bubbles and induced vortices in subcooled pool boiling problem, and the small-scale features in the Kolmogorov flow. These high-frequency features are prone to over-smoothing when predicted with neural operators without HFS. HFS-enhanced neural operators can improve neural operator performance irrespective of the neural operator size. We showed that for different variants of ResUNet with number of parameters varying within $\sim 2$ to $\sim 16$ millions, HFS consistently reduces the prediction errors. Furthermore, a better energy spectra alignment is observed for the results of the neural operator with HFS. Additionally, we showed that the diffusion model predictions are strongly dependent on the quality of the prior neural operator predictions. 
Therefore, it is important to improve the neural operator prediction accuracy using HFS so that the diffusion model can further recover the missing high-frequencies in the solutions. Otherwise, the diffusion model can barely improve the erroneous large features or significantly over-smoothed predictions of the neural operator. The advantages of HFS are obtained with a negligible memory requirement and a small computational cost trade-off. + +Finally, we investigated the effectiveness criteria for HFS approach by visualizing the gradient magnitudes of high-frequency regions of three different problems. We showed that HFS works the best on the subcooled pool boiling dataset due to the more localized features, which result in a selective gradient enhancement near the edges and high-frequency features. The HFS approach effectiveness decreases in the Kolmogorov flow problem, and is negligible in the turbulent jet problem. The gradient magnitude is scaled more uniformly in the Kolmogorov flow data and almost completely uniform in the turbulent jet problem, hence explaining why HFS is ineffective for this problem. + +# CRediT authorship contribution statement + +Siavash Khodakarami: Writing - review & editing, Writing - original draft, Visualization, Validation, Software, Methodology, Investigation, Formal analysis, Data Curation, Conceptualization. Vivek Oommen: Writing - review & editing, Writing - original draft, Visualization, Validation, Methodology, Investigation, Formal analysis, Data curation. Aniruddha Bora: Writing - review & editing, Writing - original draft, Validation, Methodology, Investigation. George Em Karniadakis Writing - review & editing, Writing - original draft, Supervision, Funding acquisition, Conceptualization. + +# Declaration of competing interest + +The authors declare that they have no known competing financial interests or personal relationships that could have appeared to influence the work reported in this paper. 
+ +# Acknowledgments + +We would like to acknowledge funding from the Office of Naval Research as part of MURI-METHODS project with grant number N00014242545. The authors would like to acknowledge the computational resources and services at the Center for Computation and Visualization (CCV), Brown University. The experiments were also partly conducted using the Delta AI computational resources at the National Center for Supercomputing Applications at the University of Illinois Urbana-Champaign through allocation CIS240932 from the Advanced Cyberinfrastructure Coordination Ecosystem: Services & Support (ACCESS) program, which is supported by the National Science Foundation. + +# Data Availability + +All codes and datasets will be made publicly available at https://github.com/SiaK4/HFS_ResUNet.git upon publication. + +# References + +[1] S. K. Godunov, I. Bohachevsky, Finite difference method for numerical computation of discontinuous solutions of the equations of fluid dynamics, Matematicheskij Sbornik 47 (1959) 271-306. +[2] R. Eymard, T. Gallouet, R. Herbin, Finite volume methods, Handbook of Numerical Analysis 7 (2000) 713-1018. +[3] G. Karniadakis, S. J. Sherwin, Spectral/hp element methods for computational fluid dynamics, Oxford University Press, USA, 2005. +[4] T. J. Hughes, The Finite Element Method: Linear Static and Dynamic Finite Element Analysis, Courier Corporation, 2012. + +[5] L. Lu, P. Jin, G. Pang, Z. Zhang, G. E. Karniadakis, Learning nonlinear operators via deeponet based on the universal approximation theorem of operators, Nature Machine Intelligence 3 (2021) 218-229. URL: https://doi.org/10.1038/s42256-021-00302-5. doi:10.1038/s42256-021-00302-5. +[6] Z. Li, N. Kovachki, K. Azizzadenesheli, B. Liu, K. Bhattacharya, A. Stuart, A. Anandkumar, Fourier neural operator for parametric partial differential equations, arXiv preprint arXiv:2010.08895 (2020). +[7] Q. Cao, S. Goswami, G. E. 
Karniadakis, Laplace neural operator for solving differential equations, Nature Machine Intelligence 6 (2024) 631-640. +[8] T. Tripura, S. Chakraborty, Wavelet neural operator for solving parametric partial differential equations in computational mechanics problems, Computer Methods in Applied Mechanics and Engineering 404 (2023) 115783. +[9] O. Ovadia, V. Oommen, A. Kahana, A. Peyvan, E. Turkel, G. E. Karniadakis, Real-time inference and extrapolation via a diffusion-inspired temporal transformer operator (ditto), arXiv preprint arXiv:2307.09072 (2023). +[10] Z. Li, K. Meidani, A. B. Farimani, Transformer for partial differential equations' operator learning, arXiv preprint arXiv:2205.13671 (2022). +[11] A. Sharma, S. Singh, S. Ratna, Graph neural network operators: a review, Multimedia Tools and Applications 83 (2024) 23413-23436. +[12] T. Chen, H. Chen, Universal approximation to nonlinear operators by neural networks with arbitrary activation functions and its application to dynamical systems, IEEE Transactions on Neural Networks 6 (1995) 911-917. doi:10.1109/72.392253. +[13] R. Wan, E. Kharazmi, M. S. Triantafyllou, G. E. Karniadakis, Deepvivonet: Using deep neural operators to optimize sensor locations with application to vortex-induced vibrations, arXiv preprint arXiv:2501.04105 (2025). +[14] E. Kiyani, M. Manav, N. Kadivar, L. De Lorenzis, G. E. Karniadakis, Predicting crack nucleation and propagation in brittle materials using deep operator networks with diverse trunk architectures, arXiv preprint arXiv:2501.00016 (2024). +[15] A. Peyvan, V. Oommen, A. D. Jagtap, G. E. Karniadakis, Riemannonets: Interpretable neural operators for riemann problems, Computer Methods in Applied Mechanics and Engineering 426 (2024) 116996. +[16] Z. Li, W. Peng, Z. Yuan, J. Wang, Long-term predictions of turbulence by implicit u-net enhanced fourier neural operator, Physics of Fluids 35 (2023). +[17] Y. Jiang, Z. Li, Y. Wang, H. Yang, J. 
Wang, An implicit adaptive fourier neural operator for long-term predictions of three-dimensional turbulence, arXiv preprint arXiv:2501.12740 (2025). +[18] V. Gopakumar, S. Pamela, L. Zanisi, Z. Li, A. Anandkumar, M. Team, Fourier neural operator for plasma modelling, arXiv preprint arXiv:2302.06542 (2023). +[19] D. Montes de Oca Zapiain, J. A. Stewart, R. Dingreville, Accelerating phase-field-based microstructure evolution predictions via surrogate models trained by machine learning methods, npj Computational Materials 7 (2021) 3. +[20] V. Oommen, K. Shukla, S. Goswami, R. Dingreville, G. E. Karniadakis, Learning two-phase microstructure evolution using neural operators and autoencoder architectures, npj Computational Materials 8 (2022) 190. +[21] V. Oommen, K. Shukla, S. Desai, R. Dingreville, G. E. Karniadakis, Rethinking materials simulations: Blending direct numerical simulations with neural operators, npj Computational Materials 10 (2024) 145. +[22] S. Khodakarami, Y. Suh, Y. Won, N. Miljkovic, An intelligent strategy for phase change heat and mass transfer: Application of machine learning, in: Advances in Heat Transfer, volume 56, Elsevier, 2023, pp. 113-168. +[23] N. Rahaman, A. Baratin, D. Arpit, F. Draxler, M. Lin, F. Hamprecht, Y. Bengio, A. Courville, On the spectral bias of neural networks, in: K. Chaudhuri, R. Salakhutdinov (Eds.), Proceedings of the 36th International Conference on Machine Learning, volume 97 of Proceedings of Machine Learning Research, PMLR, 2019, pp. 5301-5310. URL: https://proceedings.mlr.press/v97/rahaman19a.html. +[24] Z.-Q. J. Xu, Y. Zhang, T. Luo, Y. Xiao, Z. Ma, Frequency principle: Fourier analysis sheds light on deep neural networks, arXiv preprint arXiv:1901.06523 (2019). +[25] Z.-Q. J. Xu, L. Zhang, W. Cai, On understanding and overcoming spectral biases of deep neural network learning methods for solving pdes, arXiv preprint arXiv:2501.09987 (2025). +[26] C. Lin, Z. Li, L. Lu, S. Cai, M. Maxey, G. E. 
Karniadakis, Operator learning for predicting multiscale bubble growth dynamics, The Journal of Chemical Physics 154 (2021). +[27] N. Jain, S. Roy, H. Kodamana, P. Nair, Scaling the predictions of multiphase flow through porous media using operator learning, Chemical Engineering Journal 503 (2025) 157671. +[28] O. Ronneberger, P. Fischer, T. Brox, U-net: Convolutional networks for biomedical image segmentation, in: Medical image computing and computer-assisted intervention-MICCAI 2015: 18th international confer + +ence, Munich, Germany, October 5-9, 2015, proceedings, part III 18, Springer, 2015, pp. 234-241. +[29] S. Qin, F. Lyu, W. Peng, D. Geng, J. Wang, N. Gao, X. Liu, L. L. Wang, Toward a better understanding of fourier neural operators: Analysis and improvement from a spectral perspective, arXiv preprint arXiv:2404.07200 (2024). +[30] S. M. S. Hassan, A. Feeney, A. Dhruv, J. Kim, Y. Suh, J. Ryu, Y. Won, A. Chandramowlishwaran, Bubbleml: A multiphase multiphysics dataset and benchmarks for machine learning, Advances in Neural Information Processing Systems 36 (2024). +[31] A. Dubey, K. Weide, J. O'Neal, A. Dhruv, S. Couch, J. A. Harris, T. Klosterman, R. Jain, J. Rudi, B. Messer, et al., Flash-x: A multiphysics simulation software instrument, SoftwareX 19 (2022) 101168. +[32] X. Liu, B. Xu, S. Cao, L. Zhang, Mitigating spectral bias for the multiscale operator learning, Journal of Computational Physics 506 (2024) 112944. +[33] W. Cai, Z.-Q. J. Xu, Multi-scale deep neural networks for solving high dimensional pdes, arXiv preprint arXiv:1910.11710 (2019). +[34] M. Tancik, P. Srinivasan, B. Mildenhall, S. Fridovich-Keil, N. Raghavan, U. Singhal, R. Ramamoorthi, J. Barron, R. Ng, Fourier features let networks learn high frequency functions in low dimensional domains, Advances in neural information processing systems 33 (2020) 7537-7547. +[35] S. Wang, H. Wang, P. 
Perdikaris, On the eigenvector bias of fourier feature networks: From regression to solving multi-scale pdes with physics-informed neural networks, Computer Methods in Applied Mechanics and Engineering 384 (2021) 113938. +[36] M. Raissi, P. Perdikaris, G. E. Karniadakis, Physics-informed neural networks: A deep learning framework for solving forward and inverse problems involving nonlinear partial differential equations, Journal of Computational physics 378 (2019) 686-707. +[37] J. D. Toscano, V. Oommen, A. J. Varghese, Z. Zou, N. A. Daryakenari, C. Wu, G. E. Karniadakis, From pinns to pikans: Recent advances in physics-informed machine learning, arXiv preprint arXiv:2410.13228 (2024). +[38] S. Liang, L. Lyu, C. Wang, H. Yang, Reproducing activation function for deep learning, arXiv preprint arXiv:2101.04844 (2021). +[39] A. D. Jagtap, K. Kawaguchi, G. E. Karniadakis, Adaptive activation functions accelerate convergence in deep and physics-informed neural networks, Journal of Computational Physics 404 (2020) 109136. +[40] W. Cai, X. Li, L. Liu, A phase shift deep neural network for high frequency approximation and wave problems, SIAM Journal on Scientific Computing 42 (2020) A3285-A3312. +[41] P. Lippe, B. Veeling, P. Perdikaris, R. Turner, J. Brandstetter, Pde-refiner: Achieving accurate long rollouts with neural pde solvers, Advances in Neural Information Processing Systems 36 (2023) 67398-67433. +[42] E. Zhang, A. Kahana, A. Kopaničáková, E. Turkel, R. Ranade, J. Pathak, G. E. Karniadakis, Blending neural operators and relaxation methods in pde numerical solvers, Nature Machine Intelligence (2024) 1-11. +[43] H. Wu, K. Zhang, D. Zhou, W.-L. Chen, Z. Han, Y. Cao, High-flexibility reconstruction of small-scale motions in wall turbulence using a generalized zero-shot learning, Journal of Fluid Mechanics 990 (2024) R1. +[44] Z. Wang, X. Li, L. Liu, X. Wu, P. Hao, X. Zhang, F. 
He, Deep-learning-based super-resolution reconstruction of high-speed imaging in fluids, Physics of Fluids 34 (2022). +[45] R. Molinaro, S. Lanthaler, B. Raonic, T. Rohner, V. Armegioiu, Z. Y. Wan, F. Sha, S. Mishra, L. Zepeda-Nuñez, Generative ai for fast and accurate statistical computation of fluids, arXiv preprint arXiv:2409.18359 (2024). +[46] J. W. Lockwood, A. Gori, P. Gentine, A generative super-resolution model for enhancing tropical cyclone wind field intensity and resolution, Journal of Geophysical Research: Machine Learning and Computation 1 (2024) e2024JH000375. +[47] V. Oommen, A. Bora, Z. Zhang, G. E. Karniadakis, Integrating neural operators with diffusion models improves spectral representation in turbulence modeling, arXiv preprint arXiv:2409.08477 (2024). +[48] S. M. S. Hassan, A. Feeney, A. Dhruv, J. Kim, Y. Suh, J. Ryu, Y. Won, A. Chandramowlishwaran, Bubbleml: a multi-physics dataset and benchmarks for machine learning, arXiv preprint arXiv:2307.14623 (2023). +[49] F. I. Diakogiannis, F. Waldner, P. Caccetta, C. Wu, Resunet-a: A deep learning framework for semantic segmentation of remotely sensed data, ISPRS Journal of Photogrammetry and Remote Sensing 162 (2020) 94-114. +[50] H. Li, Z. Xu, G. Taylor, C. Studer, T. Goldstein, Visualizing the loss landscape of neural nets, Advances in neural information processing systems 31 (2018). +[51] X. Chen, C. Liang, D. Huang, E. Real, K. Wang, H. Pham, X. Dong, T. Luong, C.-J. Hsieh, Y. Lu, et al., Symbolic discovery of optimization algorithms, Advances in neural information processing systems 36 (2024). + +[52] D. P. Kingma, Adam: A method for stochastic optimization, arXiv preprint arXiv:1412.6980 (2014). +[53] A. Dubey, K. Weide, J. O'Neal, A. Dhruv, S. Couch, J. A. Harris, T. Klosterman, R. Jain, J. Rudi, B. Messer, et al., Flash-x: A multiphysics simulation software instrument, SoftwareX 19 (2022) 101168. +[54] M. Wei, X. 
Zhang, Super-resolution neural operator, in: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2023, pp. 18247-18256. +[55] R. Wang, K. Kashinath, M. Mustafa, A. Albert, R. Yu, Towards physics-informed deep learning for turbulent flow prediction, in: Proceedings of the 26th ACM SIGKDD international conference on knowledge discovery & data mining, 2020, pp. 1457-1466. +[56] P. Chakrabarty, S. Maji, The spectral bias of the deep image prior, arXiv preprint arXiv:1912.08905 (2019). +[57] A. M. Saxe, P. W. Koh, Z. Chen, M. Bhand, B. Suresh, A. Y. Ng, On random weights and unsupervised feature learning., in: Icml, volume 2, 2011, p. 6. +[58] P. Wang, W. Zheng, T. Chen, Z. Wang, Anti-oversmoothing in deep vision transformers via the fourier domain analysis: From theory to practice, arXiv preprint arXiv:2203.05962 (2022). +[59] J. Ho, A. Jain, P. Abbeel, Denoising diffusion probabilistic models, Advances in neural information processing systems 33 (2020) 6840-6851. +[60] Y. Song, J. Sohl-Dickstein, D. P. Kingma, A. Kumar, S. Ermon, B. Poole, Score-based generative modeling through stochastic differential equations, arXiv preprint arXiv:2011.13456 (2020). +[61] Y. Song, S. Ermon, Generative modeling by estimating gradients of the data distribution, Advances in neural information processing systems 32 (2019). +[62] T. Karras, M. Aittala, T. Aila, S. Laine, Elucidating the design space of diffusion-based generative models, Advances in neural information processing systems 35 (2022) 26565-26577. +[63] V. Oommen, A. Bora, Z. Zhang, G. E. Karniadakis, Data for "integrating neural operators with diffusion models improves spectral representation in turbulence modeling" (kolmogorov flow case), 2025. URL: https://doi.org/10.6084/m9.figshare.28250960.v1. doi:10.6084/m9.figshare.28250960.v1. +[64] G. S. Settles, A. Liberzon, Schlieren and bos velocimetry of a round turbulent helium jet in air, Optics and Lasers in Engineering 156 (2022) 107104. 
+ +# Appendix A. Training strategies and ResUNet prediction results + +All the models were trained for $\sim 1000$ epochs (convergence typically happened earlier). The initial learning rate was set to $8\times 10^{-4}$ and it was reduced after the first 700 epochs using a linear step scheduler. We used the GELU activation function and group normalization after convolutional layers. The Lion optimizer with a weight decay of 0.02 to 0.1 was used, depending on the neural operator size. A batch size of 4 or 8 was used, depending on the neural operator size. We found that gradient clipping at a maximum gradient norm of 0.4 to 1 (depending on neural operator size) helps with the optimization. Our preliminary findings showed better results with the Lion optimizer compared to the Adam and AdamW optimizers. Therefore, all the training runs for this work were conducted with the Lion optimizer. For all the neural operators, the number of layers in the encoder and decoder was kept constant and the number of parameters at each layer was modified to change the neural operator size. 
+ +![](images/216db5d1060828f55173fc75cc3f6bdd527f94ae0d29cc7ad38e6bb2fa212e64.jpg) +5 +(b) + +![](images/f9f5641b6c82f11306de0d3689768b2b13e8457b6f1cd3662c1b66e60e3ed261.jpg) + +![](images/d3c67e83e17b5fd46d7760afc8200d37ecb7166597966030724c7e43b7db60ed.jpg) + +![](images/96e9fe952ba04a71427afb1dec43bde98adb15cd8c90862e8d75dc70147f8e70.jpg) + +![](images/f986092ac9f6ad1661f7ee50cf594bcb849746ce965b8ad4798f4b49976d2df6.jpg) + +![](images/9c5bc0ee49ccb11782a786fdf420a16404e092d08ea9b8b167f0def97ae4f912.jpg) +(c) + +![](images/32f06be7849e92231d12edb1f8aab5d30df58ae3d8707e22c1fe4771b3eb8a5e.jpg) + +![](images/cd6f9024ad18056f68340ec20642f220bc4cfe1c04907395459421d74c229cb8.jpg) + +![](images/24a5a142fba9df36a266ff532a6dd6f10514925e5ceb3121f29d52b03610d45f.jpg) + +![](images/59f496ffcbc52b4a9b0fa7ec0bf05bdff02cdd881eb5c24ff6271a98046d3169.jpg) + +![](images/6af72eec91e07a50d37a4a08fa6586c52969c4291b266bc7426bfb1b0c1c374a.jpg) +(d) + +![](images/68dddade02c2590e94e546d373570e8a15684914e477cd49190c23707cd2bc18.jpg) + +![](images/6c7b0c2e0c10c5a798fdcfa928b49ce73045efbd3b1cd8453688f7c99d17cd8a.jpg) + +![](images/3379c693ec231bdecec176e4940ec55fb41c91a3584394e5c718fa44b432e3e9.jpg) + +![](images/58537f9f0b24681dc7cbeaeca1b1a9b091ce3452f0c6cb59fec6e162e5eb03b3.jpg) + +![](images/7afd174a61dc3d1de47273ea402d57ec42d4b4855f6e00716ed25610f3bf2e99.jpg) + +![](images/bf303478b135b4702e050d5bc329eafb8ed3bee1cea9c893e56c2e3acfcdfe30.jpg) + +![](images/46f47561b47a1d8e82a51bf46d73c89d671b14142e75b748b48a9f23c4bc18e5.jpg) + +![](images/fe38d8420ef1f2b61297d2ff15a9cd6a35da167a630d84b91c2c48f27b959981.jpg) + +![](images/0a2207b92d4a876252be664d76541511a159339b9f752eac681e2c9f2f8745fa.jpg) + +![](images/296cbe932f78eae1ce2bd146deed18f92446c6a21cc9c53a07e445e9df2d2011.jpg) +(e) + +![](images/a758c825f19ab19d8b155d31e8b085f5d408ecd312bcde014754a5ce38a102a6.jpg) + +![](images/c33f2ead32bb0936798071bc0c00592261094bdf3e45f08d526a0fbf6f153427.jpg) + 
+![](images/0d8bcff3f5dad1dfd457e9f5dd4d81f799dc0b6bcee229617b3874b126d9c772.jpg) + +![](images/0255dc7ff44b63e60640617dbe202f47d5e43c9354551d69843c1a39ce725f0f.jpg) +Figure A.12: Example of subcooled pool boiling temperature prediction results by neural operators. (a) Ground truth results, (b) UNet prediction results, (c) ResUNet prediction results, (d) UNet prediction errors, (e) ResUNet prediction errors. The results show five time-step predictions from left to right. + +# Appendix B. Boundary RMSE, Bubble RMSE, and Spectral Errors + +Boundary RMSE (BRMSE) for a single sample and time-step is defined by calculating the errors only at the boundaries of the domain: + +$$ +\operatorname{BRMSE} = \sqrt{\frac{1}{|\partial \Omega|} \sum_{\mathbf{x}_i \in \partial \Omega} \left(\hat{T}_i - T_i\right)^2}, \tag{B.1} +$$ + +where $\mathbf{x}_i\in \partial \Omega$ specifies the points at the boundaries, $\hat{T}_i$ is the predicted temperature, and $T_{i}$ is the actual temperature. Similarly, bubble RMSE is defined by calculating the errors only within the bubble areas. These areas are specified through a level-set function in the simulations. + +$$ +\text{Bubble RMSE} = \sqrt{\frac{1}{|\Omega_{\text{bubble}} \cup \partial \Omega_{\text{bubble}}|} \sum_{\mathbf{x}_i \in \Omega_{\text{bubble}} \cup \partial \Omega_{\text{bubble}}} \left(\hat{T}_i - T_i\right)^2}, \tag{B.2} +$$ + +where $\mathbf{x}_i\in \Omega_{\mathrm{bubble}}$ and $\partial \Omega_{\mathrm{bubble}}$ specify the points inside the bubble areas and at the interfaces, respectively. 
+ +The spectral errors in each of the low, mid, and high-frequency bands are defined as follows: + +$$ +F_{\text{band}} = \sqrt{\frac{1}{N_{\text{band}}} \sum_{\mathbf{k} \in \text{band}} \left| \mathcal{F}(T)(\mathbf{k}) - \mathcal{F}(\hat{T})(\mathbf{k}) \right|^2}, \quad \text{band} \in \{\text{low}, \text{mid}, \text{high}\}, \tag{B.3} +$$ + +where $\mathbf{k}$ is the spatial frequency component of the Fourier transformed solutions, $\mathcal{F}$ denotes the Fourier transform, and $N_{\mathrm{band}}$ specifies the number of components at each frequency band. The low, mid, and high bands may be defined differently based on the underlying dataset and the amount of high-frequency components. In this work, these bands were set to the first 2%, the first 6.2% excluding the low-band components, and the last 93.8% of the components. + +Similarly, the energy spectrum error, showing the energy spectra misalignment at each frequency band, is defined as follows: + +$$ +\mathcal{E}_{F_{\mathrm{band}}} = \sqrt{\frac{1}{N_{\mathrm{band}}} \sum_{\mathbf{k} \in \mathrm{band}} \left(\left| \mathcal{F}(T)(\mathbf{k}) \right|^2 - \left| \mathcal{F}(\hat{T})(\mathbf{k}) \right|^2\right)^2}, \quad \mathrm{band} \in \{\text{low}, \text{mid}, \text{high}\}, \tag{B.4} +$$ + +where $\mathcal{E}$ denotes the energy spectrum error. + +# Appendix C. Summary of subcooled pool boiling prediction results with HFS-enhanced NO + +In this work, we tested different variants of ResUNet by varying the number of parameters in the range of $\sim 2$ million to $\sim 16$ million. In the following table, we summarized the results of two of the models (the smallest and the largest), trained with optimal hyperparameters. Note that the same hyperparameters were used for training a neural operator with and without HFS. 
The parameters were first optimized for the NO without HFS and the same set of parameters were used for training the HFS-enhanced NO. The results of the other models are not included in this table for easier comparison and interpretation. We refer the reader to Figure 4 for observing the effect of HFS on all the tested models. Similar to the rest of the paper, the results are based on five time-step predictions. + +Table C.4: Subcooled pool boiling temperature prediction errors with neural operator (NO) with and without high-frequency scaling (HFS) The columns correspond to the metrics, NO with $\sim 1.7$ millions parameters, HFS-enhanced NO with $\sim 1.7$ millions parameters, NO with $\sim 16.2$ millions parameters, and HFS-enhanced NO with $\sim 16.2$ millions parameters. + +
NO, 1.7 MNO+HFS, 1.7 MNO, 16.2 MNO+HFS, 16.2 M
Rel. Error0.04140.03330.02510.0238
RMSE0.04030.03240.02440.0232
BRMSE0.09730.07290.05620.0505
Bubble RMSE0.19240.1430.1090.0985
Maxmean1.0190.8970.6850.656
Flow0.3230.2370.2120.141
Fmid0.2820.2180.1850.148
Fhigh0.04760.04000.03920.0296
Parameters [Millions]1.7111.71216.26316.268
+ +# Appendix D. Saturated pool boiling prediction results + +The saturated pool boiling dataset involves less complexity due to fewer high-frequency components and small-scale features. Therefore, a well-optimized NO without HFS can successfully resolve the solutions. However, HFS still enhances the prediction accuracies, especially at bubble areas. The following figure demonstrates an example of predictions using NO and HFS-enhanced NO for the saturated pool boiling dataset. Generally, the errors are much smaller than in the subcooled pool boiling predictions. However, it can be seen that the errors in the regions with departed bubbles are reduced with the HFS-enhanced NO. + +![](images/c875edc340d8e0893926ec11727820b8909097c64ebe04e21e7e0b12c6ba14a6.jpg) +Figure D.13: Examples of saturated pool boiling temperature prediction results by NO and HFS-enhanced NO. (a) Ground truth (GT) results. (b) NO predictions. (c) $\mathrm{NO} + \mathrm{HFS}$ predictions. (d) Absolute prediction errors of NO $(E_{\mathrm{NO}})$. (e) Absolute prediction errors of $\mathrm{NO} + \mathrm{HFS}$ $(E_{\mathrm{NO} + \mathrm{HFS}})$. The results are shown for five time-step predictions from left to right. The departed bubble areas are circled (dashed red circles) in error maps for easier interpretation and comparison. The results are based on an NO with $\sim 3.5$ million parameters. + +To further investigate if HFS can enhance the predictions with a smaller NO on this simpler dataset, we trained another NO with the same structure (ResUNet) but with only $\sim 0.6$ million parameters with and without HFS. Consistent with previous results, HFS enhanced the predictions by reducing the field errors such as RMSE and bubble RMSE as well as the spectral errors. The prediction results of the saturated pool boiling dataset using two different NOs with + +and without HFS are summarized in the following table. Similar to the rest of the paper, the results are based on five time-step predictions. 
+ +Table D.5: Saturated pool boiling temperature prediction errors of NO with and without HFS. The columns correspond to the metrics, NO with $\sim 0.6$ millions parameters, HFS-enhanced NO with $\sim 0.6$ millions parameters, NO with $\sim 3.5$ millions parameters, and HFS-enhanced NO with $\sim 3.5$ millions parameters. + +
NO, 0.6 MNO+HFS, 0.6 MNO, 3.5 MNO+HFS, 3.5 M
Rel. Error0.01730.01650.01490.0145
RMSE0.01710.01640.01480.0144
BRMSE0.04620.04500.03640.0355
Bubble RMSE0.09180.08980.07260.0692
Maxmean0.5920.5950.5530.544
Flow0.09640.08350.07450.0736
Fmid0.10860.09980.09190.0855
Fhigh0.02090.02080.01820.0180
Parameters [Millions]0.6140.6153.4803.481
+ +![](images/109eabf1c2090326d7c86275a3cb013ee490da972d1d8047ce97ccfd130ab144.jpg) +(a) +Figure E.14: Effect of HFS on the latent space mean features. (a) Mean latent feature maps in the encoder (downsampling) with five layers. (b) Mean latent feature maps in the decoder (upsampling) with five layers. The results are based on an NO with $\sim 16$ million parameters. + +![](images/f7993c093c7192f5cb3cbfb825bab433b9db09b4062eae2dea9b86fd1e4c6d5b.jpg) +(b) + +# Appendix F. Additional visualizations of the subcooled pool boiling predictions + +![](images/818538e3c98e7ef1cb7ca02c7839b776ab683c489ef3ad9ddcfc9a7d6277cc8e.jpg) +Figure F.15: Examples of subcooled pool boiling prediction results by DM integrated with NO and HFS-enhanced NO. (a) Ground truth (GT) results. (b) NO predictions. (c) NO + DM predictions. (d) NO + HFS predictions. (e) NO + HFS + DM predictions. The results are shown for five time-step predictions from left to right. + +# Appendix G. Optimized scaling parameters, $\lambda_{DC}$ and $\lambda_{HFC}$ + +The following figure demonstrates the learned $\lambda_{DC}$ and $\lambda_{HFC}$ across all the feature maps in the latent space of the encoder and decoder. The results are based on the training of an HFS-enhanced NO with $\sim 1.7$ million parameters for the subcooled pool boiling problem. + +![](images/b32764dafa3d505c255f1f64f1257658a5fa838bf50794dd76059aea8719ee2a.jpg) + +![](images/a0784824075ff041ec54bce51f4a9f920414699bc99c6bfb71782ab60b664316.jpg) +Figure G.16: (a) Learned values of $\lambda_{DC}$ and $\lambda_{HFC}$ in the encoder of the NO. (b) Learned values of $\lambda_{DC}$ and $\lambda_{HFC}$ in the decoder of the NO. Layers start from the highest spatial resolution to the lowest in the encoder and vice versa for the decoder. + +# Appendix H. 
Kolmogorov flow prediction results + +The vorticity formulation of the unsteady 2D Navier-Stokes equation for a viscous and incompressible fluid with the Kolmogorov forcing term is given as follows, where $\omega$ is the vorticity, $\mathbf{u}$ is the velocity vector, and $\nu$ is the kinematic viscosity. + +$$ +\left\{ \begin{array}{l l} \partial_{t} \omega + \mathbf{u} \cdot \nabla \omega = \nu \Delta \omega + f(x, y), & (x, y) \in (0, 2\pi)^{2}, t \in (0, t_{\text{final}}] \\ f(x, y) = \chi (\sin(2\pi (x + y)) + \cos(2\pi (x + y))), & (x, y) \in (0, 2\pi)^{2} \\ \nabla \cdot \mathbf{u} = 0, & (x, y) \in (0, 2\pi)^{2}, t \in (0, t_{\text{final}}] \\ \omega(x, y, 0) = \omega_{0}, & (x, y) \in (0, 2\pi)^{2} \end{array} \right. \tag{H.1} +$$ + +In this study, we used $\chi = 0.1$ , $\nu = 10^{-5}$ , and periodic boundary conditions. The vorticity initial condition was sampled from a Gaussian random field according to the distribution $\mathcal{N}(0,14^{0.5}(-\Delta +196I)^{-1.5})$ . The following figure demonstrates an example of the prediction results of the neural operator with and without HFS. + +![](images/1cfcf41b9fcb16a03911d047cd5dfb8c48cb6eb5ef8522a0e365588ab7ffadcb.jpg) + +![](images/f2e000dac344778458cfa4d7d28377618422c7e3854b30636a302d1144e056ab.jpg) + +![](images/dfc42718a5b15fa76af837f1cf09d44379ea5e84068c7dfccc29da3080f16166.jpg) +Figure H.17: 2D Kolmogorov flow prediction results. (a) Ground truth solutions. (b) NO predictions. (c) HFS-enhanced NO predictions. (d) The corresponding energy spectra $(p(k))$ for predictions at each time-step. (e) Zoomed-in view of energy spectra showing only the high wavenumbers for better visualization of the differences. The legends in (d) are applicable to (e) as well. 
\ No newline at end of file diff --git a/data/2025/2503_13xxx/2503.13695/images/0255dc7ff44b63e60640617dbe202f47d5e43c9354551d69843c1a39ce725f0f.jpg b/data/2025/2503_13xxx/2503.13695/images/0255dc7ff44b63e60640617dbe202f47d5e43c9354551d69843c1a39ce725f0f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..179d2d4d1bc1a844496db3314cfb04942c706fe8 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/0255dc7ff44b63e60640617dbe202f47d5e43c9354551d69843c1a39ce725f0f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3ab2889ef93de83d71d405ded2a303c95baf0a3e04c698864035673b067888bd +size 6432 diff --git a/data/2025/2503_13xxx/2503.13695/images/02e29bc84d1768810e1282a65f1138753dd896b1b55969f5a4838f82670d53f6.jpg b/data/2025/2503_13xxx/2503.13695/images/02e29bc84d1768810e1282a65f1138753dd896b1b55969f5a4838f82670d53f6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2a9ed1d1b3d798ecc2b85ccfcbaf669463f5b6ac --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/02e29bc84d1768810e1282a65f1138753dd896b1b55969f5a4838f82670d53f6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a1e894517b102054773c5d95b753c267d99de4bfbd3cf6199bb01cf57ade22df +size 12471 diff --git a/data/2025/2503_13xxx/2503.13695/images/05fba3cc6e4ff3f30a68095ffe33390bd08dc560864db76ec9a2afb800f5746a.jpg b/data/2025/2503_13xxx/2503.13695/images/05fba3cc6e4ff3f30a68095ffe33390bd08dc560864db76ec9a2afb800f5746a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a805c3326fe480d7b40b15769e2f52aee0d0ea3b --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/05fba3cc6e4ff3f30a68095ffe33390bd08dc560864db76ec9a2afb800f5746a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d97c184f6a62e3dd2596aa065200597f64554d4c18eaaa5e3f540978936a2182 +size 5740 diff --git 
a/data/2025/2503_13xxx/2503.13695/images/06a9908dbf997c468a6ff901a52a01cdf5091d706ec7f0c0dae199bce729c2d8.jpg b/data/2025/2503_13xxx/2503.13695/images/06a9908dbf997c468a6ff901a52a01cdf5091d706ec7f0c0dae199bce729c2d8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d7ec22a8dc7746fd2e3880d0a5d05d220b75431b --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/06a9908dbf997c468a6ff901a52a01cdf5091d706ec7f0c0dae199bce729c2d8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:57574e9a32f41a1a983f8c65678c22e9409ce43a99cda2755564a27fbb31f3f2 +size 22246 diff --git a/data/2025/2503_13xxx/2503.13695/images/06d4a56db22499e908292b06114cca61f60690cad185fe59b16acb00df8e6ef2.jpg b/data/2025/2503_13xxx/2503.13695/images/06d4a56db22499e908292b06114cca61f60690cad185fe59b16acb00df8e6ef2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a97b99b860f3a89857ef8f200936c5712b5e3ffd --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/06d4a56db22499e908292b06114cca61f60690cad185fe59b16acb00df8e6ef2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3fcb4ef86852c00abc7851372ff550eeb0d06f1b8b79235a2c32f3d57ca2d027 +size 7014 diff --git a/data/2025/2503_13xxx/2503.13695/images/0a2207b92d4a876252be664d76541511a159339b9f752eac681e2c9f2f8745fa.jpg b/data/2025/2503_13xxx/2503.13695/images/0a2207b92d4a876252be664d76541511a159339b9f752eac681e2c9f2f8745fa.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d0ae84140481d2217fae3f800a617244d85c0cdb --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/0a2207b92d4a876252be664d76541511a159339b9f752eac681e2c9f2f8745fa.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c4ddcda9a72aa0857c1f2f2806c1d959527fa1723633c22642f34b404fd402a2 +size 6507 diff --git a/data/2025/2503_13xxx/2503.13695/images/0b11dca30d91ab7cc7f583b42ca3b48537df8a367e51724eab149a411f6447b1.jpg 
b/data/2025/2503_13xxx/2503.13695/images/0b11dca30d91ab7cc7f583b42ca3b48537df8a367e51724eab149a411f6447b1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..04e0d5b8f1c61572ad980b8c6a3c760909e42f0d --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/0b11dca30d91ab7cc7f583b42ca3b48537df8a367e51724eab149a411f6447b1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4eb36939eb1bd591ff7c81afed6ea297544fc418e7895858ce6682d42884efb7 +size 10866 diff --git a/data/2025/2503_13xxx/2503.13695/images/0d8bcff3f5dad1dfd457e9f5dd4d81f799dc0b6bcee229617b3874b126d9c772.jpg b/data/2025/2503_13xxx/2503.13695/images/0d8bcff3f5dad1dfd457e9f5dd4d81f799dc0b6bcee229617b3874b126d9c772.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8b29d7f6968869934c51064d5f39b5bc5dcf37f0 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/0d8bcff3f5dad1dfd457e9f5dd4d81f799dc0b6bcee229617b3874b126d9c772.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7c949fc2511c3d1d4e6923254947b12d9047bd9a607f3f8280a88051059b532a +size 4814 diff --git a/data/2025/2503_13xxx/2503.13695/images/109eabf1c2090326d7c86275a3cb013ee490da972d1d8047ce97ccfd130ab144.jpg b/data/2025/2503_13xxx/2503.13695/images/109eabf1c2090326d7c86275a3cb013ee490da972d1d8047ce97ccfd130ab144.jpg new file mode 100644 index 0000000000000000000000000000000000000000..378d7ba0d4f4f484ff72de5314b474c9a2a2f807 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/109eabf1c2090326d7c86275a3cb013ee490da972d1d8047ce97ccfd130ab144.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dbd8b1c4315668b69210ca485fbb8aaa3f1382536e88653200bdd2bfdd0f7445 +size 108746 diff --git a/data/2025/2503_13xxx/2503.13695/images/12c8e15807fdd5f188eefd57ef41904289a4d945386824debe1ba6cbf304b01e.jpg b/data/2025/2503_13xxx/2503.13695/images/12c8e15807fdd5f188eefd57ef41904289a4d945386824debe1ba6cbf304b01e.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..cca7d5df2e44b597a948990e677c688dcbcff202 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/12c8e15807fdd5f188eefd57ef41904289a4d945386824debe1ba6cbf304b01e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0a62616d4a4e48ff50674877f153e8f4dbb3d83ba3f493012bd620faf94582a8 +size 7261 diff --git a/data/2025/2503_13xxx/2503.13695/images/14d2c43c9f26237111d97d362572890aad6cd1ac9400cb8eb6973553a8a9c8d1.jpg b/data/2025/2503_13xxx/2503.13695/images/14d2c43c9f26237111d97d362572890aad6cd1ac9400cb8eb6973553a8a9c8d1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0858a22594a7638625da69fb61192dfe1ed8848e --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/14d2c43c9f26237111d97d362572890aad6cd1ac9400cb8eb6973553a8a9c8d1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d5dd1141fd29a397c8ca51965e8aa951b36af212e7550e7c7e4cef306e724d4a +size 8164 diff --git a/data/2025/2503_13xxx/2503.13695/images/1a0ef49c31cd391865df11f05db9cb02c6029f97bb662e0074c4d4064dacdf5c.jpg b/data/2025/2503_13xxx/2503.13695/images/1a0ef49c31cd391865df11f05db9cb02c6029f97bb662e0074c4d4064dacdf5c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3ffb3eadabd1d3b951ba3250dd9875b8520be842 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/1a0ef49c31cd391865df11f05db9cb02c6029f97bb662e0074c4d4064dacdf5c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6ab00fa4000ee43d2555f407b182906430482794131e744f4cb689d63c682b7a +size 30692 diff --git a/data/2025/2503_13xxx/2503.13695/images/1cfcf41b9fcb16a03911d047cd5dfb8c48cb6eb5ef8522a0e365588ab7ffadcb.jpg b/data/2025/2503_13xxx/2503.13695/images/1cfcf41b9fcb16a03911d047cd5dfb8c48cb6eb5ef8522a0e365588ab7ffadcb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9b3946822ddb25ef3b47ff611c9d3fe0b3099e6b --- /dev/null +++ 
b/data/2025/2503_13xxx/2503.13695/images/1cfcf41b9fcb16a03911d047cd5dfb8c48cb6eb5ef8522a0e365588ab7ffadcb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cb1590b35d725f0ed56574a52ce9ceab84fd417f626f67f1c98c089a00418b44 +size 114124 diff --git a/data/2025/2503_13xxx/2503.13695/images/1f4a0d9406d73a62956b40c8971057b6fabc517067f9a1e18c104fd4f627edbb.jpg b/data/2025/2503_13xxx/2503.13695/images/1f4a0d9406d73a62956b40c8971057b6fabc517067f9a1e18c104fd4f627edbb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3ab6cc81ca9d52aca2a6c49e6b1654043c1c2477 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/1f4a0d9406d73a62956b40c8971057b6fabc517067f9a1e18c104fd4f627edbb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:93a9c6d246911324401656a72aa6a3fb77160be05e8a73138703ca2992d4c1a1 +size 16049 diff --git a/data/2025/2503_13xxx/2503.13695/images/216db5d1060828f55173fc75cc3f6bdd527f94ae0d29cc7ad38e6bb2fa212e64.jpg b/data/2025/2503_13xxx/2503.13695/images/216db5d1060828f55173fc75cc3f6bdd527f94ae0d29cc7ad38e6bb2fa212e64.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3b0f0c1c9dc04ba39201c6a0d598982a932044a5 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/216db5d1060828f55173fc75cc3f6bdd527f94ae0d29cc7ad38e6bb2fa212e64.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:634df3b07657cf57516495d4e79a71470ca79f4e398d5c2e2b4ff39845464cf9 +size 5663 diff --git a/data/2025/2503_13xxx/2503.13695/images/21d6dea1825b413f89957217e3a9148548072fa1907281353e79782273e0fc08.jpg b/data/2025/2503_13xxx/2503.13695/images/21d6dea1825b413f89957217e3a9148548072fa1907281353e79782273e0fc08.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d7a9e71c0591f7fafdf9c42539c4b801a7ac2326 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/21d6dea1825b413f89957217e3a9148548072fa1907281353e79782273e0fc08.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:75540b4b86fdd635b00721c4b8adf2cf76f1e011431277583430a1f0ccfefadc +size 7395 diff --git a/data/2025/2503_13xxx/2503.13695/images/24a5a142fba9df36a266ff532a6dd6f10514925e5ceb3121f29d52b03610d45f.jpg b/data/2025/2503_13xxx/2503.13695/images/24a5a142fba9df36a266ff532a6dd6f10514925e5ceb3121f29d52b03610d45f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bb8f92096ac1674eab7759437fec5fb68aa13152 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/24a5a142fba9df36a266ff532a6dd6f10514925e5ceb3121f29d52b03610d45f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:85745f16164cbf130880842fa410406614a46d65335c81066ef5416f80eb56e4 +size 6137 diff --git a/data/2025/2503_13xxx/2503.13695/images/24e9419cdc031bbe8ee845630b852d0f762ff0f707d7ec8c2a0be986d2140e2a.jpg b/data/2025/2503_13xxx/2503.13695/images/24e9419cdc031bbe8ee845630b852d0f762ff0f707d7ec8c2a0be986d2140e2a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e70e43bc8fa4879577c6086d4081d1d45053aba5 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/24e9419cdc031bbe8ee845630b852d0f762ff0f707d7ec8c2a0be986d2140e2a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c65f2caa1dd1da19acd155546b57d204a074389e8f7d85d75a69dac71a4e0ef +size 17314 diff --git a/data/2025/2503_13xxx/2503.13695/images/26821bc824c2e55cf6de7c68946bbee89a3839676996ae1a9d5b3a48a7ec2804.jpg b/data/2025/2503_13xxx/2503.13695/images/26821bc824c2e55cf6de7c68946bbee89a3839676996ae1a9d5b3a48a7ec2804.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b93eaf7caa5559723563cf07ced53b0073c3179a --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/26821bc824c2e55cf6de7c68946bbee89a3839676996ae1a9d5b3a48a7ec2804.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f0ac26f846e14eb4dc9fdefc31b4b90ce451b4942ae678dd3e1eb3b2b3990048 +size 5765 diff --git 
a/data/2025/2503_13xxx/2503.13695/images/26fae4e7f5d449cdb38456dce2702cfb869bd6bbd14b388e48a3f8928a80c39d.jpg b/data/2025/2503_13xxx/2503.13695/images/26fae4e7f5d449cdb38456dce2702cfb869bd6bbd14b388e48a3f8928a80c39d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..13150feb1cd41ffe41dc6e9cbaf2e8cc86daa8e8 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/26fae4e7f5d449cdb38456dce2702cfb869bd6bbd14b388e48a3f8928a80c39d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:13feda5597b9781afb31cad3420e81ec434fd88d34add4fe78f85338c111d4ac +size 57476 diff --git a/data/2025/2503_13xxx/2503.13695/images/296cbe932f78eae1ce2bd146deed18f92446c6a21cc9c53a07e445e9df2d2011.jpg b/data/2025/2503_13xxx/2503.13695/images/296cbe932f78eae1ce2bd146deed18f92446c6a21cc9c53a07e445e9df2d2011.jpg new file mode 100644 index 0000000000000000000000000000000000000000..08a0654e3e00880a01f39e153bc672668d11a53e --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/296cbe932f78eae1ce2bd146deed18f92446c6a21cc9c53a07e445e9df2d2011.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6faf1601c75874e413aed8a956d9360824c9f1f85c6edf7c30f3753fb38bd24d +size 4012 diff --git a/data/2025/2503_13xxx/2503.13695/images/2e662c6924e4dc916926501fd33cf6fb7658ebfdf77a643b1876c42b62c9c1aa.jpg b/data/2025/2503_13xxx/2503.13695/images/2e662c6924e4dc916926501fd33cf6fb7658ebfdf77a643b1876c42b62c9c1aa.jpg new file mode 100644 index 0000000000000000000000000000000000000000..adcd63d1396998f692245aa991715141c7422d71 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/2e662c6924e4dc916926501fd33cf6fb7658ebfdf77a643b1876c42b62c9c1aa.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:07321d191c78495879dafb10a17eb492b9bae6455ef4aba3686cab9e6750f930 +size 9555 diff --git a/data/2025/2503_13xxx/2503.13695/images/32ecc7c14abb8460e4d6c4033cf8cd415b55ec2ac589b266e28f285a8d99e10a.jpg 
b/data/2025/2503_13xxx/2503.13695/images/32ecc7c14abb8460e4d6c4033cf8cd415b55ec2ac589b266e28f285a8d99e10a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b7bd11316c6b18f914acd70521719b745f690764 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/32ecc7c14abb8460e4d6c4033cf8cd415b55ec2ac589b266e28f285a8d99e10a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3c9b999bf4e2f821bf4a5cf7dc313941f0c887333882b69585f8b6522aa32c5f +size 5405 diff --git a/data/2025/2503_13xxx/2503.13695/images/32f06be7849e92231d12edb1f8aab5d30df58ae3d8707e22c1fe4771b3eb8a5e.jpg b/data/2025/2503_13xxx/2503.13695/images/32f06be7849e92231d12edb1f8aab5d30df58ae3d8707e22c1fe4771b3eb8a5e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..441b0930a5c57cb0ae012e2c624ebf70e944f28d --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/32f06be7849e92231d12edb1f8aab5d30df58ae3d8707e22c1fe4771b3eb8a5e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:623850d6419d63f844fdb8e87859abaedca94595e6442dad0a6cfe553c854b69 +size 5715 diff --git a/data/2025/2503_13xxx/2503.13695/images/3379c693ec231bdecec176e4940ec55fb41c91a3584394e5c718fa44b432e3e9.jpg b/data/2025/2503_13xxx/2503.13695/images/3379c693ec231bdecec176e4940ec55fb41c91a3584394e5c718fa44b432e3e9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..eb040f567e7f52cce16adf621a8f2745902bab6e --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/3379c693ec231bdecec176e4940ec55fb41c91a3584394e5c718fa44b432e3e9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:859968eca30716f4752c6e1078459870acb332d844ddb42861a6a54129f7879f +size 5468 diff --git a/data/2025/2503_13xxx/2503.13695/images/3af287cf23e6f53219a3589e04c57a037bb16dd30b3685c7cc69a05bbec05bb1.jpg b/data/2025/2503_13xxx/2503.13695/images/3af287cf23e6f53219a3589e04c57a037bb16dd30b3685c7cc69a05bbec05bb1.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..38bbab72a181df3e5cb26aa083914684a26d63e6 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/3af287cf23e6f53219a3589e04c57a037bb16dd30b3685c7cc69a05bbec05bb1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b838c9ff36ebc284ff1e343af6ddfa7306e892beb73d915e3c472f310fc082ec +size 13471 diff --git a/data/2025/2503_13xxx/2503.13695/images/3c80f7d8bae04c690074b0bd039bbc2ccb94c86c53fb41a6d536ea842e012cfb.jpg b/data/2025/2503_13xxx/2503.13695/images/3c80f7d8bae04c690074b0bd039bbc2ccb94c86c53fb41a6d536ea842e012cfb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4ae2c963390d9c6f38804d3ace5eb07fb43429f1 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/3c80f7d8bae04c690074b0bd039bbc2ccb94c86c53fb41a6d536ea842e012cfb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1a2700eab79a8bded61e391c83788ab8252b6fea18055e93deb6e6ed6a1d1131 +size 5027 diff --git a/data/2025/2503_13xxx/2503.13695/images/3e6d7324658656d6206cc3e05f7607fad8522133a6e64cee125fd0325b76d3b3.jpg b/data/2025/2503_13xxx/2503.13695/images/3e6d7324658656d6206cc3e05f7607fad8522133a6e64cee125fd0325b76d3b3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..764ebe62857412c09b2b3ac247299bb3e98a6203 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/3e6d7324658656d6206cc3e05f7607fad8522133a6e64cee125fd0325b76d3b3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:63e0d9f4480c68a6e0297cd593719479c965427a9f7dff9c81503982def132d8 +size 28586 diff --git a/data/2025/2503_13xxx/2503.13695/images/404739d4e1a7bce81a61b07954c246561d4c0b5d4e5df4224d919983d7e0719e.jpg b/data/2025/2503_13xxx/2503.13695/images/404739d4e1a7bce81a61b07954c246561d4c0b5d4e5df4224d919983d7e0719e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..835137d55e58c125b9dce6a4643bbc13c93764ac --- /dev/null +++ 
b/data/2025/2503_13xxx/2503.13695/images/404739d4e1a7bce81a61b07954c246561d4c0b5d4e5df4224d919983d7e0719e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0e0a6926e01b190c994f58139f839d4bb388a417c117885edbe95199ff7a9e41 +size 24572 diff --git a/data/2025/2503_13xxx/2503.13695/images/4077f969a3d5c28f84a171a7ac2a8f647114a1140ff1cab20e790c3e6be946c6.jpg b/data/2025/2503_13xxx/2503.13695/images/4077f969a3d5c28f84a171a7ac2a8f647114a1140ff1cab20e790c3e6be946c6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..180f565d3b0c3b8eee4cc47052a349928b0c9952 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/4077f969a3d5c28f84a171a7ac2a8f647114a1140ff1cab20e790c3e6be946c6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9ece7ca7579fcb91b22c20afd278b105e59c96988f23d987f84d06be31eccb11 +size 9308 diff --git a/data/2025/2503_13xxx/2503.13695/images/46f47561b47a1d8e82a51bf46d73c89d671b14142e75b748b48a9f23c4bc18e5.jpg b/data/2025/2503_13xxx/2503.13695/images/46f47561b47a1d8e82a51bf46d73c89d671b14142e75b748b48a9f23c4bc18e5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..56f7e2aa83a7600bdbc4ecc00ff9aeb8c2a9775d --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/46f47561b47a1d8e82a51bf46d73c89d671b14142e75b748b48a9f23c4bc18e5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4363c7dc6c6b4e582a22809c195c500367257ca4bbb80db9bcea01e85ca4d4f1 +size 4619 diff --git a/data/2025/2503_13xxx/2503.13695/images/4b5053f22f5d1045d4bc1f94ad7a3d9e743281a60a70f0fe826b8afd6b118513.jpg b/data/2025/2503_13xxx/2503.13695/images/4b5053f22f5d1045d4bc1f94ad7a3d9e743281a60a70f0fe826b8afd6b118513.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4260b374632d85c92be0caa182ebd332572fc17d --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/4b5053f22f5d1045d4bc1f94ad7a3d9e743281a60a70f0fe826b8afd6b118513.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:ddd4bf92bd4e97ad4c409a4441d00daaf040ddf37373f9ec7c06f3197e154f14 +size 10407 diff --git a/data/2025/2503_13xxx/2503.13695/images/4c7260a04619c0b1d7dbd6af86e9d60a152d47c4eda51b99716497509cce91f6.jpg b/data/2025/2503_13xxx/2503.13695/images/4c7260a04619c0b1d7dbd6af86e9d60a152d47c4eda51b99716497509cce91f6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..24f30b9bc89cac9b61aeb5096a2428370fd72ab3 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/4c7260a04619c0b1d7dbd6af86e9d60a152d47c4eda51b99716497509cce91f6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9be0ac6301fe138a990d1cb8cfc985d4f67d3470bc6dd59d7ad1d78b74c6bb99 +size 10025 diff --git a/data/2025/2503_13xxx/2503.13695/images/5060cf71f4b61cf2ad25d180bd338654e4f2c327f2618a01d4d585f52ba14efd.jpg b/data/2025/2503_13xxx/2503.13695/images/5060cf71f4b61cf2ad25d180bd338654e4f2c327f2618a01d4d585f52ba14efd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..82ebbeae1a8cc8c49726347acd8b0fd6c9acbe02 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/5060cf71f4b61cf2ad25d180bd338654e4f2c327f2618a01d4d585f52ba14efd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b1733d1c32fbc18aa3f4319c1b052b70a4b2d0d9926d6f09e5feccabe8ba948 +size 5795 diff --git a/data/2025/2503_13xxx/2503.13695/images/54d45f249993056243a6c7a732dd99069452cb3aadac4e89970dbe83138eb984.jpg b/data/2025/2503_13xxx/2503.13695/images/54d45f249993056243a6c7a732dd99069452cb3aadac4e89970dbe83138eb984.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6b5f27beec73f9a64c9d972c9d7db0d7f09342c4 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/54d45f249993056243a6c7a732dd99069452cb3aadac4e89970dbe83138eb984.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9486fb55b7ce5e04a198be50bf335018b4d6d18e4f05542ac429d14cdc15c4e2 +size 12394 diff --git 
a/data/2025/2503_13xxx/2503.13695/images/54e291267e3c4266a59c83d920c861ff4f608dfa734ee3f6ab5844d262da96c2.jpg b/data/2025/2503_13xxx/2503.13695/images/54e291267e3c4266a59c83d920c861ff4f608dfa734ee3f6ab5844d262da96c2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..058d79e7a0876b91c0245ca779b0621b26c4d241 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/54e291267e3c4266a59c83d920c861ff4f608dfa734ee3f6ab5844d262da96c2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ae28001082f60e30b603e570a40ac38ad1675990a79b78843e2c5f9ca2e4777b +size 50043 diff --git a/data/2025/2503_13xxx/2503.13695/images/5724b436a86300be2784fa68ea7aec5bd1eb7e60f905356e9a61d6ce2dea4070.jpg b/data/2025/2503_13xxx/2503.13695/images/5724b436a86300be2784fa68ea7aec5bd1eb7e60f905356e9a61d6ce2dea4070.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7466168465217013695fcbb7b335ddfb8977440c --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/5724b436a86300be2784fa68ea7aec5bd1eb7e60f905356e9a61d6ce2dea4070.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:12a5081a064e2836144228d870543674f83c2b4324d9fb70de3554978ba55928 +size 7124 diff --git a/data/2025/2503_13xxx/2503.13695/images/58537f9f0b24681dc7cbeaeca1b1a9b091ce3452f0c6cb59fec6e162e5eb03b3.jpg b/data/2025/2503_13xxx/2503.13695/images/58537f9f0b24681dc7cbeaeca1b1a9b091ce3452f0c6cb59fec6e162e5eb03b3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d46116d60dbff529f3ff1a070ce913b062adf8d5 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/58537f9f0b24681dc7cbeaeca1b1a9b091ce3452f0c6cb59fec6e162e5eb03b3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bf85ba99c4a3c0176ba7f7cf3a6b4de34e2878a3c309a70b3e0fc4c056d8a766 +size 6924 diff --git a/data/2025/2503_13xxx/2503.13695/images/5920e7c922da97a60a54c9bbdc9099dafa8582f64a270ac461208c70d6889c24.jpg 
b/data/2025/2503_13xxx/2503.13695/images/5920e7c922da97a60a54c9bbdc9099dafa8582f64a270ac461208c70d6889c24.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c3eb68c3697f8414caea839cdfb4b3ddf024c967 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/5920e7c922da97a60a54c9bbdc9099dafa8582f64a270ac461208c70d6889c24.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cd72a393503f86a42909700173689702b9e1a4d99d984e75500ac969e844ac93 +size 20389 diff --git a/data/2025/2503_13xxx/2503.13695/images/59f496ffcbc52b4a9b0fa7ec0bf05bdff02cdd881eb5c24ff6271a98046d3169.jpg b/data/2025/2503_13xxx/2503.13695/images/59f496ffcbc52b4a9b0fa7ec0bf05bdff02cdd881eb5c24ff6271a98046d3169.jpg new file mode 100644 index 0000000000000000000000000000000000000000..38856cb586c6898f4f684a207bcc3303f93d640a --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/59f496ffcbc52b4a9b0fa7ec0bf05bdff02cdd881eb5c24ff6271a98046d3169.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9fe82be91e1f6a5df0e00f6df7933853c0406d77f936d38613ae7d140e2f5cda +size 6878 diff --git a/data/2025/2503_13xxx/2503.13695/images/5bd552819042709e8514a471c06051655dec81d3d173212e98e78c9851f40c04.jpg b/data/2025/2503_13xxx/2503.13695/images/5bd552819042709e8514a471c06051655dec81d3d173212e98e78c9851f40c04.jpg new file mode 100644 index 0000000000000000000000000000000000000000..158d10d5754c795fb6178d4628d0f3933c7a4adb --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/5bd552819042709e8514a471c06051655dec81d3d173212e98e78c9851f40c04.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:da634ff500b530cdd32a02d612039f2c3287a8d539c552863fc8c90c79f9fee4 +size 19823 diff --git a/data/2025/2503_13xxx/2503.13695/images/5cbd1f6bf814e03424dbab09e92667104e5c0975fa72dd0b632444ed73b832f1.jpg b/data/2025/2503_13xxx/2503.13695/images/5cbd1f6bf814e03424dbab09e92667104e5c0975fa72dd0b632444ed73b832f1.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..a86f9a843bb8630ca3dadbd400f96f6672fdf512 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/5cbd1f6bf814e03424dbab09e92667104e5c0975fa72dd0b632444ed73b832f1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a28ecae0cea9bf8c088540576b6d24ce10a23c6539b5f9db45b77c11443ffe06 +size 13481 diff --git a/data/2025/2503_13xxx/2503.13695/images/5f217779f4ebeb8edefda5e739dab23bb283949bec47b55a0e34af427e3e5413.jpg b/data/2025/2503_13xxx/2503.13695/images/5f217779f4ebeb8edefda5e739dab23bb283949bec47b55a0e34af427e3e5413.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9b66e4d6423003cce73080a6429f4a03aa8d1791 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/5f217779f4ebeb8edefda5e739dab23bb283949bec47b55a0e34af427e3e5413.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:48fa6f4a4f998decb139ce22329600ebfcd68de3b56fb5d895a40a3858e814f9 +size 8403 diff --git a/data/2025/2503_13xxx/2503.13695/images/5fb472f59af9da9bdd882ecd832da3951e0959515d0fc1aeac26fd1cca7a0ba9.jpg b/data/2025/2503_13xxx/2503.13695/images/5fb472f59af9da9bdd882ecd832da3951e0959515d0fc1aeac26fd1cca7a0ba9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0d11f10457199d31bc2a32c6d765b1dd6f7a7a69 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/5fb472f59af9da9bdd882ecd832da3951e0959515d0fc1aeac26fd1cca7a0ba9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:56de8d0e8ffdcf8733320e9dd856e4e74e06d2de1185db011d22ec1740b3839e +size 8612 diff --git a/data/2025/2503_13xxx/2503.13695/images/629dde0d1af87ae717d9f62eb3415d917ebe53718ff14baae00b6da5edc7a8fa.jpg b/data/2025/2503_13xxx/2503.13695/images/629dde0d1af87ae717d9f62eb3415d917ebe53718ff14baae00b6da5edc7a8fa.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a4f7efeab99fe3dd1a22cafa4984db5f9fa1fcf2 --- /dev/null +++ 
b/data/2025/2503_13xxx/2503.13695/images/629dde0d1af87ae717d9f62eb3415d917ebe53718ff14baae00b6da5edc7a8fa.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a6cf3b5b09815210171e7ac19b8aec7afb66fbd4b45621c01aae2a747fc448f9 +size 157183 diff --git a/data/2025/2503_13xxx/2503.13695/images/62f709bfa82bfda8d899c4456f8d18cb4aaae24a3a3354dc86d148ef45498e67.jpg b/data/2025/2503_13xxx/2503.13695/images/62f709bfa82bfda8d899c4456f8d18cb4aaae24a3a3354dc86d148ef45498e67.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d4282df19986ab10779623d123fb52fac422fc1a --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/62f709bfa82bfda8d899c4456f8d18cb4aaae24a3a3354dc86d148ef45498e67.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e8ab9eafb7ddc5765f79968ebbd0643b8cc6cd6ca98c7487875104cdf90b0b88 +size 114002 diff --git a/data/2025/2503_13xxx/2503.13695/images/675a8f9336f44185e5a4a8f988d3a39818f20adc98510f75efb9bd2996a69884.jpg b/data/2025/2503_13xxx/2503.13695/images/675a8f9336f44185e5a4a8f988d3a39818f20adc98510f75efb9bd2996a69884.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d9497887feba3606f9e7b63bef49c65c2dd7338f --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/675a8f9336f44185e5a4a8f988d3a39818f20adc98510f75efb9bd2996a69884.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:32b0f39c0722bc5ed8d847ed68e11799a99ee81fd9f0d0757a6e57f37591ede2 +size 5315 diff --git a/data/2025/2503_13xxx/2503.13695/images/686e2d01f5e6a349e72871fbccfb325e6cbbd5c81366e8d859267b2c0dfd9c41.jpg b/data/2025/2503_13xxx/2503.13695/images/686e2d01f5e6a349e72871fbccfb325e6cbbd5c81366e8d859267b2c0dfd9c41.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cd1c1fc853aa4039e9e359173344f956e8b0e4e7 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/686e2d01f5e6a349e72871fbccfb325e6cbbd5c81366e8d859267b2c0dfd9c41.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:5893a351f758f36789cfe2d5189e86f614b89ab26391d546bfa04361cc66d771 +size 25025 diff --git a/data/2025/2503_13xxx/2503.13695/images/68dddade02c2590e94e546d373570e8a15684914e477cd49190c23707cd2bc18.jpg b/data/2025/2503_13xxx/2503.13695/images/68dddade02c2590e94e546d373570e8a15684914e477cd49190c23707cd2bc18.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8ee4f9ed899137d3f2680c3cdd5a45f7185492e3 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/68dddade02c2590e94e546d373570e8a15684914e477cd49190c23707cd2bc18.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e5e052be8533e0d6194a4399e45ca6f0e9a99b7caa43fcad179264c3d58734a9 +size 5308 diff --git a/data/2025/2503_13xxx/2503.13695/images/6acce8a74d6789197fed4cf6e66d8f7ce554c25121db815c611f22a624e7aa8c.jpg b/data/2025/2503_13xxx/2503.13695/images/6acce8a74d6789197fed4cf6e66d8f7ce554c25121db815c611f22a624e7aa8c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..19e55cfdaa2ea06683e186c5c8c1611d1512d147 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/6acce8a74d6789197fed4cf6e66d8f7ce554c25121db815c611f22a624e7aa8c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2abde89cb2485f1031365a7c5f57a9b6b4475ebcc79901bfd840429a694e5468 +size 7466 diff --git a/data/2025/2503_13xxx/2503.13695/images/6af72eec91e07a50d37a4a08fa6586c52969c4291b266bc7426bfb1b0c1c374a.jpg b/data/2025/2503_13xxx/2503.13695/images/6af72eec91e07a50d37a4a08fa6586c52969c4291b266bc7426bfb1b0c1c374a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..350cefabc299dadd06df6c4b91e7f88eabf86b43 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/6af72eec91e07a50d37a4a08fa6586c52969c4291b266bc7426bfb1b0c1c374a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b31fcc46746ec0866a0f532bcafcbcb947002facc593a8505776c49d0196988e +size 5186 diff --git 
a/data/2025/2503_13xxx/2503.13695/images/6c7b0c2e0c10c5a798fdcfa928b49ce73045efbd3b1cd8453688f7c99d17cd8a.jpg b/data/2025/2503_13xxx/2503.13695/images/6c7b0c2e0c10c5a798fdcfa928b49ce73045efbd3b1cd8453688f7c99d17cd8a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0fd16e37318eba6729aaf8eb23da4d183a205194 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/6c7b0c2e0c10c5a798fdcfa928b49ce73045efbd3b1cd8453688f7c99d17cd8a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6204857eb8a25ba8dbaeb5bae4404b5326b43d1f14bc346227a4c368b302db4a +size 5172 diff --git a/data/2025/2503_13xxx/2503.13695/images/6ebc32a309ea9520fc757d5c3ce1220418cc5c6e2798e6456c14b5411edca5e4.jpg b/data/2025/2503_13xxx/2503.13695/images/6ebc32a309ea9520fc757d5c3ce1220418cc5c6e2798e6456c14b5411edca5e4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..97c333e13997b25356abd8cd704d43cca12f6023 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/6ebc32a309ea9520fc757d5c3ce1220418cc5c6e2798e6456c14b5411edca5e4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:632cf0a3efb4b5e8e4b65a5ac9e30ac5c700bfb1d84c421048963c144596d12f +size 11446 diff --git a/data/2025/2503_13xxx/2503.13695/images/7336432f3f14053949b40efa42bad4fd0c39bbab6cbcf2075dc6987d42cba825.jpg b/data/2025/2503_13xxx/2503.13695/images/7336432f3f14053949b40efa42bad4fd0c39bbab6cbcf2075dc6987d42cba825.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f85f0dcced62c3da42d49da994898519fa47915d --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/7336432f3f14053949b40efa42bad4fd0c39bbab6cbcf2075dc6987d42cba825.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3dab300294e57dab288026984741734cc71a47dba313f92202c806fef5b08a2c +size 100374 diff --git a/data/2025/2503_13xxx/2503.13695/images/750959e9badbced2a4d620b4cc1c1984ac361f39eab61640da800817a909a643.jpg 
b/data/2025/2503_13xxx/2503.13695/images/750959e9badbced2a4d620b4cc1c1984ac361f39eab61640da800817a909a643.jpg new file mode 100644 index 0000000000000000000000000000000000000000..be079d1f34845dc7d5449a85efbd606bcb20fef2 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/750959e9badbced2a4d620b4cc1c1984ac361f39eab61640da800817a909a643.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:322930e44acc24ebadc167766484ebcc5680795ebfe015ed6ce350a049a79b1a +size 15097 diff --git a/data/2025/2503_13xxx/2503.13695/images/7acbf978c26e0e5745e06313089bfbd821791e207c3cd601e0ff50804ab6dc4b.jpg b/data/2025/2503_13xxx/2503.13695/images/7acbf978c26e0e5745e06313089bfbd821791e207c3cd601e0ff50804ab6dc4b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5d83b55636d9a14cd7ecea1d60fab4c1c29e96bb --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/7acbf978c26e0e5745e06313089bfbd821791e207c3cd601e0ff50804ab6dc4b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fe57c5b47efdc2201d10fbe99809dcb908500a9f00229c230807523254b523e1 +size 9265 diff --git a/data/2025/2503_13xxx/2503.13695/images/7ad97d2813f0aaf6bea9c40b37d06895f3bb84041a8c318c716f437749e17e13.jpg b/data/2025/2503_13xxx/2503.13695/images/7ad97d2813f0aaf6bea9c40b37d06895f3bb84041a8c318c716f437749e17e13.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a63d22e25cd74c30e61b14eb80d1542256a5d155 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/7ad97d2813f0aaf6bea9c40b37d06895f3bb84041a8c318c716f437749e17e13.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b175a3f353266b649314866049a96f3d8b27d96e04bd118b5ba366d38fe6524c +size 14856 diff --git a/data/2025/2503_13xxx/2503.13695/images/7afd174a61dc3d1de47273ea402d57ec42d4b4855f6e00716ed25610f3bf2e99.jpg b/data/2025/2503_13xxx/2503.13695/images/7afd174a61dc3d1de47273ea402d57ec42d4b4855f6e00716ed25610f3bf2e99.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..58d9a2d2d1ee69a81912a49cea47d6a73300549c --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/7afd174a61dc3d1de47273ea402d57ec42d4b4855f6e00716ed25610f3bf2e99.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d369ff97f309a3cab81bf8ddd91ce10c9fa5a5ad8ed91ae136f29bbc2a3929e0 +size 4879 diff --git a/data/2025/2503_13xxx/2503.13695/images/818538e3c98e7ef1cb7ca02c7839b776ab683c489ef3ad9ddcfc9a7d6277cc8e.jpg b/data/2025/2503_13xxx/2503.13695/images/818538e3c98e7ef1cb7ca02c7839b776ab683c489ef3ad9ddcfc9a7d6277cc8e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..58595fe3f2f22234b892f1f3449f6c8de942014d --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/818538e3c98e7ef1cb7ca02c7839b776ab683c489ef3ad9ddcfc9a7d6277cc8e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a66995ea89ee464c9ac08f59d274ee76138d9fa43cb3d782f79cd4355e77112c +size 167099 diff --git a/data/2025/2503_13xxx/2503.13695/images/824288cda52c2891f4d10fcc048556f5025e12a9a08916a114c2879b54a5a4e0.jpg b/data/2025/2503_13xxx/2503.13695/images/824288cda52c2891f4d10fcc048556f5025e12a9a08916a114c2879b54a5a4e0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..35a5334c9eba8b288a115dec0b0cde8762f4a981 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/824288cda52c2891f4d10fcc048556f5025e12a9a08916a114c2879b54a5a4e0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c577eeb386832c3857c3c5d38be864fa706f0028912740b0ddb99f464141ed37 +size 8575 diff --git a/data/2025/2503_13xxx/2503.13695/images/829637f19e543333f93c6632ec44ddd9bda78de8bfca3bc1bff0bc0e7512c61c.jpg b/data/2025/2503_13xxx/2503.13695/images/829637f19e543333f93c6632ec44ddd9bda78de8bfca3bc1bff0bc0e7512c61c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c80357e27731b76d2de57b140547cd09f1ce1a6c --- /dev/null +++ 
b/data/2025/2503_13xxx/2503.13695/images/829637f19e543333f93c6632ec44ddd9bda78de8bfca3bc1bff0bc0e7512c61c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d16b881b3209b64601030b23af374edddd152786b2afb22a48ca95fdbeede4cd +size 17943 diff --git a/data/2025/2503_13xxx/2503.13695/images/836bc2f95d5c77926d61af7c14d4b1113300a5ed667f5ba49106606f9e5c07d1.jpg b/data/2025/2503_13xxx/2503.13695/images/836bc2f95d5c77926d61af7c14d4b1113300a5ed667f5ba49106606f9e5c07d1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f4fa060a662a2aeb2321e805eaa17eedc9b94238 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/836bc2f95d5c77926d61af7c14d4b1113300a5ed667f5ba49106606f9e5c07d1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:27c69689f168dda0247f423b17d305955cd9b6e20919629a138d56747500eebd +size 29702 diff --git a/data/2025/2503_13xxx/2503.13695/images/86cb337727b5fed499f55faf0f9b8ba65d56df1ad1aefcc124e2d92f7f7ac2c2.jpg b/data/2025/2503_13xxx/2503.13695/images/86cb337727b5fed499f55faf0f9b8ba65d56df1ad1aefcc124e2d92f7f7ac2c2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2174c28571b1a01c2d3f0b00b0aa48aa428b3560 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/86cb337727b5fed499f55faf0f9b8ba65d56df1ad1aefcc124e2d92f7f7ac2c2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5a4308d4470fdc51bf8bb782d9f25038e40d6f5ff15ce90e71344f1a52f71b0b +size 20965 diff --git a/data/2025/2503_13xxx/2503.13695/images/8b521be2ad6db402353a683c8e72a14a74947a986a7ba99f5da56787754d3d44.jpg b/data/2025/2503_13xxx/2503.13695/images/8b521be2ad6db402353a683c8e72a14a74947a986a7ba99f5da56787754d3d44.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e4eedf406f155dc604b807ac255c92872e9df353 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/8b521be2ad6db402353a683c8e72a14a74947a986a7ba99f5da56787754d3d44.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:8b369c4d25af81850eb6c29e448b53dfe185c522771eb4874349cd723d344a6c +size 21120 diff --git a/data/2025/2503_13xxx/2503.13695/images/8d4ce1826ae5537dfd8426399bd05bca2a0010646e57a820e373e7035e1a5285.jpg b/data/2025/2503_13xxx/2503.13695/images/8d4ce1826ae5537dfd8426399bd05bca2a0010646e57a820e373e7035e1a5285.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7dcb79e6e329557f8eb9feeb93d68a959686e60e --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/8d4ce1826ae5537dfd8426399bd05bca2a0010646e57a820e373e7035e1a5285.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ed5f1a85ff93355074f8e8c5e45c8e3dcc58f35a2f7855bad2d2f02680baa703 +size 15442 diff --git a/data/2025/2503_13xxx/2503.13695/images/8f424c4056077457e9ba4f82ff71b2dceebe8e63582d2e9e2910553e7c5a91a2.jpg b/data/2025/2503_13xxx/2503.13695/images/8f424c4056077457e9ba4f82ff71b2dceebe8e63582d2e9e2910553e7c5a91a2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fd09d88f700bebe94d3143d8bfe0423687c1d6ab --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/8f424c4056077457e9ba4f82ff71b2dceebe8e63582d2e9e2910553e7c5a91a2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2d75faac5a23ba0fa4922b5aa26e254447984036b6d0a28a8a15c4ebdffc8e6b +size 7503 diff --git a/data/2025/2503_13xxx/2503.13695/images/90f68fd7a6962a77f49e5e72d7250151fc08ad8098feb7692cd4ae7c3d8d8efd.jpg b/data/2025/2503_13xxx/2503.13695/images/90f68fd7a6962a77f49e5e72d7250151fc08ad8098feb7692cd4ae7c3d8d8efd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..25a140f9466dc1c48cc02a89f2c84c5c3572025c --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/90f68fd7a6962a77f49e5e72d7250151fc08ad8098feb7692cd4ae7c3d8d8efd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a96328cb8e86ebc822d5d39985eca93d6e0e5a94df2a0f2cc1ab27fd75672b39 +size 201911 diff --git 
a/data/2025/2503_13xxx/2503.13695/images/96e9fe952ba04a71427afb1dec43bde98adb15cd8c90862e8d75dc70147f8e70.jpg b/data/2025/2503_13xxx/2503.13695/images/96e9fe952ba04a71427afb1dec43bde98adb15cd8c90862e8d75dc70147f8e70.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8eedf8ebb21927619b153687483a93c650d18b05 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/96e9fe952ba04a71427afb1dec43bde98adb15cd8c90862e8d75dc70147f8e70.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4152efdc0ce2790bc90fe42469415a983e15085d9ccb8ca1a07d4dac29edd5ee +size 5931 diff --git a/data/2025/2503_13xxx/2503.13695/images/9baf9f6b18eb233208ff948902b5fec1c3ee7677059013bbd5eea3505b509fce.jpg b/data/2025/2503_13xxx/2503.13695/images/9baf9f6b18eb233208ff948902b5fec1c3ee7677059013bbd5eea3505b509fce.jpg new file mode 100644 index 0000000000000000000000000000000000000000..303f1a34753a35e444115cba598047b91dee326c --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/9baf9f6b18eb233208ff948902b5fec1c3ee7677059013bbd5eea3505b509fce.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7fda148b5951ce39b3c6d24e62093b36e080295e6b2a3463a7a817dafeba4fd1 +size 8056 diff --git a/data/2025/2503_13xxx/2503.13695/images/9c5bc0ee49ccb11782a786fdf420a16404e092d08ea9b8b167f0def97ae4f912.jpg b/data/2025/2503_13xxx/2503.13695/images/9c5bc0ee49ccb11782a786fdf420a16404e092d08ea9b8b167f0def97ae4f912.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3323924a03d56bd5841442adc3ce317c5aa64563 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/9c5bc0ee49ccb11782a786fdf420a16404e092d08ea9b8b167f0def97ae4f912.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4962c6bb2a21243f2dc45ce30bac7e0c9fcb907fb0663a280461d5a6049c99d3 +size 5661 diff --git a/data/2025/2503_13xxx/2503.13695/images/9dd2b8fb0cc2df44a4b3dffb169d3d12295ff795a4bc3448676f90a768ee5d25.jpg 
b/data/2025/2503_13xxx/2503.13695/images/9dd2b8fb0cc2df44a4b3dffb169d3d12295ff795a4bc3448676f90a768ee5d25.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a7fb8ae1368b07b0876110faf6b94cec9ae548f3 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/9dd2b8fb0cc2df44a4b3dffb169d3d12295ff795a4bc3448676f90a768ee5d25.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:22b0a7f92826978156efe0c0513e074afc381fbf31704a38d31114717514a6ec +size 7294 diff --git a/data/2025/2503_13xxx/2503.13695/images/a0784824075ff041ec54bce51f4a9f920414699bc99c6bfb71782ab60b664316.jpg b/data/2025/2503_13xxx/2503.13695/images/a0784824075ff041ec54bce51f4a9f920414699bc99c6bfb71782ab60b664316.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9bcd604a58d0613083c10346eae58e1255963f69 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/a0784824075ff041ec54bce51f4a9f920414699bc99c6bfb71782ab60b664316.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:11c18e0e149335fbac14e92683072347914eb30a60cbf2c180447db1e56133a9 +size 56542 diff --git a/data/2025/2503_13xxx/2503.13695/images/a2d0d2a7c6fc7b04c17becb19f33300d8aa8cf147d4405358f82dc9231fef45b.jpg b/data/2025/2503_13xxx/2503.13695/images/a2d0d2a7c6fc7b04c17becb19f33300d8aa8cf147d4405358f82dc9231fef45b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a9d379b65801938be2701665b624535cd27d876c --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/a2d0d2a7c6fc7b04c17becb19f33300d8aa8cf147d4405358f82dc9231fef45b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:baf3dc242c2456e0b559f457490c8ec6ad6da3d5a22304d8fd7c6d99ea476c1d +size 15406 diff --git a/data/2025/2503_13xxx/2503.13695/images/a3920278fe9862529af92facf4befe880ca3cdb6ffd2727b6fd4c8c5dd3e3403.jpg b/data/2025/2503_13xxx/2503.13695/images/a3920278fe9862529af92facf4befe880ca3cdb6ffd2727b6fd4c8c5dd3e3403.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..f70d2f5030090f8d0e4d862c99b45043a3f6a314 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/a3920278fe9862529af92facf4befe880ca3cdb6ffd2727b6fd4c8c5dd3e3403.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a84860b56365690bdd18d6fa9f23785d2b2e76906e4709dde832c5caad2b6c1a +size 13182 diff --git a/data/2025/2503_13xxx/2503.13695/images/a4e1631501950d91264465bc2f7c1b6b7fe7a0c7b014189fb7a3c98d91f992aa.jpg b/data/2025/2503_13xxx/2503.13695/images/a4e1631501950d91264465bc2f7c1b6b7fe7a0c7b014189fb7a3c98d91f992aa.jpg new file mode 100644 index 0000000000000000000000000000000000000000..310ce33a15b590cb3be7e7ef42bf5503d4846170 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/a4e1631501950d91264465bc2f7c1b6b7fe7a0c7b014189fb7a3c98d91f992aa.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ecbcab7363e69f485d9ece68c01aeafbd764ea8b0928a735a31a9198bb17e91 +size 7177 diff --git a/data/2025/2503_13xxx/2503.13695/images/a758c825f19ab19d8b155d31e8b085f5d408ecd312bcde014754a5ce38a102a6.jpg b/data/2025/2503_13xxx/2503.13695/images/a758c825f19ab19d8b155d31e8b085f5d408ecd312bcde014754a5ce38a102a6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5d1b52d442dfbab248e6348b32c0df78dead68b4 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/a758c825f19ab19d8b155d31e8b085f5d408ecd312bcde014754a5ce38a102a6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:41fd0cc9d6613498e64a868632321b3e2f97f3d72b90da773ab41801c0e751f2 +size 4320 diff --git a/data/2025/2503_13xxx/2503.13695/images/adcc8ac420e67795e1500fd44cec9e0c040709c3a1c3f3f3c6afdf73a317f59a.jpg b/data/2025/2503_13xxx/2503.13695/images/adcc8ac420e67795e1500fd44cec9e0c040709c3a1c3f3f3c6afdf73a317f59a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2ae234142865454c09ec009d320eddaf5cff8cc2 --- /dev/null +++ 
b/data/2025/2503_13xxx/2503.13695/images/adcc8ac420e67795e1500fd44cec9e0c040709c3a1c3f3f3c6afdf73a317f59a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f7b04fad48d9d9ffb8ac6e61149983cfd16b211dcddaec3eeb231dd814feab32 +size 26548 diff --git a/data/2025/2503_13xxx/2503.13695/images/b20b61142a3bce86ad36ea7d9f2a959ee5be79e20456608ea2474ed5fc37e15f.jpg b/data/2025/2503_13xxx/2503.13695/images/b20b61142a3bce86ad36ea7d9f2a959ee5be79e20456608ea2474ed5fc37e15f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..14520b53de7cdaa489742c2281df8402ae95e499 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/b20b61142a3bce86ad36ea7d9f2a959ee5be79e20456608ea2474ed5fc37e15f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:20c8d9a93ef3dc5fa828a43aa42a375658963d8814e807c6e0b0d1a1ac04553c +size 9819 diff --git a/data/2025/2503_13xxx/2503.13695/images/b32764dafa3d505c255f1f64f1257658a5fa838bf50794dd76059aea8719ee2a.jpg b/data/2025/2503_13xxx/2503.13695/images/b32764dafa3d505c255f1f64f1257658a5fa838bf50794dd76059aea8719ee2a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dc21dcf44fecdac03539232dd7433657e1cfe3c7 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/b32764dafa3d505c255f1f64f1257658a5fa838bf50794dd76059aea8719ee2a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:271d6087d930cc521684a965bb9a40040deca1efcfd47b672568d9692c361126 +size 59559 diff --git a/data/2025/2503_13xxx/2503.13695/images/b350e1bc05bbca85614b53e41a0cf557286e86ef6d847dfe3ef52f18dbbc9fbe.jpg b/data/2025/2503_13xxx/2503.13695/images/b350e1bc05bbca85614b53e41a0cf557286e86ef6d847dfe3ef52f18dbbc9fbe.jpg new file mode 100644 index 0000000000000000000000000000000000000000..333113cab0a78439e258c12682bc5a4a02a03a12 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/b350e1bc05bbca85614b53e41a0cf557286e86ef6d847dfe3ef52f18dbbc9fbe.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:bf84245f13f969826ab297db055fddea431c19d6cf2aa7e10ceb96b739a4424a +size 5432 diff --git a/data/2025/2503_13xxx/2503.13695/images/b5e173640877cb0cef234d0b4ae167124b4fc270eef3d2f99ffc99eb772ed691.jpg b/data/2025/2503_13xxx/2503.13695/images/b5e173640877cb0cef234d0b4ae167124b4fc270eef3d2f99ffc99eb772ed691.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dd206fb2852bd8901565f4b55d5b07a6d6d57ece --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/b5e173640877cb0cef234d0b4ae167124b4fc270eef3d2f99ffc99eb772ed691.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3638fbe818c1becedc4ffb83573196386ae171fc7c79337d8d5c2ebf545495d3 +size 7595 diff --git a/data/2025/2503_13xxx/2503.13695/images/b6f0c5dd6c47a2958767bffd09c994b0edb2e6ba4a6e8bf3fdcb4e17cb43447e.jpg b/data/2025/2503_13xxx/2503.13695/images/b6f0c5dd6c47a2958767bffd09c994b0edb2e6ba4a6e8bf3fdcb4e17cb43447e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0d8d5054346c5b099433b03dc655f34755490ada --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/b6f0c5dd6c47a2958767bffd09c994b0edb2e6ba4a6e8bf3fdcb4e17cb43447e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:92dc961610b33097b4f2bf116ec188e63a61e1d789fcae63a8aeef664e175499 +size 10737 diff --git a/data/2025/2503_13xxx/2503.13695/images/b8520abce2c4b102c3587cdadf9a75faceafd934ce44f67cb0d3a2e2c69cb107.jpg b/data/2025/2503_13xxx/2503.13695/images/b8520abce2c4b102c3587cdadf9a75faceafd934ce44f67cb0d3a2e2c69cb107.jpg new file mode 100644 index 0000000000000000000000000000000000000000..181453f6014c7d9a1680c433c7e7fd7b5762eb12 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/b8520abce2c4b102c3587cdadf9a75faceafd934ce44f67cb0d3a2e2c69cb107.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:58dae0deb5ec6b2fd88276fe1d8119ca3fa61272be61f2c238186a597f91bc93 +size 9783 diff --git 
a/data/2025/2503_13xxx/2503.13695/images/b862fe7585e9b671c2dd19d6695b0f98522b4600cbfca9788e317af5b5c1ff45.jpg b/data/2025/2503_13xxx/2503.13695/images/b862fe7585e9b671c2dd19d6695b0f98522b4600cbfca9788e317af5b5c1ff45.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f4e07d4fa1e1206180d386980474fa1932ceab8a --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/b862fe7585e9b671c2dd19d6695b0f98522b4600cbfca9788e317af5b5c1ff45.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bd9b9c9da72a03129460e81d0f827a6a2a7a32190775b0215c8e798a37600acf +size 4188 diff --git a/data/2025/2503_13xxx/2503.13695/images/ba25d5e98b4b75fd94526f7be742d92551619db9606fb8a8bb8ed6f2f35060f3.jpg b/data/2025/2503_13xxx/2503.13695/images/ba25d5e98b4b75fd94526f7be742d92551619db9606fb8a8bb8ed6f2f35060f3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..40f2defb3d57f44089a57339fb22a304e750d4b6 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/ba25d5e98b4b75fd94526f7be742d92551619db9606fb8a8bb8ed6f2f35060f3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:08f93a74a17b840bd836d20a8ea40df39697cd64f71fdd0167937d655d0eaacf +size 8363 diff --git a/data/2025/2503_13xxx/2503.13695/images/bd897e21140790da8c2beed7842475c14e96f323cdd2ec392c15e182b1ce4a42.jpg b/data/2025/2503_13xxx/2503.13695/images/bd897e21140790da8c2beed7842475c14e96f323cdd2ec392c15e182b1ce4a42.jpg new file mode 100644 index 0000000000000000000000000000000000000000..95b24a4a177cf7875750bcfb5d014315e746af80 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/bd897e21140790da8c2beed7842475c14e96f323cdd2ec392c15e182b1ce4a42.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fec02146ed0b5725529a3bd3904b1362dce445a329e19b45b3d41a1cb2ad3c92 +size 5782 diff --git a/data/2025/2503_13xxx/2503.13695/images/bde31bbafc0dd86d2cacce81eadabd84b6642b05c7685ef1db226bcddcc21013.jpg 
b/data/2025/2503_13xxx/2503.13695/images/bde31bbafc0dd86d2cacce81eadabd84b6642b05c7685ef1db226bcddcc21013.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bb49aa038957bc2d39a79a98b0e3d40c46fb4fce --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/bde31bbafc0dd86d2cacce81eadabd84b6642b05c7685ef1db226bcddcc21013.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a2864ac2da1bffc324f91afdfffe53f1259016f45abd7ec39e655fc895994ceb +size 16412 diff --git a/data/2025/2503_13xxx/2503.13695/images/bf303478b135b4702e050d5bc329eafb8ed3bee1cea9c893e56c2e3acfcdfe30.jpg b/data/2025/2503_13xxx/2503.13695/images/bf303478b135b4702e050d5bc329eafb8ed3bee1cea9c893e56c2e3acfcdfe30.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9f289f5bb8a3cec7a1d1d047e35b175d801b485a --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/bf303478b135b4702e050d5bc329eafb8ed3bee1cea9c893e56c2e3acfcdfe30.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c73466613884cb0c18099ac56e90ad7a7a0b501144b64135965f0392b2827f1d +size 4823 diff --git a/data/2025/2503_13xxx/2503.13695/images/bf8c0afdc7ba0107544fd83710ce308cb2759d8c05cdf5f7306cec86bee95cb2.jpg b/data/2025/2503_13xxx/2503.13695/images/bf8c0afdc7ba0107544fd83710ce308cb2759d8c05cdf5f7306cec86bee95cb2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..97d50d539a72b9212d41fb46760675438b1fca7f --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/bf8c0afdc7ba0107544fd83710ce308cb2759d8c05cdf5f7306cec86bee95cb2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:97230903628e7b0685c39f12b5681c40d6afd72d0715bddfbeab13337c3d7cf4 +size 95848 diff --git a/data/2025/2503_13xxx/2503.13695/images/bfd8e918449ad4e2c5b9c9505fa08483327fc17c132d390ea6522a9ebfd8e7c7.jpg b/data/2025/2503_13xxx/2503.13695/images/bfd8e918449ad4e2c5b9c9505fa08483327fc17c132d390ea6522a9ebfd8e7c7.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..fc33302815395340b0a2d5705c818d856c65bb91 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/bfd8e918449ad4e2c5b9c9505fa08483327fc17c132d390ea6522a9ebfd8e7c7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b24ba34aad48bb27a1073b6b837c57f3f6ab768350d57e095b79f87f12bc9091 +size 10031 diff --git a/data/2025/2503_13xxx/2503.13695/images/c33f2ead32bb0936798071bc0c00592261094bdf3e45f08d526a0fbf6f153427.jpg b/data/2025/2503_13xxx/2503.13695/images/c33f2ead32bb0936798071bc0c00592261094bdf3e45f08d526a0fbf6f153427.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8b0e791e585a376dfda0d50c4921134411032450 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/c33f2ead32bb0936798071bc0c00592261094bdf3e45f08d526a0fbf6f153427.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb1d788e81621ec680f4f0240c635c3eb7e4a98d2d746bf51c7a53814610c676 +size 4288 diff --git a/data/2025/2503_13xxx/2503.13695/images/c56982feee0b6b0776494c62e84144cadf9d1c1c5ea6d8927a0b1794c6c97899.jpg b/data/2025/2503_13xxx/2503.13695/images/c56982feee0b6b0776494c62e84144cadf9d1c1c5ea6d8927a0b1794c6c97899.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c98b05c13d156fb99c66be6d787a60c60722a15f --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/c56982feee0b6b0776494c62e84144cadf9d1c1c5ea6d8927a0b1794c6c97899.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5025a4ba8c865e82f9c8b9b34dfab2f4113457989a1281385ab5c6a68ae16de4 +size 4405 diff --git a/data/2025/2503_13xxx/2503.13695/images/c61ee8c84c709ba417474b42e09eec116bd2299f768602183e70caadd447b32c.jpg b/data/2025/2503_13xxx/2503.13695/images/c61ee8c84c709ba417474b42e09eec116bd2299f768602183e70caadd447b32c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bc97ac81f26db0003e47d1d3445b24e9df2b47ed --- /dev/null +++ 
b/data/2025/2503_13xxx/2503.13695/images/c61ee8c84c709ba417474b42e09eec116bd2299f768602183e70caadd447b32c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:abb28d7a7f4959e560728604abf2ad7ae2a9c5e32d1b9633e336704b1f62c67d +size 27312 diff --git a/data/2025/2503_13xxx/2503.13695/images/c875edc340d8e0893926ec11727820b8909097c64ebe04e21e7e0b12c6ba14a6.jpg b/data/2025/2503_13xxx/2503.13695/images/c875edc340d8e0893926ec11727820b8909097c64ebe04e21e7e0b12c6ba14a6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..05bb07ca979de8733e6e11484b9fa7ff59e89988 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/c875edc340d8e0893926ec11727820b8909097c64ebe04e21e7e0b12c6ba14a6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:06e3d66fcf19c4d9fe208b91002350e8af4c064a341315a327d7c9f651746b23 +size 113665 diff --git a/data/2025/2503_13xxx/2503.13695/images/cb76dc3b8182510948260c393fd0b1837e9635110ffd915e5f5771e0712f0739.jpg b/data/2025/2503_13xxx/2503.13695/images/cb76dc3b8182510948260c393fd0b1837e9635110ffd915e5f5771e0712f0739.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f4770fa6a3ca46616df4f9e09bb61fc046d986aa --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/cb76dc3b8182510948260c393fd0b1837e9635110ffd915e5f5771e0712f0739.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:10a03812835d03d35c648ff56fa10f1f2b963776c99b3d1774cbf9e81856fb8a +size 38637 diff --git a/data/2025/2503_13xxx/2503.13695/images/cd6f9024ad18056f68340ec20642f220bc4cfe1c04907395459421d74c229cb8.jpg b/data/2025/2503_13xxx/2503.13695/images/cd6f9024ad18056f68340ec20642f220bc4cfe1c04907395459421d74c229cb8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d233dbb01226606d413de58d3c6653f3fc59458a --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/cd6f9024ad18056f68340ec20642f220bc4cfe1c04907395459421d74c229cb8.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:0e6776f674254358ce874c9f86b1d4556bec1aeae319d43a5811a1d3aa263979 +size 5569 diff --git a/data/2025/2503_13xxx/2503.13695/images/cd911c74dd595db620775568094d4217a9e0788b4a95e0be4c48f5928145f1f8.jpg b/data/2025/2503_13xxx/2503.13695/images/cd911c74dd595db620775568094d4217a9e0788b4a95e0be4c48f5928145f1f8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..04f6efaa970dbdcb6003bdebab6f03e8d0a2a48d --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/cd911c74dd595db620775568094d4217a9e0788b4a95e0be4c48f5928145f1f8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:228adb8500325abb25be1750a143658e83c001e2dff5a1bdb568323d139fc40e +size 10439 diff --git a/data/2025/2503_13xxx/2503.13695/images/ce9d9915b51cc6b01cef8d4fd9ad5004076ee8c64d31f8dc18cd381d62f2850c.jpg b/data/2025/2503_13xxx/2503.13695/images/ce9d9915b51cc6b01cef8d4fd9ad5004076ee8c64d31f8dc18cd381d62f2850c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..48f9aa64276c5c84888497f0325d56a01dd09467 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/ce9d9915b51cc6b01cef8d4fd9ad5004076ee8c64d31f8dc18cd381d62f2850c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0478a51162f141c1cf907729d6f1f7fc0e986ce057a2353e6394a2c86fc0aa6f +size 19472 diff --git a/data/2025/2503_13xxx/2503.13695/images/cfcb1f30c856f0902c80a6ca75eb09c6526ab9c4fc6d870998c725e3266546de.jpg b/data/2025/2503_13xxx/2503.13695/images/cfcb1f30c856f0902c80a6ca75eb09c6526ab9c4fc6d870998c725e3266546de.jpg new file mode 100644 index 0000000000000000000000000000000000000000..762bc6f8666a81bd421c6e658dc486f48faea45c --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/cfcb1f30c856f0902c80a6ca75eb09c6526ab9c4fc6d870998c725e3266546de.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4a9fa93c67ff97e1fca7095282f34c839066eabed00d56b3ee5f5eb6c26d4745 +size 10512 diff --git 
a/data/2025/2503_13xxx/2503.13695/images/d3c67e83e17b5fd46d7760afc8200d37ecb7166597966030724c7e43b7db60ed.jpg b/data/2025/2503_13xxx/2503.13695/images/d3c67e83e17b5fd46d7760afc8200d37ecb7166597966030724c7e43b7db60ed.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4574222901fe20054dc633c9df4f34fbc7f92fce --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/d3c67e83e17b5fd46d7760afc8200d37ecb7166597966030724c7e43b7db60ed.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4cf8f5afd729377ae258fcb0f5ba21f0d052e7fdc17056fd9cc8258d453988c0 +size 5655 diff --git a/data/2025/2503_13xxx/2503.13695/images/d434f1115c71df8fc03f6ff24ee680bd8baf845b45ea59fc5cacb1faa46bfbf1.jpg b/data/2025/2503_13xxx/2503.13695/images/d434f1115c71df8fc03f6ff24ee680bd8baf845b45ea59fc5cacb1faa46bfbf1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9881689386a27146694ca10a527a7e1ae2163ed5 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/d434f1115c71df8fc03f6ff24ee680bd8baf845b45ea59fc5cacb1faa46bfbf1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f797340fa27754307518adbc1f01b958d2165b541c00b3a571e9533fd75fa725 +size 8288 diff --git a/data/2025/2503_13xxx/2503.13695/images/d5559774d6cdd782902e47caa66d146219b8e97b2e6458c25af5364ff727b183.jpg b/data/2025/2503_13xxx/2503.13695/images/d5559774d6cdd782902e47caa66d146219b8e97b2e6458c25af5364ff727b183.jpg new file mode 100644 index 0000000000000000000000000000000000000000..27304db502dfb2b7782bf776452f29ffdebd5108 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/d5559774d6cdd782902e47caa66d146219b8e97b2e6458c25af5364ff727b183.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2d540cc9b5071eb3031c290f5125de7e4df7dc94c38592e0ac56dff9c9653e2f +size 8199 diff --git a/data/2025/2503_13xxx/2503.13695/images/dda5aca73cc52ff6e026a688772971bb0db28707f5dbb8771a941a8018016d77.jpg 
b/data/2025/2503_13xxx/2503.13695/images/dda5aca73cc52ff6e026a688772971bb0db28707f5dbb8771a941a8018016d77.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b2774078917c3eb45d63138f1b38cdb76c06dde2 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/dda5aca73cc52ff6e026a688772971bb0db28707f5dbb8771a941a8018016d77.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b244a47e1c2106a5efc5f96e4b3b70fc47a70cb7e926368ebdbb2f8bb03e474 +size 29365 diff --git a/data/2025/2503_13xxx/2503.13695/images/dfc42718a5b15fa76af837f1cf09d44379ea5e84068c7dfccc29da3080f16166.jpg b/data/2025/2503_13xxx/2503.13695/images/dfc42718a5b15fa76af837f1cf09d44379ea5e84068c7dfccc29da3080f16166.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f2e8840ec1742928987e0cb469e0a24a0ecf19a3 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/dfc42718a5b15fa76af837f1cf09d44379ea5e84068c7dfccc29da3080f16166.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:24295aea129b8476714cc3ae2595506fa5b5a8cbe4ffd26fca40923341477f71 +size 51753 diff --git a/data/2025/2503_13xxx/2503.13695/images/e6da86530a472579fba0dd641c2521881c22ad246da13baba0f7a778d0d158dd.jpg b/data/2025/2503_13xxx/2503.13695/images/e6da86530a472579fba0dd641c2521881c22ad246da13baba0f7a778d0d158dd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2a67f1459b4672cb0fb0ff80c9f0488959ce7403 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/e6da86530a472579fba0dd641c2521881c22ad246da13baba0f7a778d0d158dd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e6c56e90a813bb370ba0061155a1cd820fda6f9553093f37ca39e8e2eac3ff3 +size 17629 diff --git a/data/2025/2503_13xxx/2503.13695/images/e7f016f46bca5309b8f0172ae1f4e4919c83884da81073d94df649bce739af4e.jpg b/data/2025/2503_13xxx/2503.13695/images/e7f016f46bca5309b8f0172ae1f4e4919c83884da81073d94df649bce739af4e.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..302077899e88cec4c3c411c2ceba3e28c6095fe1 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/e7f016f46bca5309b8f0172ae1f4e4919c83884da81073d94df649bce739af4e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:41a8358555090923d73875e64aa500b2aea286be13d03150c31ad53c290ed6fe +size 7923 diff --git a/data/2025/2503_13xxx/2503.13695/images/e9182f9b9d24aea5d10f9e8d93f5432f03b021167555925e5f3ae7c3a8199e16.jpg b/data/2025/2503_13xxx/2503.13695/images/e9182f9b9d24aea5d10f9e8d93f5432f03b021167555925e5f3ae7c3a8199e16.jpg new file mode 100644 index 0000000000000000000000000000000000000000..970a5611c2c0d7d7a07c1569f34c10849697d170 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/e9182f9b9d24aea5d10f9e8d93f5432f03b021167555925e5f3ae7c3a8199e16.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:570eed5edc9dc6975da0fa5a446eb557f9b1582360eaf5251f81cdb95051a116 +size 9756 diff --git a/data/2025/2503_13xxx/2503.13695/images/eac42b74e203c3984876f446ef93ec5dd5fca48a451c96c024fa9dd7291c3fc6.jpg b/data/2025/2503_13xxx/2503.13695/images/eac42b74e203c3984876f446ef93ec5dd5fca48a451c96c024fa9dd7291c3fc6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..62a8769e29646cd8aaad4926ef1cdc72eac150b0 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/eac42b74e203c3984876f446ef93ec5dd5fca48a451c96c024fa9dd7291c3fc6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:45f21abc1167b89b52a025a89efb21af682e73bc05cac6ca2b3419c7f6443b45 +size 32601 diff --git a/data/2025/2503_13xxx/2503.13695/images/eb30a68d14feeeec7115a4f0bfd522c57e7ac27be8ba6f04103ccf160cddcb2e.jpg b/data/2025/2503_13xxx/2503.13695/images/eb30a68d14feeeec7115a4f0bfd522c57e7ac27be8ba6f04103ccf160cddcb2e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2df31e8c163d233198fd35380ada1e4999605197 --- /dev/null +++ 
b/data/2025/2503_13xxx/2503.13695/images/eb30a68d14feeeec7115a4f0bfd522c57e7ac27be8ba6f04103ccf160cddcb2e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b58a6d71345b1f8505126bf6f5b06066290278befd16be635f657d13dffbc1fd +size 5712 diff --git a/data/2025/2503_13xxx/2503.13695/images/f0284ad1958098bbf58a3a6ea0491e38ff5e9e5a2a1d05a08a833dda237df2d2.jpg b/data/2025/2503_13xxx/2503.13695/images/f0284ad1958098bbf58a3a6ea0491e38ff5e9e5a2a1d05a08a833dda237df2d2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e5b2b1467f71a87aa00d6af6410200124c62f33f --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/f0284ad1958098bbf58a3a6ea0491e38ff5e9e5a2a1d05a08a833dda237df2d2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ed9691c38c78b1d0c285c287975281ecc74084bef23188fa7c71dc9eaf78b5cb +size 8376 diff --git a/data/2025/2503_13xxx/2503.13695/images/f25f4d232a20466bc9713e2e41834bd05ae965269973355ed118407fdbc425cf.jpg b/data/2025/2503_13xxx/2503.13695/images/f25f4d232a20466bc9713e2e41834bd05ae965269973355ed118407fdbc425cf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fb29e48c585fb87f364c0bb4f76597b84f4298af --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/f25f4d232a20466bc9713e2e41834bd05ae965269973355ed118407fdbc425cf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5751d244d2094a2d5d3b7d90a77432c237aa293ebb463fb6d3e940f931cdb5d4 +size 8149 diff --git a/data/2025/2503_13xxx/2503.13695/images/f2e000dac344778458cfa4d7d28377618422c7e3854b30636a302d1144e056ab.jpg b/data/2025/2503_13xxx/2503.13695/images/f2e000dac344778458cfa4d7d28377618422c7e3854b30636a302d1144e056ab.jpg new file mode 100644 index 0000000000000000000000000000000000000000..db9b5486fc2e7b90f9c7578c5840b6e7a2e36612 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/f2e000dac344778458cfa4d7d28377618422c7e3854b30636a302d1144e056ab.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:edad42e17c1c84bf6e698d9e819176dd5d840c38981e47d5726465a70a9688f7 +size 45341 diff --git a/data/2025/2503_13xxx/2503.13695/images/f33a31413ad2eaeeede8babb667db0cc68ed58e6ff4dc30157c96c10ab1893ed.jpg b/data/2025/2503_13xxx/2503.13695/images/f33a31413ad2eaeeede8babb667db0cc68ed58e6ff4dc30157c96c10ab1893ed.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ddd6bd7c55c2f12281827ca1149590ad78b01197 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/f33a31413ad2eaeeede8babb667db0cc68ed58e6ff4dc30157c96c10ab1893ed.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d23c4bd1b8603fe5931aeebca773e23eb5349ed051cee0f43ea383df6452859d +size 5653 diff --git a/data/2025/2503_13xxx/2503.13695/images/f37a91c2b7d01648a6b5163c046b04d6f2f03461a786f529a25ee22873ad0693.jpg b/data/2025/2503_13xxx/2503.13695/images/f37a91c2b7d01648a6b5163c046b04d6f2f03461a786f529a25ee22873ad0693.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cf6ef7e1242a2c26f1684be10776635c10b3c46d --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/f37a91c2b7d01648a6b5163c046b04d6f2f03461a786f529a25ee22873ad0693.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:364a7766b31ee6137c459363ec8f47c9fdd9eda903c958a653ac9c73f14b5367 +size 58398 diff --git a/data/2025/2503_13xxx/2503.13695/images/f60206ede39643fc5c044f9f4610b8380888747ad4c4e5778e0eb56aee2fce8c.jpg b/data/2025/2503_13xxx/2503.13695/images/f60206ede39643fc5c044f9f4610b8380888747ad4c4e5778e0eb56aee2fce8c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f00a9d01ce40ebe7ae62a6630ded656da157f53d --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/f60206ede39643fc5c044f9f4610b8380888747ad4c4e5778e0eb56aee2fce8c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:81703eddc9565d978c6744854072dddd94a097483f5994a3867f4eb0354461de +size 7330 diff --git 
a/data/2025/2503_13xxx/2503.13695/images/f761d30cc71f1613eca2ac1939256b2e144c0b60cd211db5ea28ffe6a928d863.jpg b/data/2025/2503_13xxx/2503.13695/images/f761d30cc71f1613eca2ac1939256b2e144c0b60cd211db5ea28ffe6a928d863.jpg new file mode 100644 index 0000000000000000000000000000000000000000..53b3856db71738060e88f8888c266d521d25615d --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/f761d30cc71f1613eca2ac1939256b2e144c0b60cd211db5ea28ffe6a928d863.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b5eabebd08c1ea4e83a37b7296bb53adb1a901ac94f70f002f3bdbedcea46075 +size 7605 diff --git a/data/2025/2503_13xxx/2503.13695/images/f77d7cde018d6c74bb0d627654ab043227cdb2f775c121acb59c5727d34539a7.jpg b/data/2025/2503_13xxx/2503.13695/images/f77d7cde018d6c74bb0d627654ab043227cdb2f775c121acb59c5727d34539a7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ed4c1d20f98a5fe32bcdd659cb75cf18c1e11530 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/f77d7cde018d6c74bb0d627654ab043227cdb2f775c121acb59c5727d34539a7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e462a2dcfe86c4a7be78ce56c5ff2330c7176a16b8dfd480fbc330657da1b549 +size 8145 diff --git a/data/2025/2503_13xxx/2503.13695/images/f7993c093c7192f5cb3cbfb825bab433b9db09b4062eae2dea9b86fd1e4c6d5b.jpg b/data/2025/2503_13xxx/2503.13695/images/f7993c093c7192f5cb3cbfb825bab433b9db09b4062eae2dea9b86fd1e4c6d5b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8d9d2e9e308543cce0b1b192351523225dbc9d4b --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/f7993c093c7192f5cb3cbfb825bab433b9db09b4062eae2dea9b86fd1e4c6d5b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c24868c00f636e943c5f253f862797aea30cfc7ff594731eeb10a3e6e248e3d8 +size 164046 diff --git a/data/2025/2503_13xxx/2503.13695/images/f986092ac9f6ad1661f7ee50cf594bcb849746ce965b8ad4798f4b49976d2df6.jpg 
b/data/2025/2503_13xxx/2503.13695/images/f986092ac9f6ad1661f7ee50cf594bcb849746ce965b8ad4798f4b49976d2df6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c0af7f5f3ad9748beaa00c68738f7b6d5eaa61e3 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/f986092ac9f6ad1661f7ee50cf594bcb849746ce965b8ad4798f4b49976d2df6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:08ff4bec838a37c50c073aae895ae10a2dc32561fef4a5e2a83dc16f7fc7569d +size 7718 diff --git a/data/2025/2503_13xxx/2503.13695/images/f9f5641b6c82f11306de0d3689768b2b13e8457b6f1cd3662c1b66e60e3ed261.jpg b/data/2025/2503_13xxx/2503.13695/images/f9f5641b6c82f11306de0d3689768b2b13e8457b6f1cd3662c1b66e60e3ed261.jpg new file mode 100644 index 0000000000000000000000000000000000000000..83f1871368c6aaf9b39bcadd8a2ec1cbe2982b33 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/f9f5641b6c82f11306de0d3689768b2b13e8457b6f1cd3662c1b66e60e3ed261.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0d2dbc7f4072473852d7378d20aa3d020d9888c783e254dba8a17e13f0ae7815 +size 5592 diff --git a/data/2025/2503_13xxx/2503.13695/images/faa78b851598a3a8e280e4c419776b1b8ff245583ac31e14424ca9a458edbe62.jpg b/data/2025/2503_13xxx/2503.13695/images/faa78b851598a3a8e280e4c419776b1b8ff245583ac31e14424ca9a458edbe62.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6cd6f6f1bf8f680050022f3cfa6cced6e8791e9b --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/faa78b851598a3a8e280e4c419776b1b8ff245583ac31e14424ca9a458edbe62.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:526fa7dc0aed440cc06dfe6725a9ebb9cc6c38c2bb97e8f604a5999bb88fd1fb +size 195826 diff --git a/data/2025/2503_13xxx/2503.13695/images/fb0cd2d796a34b11d747483d3064e7f18b5a3e9c93c722d66bc954154eeb9ef0.jpg b/data/2025/2503_13xxx/2503.13695/images/fb0cd2d796a34b11d747483d3064e7f18b5a3e9c93c722d66bc954154eeb9ef0.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..542ae03f99eb66349af8aff5303fadd49ae094ea --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/fb0cd2d796a34b11d747483d3064e7f18b5a3e9c93c722d66bc954154eeb9ef0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:acc2ff524af1f812e39ad0ecf46ea16dd900380019b21a107a18035c53d1a6b2 +size 20599 diff --git a/data/2025/2503_13xxx/2503.13695/images/fe38d8420ef1f2b61297d2ff15a9cd6a35da167a630d84b91c2c48f27b959981.jpg b/data/2025/2503_13xxx/2503.13695/images/fe38d8420ef1f2b61297d2ff15a9cd6a35da167a630d84b91c2c48f27b959981.jpg new file mode 100644 index 0000000000000000000000000000000000000000..84ef330816da4cdbb8d9c47caa8ee55b43e9b83c --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/fe38d8420ef1f2b61297d2ff15a9cd6a35da167a630d84b91c2c48f27b959981.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5deea142b290be4e0a5b38e6c73cef8ccd6114db0b608dcc1e614fab5b635ca5 +size 5019 diff --git a/data/2025/2503_13xxx/2503.13695/images/fed505b0b3fb58ffeec21fa0ee18d356e35b414f57d217bb2c2bca7bb7f7715b.jpg b/data/2025/2503_13xxx/2503.13695/images/fed505b0b3fb58ffeec21fa0ee18d356e35b414f57d217bb2c2bca7bb7f7715b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..422776fc59e1fea7f6c82a789a00398f4b8cf547 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/images/fed505b0b3fb58ffeec21fa0ee18d356e35b414f57d217bb2c2bca7bb7f7715b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b8d2b2bdcb5c9ed50fc7bc2e36245acfdc8dcc319f40b2666825b00fd287ee4 +size 8758 diff --git a/data/2025/2503_13xxx/2503.13695/layout.json b/data/2025/2503_13xxx/2503.13695/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..7b718fb37c7ee5678da50e78a79ec79627a4c27a --- /dev/null +++ b/data/2025/2503_13xxx/2503.13695/layout.json @@ -0,0 +1,20101 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 70, + 68, + 523, + 110 + ], + "type": "title", + 
"angle": 0, + "lines": [ + { + "bbox": [ + 70, + 68, + 523, + 110 + ], + "spans": [ + { + "bbox": [ + 70, + 68, + 523, + 110 + ], + "type": "text", + "content": "Mitigating Spectral Bias in Neural Operators via High-Frequency Scaling for Physical Systems" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 86, + 126, + 506, + 141 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 126, + 506, + 141 + ], + "spans": [ + { + "bbox": [ + 86, + 126, + 506, + 141 + ], + "type": "text", + "content": "Siavash Khodakaramia, Vivek Oommenb, Aniruddha Boraa, George Em Karniadakisa,c,*" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 150, + 461, + 162 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 150, + 461, + 162 + ], + "spans": [ + { + "bbox": [ + 130, + 150, + 461, + 162 + ], + "type": "inline_equation", + "content": "^{a}" + }, + { + "bbox": [ + 130, + 150, + 461, + 162 + ], + "type": "text", + "content": "Division of Applied Mathematics, Brown University, Providence, RI, 02912, USA" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 153, + 162, + 440, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 153, + 162, + 440, + 174 + ], + "spans": [ + { + "bbox": [ + 153, + 162, + 440, + 174 + ], + "type": "inline_equation", + "content": "^{b}" + }, + { + "bbox": [ + 153, + 162, + 440, + 174 + ], + "type": "text", + "content": "School of Engineering, Brown University, Providence, RI, 02912, USA" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 160, + 174, + 433, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 174, + 433, + 185 + ], + "spans": [ + { + "bbox": [ + 160, + 174, + 433, + 185 + ], + "type": "inline_equation", + "content": "^{c}" + }, + { + "bbox": [ + 160, + 174, + 433, + 185 + ], + "type": "text", + "content": "Pacific Northwest National Laboratory, Richland, WA, 99354, USA" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 231, + 
117, + 243 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 231, + 117, + 243 + ], + "spans": [ + { + "bbox": [ + 69, + 231, + 117, + 243 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 251, + 528, + 452 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 251, + 528, + 452 + ], + "spans": [ + { + "bbox": [ + 70, + 251, + 528, + 452 + ], + "type": "text", + "content": "Neural operators have emerged as powerful surrogates for modeling complex physical problems. However, they suffer from spectral bias making them oblivious to high-frequency modes, which are present in multiscale physical systems. Therefore, they tend to produce over-smoothed solutions, which is particularly problematic in modeling turbulence and for systems with intricate patterns and sharp gradients such as multi-phase flow systems. In this work, we introduce a new approach named high-frequency scaling (HFS) to mitigate spectral bias in convolutional-based neural operators. By integrating HFS with proper variants of UNet neural operators, we demonstrate a higher prediction accuracy by mitigating spectral bias in single and two-phase flow problems. Unlike Fourier-based techniques, HFS is directly applied to the latent space, thus eliminating the computational cost associated with the Fourier transform. Additionally, we investigate alternative spectral bias mitigation through diffusion models conditioned on neural operators. While the diffusion model integrated with the standard neural operator may still suffer from significant errors, these errors are substantially reduced when the diffusion model is integrated with a HFS-enhanced neural operator." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 459, + 490, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 459, + 490, + 487 + ], + "spans": [ + { + "bbox": [ + 69, + 459, + 490, + 487 + ], + "type": "text", + "content": "Keywords: Neural operator, Spectral Bias, Two-phase flow, Boiling, Kolmogorov flow, Diffusion model" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 523, + 153, + 537 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 523, + 153, + 537 + ], + "spans": [ + { + "bbox": [ + 69, + 523, + 153, + 537 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 546, + 526, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 546, + 526, + 676 + ], + "spans": [ + { + "bbox": [ + 70, + 546, + 526, + 676 + ], + "type": "text", + "content": "Design and control problems in engineering often require repeated simulation of the underlying physical system, necessitating the solution of governing partial differential equations (PDEs) multiple times. For a wide range of applications from fluid dynamics to material science, classical discretization-based direct numerical simulation (DNS) [1, 2, 3, 4] has been the cornerstone of scientific computing. While the methods for DNS have matured significantly over the past several decades, their computational cost becomes prohibitive when performing repeated simulations over varying parametric conditions or configurations. This challenge has fueled a growing interest in developing computationally efficient surrogate models capable of approximating these simulations at only a fraction of the cost." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 691, + 526, + 735 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 691, + 526, + 735 + ], + "spans": [ + { + "bbox": [ + 69, + 691, + 526, + 735 + ], + "type": "text", + "content": "In particular, the classical DNS can estimate the solution for a given set of conditions. If one of these conditions is modified, the solver has to be re-run, further aggravating the computational cost. To mitigate this issue, neural operators were developed to handle a plurality of" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 13, + 256, + 36, + 610 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 13, + 256, + 36, + 610 + ], + "spans": [ + { + "bbox": [ + 13, + 256, + 36, + 610 + ], + "type": "text", + "content": "arXiv:2503.13695v1 [cs.LG] 17 Mar 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 83, + 756, + 314, + 768 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 756, + 314, + 768 + ], + "spans": [ + { + "bbox": [ + 83, + 756, + 314, + 768 + ], + "type": "text", + "content": "*Corresponding Author: george_karniadakis@brown.edu" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 457, + 771, + 524, + 783 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 457, + 771, + 524, + 783 + ], + "spans": [ + { + "bbox": [ + 457, + 771, + 524, + 783 + ], + "type": "text", + "content": "March 19, 2025" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 70, + 72, + 526, + 261 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 72, + 526, + 261 + ], + "spans": [ + { + "bbox": [ + 70, + 72, + 526, + 261 + ], + "type": "text", + "content": "conditions and parametric settings [5, 6, 7, 8, 9, 10, 11]. 
Neural operators, which are based on the universal operator approximation theorem [12], are trained to learn the mapping between infinite-dimensional functional spaces. Although it is expensive to train such surrogates offline, a trained neural operator can efficiently estimate solutions of unseen conditions almost instantaneously during inference. Many studies have used neural operators as surrogates to learn physical problems in space and time. Various physical problems such as vortex-induced vibration [13], crack nucleation and propagation [14], Riemann problems [15], turbulence [16, 17], plasma modeling [18], and many more have been solved, at least under limited conditions, by neural operators. Furthermore, other studies [19, 20, 21] attempted to learn the temporal evolution of two-phase microstructures in diffusion-driven processes such as spinodal decomposition and dendritic growth. However, very few studies have investigated the application of neural operators for buoyancy-dominated or advection-dominated two-phase flow problems, such as those encountered in boiling and condensation [22]." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 286, + 396, + 301 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 286, + 396, + 301 + ], + "spans": [ + { + "bbox": [ + 70, + 286, + 396, + 301 + ], + "type": "text", + "content": "1.1. Neural operators and applications in two-phase flow modeling" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 304, + 526, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 304, + 526, + 462 + ], + "spans": [ + { + "bbox": [ + 70, + 304, + 526, + 462 + ], + "type": "text", + "content": "Modeling and predicting two-phase flow during boiling is one of the most challenging problems in computational fluid dynamics. 
These phenomena involve complex interface dynamics and phase transitions, resulting in high-frequency spatio-temporal variations that are both challenging and computationally expensive to capture. Analyzing the solutions of such a system reveals a slowly decaying energy spectrum, where even the high wavenumbers carry a nontrivial amount of energy that cannot be neglected. Effective modeling of a two-phase flow system requires the neural operators to accurately predict spatio-temporal evolution of both low and high wavenumber modes. Unfortunately, neural networks and neural operators suffer from spectral bias [23, 24, 25], which makes them oblivious to high wavenumber modes. Consequently, the neural operators can only offer an over-smoothed prediction that fails to capture the intricate features near the interfaces where the sharp gradients are commonly observed." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 476, + 526, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 476, + 526, + 635 + ], + "spans": [ + { + "bbox": [ + 70, + 476, + 526, + 635 + ], + "type": "text", + "content": "Previous studies in boiling modeling with neural operators also confirm the spectral bias problem. [26] used DeepONet [5] to solve for the transient solution of a single bubble growth. Their findings demonstrate that DeepONet can effectively capture the mean component of the solution in the microscale regime, but it fails to accurately predict the stochastic fluctuations described by high-frequency components of the solution. A study by Jain et al. [27] on the prediction of multiphase flow through porous media with UNet [28] also showed that larger errors occurred near the interfaces. The Fourier neural operator (FNO) [6] also suffers from spectral bias [29]. 
The common practice of truncating high-frequency modes in FNOs leads to the loss of rich information, hindering the accurate modeling of chaotic systems in multi-phase heat transfer and turbulence. However, without truncation, training FNOs becomes unstable [9]." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 650, + 526, + 766 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 650, + 526, + 766 + ], + "spans": [ + { + "bbox": [ + 70, + 650, + 526, + 766 + ], + "type": "text", + "content": "A recent study by Hassan et al.[30] collected a valuable boiling dataset based on Flash-X simulations [31] and developed neural operators based on different structures such as UNet, FNO, and group equivariant FNO (GFNO) for prediction in boiling problems. As shown in the results of our work, the previously best neural operator still struggles to capture high-frequency modes, which are prominently observed within the bubbles, along the interfaces, and in condensation traces in subcooled pool boiling. These over-smoothened solutions highlight the need for further advancements to mitigate spectral bias in modeling phase-change and multi-phase flow phenomena. 
Similarly, spectral bias of neural operators cannot be overlooked when mod-" + } + ] + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 771, + 301, + 782 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 771, + 301, + 782 + ], + "spans": [ + { + "bbox": [ + 293, + 771, + 301, + 782 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 70, + 74, + 523, + 100 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 74, + 523, + 100 + ], + "spans": [ + { + "bbox": [ + 70, + 74, + 523, + 100 + ], + "type": "text", + "content": "eling other chaotic systems like turbulence [32], where small-scale, low-energy features play a crucial role." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 71, + 114, + 259, + 128 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 114, + 259, + 128 + ], + "spans": [ + { + "bbox": [ + 71, + 114, + 259, + 128 + ], + "type": "text", + "content": "1.2. Spectral bias mitigation strategies" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 131, + 527, + 303 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 131, + 527, + 303 + ], + "spans": [ + { + "bbox": [ + 70, + 131, + 527, + 303 + ], + "type": "text", + "content": "Previous studies have proposed various methods to mitigate spectral bias and over-smoothing in deep neural networks (DNNs). Cai et al. [33] proposed a multi-scale DNN (MscaleDNN) to enhance approximations over a wide range of frequencies for the solution of PDEs. Tancik et al. [34] proposed Fourier feature mapping for coordinate-based multilayer perceptron (MLP) to tackle spectral bias in image regression tasks in low dimensional domains. Wang et al. 
[35] used Fourier feature mapping along with Physics-informed Neural Networks (PINNs) [36, 37] to enhance the multi-scale PDE solutions by mitigating the spectral bias compared to vanilla PINN. A better optimization of activation functions have been also shown to slightly reduce spectral bias of DNNs and PINNs [38, 39]. Phase shift DNN is another method converting high-frequency component of the data into low frequency spectrum, which can be learned and represented by a DNN. Subsequently, the learned representation is converted into the original high-frequency. However, phase shift DNN suffers from the curse of dimensionality [40]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 318, + 523, + 549 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 318, + 523, + 549 + ], + "spans": [ + { + "bbox": [ + 70, + 318, + 523, + 549 + ], + "type": "text", + "content": "Efforts have also been made to mitigate the spectral bias encountered by neural operators trained to learn spatiotemporal systems. Lippe at al. [41] developed PDE-Refiner, which iteratively adds noise to perturb different scales of the system and trains the neural operator to correct these corrupted states. Zhang et al. [42] developed Hybrid Iterative Numerical Transferable Solver (HINTS) to exploit the spectral bias in solving large linear systems by blending neural operators and relaxation methods. Generative Artificial Intelligence (GenAI)-based algorithms are also emerging as effective methods to overcome the spectral bias barrier. Wu et al. [43] accurately reconstructed the small-scale structures accompanying turbulent boundary layers in wall turbulence using Super Resolution Generative Adversarial Networks (SRGANs). Wang et al. [44] developed a framework based on GANs to reconstruct high spatiotemporal resolution supersonic flow states from sparse measurements. Molinaro et al. 
[45] developed GenCFD using score-based diffusion models to learn three-dimensional turbulence in compressible and incompressible flows. Lockwood et al. [46] used denoising diffusion probabilistic models to refine the estimates of tropical cyclone wind intensities. Oommen et al. [47] addressed the spectral limitations of neural operators in modeling a series of turbulent systems by training a conditional score-based diffusion model conditioned on the neural operator as prior." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 564, + 523, + 721 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 564, + 523, + 721 + ], + "spans": [ + { + "bbox": [ + 70, + 564, + 523, + 721 + ], + "type": "text", + "content": "In this work, we first propose the use of UNet with residual blocks (ResUNet) to achieve more accurate two-phase flow predictions compared to the state-of-the-art neural operators. Subsequently, we present a new method named high-frequency scaling (HFS) to mitigate spectral bias in two-phase flow predictions. Our approach demonstrates higher accuracy and better alignment of energy spectra, with negligible additional memory requirements and only a small computational overhead on the neural operator. We applied HFS to different variants of ResUNet. Finally, we explore the dependency of diffusion models on the prior accuracies when integrated with neural operators. Specifically, we show that the integration of the diffusion model with neural operators equipped with HFS results in further mitigation of spectral bias without compromising prediction accuracy. We demonstrate the effectiveness of our methodology for both two-phase and single-phase flows." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 737, + 523, + 765 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 737, + 523, + 765 + ], + "spans": [ + { + "bbox": [ + 70, + 737, + 523, + 765 + ], + "type": "text", + "content": "The manuscript is organized as follows. We start with providing an in-depth description about neural operators, HFS, and diffusion models in Section 2. We present the results of our" + } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 772, + 300, + 781 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 772, + 300, + 781 + ], + "spans": [ + { + "bbox": [ + 293, + 772, + 300, + 781 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 73, + 524, + 100 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 73, + 524, + 100 + ], + "spans": [ + { + "bbox": [ + 68, + 73, + 524, + 100 + ], + "type": "text", + "content": "investigations in Section 3, followed by discussion and summary in Sections 4 and 5, respectively. In the Appendix, we include more technical details and additional results." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 119, + 133, + 132 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 119, + 133, + 132 + ], + "spans": [ + { + "bbox": [ + 69, + 119, + 133, + 132 + ], + "type": "text", + "content": "2. Methods" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 142, + 182, + 156 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 142, + 182, + 156 + ], + "spans": [ + { + "bbox": [ + 69, + 142, + 182, + 156 + ], + "type": "text", + "content": "2.1. 
Neural Operators" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 160, + 523, + 188 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 160, + 523, + 188 + ], + "spans": [ + { + "bbox": [ + 68, + 160, + 523, + 188 + ], + "type": "text", + "content": "The mathematical operator " + }, + { + "bbox": [ + 68, + 160, + 523, + 188 + ], + "type": "inline_equation", + "content": "\\mathcal{N}" + }, + { + "bbox": [ + 68, + 160, + 523, + 188 + ], + "type": "text", + "content": " that governs the temporal evolution of a time-dependent system can be expressed as," + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 226, + 190, + 524, + 204 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 226, + 190, + 524, + 204 + ], + "spans": [ + { + "bbox": [ + 226, + 190, + 524, + 204 + ], + "type": "interline_equation", + "content": "\\boldsymbol {u} (\\boldsymbol {x}, t + \\Delta t) \\approx \\mathcal {N} (\\boldsymbol {u} (\\boldsymbol {x}, t)) (\\Delta t), \\tag {1}", + "image_path": "f33a31413ad2eaeeede8babb667db0cc68ed58e6ff4dc30157c96c10ab1893ed.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 68, + 210, + 524, + 253 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 210, + 524, + 253 + ], + "spans": [ + { + "bbox": [ + 68, + 210, + 524, + 253 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 68, + 210, + 524, + 253 + ], + "type": "inline_equation", + "content": "\\mathbf{u}" + }, + { + "bbox": [ + 68, + 210, + 524, + 253 + ], + "type": "text", + "content": " is the representative state variable(s) of interest. 
The objective here is to train a neural operator " + }, + { + "bbox": [ + 68, + 210, + 524, + 253 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{\\theta}" + }, + { + "bbox": [ + 68, + 210, + 524, + 253 + ], + "type": "text", + "content": " to learn the true underlying operator " + }, + { + "bbox": [ + 68, + 210, + 524, + 253 + ], + "type": "inline_equation", + "content": "(\\mathcal{N})" + }, + { + "bbox": [ + 68, + 210, + 524, + 253 + ], + "type": "text", + "content": " by, typically, minimizing the mean of an error norm such as " + }, + { + "bbox": [ + 68, + 210, + 524, + 253 + ], + "type": "inline_equation", + "content": "\\| \\pmb {u}(\\pmb {x},t + \\Delta t) - \\mathcal{F}_{\\theta}(u(\\pmb {x},t))(\\Delta t)\\| _2" + }, + { + "bbox": [ + 68, + 210, + 524, + 253 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 68, + 268, + 526, + 412 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 268, + 526, + 412 + ], + "spans": [ + { + "bbox": [ + 68, + 268, + 526, + 412 + ], + "type": "text", + "content": "In this work, we focus on resolving solutions in pool boiling problems and single-phase turbulent flows. We start our analysis with pool boiling problems. Then, we investigate the application of our method on single-phase turbulent flows. There have been several efforts to use neural operators to learn temperature and flow dynamics in two-phase flow problems. Here, we demonstrate the advantage of using the ResUNet structure compared to previously developed neural operators such as UNet and FNO for two-phase flow problems with high-frequency features [48]. The models are trained to predict future temperatures based on temperature history and velocity information. 
The problem configuration is shown in Equation 2, where " + }, + { + "bbox": [ + 68, + 268, + 526, + 412 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 68, + 268, + 526, + 412 + ], + "type": "text", + "content": " is the spatial mesh, " + }, + { + "bbox": [ + 68, + 268, + 526, + 412 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 68, + 268, + 526, + 412 + ], + "type": "text", + "content": " is the temperature, " + }, + { + "bbox": [ + 68, + 268, + 526, + 412 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 68, + 268, + 526, + 412 + ], + "type": "text", + "content": " is the velocity, " + }, + { + "bbox": [ + 68, + 268, + 526, + 412 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 68, + 268, + 526, + 412 + ], + "type": "text", + "content": " specifies the prediction time interval length, and " + }, + { + "bbox": [ + 68, + 268, + 526, + 412 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{\\theta}" + }, + { + "bbox": [ + 68, + 268, + 526, + 412 + ], + "type": "text", + "content": " is the trained neural operator." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 151, + 427, + 524, + 442 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 427, + 524, + 442 + ], + "spans": [ + { + "bbox": [ + 151, + 427, + 524, + 442 + ], + "type": "interline_equation", + "content": "T (\\boldsymbol {x}, t: t + k \\Delta t) = \\mathcal {F} _ {\\theta} (T (\\boldsymbol {x}, t - k \\Delta t: t), V (\\boldsymbol {x}, t - k \\Delta t: t + k \\Delta t)) \\tag {2}", + "image_path": "5fb472f59af9da9bdd882ecd832da3951e0959515d0fc1aeac26fd1cca7a0ba9.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 68, + 447, + 525, + 607 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 447, + 525, + 607 + ], + "spans": [ + { + "bbox": [ + 68, + 447, + 525, + 607 + ], + "type": "text", + "content": "UNet with residual blocks (ResUNet) was first introduced for a semantic segmentation task by imposing skip connections between convolutional layers in a UNet-like structure [49]. We use the same idea to add skip connections in the form of residual blocks to both the encoder and decoder side of the UNet. The residual blocks have been shown to mitigate vanishing gradient problems by offering a smoother optimization landscape [50]. We also demonstrate that they help with the better flow of information in the network for complex datasets such as two-phase flows, which results in a better capture of localized features, possibly reducing the spectral bias towards low-frequency components. We also introduced several modifications, such as the GELU activation function and group normalization, that demonstrated superior prediction accuracy. 
We used the mean squared error (MSE) loss function in all prediction time steps (Equation 3) as the objective criterion to train the model, i.e.," + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 166, + 618, + 524, + 655 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 618, + 524, + 655 + ], + "spans": [ + { + "bbox": [ + 166, + 618, + 524, + 655 + ], + "type": "interline_equation", + "content": "L (\\theta) = \\frac {1}{N _ {u} k} \\sum_ {i = 1} ^ {N _ {u}} \\sum_ {j = 1} ^ {k} \\| T ^ {i} (\\boldsymbol {x}, t + j \\Delta t) - \\mathcal {F} _ {\\theta} (T ^ {i} (\\boldsymbol {x}, t)) (j \\Delta t) \\| _ {2} ^ {2} \\tag {3}", + "image_path": "6ebc32a309ea9520fc757d5c3ce1220418cc5c6e2798e6456c14b5411edca5e4.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 68, + 672, + 524, + 729 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 672, + 524, + 729 + ], + "spans": [ + { + "bbox": [ + 68, + 672, + 524, + 729 + ], + "type": "text", + "content": "We used the Lion optimizer [51] to perform the optimization as we observed superior performance with this optimizer compared to the conventional Adam optimizer [52]. More details about the ResUNet structure, the training hyperparameters, and comparison with UNet predictions are included in Appendix A." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 68, + 729, + 524, + 758 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 729, + 524, + 758 + ], + "spans": [ + { + "bbox": [ + 68, + 729, + 524, + 758 + ], + "type": "text", + "content": "We evaluated our baseline neural operator on both saturated and subcooled pool boiling datasets from the BubbleML data repository, which is generated through Flash-X simulations" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 772, + 301, + 782 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 772, + 301, + 782 + ], + "spans": [ + { + "bbox": [ + 293, + 772, + 301, + 782 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 525, + 246 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 525, + 246 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 525, + 246 + ], + "type": "text", + "content": "[53] and were collected in a previous study [48]. It should be noted that predictions in sub-cooled boiling is more difficult due to the vortices generated by condensation trails. Therefore, the errors are higher in subcooled boiling predictions, and the results look more over-smoothed compared to saturated boiling prediction results. A visualization of the subcooled boiling prediction results is shown in Appendix A. A comprehensive comparison of our baseline model with the previous best baseline model developed by [48] is included in Table 1 and Table 2 for saturated and subcooled pool boiling dataset, respectively. The ResUNet improves the resolution of high-frequency features, resulting in higher prediction accuracy. We note that given the possible differences in the testing dataset, the one-to-one comparison with the previously reported numbers may not be fair. 
Therefore, we trained and tested the previously reported best model (e.g., UNet) with our dataset configuration, which consists of a larger test dataset and smaller training dataset compared to the previous work." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 260, + 526, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 260, + 526, + 433 + ], + "spans": [ + { + "bbox": [ + 67, + 260, + 526, + 433 + ], + "type": "text", + "content": "We evaluated our model using six different field metrics relevant to two-phase problems. These metrics include relative error (Rel. Error), root mean square error (RMSE), boundary RMSE (BRMSE) showing the error on the boundaries, bubble RMSE showing the error in the bubble areas and at the interfaces, mean maximum error " + }, + { + "bbox": [ + 67, + 260, + 526, + 433 + ], + "type": "inline_equation", + "content": "(\\mathrm{Max}_{\\mathrm{mean}})" + }, + { + "bbox": [ + 67, + 260, + 526, + 433 + ], + "type": "text", + "content": " showing the mean of the maximum error for each prediction, and overall maximum error " + }, + { + "bbox": [ + 67, + 260, + 526, + 433 + ], + "type": "inline_equation", + "content": "(\\mathrm{Max}_{\\mathrm{max}})" + }, + { + "bbox": [ + 67, + 260, + 526, + 433 + ], + "type": "text", + "content": " showing the maximum error over the test dataset. 
We also evaluated the predictions in three different frequency bands using spectral errors at low frequency " + }, + { + "bbox": [ + 67, + 260, + 526, + 433 + ], + "type": "inline_equation", + "content": "(F_{\\mathrm{low}})" + }, + { + "bbox": [ + 67, + 260, + 526, + 433 + ], + "type": "text", + "content": ", medium frequency " + }, + { + "bbox": [ + 67, + 260, + 526, + 433 + ], + "type": "inline_equation", + "content": "(F_{\\mathrm{mid}})" + }, + { + "bbox": [ + 67, + 260, + 526, + 433 + ], + "type": "text", + "content": ", and high frequency " + }, + { + "bbox": [ + 67, + 260, + 526, + 433 + ], + "type": "inline_equation", + "content": "(F_{\\mathrm{high}})" + }, + { + "bbox": [ + 67, + 260, + 526, + 433 + ], + "type": "text", + "content": ". Exact definitions of BRMSE and bubble RMSE, as well as spectral errors are described in Appendix B. All the metrics are computed on the normalized dataset " + }, + { + "bbox": [ + 67, + 260, + 526, + 433 + ], + "type": "inline_equation", + "content": "(T^{i}(\\boldsymbol{x},t + j\\Delta t)\\in [-1,1]\\forall \\{i,j\\})" + }, + { + "bbox": [ + 67, + 260, + 526, + 433 + ], + "type": "text", + "content": ". For all the results, the state of the temperature at five future time-steps are predicted based on five time-step previous temperature history and the velocity information in two spatial dimensions." + } + ] + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 182, + 491, + 411, + 658 + ], + "blocks": [ + { + "bbox": [ + 67, + 446, + 525, + 483 + ], + "lines": [ + { + "bbox": [ + 67, + 446, + 525, + 483 + ], + "spans": [ + { + "bbox": [ + 67, + 446, + 525, + 483 + ], + "type": "text", + "content": "Table 1: Saturated pool boiling temperature prediction errors. The training dataset consists of simulations from 11 different wall temperatures. 
The test dataset consists of simulations with two other wall temperatures " + }, + { + "bbox": [ + 67, + 446, + 525, + 483 + ], + "type": "inline_equation", + "content": "(70^{\\circ}\\mathrm{C}," + }, + { + "bbox": [ + 67, + 446, + 525, + 483 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 446, + 525, + 483 + ], + "type": "inline_equation", + "content": "95^{\\circ}\\mathrm{C})" + }, + { + "bbox": [ + 67, + 446, + 525, + 483 + ], + "type": "text", + "content": " not seen during training." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 182, + 491, + 411, + 658 + ], + "lines": [ + { + "bbox": [ + 182, + 491, + 411, + 658 + ], + "spans": [ + { + "bbox": [ + 182, + 491, + 411, + 658 + ], + "type": "table", + "html": "
UNetResUNet
Rel. Error0.01910.0149
RMSE0.01890.0148
BRMSE0.05820.0364
Bubble RMSE0.1160.0726
Maxmean0.7050.553
Maxmax1.2041.154
Flow0.1050.0745
Fmid0.1130.0919
Fhigh0.02380.0182
Parameters [Millions]7.83.5
", + "image_path": "f37a91c2b7d01648a6b5163c046b04d6f2f03461a786f529a25ee22873ad0693.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 771, + 301, + 782 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 771, + 301, + 782 + ], + "spans": [ + { + "bbox": [ + 293, + 771, + 301, + 782 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 182, + 115, + 412, + 282 + ], + "blocks": [ + { + "bbox": [ + 68, + 70, + 524, + 107 + ], + "lines": [ + { + "bbox": [ + 68, + 70, + 524, + 107 + ], + "spans": [ + { + "bbox": [ + 68, + 70, + 524, + 107 + ], + "type": "text", + "content": "Table 2: Subcooled pool boiling temperature prediction errors. The training dataset consists of simulations from eight different wall temperatures. The test dataset consists of simulations with two other wall temperatures (95°C, and 98°C) not seen during training." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 182, + 115, + 412, + 282 + ], + "lines": [ + { + "bbox": [ + 182, + 115, + 412, + 282 + ], + "spans": [ + { + "bbox": [ + 182, + 115, + 412, + 282 + ], + "type": "table", + "html": "
UNetResUNet
Rel. Error0.05160.0295
RMSE0.05010.0288
BRMSE0.1390.0646
Bubble RMSE0.2690.127
Maxmean1.1410.837
Maxmax2.2791.433
Flow0.3460.157
Fmid0.3670.197
Fhigh0.05830.0370
Parameters [Millions]7.83.5
", + "image_path": "26fae4e7f5d449cdb38456dce2702cfb869bd6bbd14b388e48a3f8928a80c39d.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 68, + 298, + 525, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 298, + 525, + 515 + ], + "spans": [ + { + "bbox": [ + 68, + 298, + 525, + 515 + ], + "type": "text", + "content": "The results in Tables 1 and 2 demonstrate that all the metrics are improved by simply introducing residual blocks in the network, a better optimizer, and a better normalization. For example, there is approximately " + }, + { + "bbox": [ + 68, + 298, + 525, + 515 + ], + "type": "inline_equation", + "content": "21\\%" + }, + { + "bbox": [ + 68, + 298, + 525, + 515 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 68, + 298, + 525, + 515 + ], + "type": "inline_equation", + "content": "42\\%" + }, + { + "bbox": [ + 68, + 298, + 525, + 515 + ], + "type": "text", + "content": " reduction of RMSE in saturated and subcooled boiling, respectively. Interestingly, the ResUNet achieves better accuracies with less than half of the number of parameters in UNet. Most of the prediction errors occur within the bubble areas and at the condensation trails. This is due to the larger gradients in the bubble areas and around condensation trails resulting into more complex patterns that are more challenging to capture with the neural operator. This is expected as the neural operators are known to have spectral bias to low-frequency modes. The high-frequency content typically exists in regions with significant gradients such as interfaces and condensation trails. In subcooled pool boiling, departing bubbles may condense after departure, creating vortices that gradually dissipate over time. These vortices form complex structures containing higher energy at high frequencies. 
As a result, subcooled boiling presents greater prediction challenges compared to saturated boiling. For instance, prediction spectral errors " + }, + { + "bbox": [ + 68, + 298, + 525, + 515 + ], + "type": "inline_equation", + "content": "(F_{\\mathrm{low}}, F_{\\mathrm{mid}}, F_{\\mathrm{high}})" + }, + { + "bbox": [ + 68, + 298, + 525, + 515 + ], + "type": "text", + "content": " are approximately two times higher in subcooled boiling, highlighting the increased complexity with the high-frequency content." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 528, + 524, + 731 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 528, + 524, + 731 + ], + "spans": [ + { + "bbox": [ + 68, + 528, + 524, + 731 + ], + "type": "text", + "content": "While the residual blocks improve the neural operator's ability to reduce field errors (e.g., RMSE) and over-smoothing of certain high-frequency contents, the results still suffer from significant over-smoothing (see Appendix A). Previous studies have also shown the oversmoothing issue of convolutional based neural operators for image generation tasks and scientific computing [54, 55]. Other studies demonstrated the frequency selectiveness of convolutional neural network (CNN) architectures resulting in different learning rates for low and high-frequency components [56, 57]. Wang et al. [58] demonstrated the spectral bias in vision transformers (ViT) through Fourier analysis. They showed that the problem arises by self-attention layers that act as low-pass filters and continuously reduce high-frequency information with the network depth. A feature scaling technique was proposed to decompose the attention output signal into direct and high-frequency components and scale them separately to adjust the proportion of different frequencies of the signal. 
We draw inspiration from this technique and propose a similar approach to separately scale low frequency and high-frequency components of the features in the latent space of the neural operator to mitigate spectral bias." + } + ] + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 772, + 301, + 781 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 772, + 301, + 781 + ], + "spans": [ + { + "bbox": [ + 293, + 772, + 301, + 781 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 73, + 208, + 87 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 73, + 208, + 87 + ], + "spans": [ + { + "bbox": [ + 69, + 73, + 208, + 87 + ], + "type": "text", + "content": "2.2. High-frequency scaling" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 90, + 525, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 90, + 525, + 262 + ], + "spans": [ + { + "bbox": [ + 67, + 90, + 525, + 262 + ], + "type": "text", + "content": "As discussed in Section 2.1, neural operators suffer from spectral bias. While residual blocks offer improvements up to some extent, they cannot effectively mitigate the spectral bias inherent in the neural operators. Hence, we propose the high-frequency scaling (HFS) approach to be applied to the output of convolutional layers. The latent feature map of each convolutional layer is first divided into non-overlapping patches, similar to the first step in vision transformers. This will break down the spatial dimensions into smaller regions, which empirically will allow for better localized processing. We consider the mean of the patches as the direct component (DC) of these signals. Then, the high-frequency component (HFC) for each patch can be defined as the difference of each patch with the DC. 
It should be noted that here the DC is calculated across the patches and not individually for each patch. Then, we introduce two parameter groups of " + }, + { + "bbox": [ + 67, + 90, + 525, + 262 + ], + "type": "inline_equation", + "content": "\\lambda_{\\mathrm{DC}}" + }, + { + "bbox": [ + 67, + 90, + 525, + 262 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 90, + 525, + 262 + ], + "type": "inline_equation", + "content": "\\lambda_{\\mathrm{HFC}}" + }, + { + "bbox": [ + 67, + 90, + 525, + 262 + ], + "type": "text", + "content": " to separately scale the DC and HFC for each patch. We then re-assemble the patches to the original latent feature size before the next operation." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 68, + 263, + 526, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 263, + 526, + 350 + ], + "spans": [ + { + "bbox": [ + 68, + 263, + 526, + 350 + ], + "type": "text", + "content": "A more rigorous description of the method is as follows: Let " + }, + { + "bbox": [ + 68, + 263, + 526, + 350 + ], + "type": "inline_equation", + "content": "X \\in \\mathbb{R}^{H \\times W \\times C}" + }, + { + "bbox": [ + 68, + 263, + 526, + 350 + ], + "type": "text", + "content": " be the output feature map of a convolutional layer, where " + }, + { + "bbox": [ + 68, + 263, + 526, + 350 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 68, + 263, + 526, + 350 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 68, + 263, + 526, + 350 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 68, + 263, + 526, + 350 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 68, + 263, + 526, + 350 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 68, + 263, + 526, + 350 + ], + "type": "text", + "content": " are the height, width, and number of channels, respectively. 
We divide " + }, + { + "bbox": [ + 68, + 263, + 526, + 350 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 68, + 263, + 526, + 350 + ], + "type": "text", + "content": " into " + }, + { + "bbox": [ + 68, + 263, + 526, + 350 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 68, + 263, + 526, + 350 + ], + "type": "text", + "content": " non-overlapping patches of size " + }, + { + "bbox": [ + 68, + 263, + 526, + 350 + ], + "type": "inline_equation", + "content": "p \\times p" + }, + { + "bbox": [ + 68, + 263, + 526, + 350 + ], + "type": "text", + "content": " denoted as " + }, + { + "bbox": [ + 68, + 263, + 526, + 350 + ], + "type": "inline_equation", + "content": "X^{(i)} \\in \\mathbb{R}^{p \\times p \\times C}" + }, + { + "bbox": [ + 68, + 263, + 526, + 350 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 68, + 263, + 526, + 350 + ], + "type": "inline_equation", + "content": "i \\in [0, N]" + }, + { + "bbox": [ + 68, + 263, + 526, + 350 + ], + "type": "text", + "content": ". The DC is defined as the mean patch across all " + }, + { + "bbox": [ + 68, + 263, + 526, + 350 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 68, + 263, + 526, + 350 + ], + "type": "text", + "content": " patches as shown in Equation (4). 
The HFC calculation for each patch and the scaling step are shown in Equations (5-6):" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 248, + 361, + 524, + 397 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 248, + 361, + 524, + 397 + ], + "spans": [ + { + "bbox": [ + 248, + 361, + 524, + 397 + ], + "type": "interline_equation", + "content": "D C (X) = \\frac {1}{N} \\sum_ {i = 1} ^ {N} X ^ {(i)}, \\tag {4}", + "image_path": "eb30a68d14feeeec7115a4f0bfd522c57e7ac27be8ba6f04103ccf160cddcb2e.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 231, + 408, + 524, + 423 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 231, + 408, + 524, + 423 + ], + "spans": [ + { + "bbox": [ + 231, + 408, + 524, + 423 + ], + "type": "interline_equation", + "content": "H F C \\left(X ^ {(i)}\\right) = X ^ {(i)} - D C (X), \\tag {5}", + "image_path": "b350e1bc05bbca85614b53e41a0cf557286e86ef6d847dfe3ef52f18dbbc9fbe.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 183, + 443, + 524, + 459 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 183, + 443, + 524, + 459 + ], + "spans": [ + { + "bbox": [ + 183, + 443, + 524, + 459 + ], + "type": "interline_equation", + "content": "\\hat {X} ^ {(i)} = X ^ {(i)} + \\lambda_ {D C} \\odot D C (X) + \\lambda_ {H F C} \\odot H F C (X ^ {(i)}). 
\\tag {6}", + "image_path": "f25f4d232a20466bc9713e2e41834bd05ae965269973355ed118407fdbc425cf.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 86, + 464, + 456, + 478 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 464, + 456, + 478 + ], + "spans": [ + { + "bbox": [ + 86, + 464, + 456, + 478 + ], + "type": "text", + "content": "The scaled feature map can be then reconstructed by re-assembling the " + }, + { + "bbox": [ + 86, + 464, + 456, + 478 + ], + "type": "inline_equation", + "content": "\\hat{X}^{(i)}\\mathrm{s}" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 479, + 525, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 479, + 525, + 666 + ], + "spans": [ + { + "bbox": [ + 67, + 479, + 525, + 666 + ], + "type": "text", + "content": "The scaling parameters " + }, + { + "bbox": [ + 67, + 479, + 525, + 666 + ], + "type": "inline_equation", + "content": "\\lambda_{DC} \\in \\mathbb{R}^{1 \\times 1 \\times C}" + }, + { + "bbox": [ + 67, + 479, + 525, + 666 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 479, + 525, + 666 + ], + "type": "inline_equation", + "content": "\\lambda_{HFC} \\in \\mathbb{R}^{1 \\times 1 \\times C}" + }, + { + "bbox": [ + 67, + 479, + 525, + 666 + ], + "type": "text", + "content": " are left to be learnable parameters that are optimized using gradient descent simultaneously with the network optimization. Here, we initialized the parameters to be one and optimized them with the same learning rate used for network training. In ResUNet structure, HFS is applied to the output of both convolutional layers and the skip-connection paths with " + }, + { + "bbox": [ + 67, + 479, + 525, + 666 + ], + "type": "inline_equation", + "content": "1 \\times 1" + }, + { + "bbox": [ + 67, + 479, + 525, + 666 + ], + "type": "text", + "content": " convolutions or identity skip-connections. 
In practice, HFS can be seen as a new module incorporated to each layer of the encoder and decoder, as shown in Fig. 1. Fig. 1 also depicts examples of the learned feature maps for models with and without HFS. The most similar feature maps between the models from the first encoder layers and the last decoder layers are depicted. The model with HFS learns features with more pronounced high-frequency content and reduced over-smoothing, which possibly enhances the capture of high-frequency components of the solution and mitigates spectral bias of the neural operator. A summary of the improvements in prediction accuracy achieved through HFS is provided in Appendix C." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 771, + 301, + 782 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 771, + 301, + 782 + ], + "spans": [ + { + "bbox": [ + 293, + 771, + 301, + 782 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 73, + 75, + 213, + 314 + ], + "blocks": [ + { + "bbox": [ + 73, + 75, + 213, + 314 + ], + "lines": [ + { + "bbox": [ + 73, + 75, + 213, + 314 + ], + "spans": [ + { + "bbox": [ + 73, + 75, + 213, + 314 + ], + "type": "image", + "image_path": "cb76dc3b8182510948260c393fd0b1837e9635110ffd915e5f5771e0712f0739.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 68, + 333, + 525, + 428 + ], + "lines": [ + { + "bbox": [ + 68, + 333, + 525, + 428 + ], + "spans": [ + { + "bbox": [ + 68, + 333, + 525, + 428 + ], + "type": "text", + "content": "Figure 1: Structure of the HFS-enhanced NO. (a) Schematic of the HFS module (right) integrated with the residual block (left). (b) Structure of the ResUNet with the HFS modules (blocks in front of conv layers). 
(c) An example of a learned latent space feature from the first layer of the encoder trained with and without HFS. The most similar feature maps of the models in the first encoder level are shown. (d) An example of a learned latent space feature from the last layer of the decoder trained with and without HFS. The most similar feature maps of the two models at the last decoder level are shown. (e-f) Examples of temperature prediction with NO and HFS-enhanced NO at two different time-steps. A region with high-frequency features (top right corner) is zoomed in for better visualization." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 218, + 75, + 320, + 318 + ], + "blocks": [ + { + "bbox": [ + 218, + 75, + 320, + 318 + ], + "lines": [ + { + "bbox": [ + 218, + 75, + 320, + 318 + ], + "spans": [ + { + "bbox": [ + 218, + 75, + 320, + 318 + ], + "type": "image", + "image_path": "686e2d01f5e6a349e72871fbccfb325e6cbbd5c81366e8d859267b2c0dfd9c41.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 325, + 74, + 411, + 170 + ], + "blocks": [ + { + "bbox": [ + 325, + 74, + 411, + 170 + ], + "lines": [ + { + "bbox": [ + 325, + 74, + 411, + 170 + ], + "spans": [ + { + "bbox": [ + 325, + 74, + 411, + 170 + ], + "type": "image", + "image_path": "0b11dca30d91ab7cc7f583b42ca3b48537df8a367e51724eab149a411f6447b1.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 311, + 170, + 401, + 317 + ], + "blocks": [ + { + "bbox": [ + 311, + 170, + 401, + 317 + ], + "lines": [ + { + "bbox": [ + 311, + 170, + 401, + 317 + ], + "spans": [ + { + "bbox": [ + 311, + 170, + 401, + 317 + ], + "type": "image", + "image_path": "86cb337727b5fed499f55faf0f9b8ba65d56df1ad1aefcc124e2d92f7f7ac2c2.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": 
"image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 406, + 74, + 502, + 170 + ], + "blocks": [ + { + "bbox": [ + 406, + 74, + 502, + 170 + ], + "lines": [ + { + "bbox": [ + 406, + 74, + 502, + 170 + ], + "spans": [ + { + "bbox": [ + 406, + 74, + 502, + 170 + ], + "type": "image", + "image_path": "4c7260a04619c0b1d7dbd6af86e9d60a152d47c4eda51b99716497509cce91f6.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 408, + 170, + 515, + 316 + ], + "blocks": [ + { + "bbox": [ + 408, + 170, + 515, + 316 + ], + "lines": [ + { + "bbox": [ + 408, + 170, + 515, + 316 + ], + "spans": [ + { + "bbox": [ + 408, + 170, + 515, + 316 + ], + "type": "image", + "image_path": "5920e7c922da97a60a54c9bbdc9099dafa8582f64a270ac461208c70d6889c24.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 447, + 175, + 461 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 447, + 175, + 461 + ], + "spans": [ + { + "bbox": [ + 69, + 447, + 175, + 461 + ], + "type": "text", + "content": "2.3. 
Diffusion Model" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 68, + 465, + 525, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 465, + 525, + 609 + ], + "spans": [ + { + "bbox": [ + 68, + 465, + 525, + 609 + ], + "type": "text", + "content": "As discussed earlier, the NO and the HFS-enhanced NO learn the solution by minimizing some variant of the Euclidean distance, such as MSE, RMSE, relative " + }, + { + "bbox": [ + 68, + 465, + 525, + 609 + ], + "type": "inline_equation", + "content": "L^2" + }, + { + "bbox": [ + 68, + 465, + 525, + 609 + ], + "type": "text", + "content": " or relative " + }, + { + "bbox": [ + 68, + 465, + 525, + 609 + ], + "type": "inline_equation", + "content": "L^1" + }, + { + "bbox": [ + 68, + 465, + 525, + 609 + ], + "type": "text", + "content": " norms of the errors, between the true and predicted states. Unfortunately, such a loss function effectively prioritizes the error at those wavenumbers that bear higher energy. The systems considered in this study exhibit a decaying energy spectrum, implying that the lower wavenumbers carrying higher energy will be over-represented, while the higher wavenumbers that bear lower energy will be ignored due to its minimal influence on the Euclidean distance-based loss function. The recent efforts aimed at improving the spectral bias of NO using GenAI algorithms, discussed in Section 1, motivated us to explore this route. Specifically, we investigate if diffusion models [59] can help further refine the predictions estimated by NO and HFS-enhanced NO." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 68, + 623, + 524, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 623, + 524, + 696 + ], + "spans": [ + { + "bbox": [ + 68, + 623, + 524, + 696 + ], + "type": "text", + "content": "Diffusion models (DM) are generative frameworks capable of producing samples that align with the true underlying function distribution, " + }, + { + "bbox": [ + 68, + 623, + 524, + 696 + ], + "type": "inline_equation", + "content": "\\mathcal{T}" + }, + { + "bbox": [ + 68, + 623, + 524, + 696 + ], + "type": "text", + "content": ", given a limited set of observations from " + }, + { + "bbox": [ + 68, + 623, + 524, + 696 + ], + "type": "inline_equation", + "content": "\\mathcal{T}" + }, + { + "bbox": [ + 68, + 623, + 524, + 696 + ], + "type": "text", + "content": ". These models achieve sample generation by progressively refining a simple prior distribution, such as a standard normal distribution " + }, + { + "bbox": [ + 68, + 623, + 524, + 696 + ], + "type": "inline_equation", + "content": "(\\Gamma_0 = \\mathcal{N}(0,I))" + }, + { + "bbox": [ + 68, + 623, + 524, + 696 + ], + "type": "text", + "content": ", into the desired complex distribution " + }, + { + "bbox": [ + 68, + 623, + 524, + 696 + ], + "type": "inline_equation", + "content": "(\\Gamma_N \\approx \\mathcal{T})" + }, + { + "bbox": [ + 68, + 623, + 524, + 696 + ], + "type": "text", + "content": " over " + }, + { + "bbox": [ + 68, + 623, + 524, + 696 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 68, + 623, + 524, + 696 + ], + "type": "text", + "content": " iterative steps." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 68, + 709, + 524, + 766 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 709, + 524, + 766 + ], + "spans": [ + { + "bbox": [ + 68, + 709, + 524, + 766 + ], + "type": "text", + "content": "The diffusion process begins with an initial sample " + }, + { + "bbox": [ + 68, + 709, + 524, + 766 + ], + "type": "inline_equation", + "content": "\\mathbf{X_0}" + }, + { + "bbox": [ + 68, + 709, + 524, + 766 + ], + "type": "text", + "content": " drawn from " + }, + { + "bbox": [ + 68, + 709, + 524, + 766 + ], + "type": "inline_equation", + "content": "\\Gamma_0" + }, + { + "bbox": [ + 68, + 709, + 524, + 766 + ], + "type": "text", + "content": " and subsequently predicts " + }, + { + "bbox": [ + 68, + 709, + 524, + 766 + ], + "type": "inline_equation", + "content": "\\mathbf{X}_1" + }, + { + "bbox": [ + 68, + 709, + 524, + 766 + ], + "type": "text", + "content": ". Since " + }, + { + "bbox": [ + 68, + 709, + 524, + 766 + ], + "type": "inline_equation", + "content": "\\Gamma_0 = \\mathcal{N}(0,I)" + }, + { + "bbox": [ + 68, + 709, + 524, + 766 + ], + "type": "text", + "content": ", obtaining " + }, + { + "bbox": [ + 68, + 709, + 524, + 766 + ], + "type": "inline_equation", + "content": "\\mathbf{X}_0" + }, + { + "bbox": [ + 68, + 709, + 524, + 766 + ], + "type": "text", + "content": " is straightforward. 
The model then iteratively refines the sample, estimating " + }, + { + "bbox": [ + 68, + 709, + 524, + 766 + ], + "type": "inline_equation", + "content": "\\mathbf{X}_{i + 1}" + }, + { + "bbox": [ + 68, + 709, + 524, + 766 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 68, + 709, + 524, + 766 + ], + "type": "inline_equation", + "content": "\\mathbf{X}_i" + }, + { + "bbox": [ + 68, + 709, + 524, + 766 + ], + "type": "text", + "content": " over " + }, + { + "bbox": [ + 68, + 709, + 524, + 766 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 68, + 709, + 524, + 766 + ], + "type": "text", + "content": " steps. However, a key challenge arises on how to train the diffusion model to transition from " + }, + { + "bbox": [ + 68, + 709, + 524, + 766 + ], + "type": "inline_equation", + "content": "\\Gamma_0 = \\mathcal{N}(0,I)" + }, + { + "bbox": [ + 68, + 709, + 524, + 766 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 68, + 709, + 524, + 766 + ], + "type": "inline_equation", + "content": "\\Gamma_N\\approx \\mathcal{T}" + }, + { + "bbox": [ + 68, + 709, + 524, + 766 + ], + "type": "text", + "content": " when intermedi-" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 771, + 301, + 782 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 771, + 301, + 782 + ], + "spans": [ + { + "bbox": [ + 293, + 771, + 301, + 782 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 525, + 231 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 525, + 231 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 525, + 231 + ], + "type": "text", + "content": "ate distributions " + }, + { + "bbox": [ + 67, + 72, + 525, + 231 + ], + "type": "inline_equation", + "content": "\\Gamma_{i}" + }, + { + "bbox": [ + 
67, + 72, + 525, + 231 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 67, + 72, + 525, + 231 + ], + "type": "inline_equation", + "content": "i = \\{1,2,3,\\ldots ,N - 1\\}" + }, + { + "bbox": [ + 67, + 72, + 525, + 231 + ], + "type": "text", + "content": " are not explicitly available. This challenge is addressed using denoising score matching combined with Langevin dynamics [60]. The objective of a score-based diffusion model is to estimate the score function, which is defined as " + }, + { + "bbox": [ + 67, + 72, + 525, + 231 + ], + "type": "inline_equation", + "content": "s_{\\theta_D}(\\mathbf{X}) = \\nabla_X\\log p(\\mathbf{X})" + }, + { + "bbox": [ + 67, + 72, + 525, + 231 + ], + "type": "text", + "content": " , where " + }, + { + "bbox": [ + 67, + 72, + 525, + 231 + ], + "type": "inline_equation", + "content": "\\theta_{D}" + }, + { + "bbox": [ + 67, + 72, + 525, + 231 + ], + "type": "text", + "content": " represents the parameters of the diffusion model and " + }, + { + "bbox": [ + 67, + 72, + 525, + 231 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 67, + 72, + 525, + 231 + ], + "type": "text", + "content": " is the probability density of " + }, + { + "bbox": [ + 67, + 72, + 525, + 231 + ], + "type": "inline_equation", + "content": "\\mathbf{X}" + }, + { + "bbox": [ + 67, + 72, + 525, + 231 + ], + "type": "text", + "content": " , where " + }, + { + "bbox": [ + 67, + 72, + 525, + 231 + ], + "type": "inline_equation", + "content": "\\mathbf{X}" + }, + { + "bbox": [ + 67, + 72, + 525, + 231 + ], + "type": "text", + "content": " corresponds to continuous realizations of " + }, + { + "bbox": [ + 67, + 72, + 525, + 231 + ], + "type": "inline_equation", + "content": "\\mathbf{X}_i\\sim \\Gamma_i" + }, + { + "bbox": [ + 67, + 72, + 525, + 231 + ], + "type": "text", + "content": " . 
Since the exact data distribution is unknown and may reside on a lower-dimensional manifold, the score function can become ill-defined in regions lacking data. To mitigate this issue, Gaussian noise is added to perturb the data, ensuring a well-defined score function across the entire space by smoothing the distribution. The score function provides a directional gradient toward regions of higher probability. However, a direct mechanism to sample from the learned distribution is still absent." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 246, + 526, + 320 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 246, + 526, + 320 + ], + "spans": [ + { + "bbox": [ + 67, + 246, + 526, + 320 + ], + "type": "text", + "content": "This limitation is overcome using Langevin dynamics, as proposed in [61]. Langevin dynamics ensures that the generated samples converge to the true underlying distribution by balancing deterministic motion, driven by the gradient of the log probability, with stochastic exploration introduced by noise. 
In our approach, we condition the score function on the output of the pre-trained NO, " + }, + { + "bbox": [ + 67, + 246, + 526, + 320 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{\\theta}" + }, + { + "bbox": [ + 67, + 246, + 526, + 320 + ], + "type": "text", + "content": ", leading to the modified score function:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 222, + 333, + 523, + 348 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 222, + 333, + 523, + 348 + ], + "spans": [ + { + "bbox": [ + 222, + 333, + 523, + 348 + ], + "type": "interline_equation", + "content": "s _ {\\theta_ {D}} (\\mathbf {X}, \\sigma , \\mathcal {F} _ {\\theta}) = \\nabla_ {X} \\log p (\\mathbf {X} | \\mathcal {F} _ {\\theta}), \\tag {7}", + "image_path": "5060cf71f4b61cf2ad25d180bd338654e4f2c327f2618a01d4d585f52ba14efd.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 354, + 525, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 354, + 525, + 413 + ], + "spans": [ + { + "bbox": [ + 67, + 354, + 525, + 413 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 67, + 354, + 525, + 413 + ], + "type": "inline_equation", + "content": "\\sigma" + }, + { + "bbox": [ + 67, + 354, + 525, + 413 + ], + "type": "text", + "content": " represents the noise level. 
This conditioned score function guides the DM to sample from the posterior distribution of " + }, + { + "bbox": [ + 67, + 354, + 525, + 413 + ], + "type": "inline_equation", + "content": "\\mathbf{X}" + }, + { + "bbox": [ + 67, + 354, + 525, + 413 + ], + "type": "text", + "content": " given " + }, + { + "bbox": [ + 67, + 354, + 525, + 413 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{\\theta}" + }, + { + "bbox": [ + 67, + 354, + 525, + 413 + ], + "type": "text", + "content": ", ensuring that the generated samples are consistent with both the structures imposed by " + }, + { + "bbox": [ + 67, + 354, + 525, + 413 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{\\theta}" + }, + { + "bbox": [ + 67, + 354, + 525, + 413 + ], + "type": "text", + "content": " and the true data distribution. The update rule for Langevin dynamics is given by:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 206, + 421, + 524, + 446 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 206, + 421, + 524, + 446 + ], + "spans": [ + { + "bbox": [ + 206, + 421, + 524, + 446 + ], + "type": "interline_equation", + "content": "\\mathbf {X} _ {j + 1} = \\mathbf {X} _ {j} + \\frac {\\varepsilon}{2} s _ {\\theta_ {D}} (\\mathbf {X} _ {j}, \\sigma_ {j}, \\mathcal {F} _ {\\theta}) + \\sqrt {\\varepsilon} z _ {j}, \\tag {8}", + "image_path": "12c8e15807fdd5f188eefd57ef41904289a4d945386824debe1ba6cbf304b01e.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 449, + 525, + 492 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 449, + 525, + 492 + ], + "spans": [ + { + "bbox": [ + 67, + 449, + 525, + 492 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 67, + 449, + 525, + 492 + ], + "type": "inline_equation", + "content": "\\varepsilon" + }, + { + "bbox": [ + 67, + 449, + 525, + 492 + ], + "type": "text", + "content": " is the step size, " + }, + { + "bbox": [ + 67, + 449, + 525, + 492 + ], + 
"type": "inline_equation", + "content": "z_{j}" + }, + { + "bbox": [ + 67, + 449, + 525, + 492 + ], + "type": "text", + "content": " is the noise component, and " + }, + { + "bbox": [ + 67, + 449, + 525, + 492 + ], + "type": "inline_equation", + "content": "\\sigma_{j}" + }, + { + "bbox": [ + 67, + 449, + 525, + 492 + ], + "type": "text", + "content": " denotes the noise scale at iteration " + }, + { + "bbox": [ + 67, + 449, + 525, + 492 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 67, + 449, + 525, + 492 + ], + "type": "text", + "content": " during the sampling process. The iterative denoising of the noised states by a diffusion model conditioned on the outputs of a pre-trained HFS-enhanced NO is illustrated in Fig 2." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 493, + 524, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 493, + 524, + 608 + ], + "spans": [ + { + "bbox": [ + 67, + 493, + 524, + 608 + ], + "type": "text", + "content": "During training, the diffusion model learns to denoise the state of the system perturbed by a noise with zero mean and " + }, + { + "bbox": [ + 67, + 493, + 524, + 608 + ], + "type": "inline_equation", + "content": "\\sigma" + }, + { + "bbox": [ + 67, + 493, + 524, + 608 + ], + "type": "text", + "content": " standard deviation, where " + }, + { + "bbox": [ + 67, + 493, + 524, + 608 + ], + "type": "inline_equation", + "content": "\\ln \\sigma \\sim \\mathcal{N}(-1.2, 1.2^2)" + }, + { + "bbox": [ + 67, + 493, + 524, + 608 + ], + "type": "text", + "content": ". 
When " + }, + { + "bbox": [ + 67, + 493, + 524, + 608 + ], + "type": "inline_equation", + "content": "\\sigma" + }, + { + "bbox": [ + 67, + 493, + 524, + 608 + ], + "type": "text", + "content": " is small, the score function " + }, + { + "bbox": [ + 67, + 493, + 524, + 608 + ], + "type": "inline_equation", + "content": "s_{\\theta_D}" + }, + { + "bbox": [ + 67, + 493, + 524, + 608 + ], + "type": "text", + "content": " increasingly focuses on reconstructing high-frequency details and vice versa. In this manner, the diffusion model learns to perturb and reconstruct the signal at multiple scales, unlike the NO whose scale is fixed throughout its training, and thereby learns the structure of the underlying system across all the scales. Our implementation of the DM conditioned on the NO and HFS-enhanced NO is based on [47] that adopts the training, network architecture, pre-conditioning, and sampling routine proposed in \"EDM\" [62]." + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 771, + 301, + 782 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 771, + 301, + 782 + ], + "spans": [ + { + "bbox": [ + 293, + 771, + 301, + 782 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 73, + 72, + 517, + 513 + ], + "blocks": [ + { + "bbox": [ + 73, + 72, + 517, + 513 + ], + "lines": [ + { + "bbox": [ + 73, + 72, + 517, + 513 + ], + "spans": [ + { + "bbox": [ + 73, + 72, + 517, + 513 + ], + "type": "image", + "image_path": "faa78b851598a3a8e280e4c419776b1b8ff245583ac31e14424ca9a458edbe62.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 68, + 528, + 525, + 569 + ], + "lines": [ + { + "bbox": [ + 68, + 528, + 525, + 569 + ], + "spans": [ + { + "bbox": [ + 68, + 528, + 525, + 569 + ], + "type": "text", + "content": "Figure 2: 
Mitigating Spectral Bias with Diffusion Model. The states estimated by the NO exhibit oversmoothing. They serve as the prior that conditions the DM, which in turn reconstructs the missing frequencies iteratively through conditional sampling. The results are based on a NO with 2 million parameters." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 587, + 126, + 601 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 587, + 126, + 601 + ], + "spans": [ + { + "bbox": [ + 69, + 587, + 126, + 601 + ], + "type": "text", + "content": "3. Results" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 611, + 327, + 624 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 611, + 327, + 624 + ], + "spans": [ + { + "bbox": [ + 69, + 611, + 327, + 624 + ], + "type": "text", + "content": "3.1. HFS-enhanced NO for two-phase flow problems" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 68, + 628, + 525, + 772 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 628, + 525, + 772 + ], + "spans": [ + { + "bbox": [ + 68, + 628, + 525, + 772 + ], + "type": "text", + "content": "We first conduct several experiments with different variants of ResUNet to demonstrate the advantage of HFS in spectral bias mitigation for two-phase flow operator learning problem. Given the higher complexity of subcooled boiling data compared to saturated boiling data, we will focus on the subcooled boiling experiments. Examples showing the saturated boiling predictions are shown in Appendix D. 
Given the flexibility of our NO structure, we investigated different variants of ResUNet by varying the NO size by changing the number of parameters in the range of " + }, + { + "bbox": [ + 68, + 628, + 525, + 772 + ], + "type": "inline_equation", + "content": "\\sim 2" + }, + { + "bbox": [ + 68, + 628, + 525, + 772 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 68, + 628, + 525, + 772 + ], + "type": "inline_equation", + "content": "\\sim 16" + }, + { + "bbox": [ + 68, + 628, + 525, + 772 + ], + "type": "text", + "content": " million parameters. The number of parameters was changed by simply changing the number of latent feature maps at each level of the ResUNet structure. The number of downsamplings/upsamplings was kept at five steps for all the models to achieve spatially consistent resolutions at each level across all the NOs. The subcooled pool boiling dataset" + } + ] + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 290, + 771, + 304, + 782 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 771, + 304, + 782 + ], + "spans": [ + { + "bbox": [ + 290, + 771, + 304, + 782 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 73, + 525, + 233 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 73, + 525, + 233 + ], + "spans": [ + { + "bbox": [ + 67, + 73, + 525, + 233 + ], + "type": "text", + "content": "consists of 10 different simulation trajectories, two of which were used for testing. Each simulation trajectory consists of 201 time-steps. However, similar to [48], the first 30 unsteady time-steps were not included in the training and testing of the models. Fig. 
3 demonstrates the variation of RMSE, BRMSE, bubble RMSE, and " + }, + { + "bbox": [ + 67, + 73, + 525, + 233 + ], + "type": "inline_equation", + "content": "\\mathrm{Max}_{\\mathrm{mean}}" + }, + { + "bbox": [ + 67, + 73, + 525, + 233 + ], + "type": "text", + "content": " metrics with NO size for results obtained from NO and HFS-enhanced NO. As expected, both NO and HFS-enhanced NO exhibit error-decreasing trends with the number of parameters. However, the HFS-enhanced NO always yields lower errors compared to NO in all metrics and irrespective of the NO size. The effect of HFS is more pronounced in the bubble RMSE due to larger high-frequency content at the bubble interface and within the bubbles. For example, HFS yields " + }, + { + "bbox": [ + 67, + 73, + 525, + 233 + ], + "type": "inline_equation", + "content": "8\\%" + }, + { + "bbox": [ + 67, + 73, + 525, + 233 + ], + "type": "text", + "content": " improvement in RMSE for the 16 million NO. This improvement is " + }, + { + "bbox": [ + 67, + 73, + 525, + 233 + ], + "type": "inline_equation", + "content": "16\\%" + }, + { + "bbox": [ + 67, + 73, + 525, + 233 + ], + "type": "text", + "content": " for the bubble RMSE metric. On average, HFS decreases the RMSE and bubble RMSE by " + }, + { + "bbox": [ + 67, + 73, + 525, + 233 + ], + "type": "inline_equation", + "content": "12.4\\%" + }, + { + "bbox": [ + 67, + 73, + 525, + 233 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 73, + 525, + 233 + ], + "type": "inline_equation", + "content": "18.2\\%" + }, + { + "bbox": [ + 67, + 73, + 525, + 233 + ], + "type": "text", + "content": ", respectively." 
+ } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 71, + 244, + 298, + 405 + ], + "blocks": [ + { + "bbox": [ + 71, + 244, + 298, + 405 + ], + "lines": [ + { + "bbox": [ + 71, + 244, + 298, + 405 + ], + "spans": [ + { + "bbox": [ + 71, + 244, + 298, + 405 + ], + "type": "image", + "image_path": "404739d4e1a7bce81a61b07954c246561d4c0b5d4e5df4224d919983d7e0719e.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 301, + 244, + 521, + 405 + ], + "blocks": [ + { + "bbox": [ + 301, + 244, + 521, + 405 + ], + "lines": [ + { + "bbox": [ + 301, + 244, + 521, + 405 + ], + "spans": [ + { + "bbox": [ + 301, + 244, + 521, + 405 + ], + "type": "image", + "image_path": "ce9d9915b51cc6b01cef8d4fd9ad5004076ee8c64d31f8dc18cd381d62f2850c.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 76, + 406, + 298, + 564 + ], + "blocks": [ + { + "bbox": [ + 76, + 406, + 298, + 564 + ], + "lines": [ + { + "bbox": [ + 76, + 406, + 298, + 564 + ], + "spans": [ + { + "bbox": [ + 76, + 406, + 298, + 564 + ], + "type": "image", + "image_path": "8b521be2ad6db402353a683c8e72a14a74947a986a7ba99f5da56787754d3d44.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 133, + 565, + 279, + 576 + ], + "lines": [ + { + "bbox": [ + 133, + 565, + 279, + 576 + ], + "spans": [ + { + "bbox": [ + 133, + 565, + 279, + 576 + ], + "type": "text", + "content": "Neural Operator Size (Millions)" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 307, + 406, + 521, + 565 + ], + "blocks": [ + { + "bbox": [ + 307, + 406, + 521, + 565 + ], + "lines": [ + { + "bbox": [ + 307, + 406, + 521, + 565 + ], + "spans": [ + { + "bbox": [ + 307, + 406, + 521, + 565 + ], + "type": "image", + "image_path": 
"5bd552819042709e8514a471c06051655dec81d3d173212e98e78c9851f40c04.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 356, + 565, + 502, + 576 + ], + "lines": [ + { + "bbox": [ + 356, + 565, + 502, + 576 + ], + "spans": [ + { + "bbox": [ + 356, + 565, + 502, + 576 + ], + "type": "text", + "content": "Neural Operator Size (Millions)" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 68, + 587, + 525, + 638 + ], + "lines": [ + { + "bbox": [ + 68, + 587, + 525, + 638 + ], + "spans": [ + { + "bbox": [ + 68, + 587, + 525, + 638 + ], + "type": "text", + "content": "Figure 3: Temperature prediction errors of NO and HFS-enhanced NO varying with NO size. (a) Root mean square error (RMSE), (b) Boundary RMSE (BRMSE), (c) Bubble RMSE, (d) Mean maximum error. All the errors are calculated over the 5 time-step temperature predictions. The legends in (a) are applicable to (b - d) as well. All the results are based on test dataset in subcooled pool boiling." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 654, + 286, + 669 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 654, + 286, + 669 + ], + "spans": [ + { + "bbox": [ + 69, + 654, + 286, + 669 + ], + "type": "text", + "content": "3.2. Spectral analysis of HFS-enhanced NO" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 671, + 525, + 772 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 671, + 525, + 772 + ], + "spans": [ + { + "bbox": [ + 67, + 671, + 525, + 772 + ], + "type": "text", + "content": "HFS reduces the over-smoothing effect, hence, the intricate features of vortices induced by condensation trails in subcooled boiling are better resolved. 
Moreover, HFS results in better alignment of the energy spectra to the ground truth signal, especially at high wave numbers attributed to the high frequency features. Fig. 4 depicts the enhancements obtained by adding HFS modules to NO. The prediction results of HFS-enhanced NO are improved compared to NO for all time-steps. However, the enhancement is more pronounced at later time-steps, where the NO predictions are significantly over-smoothed." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 290, + 771, + 304, + 782 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 771, + 304, + 782 + ], + "spans": [ + { + "bbox": [ + 290, + 771, + 304, + 782 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 76, + 75, + 520, + 454 + ], + "blocks": [ + { + "bbox": [ + 76, + 75, + 520, + 454 + ], + "lines": [ + { + "bbox": [ + 76, + 75, + 520, + 454 + ], + "spans": [ + { + "bbox": [ + 76, + 75, + 520, + 454 + ], + "type": "image", + "image_path": "629dde0d1af87ae717d9f62eb3415d917ebe53718ff14baae00b6da5edc7a8fa.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 68, + 465, + 525, + 526 + ], + "lines": [ + { + "bbox": [ + 68, + 465, + 525, + 526 + ], + "spans": [ + { + "bbox": [ + 68, + 465, + 525, + 526 + ], + "type": "text", + "content": "Figure 4: Subcooled pool boiling transient temperature prediction. (a) Ground truth (GT) temperatures for 5 consecutive time steps (from left to right) " + }, + { + "bbox": [ + 68, + 465, + 525, + 526 + ], + "type": "inline_equation", + "content": "(\\Delta t = 8" + }, + { + "bbox": [ + 68, + 465, + 525, + 526 + ], + "type": "text", + "content": " ms). (b) NO prediction results. (c) HFS-enhanced NO prediction results. 
(d) The corresponding energy spectra " + }, + { + "bbox": [ + 68, + 465, + 525, + 526 + ], + "type": "inline_equation", + "content": "(p(k))" + }, + { + "bbox": [ + 68, + 465, + 525, + 526 + ], + "type": "text", + "content": " for each time step. For better visualization, the subplots in (d) show the energy spectra only for the high wavenumbers. The legends in first plot are applicable to other plots as well. All the results are based on a " + }, + { + "bbox": [ + 68, + 465, + 525, + 526 + ], + "type": "inline_equation", + "content": "\\sim 3.5" + }, + { + "bbox": [ + 68, + 465, + 525, + 526 + ], + "type": "text", + "content": " M parameter NO." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 68, + 541, + 526, + 745 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 541, + 526, + 745 + ], + "spans": [ + { + "bbox": [ + 68, + 541, + 526, + 745 + ], + "type": "text", + "content": "The average energy for the high-frequency component of the latent features (e.g., excluding the first " + }, + { + "bbox": [ + 68, + 541, + 526, + 745 + ], + "type": "inline_equation", + "content": "12.5\\%" + }, + { + "bbox": [ + 68, + 541, + 526, + 745 + ], + "type": "text", + "content": " frequencies at the full resolution) is generally higher for HFS-enhanced NO. This behavior is specifically seen in all the encoder layers and the last three layers of the decoder for a five-layer decoder and five-layer encoder (e.g., five downsampling and five upsampling steps). The first two layers after the bottleneck are at very low spatial resolutions and may not represent any useful spectral information. However, more high-frequency component is generated in the later stages of the decoder that are closer to the output. The NO decoder mean feature maps at each layer show low-contrast regions at both left and right side of the maps, starting from layer two to the end. 
However, these regions are diminished when HFS is used, showing that a more diverse set of features is generated in the decoder (see Appendix E). However, the same behavior does not necessarily exist for the encoder mean latent features, suggesting that the mean feature map may not be a good representative of the high-frequency component. Instead, analysis of individual feature maps appears to be a more appropriate approach in this case." + } + ] + } + ], + "index": 2 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 86, + 757, + 523, + 782 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 757, + 523, + 782 + ], + "spans": [ + { + "bbox": [ + 86, + 757, + 523, + 782 + ], + "type": "text", + "content": "Individual latent space features exhibit improved preservation and propagation of high-12" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 70, + 72, + 524, + 273 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 72, + 524, + 273 + ], + "spans": [ + { + "bbox": [ + 70, + 72, + 524, + 273 + ], + "type": "text", + "content": "frequency components when HFS is integrated in the NO structure. Fig. 5 depicts examples of latent features from the first layer of the encoder and the last layer of decoder. These layers are specifically chosen due to their proximity to the input and output layers, making the visualizations more understandable. When comparing similar latent feature maps, HFS reduces the excessive smoothing and increases the high-frequency component within the features in the latent space. The energy spectra plots in Fig. 5 demonstrate similar trends for both NO and HFS-enhanced NO with the later having larger spectral energy at the mid and high wave numbers (e.g. 
" + }, + { + "bbox": [ + 70, + 72, + 524, + 273 + ], + "type": "inline_equation", + "content": "k > 20" + }, + { + "bbox": [ + 70, + 72, + 524, + 273 + ], + "type": "text", + "content": "). For a more robust spectral analysis of latent features, we compared the individual latent features in the NO and HFS-enhanced NO with both " + }, + { + "bbox": [ + 70, + 72, + 524, + 273 + ], + "type": "inline_equation", + "content": "\\sim 3.5" + }, + { + "bbox": [ + 70, + 72, + 524, + 273 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 70, + 72, + 524, + 273 + ], + "type": "inline_equation", + "content": "\\sim 16" + }, + { + "bbox": [ + 70, + 72, + 524, + 273 + ], + "type": "text", + "content": " million parameter models. The HFS-enhanced NO decreases the over-smoothing in latent features when compared with a similar feature map from NO. The normalized energy spectra of these latent features exhibit larger high-frequency component with HFS-enhanced NO. This is evident in Fig. 5(b, d, f, and h), where the HFS-enhanced NO curves surpass the NO curves after a certain wave number." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 289, + 524, + 476 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 289, + 524, + 476 + ], + "spans": [ + { + "bbox": [ + 70, + 289, + 524, + 476 + ], + "type": "text", + "content": "Comparison of the ratio of high-frequency component energy when calculated separately for each latent feature and then averaged over all the features at each layer in the encoder also shows consistently higher values when HFS is used. The same trend is also observed in the last three layers of the decoder. These results are shown in Fig. 5i and 5j. We observed similar trends for other samples where the ratio of high-frequency component energy to total energy in the latent space is higher when HFS is integrated with the NO. 
However, this advantage may not be noticeable using the mean latent feature visualization at each layer. Note that for the analysis presented in Fig. 5i and 5j, we progressively increased the threshold (from " + }, + { + "bbox": [ + 70, + 289, + 524, + 476 + ], + "type": "inline_equation", + "content": "12.5\\%" + }, + { + "bbox": [ + 70, + 289, + 524, + 476 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 70, + 289, + 524, + 476 + ], + "type": "inline_equation", + "content": "50\\%" + }, + { + "bbox": [ + 70, + 289, + 524, + 476 + ], + "type": "text", + "content": ") for separating the low and high-frequency bands as the spatial dimension in the latent space decreases. This result is based on a random sample from the test dataset. Similar results were obtained with other samples. It should be noted that a one-to-one comparison of similar feature maps may provide a more reliable assessment, as not all feature maps carry equally significant information and some might be irrelevant for our analysis." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 491, + 524, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 491, + 524, + 563 + ], + "spans": [ + { + "bbox": [ + 70, + 491, + 524, + 563 + ], + "type": "text", + "content": "In general, the HFS-enhanced NO contains more high-frequency component in the latent space, which can help with the propagation of high-frequency information to the output, helping with the better capture of high-frequency features. The enhancement in high-frequency component is achieved without any degradation in the low-frequency components. Therefore, both field errors such as RMSE, and the spectral errors are improved (see Appendix C)." 
+ } + ] + } + ], + "index": 2 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 291, + 771, + 303, + 781 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 771, + 303, + 781 + ], + "spans": [ + { + "bbox": [ + 291, + 771, + 303, + 781 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 88, + 77, + 164, + 162 + ], + "blocks": [ + { + "bbox": [ + 76, + 77, + 87, + 88 + ], + "lines": [ + { + "bbox": [ + 76, + 77, + 87, + 88 + ], + "spans": [ + { + "bbox": [ + 76, + 77, + 87, + 88 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 88, + 77, + 164, + 162 + ], + "lines": [ + { + "bbox": [ + 88, + 77, + 164, + 162 + ], + "spans": [ + { + "bbox": [ + 88, + 77, + 164, + 162 + ], + "type": "image", + "image_path": "bd897e21140790da8c2beed7842475c14e96f323cdd2ec392c15e182b1ce4a42.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 119, + 77, + 130, + 85 + ], + "lines": [ + { + "bbox": [ + 119, + 77, + 130, + 85 + ], + "spans": [ + { + "bbox": [ + 119, + 77, + 130, + 85 + ], + "type": "text", + "content": "NO" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 88, + 162, + 163, + 243 + ], + "blocks": [ + { + "bbox": [ + 88, + 162, + 163, + 243 + ], + "lines": [ + { + "bbox": [ + 88, + 162, + 163, + 243 + ], + "spans": [ + { + "bbox": [ + 88, + 162, + 163, + 243 + ], + "type": "image", + "image_path": "26821bc824c2e55cf6de7c68946bbee89a3839676996ae1a9d5b3a48a7ec2804.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 177, + 84, + 254, + 162 + ], + "blocks": [ + { + "bbox": [ + 199, + 77, + 229, + 84 + ], + 
"lines": [ + { + "bbox": [ + 199, + 77, + 229, + 84 + ], + "spans": [ + { + "bbox": [ + 199, + 77, + 229, + 84 + ], + "type": "text", + "content": "NO + HFS" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 177, + 84, + 254, + 162 + ], + "lines": [ + { + "bbox": [ + 177, + 84, + 254, + 162 + ], + "spans": [ + { + "bbox": [ + 177, + 84, + 254, + 162 + ], + "type": "image", + "image_path": "05fba3cc6e4ff3f30a68095ffe33390bd08dc560864db76ec9a2afb800f5746a.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 270, + 84, + 356, + 162 + ], + "blocks": [ + { + "bbox": [ + 271, + 79, + 281, + 88 + ], + "lines": [ + { + "bbox": [ + 271, + 79, + 281, + 88 + ], + "spans": [ + { + "bbox": [ + 271, + 79, + 281, + 88 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 270, + 84, + 356, + 162 + ], + "lines": [ + { + "bbox": [ + 270, + 84, + 356, + 162 + ], + "spans": [ + { + "bbox": [ + 270, + 84, + 356, + 162 + ], + "type": "image", + "image_path": "f60206ede39643fc5c044f9f4610b8380888747ad4c4e5778e0eb56aee2fce8c.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 271, + 162, + 281, + 171 + ], + "lines": [ + { + "bbox": [ + 271, + 162, + 281, + 171 + ], + "spans": [ + { + "bbox": [ + 271, + 162, + 281, + 171 + ], + "type": "text", + "content": "(d)" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 87, + 245, + 164, + 327 + ], + "blocks": [ + { + "bbox": [ + 76, + 245, + 86, + 253 + ], + "lines": [ + { + "bbox": [ + 76, + 245, + 86, + 253 + ], + "spans": [ + { + "bbox": [ + 76, + 245, + 86, + 253 + ], + "type": "text", + "content": "(e)" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 87, + 245, + 
164, + 327 + ], + "lines": [ + { + "bbox": [ + 87, + 245, + 164, + 327 + ], + "spans": [ + { + "bbox": [ + 87, + 245, + 164, + 327 + ], + "type": "image", + "image_path": "32ecc7c14abb8460e4d6c4033cf8cd415b55ec2ac589b266e28f285a8d99e10a.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 76, + 326, + 86, + 334 + ], + "lines": [ + { + "bbox": [ + 76, + 326, + 86, + 334 + ], + "spans": [ + { + "bbox": [ + 76, + 326, + 86, + 334 + ], + "type": "text", + "content": "(g)" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 176, + 166, + 254, + 243 + ], + "blocks": [ + { + "bbox": [ + 176, + 166, + 254, + 243 + ], + "lines": [ + { + "bbox": [ + 176, + 166, + 254, + 243 + ], + "spans": [ + { + "bbox": [ + 176, + 166, + 254, + 243 + ], + "type": "image", + "image_path": "675a8f9336f44185e5a4a8f988d3a39818f20adc98510f75efb9bd2996a69884.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 255, + 166, + 356, + 243 + ], + "blocks": [ + { + "bbox": [ + 255, + 166, + 356, + 243 + ], + "lines": [ + { + "bbox": [ + 255, + 166, + 356, + 243 + ], + "spans": [ + { + "bbox": [ + 255, + 166, + 356, + 243 + ], + "type": "image", + "image_path": "e7f016f46bca5309b8f0172ae1f4e4919c83884da81073d94df649bce739af4e.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 380, + 81, + 521, + 238 + ], + "blocks": [ + { + "bbox": [ + 371, + 80, + 380, + 89 + ], + "lines": [ + { + "bbox": [ + 371, + 80, + 380, + 89 + ], + "spans": [ + { + "bbox": [ + 371, + 80, + 380, + 89 + ], + "type": "text", + "content": "(i)" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 380, + 81, + 521, + 238 + ], + "lines": [ + { + "bbox": [ + 380, + 81, + 521, + 238 + ], + "spans": 
[ + { + "bbox": [ + 380, + 81, + 521, + 238 + ], + "type": "image", + "image_path": "7ad97d2813f0aaf6bea9c40b37d06895f3bb84041a8c318c716f437749e17e13.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 88, + 326, + 164, + 408 + ], + "blocks": [ + { + "bbox": [ + 88, + 326, + 164, + 408 + ], + "lines": [ + { + "bbox": [ + 88, + 326, + 164, + 408 + ], + "spans": [ + { + "bbox": [ + 88, + 326, + 164, + 408 + ], + "type": "image", + "image_path": "3c80f7d8bae04c690074b0bd039bbc2ccb94c86c53fb41a6d536ea842e012cfb.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 177, + 249, + 254, + 327 + ], + "blocks": [ + { + "bbox": [ + 177, + 249, + 254, + 327 + ], + "lines": [ + { + "bbox": [ + 177, + 249, + 254, + 327 + ], + "spans": [ + { + "bbox": [ + 177, + 249, + 254, + 327 + ], + "type": "image", + "image_path": "c56982feee0b6b0776494c62e84144cadf9d1c1c5ea6d8927a0b1794c6c97899.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 177, + 332, + 254, + 408 + ], + "blocks": [ + { + "bbox": [ + 177, + 332, + 254, + 408 + ], + "lines": [ + { + "bbox": [ + 177, + 332, + 254, + 408 + ], + "spans": [ + { + "bbox": [ + 177, + 332, + 254, + 408 + ], + "type": "image", + "image_path": "b862fe7585e9b671c2dd19d6695b0f98522b4600cbfca9788e317af5b5c1ff45.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 255, + 245, + 356, + 323 + ], + "blocks": [ + { + "bbox": [ + 271, + 245, + 280, + 254 + ], + "lines": [ + { + "bbox": [ + 271, + 245, + 280, + 254 + ], + "spans": [ + { + "bbox": [ + 271, + 245, + 280, + 254 + ], + "type": "text", + "content": "(f)" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 255, + 
245, + 356, + 323 + ], + "lines": [ + { + "bbox": [ + 255, + 245, + 356, + 323 + ], + "spans": [ + { + "bbox": [ + 255, + 245, + 356, + 323 + ], + "type": "image", + "image_path": "f761d30cc71f1613eca2ac1939256b2e144c0b60cd211db5ea28ffe6a928d863.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 255, + 326, + 356, + 411 + ], + "blocks": [ + { + "bbox": [ + 271, + 326, + 280, + 334 + ], + "lines": [ + { + "bbox": [ + 271, + 326, + 280, + 334 + ], + "spans": [ + { + "bbox": [ + 271, + 326, + 280, + 334 + ], + "type": "text", + "content": "(h)" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 255, + 326, + 356, + 411 + ], + "lines": [ + { + "bbox": [ + 255, + 326, + 356, + 411 + ], + "spans": [ + { + "bbox": [ + 255, + 326, + 356, + 411 + ], + "type": "image", + "image_path": "5f217779f4ebeb8edefda5e739dab23bb283949bec47b55a0e34af427e3e5413.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 68, + 419, + 524, + 528 + ], + "lines": [ + { + "bbox": [ + 68, + 419, + 524, + 528 + ], + "spans": [ + { + "bbox": [ + 68, + 419, + 524, + 528 + ], + "type": "text", + "content": "Figure 5: Latent space features in HFS-enhanced NO. (a, b) Example of latent space feature in the first layer of encoder and the corresponding normalized energy spectra " + }, + { + "bbox": [ + 68, + 419, + 524, + 528 + ], + "type": "inline_equation", + "content": "(p(k))" + }, + { + "bbox": [ + 68, + 419, + 524, + 528 + ], + "type": "text", + "content": " in the " + }, + { + "bbox": [ + 68, + 419, + 524, + 528 + ], + "type": "inline_equation", + "content": "\\sim 3.5" + }, + { + "bbox": [ + 68, + 419, + 524, + 528 + ], + "type": "text", + "content": " million parameter models. 
(c, d) Example of latent feature in the last layer of decoder and the corresponding normalized energy spectra for the model with " + }, + { + "bbox": [ + 68, + 419, + 524, + 528 + ], + "type": "inline_equation", + "content": "\\sim 3.5" + }, + { + "bbox": [ + 68, + 419, + 524, + 528 + ], + "type": "text", + "content": " million parameters. (e) Example of latent feature in the first layer of encoder and the corresponding normalized energy spectra in the " + }, + { + "bbox": [ + 68, + 419, + 524, + 528 + ], + "type": "inline_equation", + "content": "\\sim 16" + }, + { + "bbox": [ + 68, + 419, + 524, + 528 + ], + "type": "text", + "content": " million parameter models. (g) Example of latent feature in the last layer of decoder and the corresponding normalized energy spectra in the " + }, + { + "bbox": [ + 68, + 419, + 524, + 528 + ], + "type": "inline_equation", + "content": "\\sim 16" + }, + { + "bbox": [ + 68, + 419, + 524, + 528 + ], + "type": "text", + "content": " million parameter models. (i-j) Average ratio of high-frequency energy to total energy at each layer in encoder (i) and decoder (j). 
Note that the low-frequency cutoff is set to the first " + }, + { + "bbox": [ + 68, + 419, + 524, + 528 + ], + "type": "inline_equation", + "content": "12.5\\%" + }, + { + "bbox": [ + 68, + 419, + 524, + 528 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 68, + 419, + 524, + 528 + ], + "type": "inline_equation", + "content": "18.75\\%" + }, + { + "bbox": [ + 68, + 419, + 524, + 528 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 68, + 419, + 524, + 528 + ], + "type": "inline_equation", + "content": "25\\%" + }, + { + "bbox": [ + 68, + 419, + 524, + 528 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 68, + 419, + 524, + 528 + ], + "type": "inline_equation", + "content": "37.5\\%" + }, + { + "bbox": [ + 68, + 419, + 524, + 528 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 68, + 419, + 524, + 528 + ], + "type": "inline_equation", + "content": "50\\%" + }, + { + "bbox": [ + 68, + 419, + 524, + 528 + ], + "type": "text", + "content": " of the wavenumbers, from highest to lowest spatial resolutions (384 to 24 pixels), respectively" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_caption" + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 380, + 240, + 521, + 406 + ], + "blocks": [ + { + "bbox": [ + 371, + 239, + 380, + 248 + ], + "lines": [ + { + "bbox": [ + 371, + 239, + 380, + 248 + ], + "spans": [ + { + "bbox": [ + 371, + 239, + 380, + 248 + ], + "type": "text", + "content": "(i)" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 380, + 240, + 521, + 406 + ], + "lines": [ + { + "bbox": [ + 380, + 240, + 521, + 406 + ], + "spans": [ + { + "bbox": [ + 380, + 240, + 521, + 406 + ], + "type": "image", + "image_path": "8d4ce1826ae5537dfd8426399bd05bca2a0010646e57a820e373e7035e1a5285.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + } + ], + "index": 24 + }, + { + "bbox": [ + 68, + 546, + 524, + 703 + ], + 
"type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 546, + 524, + 703 + ], + "spans": [ + { + "bbox": [ + 68, + 546, + 524, + 703 + ], + "type": "text", + "content": "Given the advantage of HFS in the mitigation of spectral bias towards low-frequency components, it is natural to calculate the prediction errors at different wavenumbers. Following the terminology proposed in [48], we divided the frequencies to three components including only low-frequency component (low " + }, + { + "bbox": [ + 68, + 546, + 524, + 703 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 68, + 546, + 524, + 703 + ], + "type": "text", + "content": "), mid-frequency component (mid " + }, + { + "bbox": [ + 68, + 546, + 524, + 703 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 68, + 546, + 524, + 703 + ], + "type": "text", + "content": "), and high-frequency component (high " + }, + { + "bbox": [ + 68, + 546, + 524, + 703 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 68, + 546, + 524, + 703 + ], + "type": "text", + "content": "). For all the NOs with varying number of parameters, the errors in the mid " + }, + { + "bbox": [ + 68, + 546, + 524, + 703 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 68, + 546, + 524, + 703 + ], + "type": "text", + "content": " and high " + }, + { + "bbox": [ + 68, + 546, + 524, + 703 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 68, + 546, + 524, + 703 + ], + "type": "text", + "content": " components are always lower for HFS-enhanced NO. 
The RMSE for the low " + }, + { + "bbox": [ + 68, + 546, + 524, + 703 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 68, + 546, + 524, + 703 + ], + "type": "text", + "content": " component is lower for HFS-enhanced NO with one exception in the NO with " + }, + { + "bbox": [ + 68, + 546, + 524, + 703 + ], + "type": "inline_equation", + "content": "\\sim 3.5" + }, + { + "bbox": [ + 68, + 546, + 524, + 703 + ], + "type": "text", + "content": " million parameters. We attribute this to the larger enhancement observed in mid " + }, + { + "bbox": [ + 68, + 546, + 524, + 703 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 68, + 546, + 524, + 703 + ], + "type": "text", + "content": " and high " + }, + { + "bbox": [ + 68, + 546, + 524, + 703 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 68, + 546, + 524, + 703 + ], + "type": "text", + "content": " of the 3.5 million parameter HFS-enhanced NO, causing the operator showing larger error in the low " + }, + { + "bbox": [ + 68, + 546, + 524, + 703 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 68, + 546, + 524, + 703 + ], + "type": "text", + "content": " as it fails to reduce the errors in all three components simultaneously. Visualization of each frequency component and the average spectral errors in each component are shown in Fig. 6." 
+ } + ] + } + ], + "index": 26 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 290, + 771, + 304, + 781 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 771, + 304, + 781 + ], + "spans": [ + { + "bbox": [ + 290, + 771, + 304, + 781 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 78, + 79, + 524, + 441 + ], + "blocks": [ + { + "bbox": [ + 78, + 79, + 524, + 441 + ], + "lines": [ + { + "bbox": [ + 78, + 79, + 524, + 441 + ], + "spans": [ + { + "bbox": [ + 78, + 79, + 524, + 441 + ], + "type": "image", + "image_path": "62f709bfa82bfda8d899c4456f8d18cb4aaae24a3a3354dc86d148ef45498e67.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 68, + 449, + 525, + 510 + ], + "lines": [ + { + "bbox": [ + 68, + 449, + 525, + 510 + ], + "spans": [ + { + "bbox": [ + 68, + 449, + 525, + 510 + ], + "type": "text", + "content": "Figure 6: Impact of HFS on spectral errors at different frequency bands (a-b) Examples showing the input, low, mid, and high-frequency contents of the input. (c-e) Spectral error " + }, + { + "bbox": [ + 68, + 449, + 525, + 510 + ], + "type": "inline_equation", + "content": "(F" + }, + { + "bbox": [ + 68, + 449, + 525, + 510 + ], + "type": "text", + "content": ". Error) of low, mid, and high-frequency bands over the test dataset. For these results, the low-frequency cutoff is set to the first " + }, + { + "bbox": [ + 68, + 449, + 525, + 510 + ], + "type": "inline_equation", + "content": "2\\%" + }, + { + "bbox": [ + 68, + 449, + 525, + 510 + ], + "type": "text", + "content": " of the frequencies. 
The mid frequency band includes the first " + }, + { + "bbox": [ + 68, + 449, + 525, + 510 + ], + "type": "inline_equation", + "content": "6.2\\%" + }, + { + "bbox": [ + 68, + 449, + 525, + 510 + ], + "type": "text", + "content": " of the frequencies excluding the first " + }, + { + "bbox": [ + 68, + 449, + 525, + 510 + ], + "type": "inline_equation", + "content": "2\\%" + }, + { + "bbox": [ + 68, + 449, + 525, + 510 + ], + "type": "text", + "content": ". The high-frequency band includes the last " + }, + { + "bbox": [ + 68, + 449, + 525, + 510 + ], + "type": "inline_equation", + "content": "93.8\\%" + }, + { + "bbox": [ + 68, + 449, + 525, + 510 + ], + "type": "text", + "content": " of the frequencies." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 525, + 208, + 539 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 525, + 208, + 539 + ], + "spans": [ + { + "bbox": [ + 69, + 525, + 208, + 539 + ], + "type": "text", + "content": "3.3. HFS parameter history" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 540, + 525, + 772 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 540, + 525, + 772 + ], + "spans": [ + { + "bbox": [ + 68, + 540, + 525, + 772 + ], + "type": "text", + "content": "The DC and HFC of the signals are scaled using two learnable parameters, " + }, + { + "bbox": [ + 68, + 540, + 525, + 772 + ], + "type": "inline_equation", + "content": "\\lambda_{DC} \\in \\mathbb{R}^{1 \\times 1 \\times C}" + }, + { + "bbox": [ + 68, + 540, + 525, + 772 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 68, + 540, + 525, + 772 + ], + "type": "inline_equation", + "content": "\\lambda_{HFC} \\in \\mathbb{R}^{1 \\times 1 \\times C}" + }, + { + "bbox": [ + 68, + 540, + 525, + 772 + ], + "type": "text", + "content": ". 
These parameters remain consistent across all patches in each latent space feature map, and also across all batches of the dataset. Therefore, the parameters are optimized based on all the samples in the training dataset. However, they are allowed to vary freely across the feature channels at each layer. This design enables the model to adaptively scale each channel based on its content. For instance, a feature channel with a larger high-frequency component can be scaled differently than a smoother feature channel. This flexibility enhances the effectiveness of HFS while minimizing the computational costs and reducing the optimization burden by maintaining fixed parameters across patches and samples. To better understand the learning process of " + }, + { + "bbox": [ + 68, + 540, + 525, + 772 + ], + "type": "inline_equation", + "content": "\\lambda_{DC}" + }, + { + "bbox": [ + 68, + 540, + 525, + 772 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 68, + 540, + 525, + 772 + ], + "type": "inline_equation", + "content": "\\lambda_{HFC}" + }, + { + "bbox": [ + 68, + 540, + 525, + 772 + ], + "type": "text", + "content": ", the histories of these parameters during the training phase in each of the encoder and decoder layers are shown in Fig. 7. The results in Fig. 7 show the mean " + }, + { + "bbox": [ + 68, + 540, + 525, + 772 + ], + "type": "inline_equation", + "content": "\\lambda_{DC}" + }, + { + "bbox": [ + 68, + 540, + 525, + 772 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 68, + 540, + 525, + 772 + ], + "type": "inline_equation", + "content": "\\lambda_{HFC}" + }, + { + "bbox": [ + 68, + 540, + 525, + 772 + ], + "type": "text", + "content": " across all latent features at each layer. 
The mean " + }, + { + "bbox": [ + 68, + 540, + 525, + 772 + ], + "type": "inline_equation", + "content": "\\lambda_{HFC}" + }, + { + "bbox": [ + 68, + 540, + 525, + 772 + ], + "type": "text", + "content": " is always larger than the mean " + }, + { + "bbox": [ + 68, + 540, + 525, + 772 + ], + "type": "inline_equation", + "content": "\\lambda_{DC}" + }, + { + "bbox": [ + 68, + 540, + 525, + 772 + ], + "type": "text", + "content": ", demonstrating that the model is learning to scale HFC with larger weights, enhancing the representation of the HFC. Also, the optimized mean " + }, + { + "bbox": [ + 68, + 540, + 525, + 772 + ], + "type": "inline_equation", + "content": "\\lambda_{HFC}" + }, + { + "bbox": [ + 68, + 540, + 525, + 772 + ], + "type": "text", + "content": " is higher in the deeper layers of the encoder. However, no such behavior is observed in the decoder. Another interesting observation is that the abrupt change in the slope of the " + }, + { + "bbox": [ + 68, + 540, + 525, + 772 + ], + "type": "inline_equation", + "content": "\\lambda_{DC}" + }, + { + "bbox": [ + 68, + 540, + 525, + 772 + ], + "type": "text", + "content": " history curves" + } + ] + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 291, + 771, + 304, + 782 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 771, + 304, + 782 + ], + "spans": [ + { + "bbox": [ + 291, + 771, + 304, + 782 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 525, + 132 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 525, + 132 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 525, + 132 + ], + "type": "text", + "content": "(" + }, + { + "bbox": [ + 67, + 72, + 525, + 132 + ], + "type": "inline_equation", + "content": "\\sim" + }, + { + "bbox": [ + 67, + 72, + 525, + 132 + ], + "type": "text", + 
"content": " iteration " + }, + { + "bbox": [ + 67, + 72, + 525, + 132 + ], + "type": "inline_equation", + "content": "160 \\times 10^{3}" + }, + { + "bbox": [ + 67, + 72, + 525, + 132 + ], + "type": "text", + "content": ") aligns well with the iteration when overfitting starts. After this iteration, the error over training dataset keeps decreasing but the error over validation dataset increases, leading to larger generalization gap. The dashed lines in Fig. 7 specify the iteration at which the validation dataset error is minimum." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 144, + 526, + 233 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 144, + 526, + 233 + ], + "spans": [ + { + "bbox": [ + 67, + 144, + 526, + 233 + ], + "type": "text", + "content": "It should be noted that " + }, + { + "bbox": [ + 67, + 144, + 526, + 233 + ], + "type": "inline_equation", + "content": "\\lambda_{DC}" + }, + { + "bbox": [ + 67, + 144, + 526, + 233 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 144, + 526, + 233 + ], + "type": "inline_equation", + "content": "\\lambda_{HFC}" + }, + { + "bbox": [ + 67, + 144, + 526, + 233 + ], + "type": "text", + "content": " are both free of any constraints and are automatically learned during the model optimization. However, comparing the final values of these parameters align well with the heuristic viewpoint proposed in our work. The larger values of " + }, + { + "bbox": [ + 67, + 144, + 526, + 233 + ], + "type": "inline_equation", + "content": "\\lambda_{HFC}" + }, + { + "bbox": [ + 67, + 144, + 526, + 233 + ], + "type": "text", + "content": " imply that the HFC of the signals are better preserved and propagated through layers with HFS. This could explain why the HFS-enhanced NO results resolve high-frequency features better, and why the spectral bias of the NO is mitigated." 
+ } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 74, + 248, + 297, + 412 + ], + "blocks": [ + { + "bbox": [ + 74, + 248, + 297, + 412 + ], + "lines": [ + { + "bbox": [ + 74, + 248, + 297, + 412 + ], + "spans": [ + { + "bbox": [ + 74, + 248, + 297, + 412 + ], + "type": "image", + "image_path": "836bc2f95d5c77926d61af7c14d4b1113300a5ed667f5ba49106606f9e5c07d1.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 301, + 248, + 521, + 412 + ], + "blocks": [ + { + "bbox": [ + 301, + 248, + 521, + 412 + ], + "lines": [ + { + "bbox": [ + 301, + 248, + 521, + 412 + ], + "spans": [ + { + "bbox": [ + 301, + 248, + 521, + 412 + ], + "type": "image", + "image_path": "dda5aca73cc52ff6e026a688772971bb0db28707f5dbb8771a941a8018016d77.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 74, + 414, + 296, + 587 + ], + "blocks": [ + { + "bbox": [ + 74, + 414, + 296, + 587 + ], + "lines": [ + { + "bbox": [ + 74, + 414, + 296, + 587 + ], + "spans": [ + { + "bbox": [ + 74, + 414, + 296, + 587 + ], + "type": "image", + "image_path": "eac42b74e203c3984876f446ef93ec5dd5fca48a451c96c024fa9dd7291c3fc6.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 596, + 525, + 681 + ], + "lines": [ + { + "bbox": [ + 67, + 596, + 525, + 681 + ], + "spans": [ + { + "bbox": [ + 67, + 596, + 525, + 681 + ], + "type": "text", + "content": "Figure 7: " + }, + { + "bbox": [ + 67, + 596, + 525, + 681 + ], + "type": "inline_equation", + "content": "\\lambda_{DC}" + }, + { + "bbox": [ + 67, + 596, + 525, + 681 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 596, + 525, + 681 + ], + "type": "inline_equation", + "content": "\\lambda_{HFC}" + }, + { + "bbox": [ + 67, + 596, + 525, + 681 + ], + "type": "text", + "content": " histories during training 
phase of the HFS-enhanced NO. (a, b) " + }, + { + "bbox": [ + 67, + 596, + 525, + 681 + ], + "type": "inline_equation", + "content": "\\lambda_{DC}" + }, + { + "bbox": [ + 67, + 596, + 525, + 681 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 596, + 525, + 681 + ], + "type": "inline_equation", + "content": "\\lambda_{HFC}" + }, + { + "bbox": [ + 67, + 596, + 525, + 681 + ], + "type": "text", + "content": " training history in all 5 layers of encoder. Note that layer 1 and layer 5 are defined as layers at highest and lowest spatial resolution, respectively, in the encoder. (c, d) " + }, + { + "bbox": [ + 67, + 596, + 525, + 681 + ], + "type": "inline_equation", + "content": "\\lambda_{DC}" + }, + { + "bbox": [ + 67, + 596, + 525, + 681 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 596, + 525, + 681 + ], + "type": "inline_equation", + "content": "\\lambda_{HFC}" + }, + { + "bbox": [ + 67, + 596, + 525, + 681 + ], + "type": "text", + "content": " training history in all 5 layers of decoder. Note that layer 1 and layer 5 are defined as layers at lowest and highest spatial resolution, respectively, in the decoder, which is the opposite terminology used in encoder. The dashed lines specify the iteration from which overfitting on the training dataset starts. 
The results are based on the training of a model with " + }, + { + "bbox": [ + 67, + 596, + 525, + 681 + ], + "type": "inline_equation", + "content": "\\sim 1.7" + }, + { + "bbox": [ + 67, + 596, + 525, + 681 + ], + "type": "text", + "content": " million parameters and " + }, + { + "bbox": [ + 67, + 596, + 525, + 681 + ], + "type": "inline_equation", + "content": "\\lambda_{DC}" + }, + { + "bbox": [ + 67, + 596, + 525, + 681 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 596, + 525, + 681 + ], + "type": "inline_equation", + "content": "\\lambda_{HFC}" + }, + { + "bbox": [ + 67, + 596, + 525, + 681 + ], + "type": "text", + "content": " were initialized at 0.85 and 1.15, respectively." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 301, + 414, + 522, + 586 + ], + "blocks": [ + { + "bbox": [ + 301, + 414, + 522, + 586 + ], + "lines": [ + { + "bbox": [ + 301, + 414, + 522, + 586 + ], + "spans": [ + { + "bbox": [ + 301, + 414, + 522, + 586 + ], + "type": "image", + "image_path": "1a0ef49c31cd391865df11f05db9cb02c6029f97bb662e0074c4d4064dacdf5c.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 698, + 180, + 713 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 698, + 180, + 713 + ], + "spans": [ + { + "bbox": [ + 69, + 698, + 180, + 713 + ], + "type": "text", + "content": "3.4. Kolmogorov flow" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 714, + 525, + 772 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 714, + 525, + 772 + ], + "spans": [ + { + "bbox": [ + 67, + 714, + 525, + 772 + ], + "type": "text", + "content": "To evaluate the effectiveness of HFS in mitigating spectral bias in a more chaotic system, we applied it on the prediction of a standard benchmark, namely the 2D Kolmogorov flow problem. 
This problem is governed by the unsteady and incompressible Navier-Stokes equations for a viscous fluid subject to a forcing term. The vorticity form of the problem is defined in" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 290, + 772, + 304, + 782 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 772, + 304, + 782 + ], + "spans": [ + { + "bbox": [ + 290, + 772, + 304, + 782 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 70, + 72, + 526, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 72, + 526, + 348 + ], + "spans": [ + { + "bbox": [ + 70, + 72, + 526, + 348 + ], + "type": "text", + "content": "Appendix H. We generated the dataset [63] using a publicly available pseudo-spectral solver [6]. The dataset consisted of 1000 samples with " + }, + { + "bbox": [ + 70, + 72, + 526, + 348 + ], + "type": "inline_equation", + "content": "80\\%" + }, + { + "bbox": [ + 70, + 72, + 526, + 348 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 70, + 72, + 526, + 348 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 70, + 72, + 526, + 348 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 70, + 72, + 526, + 348 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 70, + 72, + 526, + 348 + ], + "type": "text", + "content": " of them being used for training, validation, and testing respectively. 
We trained the NO with and without HFS to learn the mapping " + }, + { + "bbox": [ + 70, + 72, + 526, + 348 + ], + "type": "inline_equation", + "content": "\\omega(x,y,t)\\big|_{t\\in[0,10]} \\to \\omega(x,y,t)\\big|_{t\\in[10,t_{final}]}" + }, + { + "bbox": [ + 70, + 72, + 526, + 348 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 70, + 72, + 526, + 348 + ], + "type": "inline_equation", + "content": "\\omega" + }, + { + "bbox": [ + 70, + 72, + 526, + 348 + ], + "type": "text", + "content": " is the vorticity. Here, we used " + }, + { + "bbox": [ + 70, + 72, + 526, + 348 + ], + "type": "inline_equation", + "content": "t_{final} = 12.5" + }, + { + "bbox": [ + 70, + 72, + 526, + 348 + ], + "type": "text", + "content": " s, and a NO with " + }, + { + "bbox": [ + 70, + 72, + 526, + 348 + ], + "type": "inline_equation", + "content": "\\sim 1.7" + }, + { + "bbox": [ + 70, + 72, + 526, + 348 + ], + "type": "text", + "content": " million parameters as the benchmark. We optimized the hyperparameters based on the NO performance without HFS and then used the same hyperparameters for training the NO with HFS. This ensured that any improvement achieved with HFS was solely attributed to its effect and not simply due to differences in optimization strategies or hyperparameters. Although not specifically designed for turbulent problems, the HFS-enhanced NO demonstrated improvements over the NO for the 2D Kolmogorov problem, reducing the relative error from " + }, + { + "bbox": [ + 70, + 72, + 526, + 348 + ], + "type": "inline_equation", + "content": "5.3\\%" + }, + { + "bbox": [ + 70, + 72, + 526, + 348 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 70, + 72, + 526, + 348 + ], + "type": "inline_equation", + "content": "4.7\\%" + }, + { + "bbox": [ + 70, + 72, + 526, + 348 + ], + "type": "text", + "content": ". 
Comparison of the energy spectra of the HFS-enhanced NO predictions also demonstrated better alignment with the ground truth solutions at high wavenumbers. The prediction results for snapshots chosen through random sampling from the test dataset are shown in Fig. 8. High-frequency features are more accurately captured and the energy spectra alignment at high wavenumbers is enhanced with the HFS-enhanced NO. We should acknowledge that HFS was effective for this problem only when the NO already provided reasonably accurate predictions. If the NO produced extremely over-smoothed predictions, integrating HFS offered little to no improvement. More detailed results showing the temporal predictions are shown in Appendix H." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 361, + 526, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 361, + 526, + 491 + ], + "spans": [ + { + "bbox": [ + 70, + 361, + 526, + 491 + ], + "type": "text", + "content": "The improvements in predicting Komogorov flow are less pronounced compared to the two-phase flow problem. This is due to the different underlying structures of the solution maps. The HFS approach operates by decomposing the feature maps into low-frequency and high-frequency components through observing the patches as different signals. This approach is most effective for the data with localized features, making the DC and HFC of the signals significantly different. For example, this is true for the subcooled pool boiling dataset with localized features at the bubble interface and condensation trails. For the data with similar features across all regions, the distinction between DC and HFC diminishes, thus reducing the impact of HFS." 
+ } + ] + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 291, + 771, + 303, + 781 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 771, + 303, + 781 + ], + "spans": [ + { + "bbox": [ + 291, + 771, + 303, + 781 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 77, + 74, + 522, + 518 + ], + "blocks": [ + { + "bbox": [ + 77, + 74, + 522, + 518 + ], + "lines": [ + { + "bbox": [ + 77, + 74, + 522, + 518 + ], + "spans": [ + { + "bbox": [ + 77, + 74, + 522, + 518 + ], + "type": "image", + "image_path": "90f68fd7a6962a77f49e5e72d7250151fc08ad8098feb7692cd4ae7c3d8d8efd.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 68, + 526, + 525, + 565 + ], + "lines": [ + { + "bbox": [ + 68, + 526, + 525, + 565 + ], + "spans": [ + { + "bbox": [ + 68, + 526, + 525, + 565 + ], + "type": "text", + "content": "Figure 8: HFS-enhanced Kolmogorov flow predictions. (a-c) denote different samples chosen randomly from the test dataset. Each example shows the ground truth (GT), NO and HFS-enhanced NO predictions along with the energy spectra " + }, + { + "bbox": [ + 68, + 526, + 525, + 565 + ], + "type": "inline_equation", + "content": "(p(k))" + }, + { + "bbox": [ + 68, + 526, + 525, + 565 + ], + "type": "text", + "content": " for each prediction." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 581, + 212, + 595 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 581, + 212, + 595 + ], + "spans": [ + { + "bbox": [ + 69, + 581, + 212, + 595 + ], + "type": "text", + "content": "3.5. 
Diffusion Model Results" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 598, + 525, + 743 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 598, + 525, + 743 + ], + "spans": [ + { + "bbox": [ + 68, + 598, + 525, + 743 + ], + "type": "text", + "content": "We investigated further mitigation of spectral bias using the score-based diffusion model (DM) with HFS-enhanced NO predictions as the prior. Specifically, we first conducted a systematic study to investigate the effect of NO prediction accuracy, obtained by varying the number of parameters in the NO, on the diffusion model performance. Second, we demonstrated that using HFS-enhanced NO can further help the diffusion model to match the correct energy spectra of the solutions without degrading the mean prediction errors. Since the NO predictions are used as priors to diffusion model, the accuracy of diffusion model predictions is strongly influenced by the reliability of these priors. For example, if the prior information is significantly erroneous or over-smoothed, then the diffusion model struggles to accurately recover the missing frequencies without compromising the accuracy of the mean predictions." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 86, + 756, + 524, + 771 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 756, + 524, + 771 + ], + "spans": [ + { + "bbox": [ + 86, + 756, + 524, + 771 + ], + "type": "text", + "content": "Fig. 
9 shows the subcooled pool boiling prediction results of DM conditioned on NO and" + } + ] + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 291, + 771, + 304, + 782 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 771, + 304, + 782 + ], + "spans": [ + { + "bbox": [ + 291, + 771, + 304, + 782 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 70, + 72, + 526, + 333 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 72, + 526, + 333 + ], + "spans": [ + { + "bbox": [ + 70, + 72, + 526, + 333 + ], + "type": "text", + "content": "HFS-enhanced NO predictions. Other prediction examples with DM integrated with NO and HFS-enhanced NO are visualized in Appendix F. When the NO predictions have significant errors, the DM can barely mitigate those errors. However, when HFS is integrated with the NO, the significant errors at large structures are reduced, and high-frequency components of the solutions are captured more accurately compared to " + }, + { + "bbox": [ + 70, + 72, + 526, + 333 + ], + "type": "inline_equation", + "content": "\\mathrm{NO} + \\mathrm{DM}" + }, + { + "bbox": [ + 70, + 72, + 526, + 333 + ], + "type": "text", + "content": " predictions. In addition, when the DM is integrated with the HFS-enhanced NO predictions, the DM is able to more accurately reconstruct intricate features that are already enhanced through more accurate predictions provided by HFS-enhanced NO. Therefore, less over-smoothing is observed in the " + }, + { + "bbox": [ + 70, + 72, + 526, + 333 + ], + "type": "inline_equation", + "content": "\\mathrm{NO} + \\mathrm{HFS} + \\mathrm{DM}" + }, + { + "bbox": [ + 70, + 72, + 526, + 333 + ], + "type": "text", + "content": " predictions and spectral bias is further reduced. 
It can be seen that both HFS and DM are helping with the capture of high-frequency features. DM cannot fix significant errors caused by NO predictions at large scale features (e.g., bubble interfaces). However, HFS reduces the errors around large scale features in addition to enhancing the smaller scale features. When DM is integrated with HFS-enhanced NO, it further enhances the small scale features. The quantitative metrics are shown in Fig. 10. It should be noted that the models are trained with a different set of hyperparameters for the results shown in Fig. 10 compared to the previous results (Fig. 3). However, HFS enhanced the prediction results of NO, irrespective of hyperparameters (either optimal or non-optimal hyperparameters), as long as the same hyperparameters are used for training both NO and HFS-enhanced NO." + } + ] + } + ], + "index": 0 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 290, + 771, + 304, + 782 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 771, + 304, + 782 + ], + "spans": [ + { + "bbox": [ + 290, + 771, + 304, + 782 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 88, + 75, + 165, + 162 + ], + "blocks": [ + { + "bbox": [ + 76, + 75, + 88, + 86 + ], + "lines": [ + { + "bbox": [ + 76, + 75, + 88, + 86 + ], + "spans": [ + { + "bbox": [ + 76, + 75, + 88, + 86 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 88, + 75, + 165, + 162 + ], + "lines": [ + { + "bbox": [ + 88, + 75, + 165, + 162 + ], + "spans": [ + { + "bbox": [ + 88, + 75, + 165, + 162 + ], + "type": "image", + "image_path": "f77d7cde018d6c74bb0d627654ab043227cdb2f775c121acb59c5727d34539a7.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": 
"image", + "bbox": [ + 170, + 75, + 249, + 162 + ], + "blocks": [ + { + "bbox": [ + 170, + 75, + 249, + 162 + ], + "lines": [ + { + "bbox": [ + 170, + 75, + 249, + 162 + ], + "spans": [ + { + "bbox": [ + 170, + 75, + 249, + 162 + ], + "type": "image", + "image_path": "14d2c43c9f26237111d97d362572890aad6cd1ac9400cb8eb6973553a8a9c8d1.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 254, + 75, + 332, + 162 + ], + "blocks": [ + { + "bbox": [ + 254, + 75, + 332, + 162 + ], + "lines": [ + { + "bbox": [ + 254, + 75, + 332, + 162 + ], + "spans": [ + { + "bbox": [ + 254, + 75, + 332, + 162 + ], + "type": "image", + "image_path": "824288cda52c2891f4d10fcc048556f5025e12a9a08916a114c2879b54a5a4e0.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 337, + 75, + 416, + 162 + ], + "blocks": [ + { + "bbox": [ + 337, + 75, + 416, + 162 + ], + "lines": [ + { + "bbox": [ + 337, + 75, + 416, + 162 + ], + "spans": [ + { + "bbox": [ + 337, + 75, + 416, + 162 + ], + "type": "image", + "image_path": "d434f1115c71df8fc03f6ff24ee680bd8baf845b45ea59fc5cacb1faa46bfbf1.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 421, + 75, + 514, + 162 + ], + "blocks": [ + { + "bbox": [ + 421, + 75, + 514, + 162 + ], + "lines": [ + { + "bbox": [ + 421, + 75, + 514, + 162 + ], + "spans": [ + { + "bbox": [ + 421, + 75, + 514, + 162 + ], + "type": "image", + "image_path": "cfcb1f30c856f0902c80a6ca75eb09c6526ab9c4fc6d870998c725e3266546de.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 87, + 164, + 165, + 274 + ], + "blocks": [ + { + "bbox": [ + 76, + 159, + 87, + 169 + ], + "lines": [ + { + "bbox": [ + 76, + 159, + 87, + 169 + ], + "spans": [ + { + "bbox": [ + 76, + 
159, + 87, + 169 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 87, + 164, + 165, + 274 + ], + "lines": [ + { + "bbox": [ + 87, + 164, + 165, + 274 + ], + "spans": [ + { + "bbox": [ + 87, + 164, + 165, + 274 + ], + "type": "image", + "image_path": "4b5053f22f5d1045d4bc1f94ad7a3d9e743281a60a70f0fe826b8afd6b118513.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 171, + 163, + 248, + 274 + ], + "blocks": [ + { + "bbox": [ + 171, + 163, + 248, + 274 + ], + "lines": [ + { + "bbox": [ + 171, + 163, + 248, + 274 + ], + "spans": [ + { + "bbox": [ + 171, + 163, + 248, + 274 + ], + "type": "image", + "image_path": "4077f969a3d5c28f84a171a7ac2a8f647114a1140ff1cab20e790c3e6be946c6.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 254, + 163, + 332, + 274 + ], + "blocks": [ + { + "bbox": [ + 254, + 163, + 332, + 274 + ], + "lines": [ + { + "bbox": [ + 254, + 163, + 332, + 274 + ], + "spans": [ + { + "bbox": [ + 254, + 163, + 332, + 274 + ], + "type": "image", + "image_path": "bfd8e918449ad4e2c5b9c9505fa08483327fc17c132d390ea6522a9ebfd8e7c7.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 337, + 163, + 415, + 274 + ], + "blocks": [ + { + "bbox": [ + 337, + 163, + 415, + 274 + ], + "lines": [ + { + "bbox": [ + 337, + 163, + 415, + 274 + ], + "spans": [ + { + "bbox": [ + 337, + 163, + 415, + 274 + ], + "type": "image", + "image_path": "2e662c6924e4dc916926501fd33cf6fb7658ebfdf77a643b1876c42b62c9c1aa.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 421, + 164, + 499, + 274 + ], + "blocks": [ + { + "bbox": [ + 421, + 164, + 499, + 274 + ], + 
"lines": [ + { + "bbox": [ + 421, + 164, + 499, + 274 + ], + "spans": [ + { + "bbox": [ + 421, + 164, + 499, + 274 + ], + "type": "image", + "image_path": "e9182f9b9d24aea5d10f9e8d93f5432f03b021167555925e5f3ae7c3a8199e16.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 88, + 286, + 165, + 364 + ], + "blocks": [ + { + "bbox": [ + 76, + 283, + 87, + 293 + ], + "lines": [ + { + "bbox": [ + 76, + 283, + 87, + 293 + ], + "spans": [ + { + "bbox": [ + 76, + 283, + 87, + 293 + ], + "type": "text", + "content": "(c)" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 88, + 286, + 165, + 364 + ], + "lines": [ + { + "bbox": [ + 88, + 286, + 165, + 364 + ], + "spans": [ + { + "bbox": [ + 88, + 286, + 165, + 364 + ], + "type": "image", + "image_path": "b5e173640877cb0cef234d0b4ae167124b4fc270eef3d2f99ffc99eb772ed691.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 76, + 359, + 87, + 369 + ], + "lines": [ + { + "bbox": [ + 76, + 359, + 87, + 369 + ], + "spans": [ + { + "bbox": [ + 76, + 359, + 87, + 369 + ], + "type": "text", + "content": "(d)" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 172, + 287, + 248, + 364 + ], + "blocks": [ + { + "bbox": [ + 172, + 287, + 248, + 364 + ], + "lines": [ + { + "bbox": [ + 172, + 287, + 248, + 364 + ], + "spans": [ + { + "bbox": [ + 172, + 287, + 248, + 364 + ], + "type": "image", + "image_path": "9dd2b8fb0cc2df44a4b3dffb169d3d12295ff795a4bc3448676f90a768ee5d25.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 255, + 287, + 332, + 364 + ], + "blocks": [ + { + "bbox": [ + 255, + 287, + 332, + 364 + ], + "lines": [ + { + "bbox": [ + 255, + 287, + 332, + 364 + ], + "spans": [ + { + "bbox": 
[ + 255, + 287, + 332, + 364 + ], + "type": "image", + "image_path": "21d6dea1825b413f89957217e3a9148548072fa1907281353e79782273e0fc08.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 338, + 287, + 414, + 364 + ], + "blocks": [ + { + "bbox": [ + 338, + 287, + 414, + 364 + ], + "lines": [ + { + "bbox": [ + 338, + 287, + 414, + 364 + ], + "spans": [ + { + "bbox": [ + 338, + 287, + 414, + 364 + ], + "type": "image", + "image_path": "5724b436a86300be2784fa68ea7aec5bd1eb7e60f905356e9a61d6ce2dea4070.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 422, + 286, + 514, + 364 + ], + "blocks": [ + { + "bbox": [ + 422, + 286, + 514, + 364 + ], + "lines": [ + { + "bbox": [ + 422, + 286, + 514, + 364 + ], + "spans": [ + { + "bbox": [ + 422, + 286, + 514, + 364 + ], + "type": "image", + "image_path": "7acbf978c26e0e5745e06313089bfbd821791e207c3cd601e0ff50804ab6dc4b.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 88, + 366, + 165, + 454 + ], + "blocks": [ + { + "bbox": [ + 88, + 366, + 165, + 454 + ], + "lines": [ + { + "bbox": [ + 88, + 366, + 165, + 454 + ], + "spans": [ + { + "bbox": [ + 88, + 366, + 165, + 454 + ], + "type": "image", + "image_path": "f0284ad1958098bbf58a3a6ea0491e38ff5e9e5a2a1d05a08a833dda237df2d2.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 68, + 466, + 524, + 515 + ], + "lines": [ + { + "bbox": [ + 68, + 466, + 524, + 515 + ], + "spans": [ + { + "bbox": [ + 68, + 466, + 524, + 515 + ], + "type": "text", + "content": "Figure 9: Visualization of the prediction by DM integrated with NO and HFS-enhanced NO. (a) Example showing ground truth (GT) solution and predictions by NO, NO + DM, NO + HFS, and NO + HFS + DM. 
(b) Zoomed-in visualization of (a) focusing on the high-frequency contents. (c) Predictions of another randomly selected sample. (d) Zoomed-in visualization of (c) focusing on high-frequency contents." + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 173, + 366, + 248, + 454 + ], + "blocks": [ + { + "bbox": [ + 173, + 366, + 248, + 454 + ], + "lines": [ + { + "bbox": [ + 173, + 366, + 248, + 454 + ], + "spans": [ + { + "bbox": [ + 173, + 366, + 248, + 454 + ], + "type": "image", + "image_path": "8f424c4056077457e9ba4f82ff71b2dceebe8e63582d2e9e2910553e7c5a91a2.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 255, + 366, + 332, + 454 + ], + "blocks": [ + { + "bbox": [ + 255, + 366, + 332, + 454 + ], + "lines": [ + { + "bbox": [ + 255, + 366, + 332, + 454 + ], + "spans": [ + { + "bbox": [ + 255, + 366, + 332, + 454 + ], + "type": "image", + "image_path": "ba25d5e98b4b75fd94526f7be742d92551619db9606fb8a8bb8ed6f2f35060f3.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 337, + 366, + 414, + 454 + ], + "blocks": [ + { + "bbox": [ + 337, + 366, + 414, + 454 + ], + "lines": [ + { + "bbox": [ + 337, + 366, + 414, + 454 + ], + "spans": [ + { + "bbox": [ + 337, + 366, + 414, + 454 + ], + "type": "image", + "image_path": "6acce8a74d6789197fed4cf6e66d8f7ce554c25121db815c611f22a624e7aa8c.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 421, + 366, + 498, + 454 + ], + "blocks": [ + { + "bbox": [ + 421, + 366, + 498, + 454 + ], + "lines": [ + { + "bbox": [ + 421, + 366, + 498, + 454 + ], + "spans": [ + { + "bbox": [ + 421, + 366, + 498, + 454 + ], + "type": "image", + "image_path": 
"9baf9f6b18eb233208ff948902b5fec1c3ee7677059013bbd5eea3505b509fce.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + } + ], + "index": 23 + }, + { + "bbox": [ + 86, + 531, + 460, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 531, + 460, + 545 + ], + "spans": [ + { + "bbox": [ + 86, + 531, + 460, + 545 + ], + "type": "text", + "content": "The results presented in Fig. 9 and Fig. 10 illustrate the following key points:" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 86, + 552, + 524, + 771 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 86, + 552, + 524, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 552, + 524, + 609 + ], + "spans": [ + { + "bbox": [ + 86, + 552, + 524, + 609 + ], + "type": "text", + "content": "- HFS reduces the prediction errors in both physical and spectral domains, irrespective of NO size. On average, the relative errors (e.g., RMSE) and energy spectrum errors " + }, + { + "bbox": [ + 86, + 552, + 524, + 609 + ], + "type": "inline_equation", + "content": "(\\mathcal{E}_F)" + }, + { + "bbox": [ + 86, + 552, + 524, + 609 + ], + "type": "text", + "content": " (see Appendix B) are reduced by " + }, + { + "bbox": [ + 86, + 552, + 524, + 609 + ], + "type": "inline_equation", + "content": "23.5\\%" + }, + { + "bbox": [ + 86, + 552, + 524, + 609 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 86, + 552, + 524, + 609 + ], + "type": "inline_equation", + "content": "15.2\\%" + }, + { + "bbox": [ + 86, + 552, + 524, + 609 + ], + "type": "text", + "content": ", respectively, with HFS-enhanced NOs." 
+ } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 86, + 618, + 524, + 691 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 618, + 524, + 691 + ], + "spans": [ + { + "bbox": [ + 86, + 618, + 524, + 691 + ], + "type": "text", + "content": "- Generally, DM does not change the prediction field errors (Fig. 10a). However, DM reduces the energy spectrum error, showing better energy spectra alignment with the correct solutions. On average, " + }, + { + "bbox": [ + 86, + 618, + 524, + 691 + ], + "type": "inline_equation", + "content": "\\mathrm{NO} + \\mathrm{DM}" + }, + { + "bbox": [ + 86, + 618, + 524, + 691 + ], + "type": "text", + "content": " has " + }, + { + "bbox": [ + 86, + 618, + 524, + 691 + ], + "type": "inline_equation", + "content": "27.8\\%" + }, + { + "bbox": [ + 86, + 618, + 524, + 691 + ], + "type": "text", + "content": " lower relative " + }, + { + "bbox": [ + 86, + 618, + 524, + 691 + ], + "type": "inline_equation", + "content": "\\varepsilon_{F}" + }, + { + "bbox": [ + 86, + 618, + 524, + 691 + ], + "type": "text", + "content": " compared to NO. The only exception is the NO with 16 millions parameters. 
On average, " + }, + { + "bbox": [ + 86, + 618, + 524, + 691 + ], + "type": "inline_equation", + "content": "\\mathrm{NO} + \\mathrm{HFS} + \\mathrm{DM}" + }, + { + "bbox": [ + 86, + 618, + 524, + 691 + ], + "type": "text", + "content": " has " + }, + { + "bbox": [ + 86, + 618, + 524, + 691 + ], + "type": "inline_equation", + "content": "23.2\\%" + }, + { + "bbox": [ + 86, + 618, + 524, + 691 + ], + "type": "text", + "content": " lower relative " + }, + { + "bbox": [ + 86, + 618, + 524, + 691 + ], + "type": "inline_equation", + "content": "\\varepsilon_{F}" + }, + { + "bbox": [ + 86, + 618, + 524, + 691 + ], + "type": "text", + "content": " compared to " + }, + { + "bbox": [ + 86, + 618, + 524, + 691 + ], + "type": "inline_equation", + "content": "\\mathrm{NO} + \\mathrm{HFS}" + }, + { + "bbox": [ + 86, + 618, + 524, + 691 + ], + "type": "text", + "content": " (Fig. 10b)." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 86, + 700, + 524, + 771 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 700, + 524, + 771 + ], + "spans": [ + { + "bbox": [ + 86, + 700, + 524, + 771 + ], + "type": "text", + "content": "- HFS reduces the energy spectrum errors at all different frequency bands " + }, + { + "bbox": [ + 86, + 700, + 524, + 771 + ], + "type": "inline_equation", + "content": "(\\mathcal{E}_{F_{\\mathrm{low}}}, \\mathcal{E}_{F_{\\mathrm{mid}}}," + }, + { + "bbox": [ + 86, + 700, + 524, + 771 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 86, + 700, + 524, + 771 + ], + "type": "inline_equation", + "content": "\\mathcal{E}_{F_{\\mathrm{high}}})" + }, + { + "bbox": [ + 86, + 700, + 524, + 771 + ], + "type": "text", + "content": ", containing only the low, mid, and high-frequency components of the solutions, respectively. We refer to Fig. 6 for visualization of solutions at these frequency bands. 
However, DM does not enhance the results at " + }, + { + "bbox": [ + 86, + 700, + 524, + 771 + ], + "type": "inline_equation", + "content": "\\mathcal{E}_{F_{\\mathrm{low}}}" + }, + { + "bbox": [ + 86, + 700, + 524, + 771 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 86, + 700, + 524, + 771 + ], + "type": "inline_equation", + "content": "\\mathcal{E}_{F_{\\mathrm{mid}}}" + }, + { + "bbox": [ + 86, + 700, + 524, + 771 + ], + "type": "text", + "content": " when integrated with HFS-enhanced NO. Indeed, the results at these two frequency bands are sometimes the best" + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 289, + 772, + 304, + 782 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 289, + 772, + 304, + 782 + ], + "spans": [ + { + "bbox": [ + 289, + 772, + 304, + 782 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "bbox": [ + 97, + 72, + 525, + 116 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 72, + 525, + 116 + ], + "spans": [ + { + "bbox": [ + 97, + 72, + 525, + 116 + ], + "type": "text", + "content": "for HFS-enhanced NO without DM, depending on the NO size. However, the advantage of DM is taken into action at " + }, + { + "bbox": [ + 97, + 72, + 525, + 116 + ], + "type": "inline_equation", + "content": "\\mathcal{E}_{F_{\\mathrm{high}}}" + }, + { + "bbox": [ + 97, + 72, + 525, + 116 + ], + "type": "text", + "content": " (Fig. 10e) with improved results compared to NO and HFS-enhanced NO. This explains the role of DM in further mitigation of spectral bias." 
+ } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 90, + 143, + 285, + 329 + ], + "blocks": [ + { + "bbox": [ + 90, + 143, + 285, + 329 + ], + "lines": [ + { + "bbox": [ + 90, + 143, + 285, + 329 + ], + "spans": [ + { + "bbox": [ + 90, + 143, + 285, + 329 + ], + "type": "image", + "image_path": "adcc8ac420e67795e1500fd44cec9e0c040709c3a1c3f3f3c6afdf73a317f59a.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 290, + 143, + 489, + 328 + ], + "blocks": [ + { + "bbox": [ + 290, + 143, + 489, + 328 + ], + "lines": [ + { + "bbox": [ + 290, + 143, + 489, + 328 + ], + "spans": [ + { + "bbox": [ + 290, + 143, + 489, + 328 + ], + "type": "image", + "image_path": "c61ee8c84c709ba417474b42e09eec116bd2299f768602183e70caadd447b32c.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 71, + 333, + 218, + 470 + ], + "blocks": [ + { + "bbox": [ + 71, + 333, + 218, + 470 + ], + "lines": [ + { + "bbox": [ + 71, + 333, + 218, + 470 + ], + "spans": [ + { + "bbox": [ + 71, + 333, + 218, + 470 + ], + "type": "image", + "image_path": "829637f19e543333f93c6632ec44ddd9bda78de8bfca3bc1bff0bc0e7512c61c.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 68, + 481, + 525, + 542 + ], + "lines": [ + { + "bbox": [ + 68, + 481, + 525, + 542 + ], + "spans": [ + { + "bbox": [ + 68, + 481, + 525, + 542 + ], + "type": "text", + "content": "Figure 10: Diffusion model prediction results. (a) Relative errors (Rel. Error) of prediction by NO, NO + DM, NO + HFS, and NO + HFS + DM. (b) Relative energy spectrum errors (Rel. " + }, + { + "bbox": [ + 68, + 481, + 525, + 542 + ], + "type": "inline_equation", + "content": "\\mathcal{E}_F" + }, + { + "bbox": [ + 68, + 481, + 525, + 542 + ], + "type": "text", + "content": "). 
(c) Relative energy spectrum errors in the low frequency band (Rel. " + }, + { + "bbox": [ + 68, + 481, + 525, + 542 + ], + "type": "inline_equation", + "content": "\\mathcal{E}_{F_{\\mathrm{low}}}" + }, + { + "bbox": [ + 68, + 481, + 525, + 542 + ], + "type": "text", + "content": "). (d) Relative energy spectrum errors in the mid frequency band (Rel. " + }, + { + "bbox": [ + 68, + 481, + 525, + 542 + ], + "type": "inline_equation", + "content": "\\mathcal{E}_{F_{\\mathrm{mid}}}" + }, + { + "bbox": [ + 68, + 481, + 525, + 542 + ], + "type": "text", + "content": "). (e) Relative energy spectrum errors in the high-frequency band (Rel. " + }, + { + "bbox": [ + 68, + 481, + 525, + 542 + ], + "type": "inline_equation", + "content": "\\mathcal{E}_{F_{\\mathrm{high}}}" + }, + { + "bbox": [ + 68, + 481, + 525, + 542 + ], + "type": "text", + "content": "). Low, mid, and high-frequency thresholds are set to the first " + }, + { + "bbox": [ + 68, + 481, + 525, + 542 + ], + "type": "inline_equation", + "content": "2\\%" + }, + { + "bbox": [ + 68, + 481, + 525, + 542 + ], + "type": "text", + "content": ", the first " + }, + { + "bbox": [ + 68, + 481, + 525, + 542 + ], + "type": "inline_equation", + "content": "6.2\\%" + }, + { + "bbox": [ + 68, + 481, + 525, + 542 + ], + "type": "text", + "content": " excluding the first " + }, + { + "bbox": [ + 68, + 481, + 525, + 542 + ], + "type": "inline_equation", + "content": "2\\%" + }, + { + "bbox": [ + 68, + 481, + 525, + 542 + ], + "type": "text", + "content": ", and the last " + }, + { + "bbox": [ + 68, + 481, + 525, + 542 + ], + "type": "inline_equation", + "content": "93.8\\%" + }, + { + "bbox": [ + 68, + 481, + 525, + 542 + ], + "type": "text", + "content": " of the wavenumbers." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 218, + 333, + 363, + 470 + ], + "blocks": [ + { + "bbox": [ + 218, + 333, + 363, + 470 + ], + "lines": [ + { + "bbox": [ + 218, + 333, + 363, + 470 + ], + "spans": [ + { + "bbox": [ + 218, + 333, + 363, + 470 + ], + "type": "image", + "image_path": "24e9419cdc031bbe8ee845630b852d0f762ff0f707d7ec8c2a0be986d2140e2a.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 364, + 334, + 514, + 470 + ], + "blocks": [ + { + "bbox": [ + 364, + 334, + 514, + 470 + ], + "lines": [ + { + "bbox": [ + 364, + 334, + 514, + 470 + ], + "spans": [ + { + "bbox": [ + 364, + 334, + 514, + 470 + ], + "type": "image", + "image_path": "fb0cd2d796a34b11d747483d3064e7f18b5a3e9c93c722d66bc954154eeb9ef0.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 563, + 141, + 576 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 563, + 141, + 576 + ], + "spans": [ + { + "bbox": [ + 69, + 563, + 141, + 576 + ], + "type": "text", + "content": "4. Discussion" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 68, + 587, + 525, + 745 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 587, + 525, + 745 + ], + "spans": [ + { + "bbox": [ + 68, + 587, + 525, + 745 + ], + "type": "text", + "content": "HFS works by preserving more high-frequency components in the latent space after each convolutional layer in the NO. The flexibility of learning to scale the DC and HFC of the signals allows the model to enhance the predictions in mid and high-frequency contents without any degradation in the low frequency content of the solutions. As a result, both field metrics suh as RMSE and bubble RMSE, and the spectral errors are reduced in two-phase flow predictions. 
The enhancements observed in HFS-enhanced NO prediction results are more pronounced in areas with larger high-frequency features such as within the bubbles and at condensation trails seen in subcooled boiling solutions. This emphasizes the role of HFS in spectral bias mitigation, which helps with better capture of intricate features and sharp gradients. Similarly, both the relative errors and spectral errors are reduced, and high-frequency features are enhanced in the Kolmogorov flow predictions." + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 289, + 771, + 302, + 782 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 289, + 771, + 302, + 782 + ], + "spans": [ + { + "bbox": [ + 289, + 771, + 302, + 782 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 73, + 525, + 246 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 73, + 525, + 246 + ], + "spans": [ + { + "bbox": [ + 68, + 73, + 525, + 246 + ], + "type": "text", + "content": "The scaling parameters " + }, + { + "bbox": [ + 68, + 73, + 525, + 246 + ], + "type": "inline_equation", + "content": "\\lambda_{DC}" + }, + { + "bbox": [ + 68, + 73, + 525, + 246 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 68, + 73, + 525, + 246 + ], + "type": "inline_equation", + "content": "\\lambda_{HFC}" + }, + { + "bbox": [ + 68, + 73, + 525, + 246 + ], + "type": "text", + "content": " in the HFS method are optimized during the network training. 
Notably, the optimized values for " + }, + { + "bbox": [ + 68, + 73, + 525, + 246 + ], + "type": "inline_equation", + "content": "\\lambda_{HFC}" + }, + { + "bbox": [ + 68, + 73, + 525, + 246 + ], + "type": "text", + "content": " are consistently larger than " + }, + { + "bbox": [ + 68, + 73, + 525, + 246 + ], + "type": "inline_equation", + "content": "\\lambda_{DC}" + }, + { + "bbox": [ + 68, + 73, + 525, + 246 + ], + "type": "text", + "content": ", indicating that the model is trying to pay more attention to the HFC in the latent space. This biased attention helps mitigating the persistent challenge of spectral bias in the NO. To reduce the optimization burden, the scaling parameters were consistent across all the patches but were allowed to vary across different feature maps. This flexibility enables the model to automatically adjust the scaling of the HFC of the feature map depending on its content and significance. The learned " + }, + { + "bbox": [ + 68, + 73, + 525, + 246 + ], + "type": "inline_equation", + "content": "\\lambda_{DC}" + }, + { + "bbox": [ + 68, + 73, + 525, + 246 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 68, + 73, + 525, + 246 + ], + "type": "inline_equation", + "content": "\\lambda_{HFC}" + }, + { + "bbox": [ + 68, + 73, + 525, + 246 + ], + "type": "text", + "content": " for each of the latent feature maps in the HFS-enhanced NO with " + }, + { + "bbox": [ + 68, + 73, + 525, + 246 + ], + "type": "inline_equation", + "content": "\\sim 1.7" + }, + { + "bbox": [ + 68, + 73, + 525, + 246 + ], + "type": "text", + "content": " million parameters are shown in Appendix G. 
In our work, all the scaling parameters were initialized at one and they were optimized using gradient descent with the same learning rate used for training the NO (" + }, + { + "bbox": [ + 68, + 73, + 525, + 246 + ], + "type": "inline_equation", + "content": "\\sim 8\\times 10^{-4}" + }, + { + "bbox": [ + 68, + 73, + 525, + 246 + ], + "type": "text", + "content": "). It would be interesting to explore faster convergence by using different initializations and optimization frameworks for the scaling parameters in future work." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 68, + 260, + 526, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 260, + 526, + 491 + ], + "spans": [ + { + "bbox": [ + 68, + 260, + 526, + 491 + ], + "type": "text", + "content": "Another method for spectral bias mitigation is through diffusion models conditioned on NO predictions as prior information. However, using diffusion models has two drawbacks. First, the diffusion model predictions are strongly dependent on the prior information. Therefore, it can only reduce over-smoothing from reasonably accurate NO predictions. If the NO predictions are not sufficiently accurate, then the diffusion model cannot perform well. Second, training diffusion models requires extensive computational cost as each training iteration involves " + }, + { + "bbox": [ + 68, + 260, + 526, + 491 + ], + "type": "inline_equation", + "content": "n(= 32)" + }, + { + "bbox": [ + 68, + 260, + 526, + 491 + ], + "type": "text", + "content": " auto-regressive denoising steps to estimate the state of the solution at each time-step. In our experiments, the diffusion model training cost is approximately 2 to 4 times higher than the NO training itself. On the other hand, the HFS method requires only a small additional computational cost and negligible additional memory for training along the NO. The number of parameters added by HFS modules varies depending on the underlying NO size. 
However, it is generally less than " + }, + { + "bbox": [ + 68, + 260, + 526, + 491 + ], + "type": "inline_equation", + "content": "0.1\\%" + }, + { + "bbox": [ + 68, + 260, + 526, + 491 + ], + "type": "text", + "content": " of the number of parameters in the NO. In our experimentation, the HFS module parameters vary between " + }, + { + "bbox": [ + 68, + 260, + 526, + 491 + ], + "type": "inline_equation", + "content": "0.018\\%" + }, + { + "bbox": [ + 68, + 260, + 526, + 491 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 68, + 260, + 526, + 491 + ], + "type": "inline_equation", + "content": "0.045\\%" + }, + { + "bbox": [ + 68, + 260, + 526, + 491 + ], + "type": "text", + "content": " of the number of parameters in NO, depending on the underlying NO size. Based on our experiments, the computational time for each training iteration is within " + }, + { + "bbox": [ + 68, + 260, + 526, + 491 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 68, + 260, + 526, + 491 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 68, + 260, + 526, + 491 + ], + "type": "inline_equation", + "content": "30\\%" + }, + { + "bbox": [ + 68, + 260, + 526, + 491 + ], + "type": "text", + "content": " higher, depending on the NO size and the computational resource." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 68, + 505, + 525, + 678 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 505, + 525, + 678 + ], + "spans": [ + { + "bbox": [ + 68, + 505, + 525, + 678 + ], + "type": "text", + "content": "In addition to the enhancements observed in field metrics such as RMSE and bubble RMSE, our investigation revealed that HFS also helps with reducing the spectral errors. We demonstrated that matching the correct energy spectra at mid and high wavenumbers is directly correlated with capturing the complex features in the solutions. 
We would like to emphasize the importance of considering both field errors and correct energy spectra alignment in scientific machine learning problems. The field analysis demonstrates the average performance of the predictions. However, the energy spectra analysis reveals useful information about the prediction accuracies at different frequencies and thereby explaining the possible spectral bias and loss of useful information near interfaces, vortices, and sharp gradient areas in two-phase flow and turbulence problems. It should be noted that the predictions with enhanced energy spectra alignment is beneficial when accompanied by improved mean field predictions (e.g., RMSE) and HFS-enhanced NO results satisfy this requirement." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 693, + 524, + 766 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 693, + 524, + 766 + ], + "spans": [ + { + "bbox": [ + 68, + 693, + 524, + 766 + ], + "type": "text", + "content": "When aiming to scale the different frequency bands of the signals, a logical alternative would be to perform the scaling directly in the frequency domain rather than the physical domain. As a comparison, we implemented and compared scaling in the frequency domain with our proposed method (HFS). 
In this regard, let " + }, + { + "bbox": [ + 68, + 693, + 524, + 766 + ], + "type": "inline_equation", + "content": "\\mathbf{X}^{(l)}\\in \\mathbb{R}^{H\\times W\\times C}" + }, + { + "bbox": [ + 68, + 693, + 524, + 766 + ], + "type": "text", + "content": " be the output of " + }, + { + "bbox": [ + 68, + 693, + 524, + 766 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 68, + 693, + 524, + 766 + ], + "type": "text", + "content": "-th convolutional layer, then the feature maps can be transferred to frequency domain using a 2D Fourier" + } + ] + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 289, + 771, + 304, + 782 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 289, + 771, + 304, + 782 + ], + "spans": [ + { + "bbox": [ + 289, + 771, + 304, + 782 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 73, + 143, + 86 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 73, + 143, + 86 + ], + "spans": [ + { + "bbox": [ + 69, + 73, + 143, + 86 + ], + "type": "text", + "content": "transform " + }, + { + "bbox": [ + 69, + 73, + 143, + 86 + ], + "type": "inline_equation", + "content": "(\\mathcal{F})" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 184, + 99, + 524, + 118 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 184, + 99, + 524, + 118 + ], + "spans": [ + { + "bbox": [ + 184, + 99, + 524, + 118 + ], + "type": "interline_equation", + "content": "\\hat {\\mathbf {X}} ^ {(l)} (:,:, c) = \\mathcal {F} \\left(\\mathbf {X} ^ {(l)} (:,: c)\\right), \\quad c = 1, 2, \\dots , C, \\tag {9}", + "image_path": "06d4a56db22499e908292b06114cca61f60690cad185fe59b16acb00df8e6ef2.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 68, + 124, + 524, + 183 + ], + "type": "text", + "angle": 0, + "lines": [ + { + 
"bbox": [ + 68, + 124, + 524, + 183 + ], + "spans": [ + { + "bbox": [ + 68, + 124, + 524, + 183 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 68, + 124, + 524, + 183 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{X}}^{(l)}\\in \\mathbb{C}^{H\\times W\\times C}" + }, + { + "bbox": [ + 68, + 124, + 524, + 183 + ], + "type": "text", + "content": " includes the Fourier-transformed feature maps. The low frequency and high-frequency component of features maps can be generated by truncating " + }, + { + "bbox": [ + 68, + 124, + 524, + 183 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{X}}^{(l)}" + }, + { + "bbox": [ + 68, + 124, + 524, + 183 + ], + "type": "text", + "content": " at a frequency threshold of " + }, + { + "bbox": [ + 68, + 124, + 524, + 183 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 68, + 124, + 524, + 183 + ], + "type": "text", + "content": ". We name these components as " + }, + { + "bbox": [ + 68, + 124, + 524, + 183 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{X}}_{\\mathrm{low}}^{(l)}" + }, + { + "bbox": [ + 68, + 124, + 524, + 183 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 68, + 124, + 524, + 183 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{X}}_{\\mathrm{high}}^{(l)}" + }, + { + "bbox": [ + 68, + 124, + 524, + 183 + ], + "type": "text", + "content": ". 
Each Fourier-transformed feature will be scaled separately:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 209, + 192, + 524, + 212 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 209, + 192, + 524, + 212 + ], + "spans": [ + { + "bbox": [ + 209, + 192, + 524, + 212 + ], + "type": "interline_equation", + "content": "\\hat {\\mathbf {X}} _ {\\text {s c a l e d}} ^ {(l)} = \\lambda_ {\\text {l o w}} \\odot \\hat {\\mathbf {X}} _ {\\text {l o w}} ^ {(l)} + \\lambda_ {\\text {h i g h}} \\odot \\hat {\\mathbf {X}} _ {\\text {h i g h}} ^ {(l)}, \\tag {10}", + "image_path": "a4e1631501950d91264465bc2f7c1b6b7fe7a0c7b014189fb7a3c98d91f992aa.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 68, + 222, + 524, + 265 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 222, + 524, + 265 + ], + "spans": [ + { + "bbox": [ + 68, + 222, + 524, + 265 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 68, + 222, + 524, + 265 + ], + "type": "inline_equation", + "content": "\\lambda_{\\mathrm{low}} \\in \\mathbb{R}^{1 \\times 1 \\times C}" + }, + { + "bbox": [ + 68, + 222, + 524, + 265 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 68, + 222, + 524, + 265 + ], + "type": "inline_equation", + "content": "\\lambda_{\\mathrm{high}} \\in \\mathbb{R}^{1 \\times 1 \\times C}" + }, + { + "bbox": [ + 68, + 222, + 524, + 265 + ], + "type": "text", + "content": " are learnable parameters that are optimized simultaneously with the network training. 
Finally, the scaled feature map is reconstructed using the inverse Fourier transform:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 167, + 278, + 524, + 297 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 278, + 524, + 297 + ], + "spans": [ + { + "bbox": [ + 167, + 278, + 524, + 297 + ], + "type": "interline_equation", + "content": "\\mathbf {X} _ {\\text {s c a l e d}} ^ {(l)} (:, :, c) = \\mathcal {F} ^ {- 1} \\left(\\hat {\\mathbf {X}} _ {\\text {s c a l e d}} ^ {(l)} (:, :, c)\\right), \\quad c = 1, 2, \\dots , C. \\tag {11}", + "image_path": "d5559774d6cdd782902e47caa66d146219b8e97b2e6458c25af5364ff727b183.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 68, + 301, + 524, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 301, + 524, + 445 + ], + "spans": [ + { + "bbox": [ + 68, + 301, + 524, + 445 + ], + "type": "text", + "content": "Our preliminary results demonstrate that scaling in the frequency domain also improves the two-phase flow prediction results, thus helping with the spectral bias mitigation. However, the enhancements are lower than the proposed HFS method, while the computational cost is significantly higher. This is due to the Fourier and Fourier inverse transforms required in this method. Consequently, we did not proceed with the second method. However, it may worth investigating this method in future work. There is one hyperparameter for each of these scaling methods. For the proposed HFS method, the patch size is the hyperparameter, and for scaling in the frequency domain the truncation frequency is the hyperparameter. A comparison of the prediction errors and computation costs of the two methods with a NO with " + }, + { + "bbox": [ + 68, + 301, + 524, + 445 + ], + "type": "inline_equation", + "content": "\\sim 1.7" + }, + { + "bbox": [ + 68, + 301, + 524, + 445 + ], + "type": "text", + "content": " million parameters is shown in Table 3." 
+ } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 121, + 491, + 473, + 599 + ], + "blocks": [ + { + "bbox": [ + 94, + 470, + 498, + 483 + ], + "lines": [ + { + "bbox": [ + 94, + 470, + 498, + 483 + ], + "spans": [ + { + "bbox": [ + 94, + 470, + 498, + 483 + ], + "type": "text", + "content": "Table 3: Comparison of the proposed HFS (Method 1) and scaling in frequency domain (Method 2)." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 121, + 491, + 473, + 599 + ], + "lines": [ + { + "bbox": [ + 121, + 491, + 473, + 599 + ], + "spans": [ + { + "bbox": [ + 121, + 491, + 473, + 599 + ], + "type": "table", + "html": "
NONO + Method 1NO + Method 2
Rel. Error0.0440.0330.034
RMSE0.0430.0330.034
BRMSE0.1160.0720.076
Maxmean1.140.890.92
Parameters [Millions]1.7111.7121.712
Iteration time (s)31.434.552.6
", + "image_path": "54e291267e3c4266a59c83d920c861ff4f608dfa734ee3f6ab5844d262da96c2.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 615, + 201, + 629 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 615, + 201, + 629 + ], + "spans": [ + { + "bbox": [ + 69, + 615, + 201, + 629 + ], + "type": "text", + "content": "4.1. Effectiveness Criteria" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 68, + 632, + 524, + 762 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 632, + 524, + 762 + ], + "spans": [ + { + "bbox": [ + 68, + 632, + 524, + 762 + ], + "type": "text", + "content": "The HFS approach operates by spatially decomposing the features into several patches and scaling the common DC and individual HFC of the patches separately. Our investigation showed that HFS is mostly effective on datasets with localized features such as those in subcooled pool boiling dataset. For extremely chaotic systems with globally small-scale features, the DC and HFC cannot be directly separated from spatial patching as all the patches may contain similar frequency components. To better quantify this limitation, we directly applied HFS to the samples from three different case studies with inherently different features. The samples were chosen from the subcooled pool boiling, Kolmogorov flow, and a turbulent jet problem. 
We found that HFS is effective for the first two problems (with the effect being less" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 289, + 771, + 304, + 782 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 289, + 771, + 304, + 782 + ], + "spans": [ + { + "bbox": [ + 289, + 771, + 304, + 782 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 73, + 390, + 86 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 73, + 390, + 86 + ], + "spans": [ + { + "bbox": [ + 69, + 73, + 390, + 86 + ], + "type": "text", + "content": "pronounced on the later one), but is not effective for the third case." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 101, + 526, + 290 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 101, + 526, + 290 + ], + "spans": [ + { + "bbox": [ + 70, + 101, + 526, + 290 + ], + "type": "text", + "content": "The turbulent jet data is from the experimental Schlieren velocimetry of turbulent helium jet in air. More details about the dataset is available in the previous work [64]. We directly used the publicly available Schlieren velocimetry dataset [64] in the raw .tif format. All the regions in the turbulent jet have similar small-scale features (see Fig. 11), which are different from the more localized features in the subcooled pool boiling and less localized features in the Kolmogorov flow. We directly applied HFS to these datasets and visualized the gradient magnitude in a region with high-frequency features. 
Additionally, we visualized the ratio of the gradient strength on a high frequency region with and without HFS, as defined by " + }, + { + "bbox": [ + 70, + 101, + 526, + 290 + ], + "type": "inline_equation", + "content": "\\frac{|\\nabla x_{\\mathrm{HFS}}|}{|\\nabla x_{\\mathrm{baseline}}|}" + }, + { + "bbox": [ + 70, + 101, + 526, + 290 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 70, + 101, + 526, + 290 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 70, + 101, + 526, + 290 + ], + "type": "text", + "content": " is the chosen region, " + }, + { + "bbox": [ + 70, + 101, + 526, + 290 + ], + "type": "inline_equation", + "content": "\\nabla" + }, + { + "bbox": [ + 70, + 101, + 526, + 290 + ], + "type": "text", + "content": " is the gradient operator, and baseline refers to the case without HFS. This ratio compares the selectiveness in scaling the gradient of the features. The HFS approach is effective for cases where it can selectively scale the gradients across the localized features. In contrast, HFS may not be effective if it results in a uniform gradient scaling, as it can be seen in the sample from the turbulent jet dataset." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 68, + 303, + 526, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 303, + 526, + 433 + ], + "spans": [ + { + "bbox": [ + 68, + 303, + 526, + 433 + ], + "type": "text", + "content": "Specifically, as shown in Fig. 11, the HFS approach successfully increases the gradient strength at high frequency regions in subcooled pool boiling and Kolmogorov flow. However, it scales the gradient uniformly in the turbulent jet case. Therefore, the ratio of the gradient strength with HFS to the baseline shows a less uniform solution on the subcooled pool boiling sample, followed by the Kolmogorov flow sample. However, this ratio is almost uniform for the turbulent jet case. 
Selective enhancement of the gradient near the edges and high-frequency features helps with the better representation of these local regions which helps the NO to better capture the high-frequency details. Since HFS is applied in the latent space, the artifacts caused by patching are mitigated and ultimately discarded in the deeper levels of the NO." + } + ] + } + ], + "index": 2 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 290, + 771, + 304, + 782 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 771, + 304, + 782 + ], + "spans": [ + { + "bbox": [ + 290, + 771, + 304, + 782 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 96, + 75, + 205, + 190 + ], + "blocks": [ + { + "bbox": [ + 79, + 81, + 95, + 95 + ], + "lines": [ + { + "bbox": [ + 79, + 81, + 95, + 95 + ], + "spans": [ + { + "bbox": [ + 79, + 81, + 95, + 95 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 96, + 75, + 205, + 190 + ], + "lines": [ + { + "bbox": [ + 96, + 75, + 205, + 190 + ], + "spans": [ + { + "bbox": [ + 96, + 75, + 205, + 190 + ], + "type": "image", + "image_path": "a3920278fe9862529af92facf4befe880ca3cdb6ffd2727b6fd4c8c5dd3e3403.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 218, + 81, + 309, + 181 + ], + "blocks": [ + { + "bbox": [ + 218, + 81, + 309, + 181 + ], + "lines": [ + { + "bbox": [ + 218, + 81, + 309, + 181 + ], + "spans": [ + { + "bbox": [ + 218, + 81, + 309, + 181 + ], + "type": "image", + "image_path": "b20b61142a3bce86ad36ea7d9f2a959ee5be79e20456608ea2474ed5fc37e15f.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 314, + 81, + 404, + 
181 + ], + "blocks": [ + { + "bbox": [ + 314, + 81, + 404, + 181 + ], + "lines": [ + { + "bbox": [ + 314, + 81, + 404, + 181 + ], + "spans": [ + { + "bbox": [ + 314, + 81, + 404, + 181 + ], + "type": "image", + "image_path": "3af287cf23e6f53219a3589e04c57a037bb16dd30b3685c7cc69a05bbec05bb1.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 408, + 70, + 515, + 181 + ], + "blocks": [ + { + "bbox": [ + 408, + 70, + 515, + 181 + ], + "lines": [ + { + "bbox": [ + 408, + 70, + 515, + 181 + ], + "spans": [ + { + "bbox": [ + 408, + 70, + 515, + 181 + ], + "type": "image", + "image_path": "06a9908dbf997c468a6ff901a52a01cdf5091d706ec7f0c0dae199bce729c2d8.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 95, + 193, + 201, + 309 + ], + "blocks": [ + { + "bbox": [ + 78, + 195, + 94, + 211 + ], + "lines": [ + { + "bbox": [ + 78, + 195, + 94, + 211 + ], + "spans": [ + { + "bbox": [ + 78, + 195, + 94, + 211 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 95, + 193, + 201, + 309 + ], + "lines": [ + { + "bbox": [ + 95, + 193, + 201, + 309 + ], + "spans": [ + { + "bbox": [ + 95, + 193, + 201, + 309 + ], + "type": "image", + "image_path": "e6da86530a472579fba0dd641c2521881c22ad246da13baba0f7a778d0d158dd.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 227, + 211, + 301, + 302 + ], + "blocks": [ + { + "bbox": [ + 227, + 211, + 301, + 302 + ], + "lines": [ + { + "bbox": [ + 227, + 211, + 301, + 302 + ], + "spans": [ + { + "bbox": [ + 227, + 211, + 301, + 302 + ], + "type": "image", + "image_path": "02e29bc84d1768810e1282a65f1138753dd896b1b55969f5a4838f82670d53f6.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + 
"index": 7 + }, + { + "type": "image", + "bbox": [ + 321, + 210, + 395, + 301 + ], + "blocks": [ + { + "bbox": [ + 321, + 210, + 395, + 301 + ], + "lines": [ + { + "bbox": [ + 321, + 210, + 395, + 301 + ], + "spans": [ + { + "bbox": [ + 321, + 210, + 395, + 301 + ], + "type": "image", + "image_path": "a2d0d2a7c6fc7b04c17becb19f33300d8aa8cf147d4405358f82dc9231fef45b.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 417, + 210, + 507, + 302 + ], + "blocks": [ + { + "bbox": [ + 417, + 210, + 507, + 302 + ], + "lines": [ + { + "bbox": [ + 417, + 210, + 507, + 302 + ], + "spans": [ + { + "bbox": [ + 417, + 210, + 507, + 302 + ], + "type": "image", + "image_path": "bde31bbafc0dd86d2cacce81eadabd84b6642b05c7685ef1db226bcddcc21013.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 95, + 324, + 232, + 391 + ], + "blocks": [ + { + "bbox": [ + 78, + 313, + 94, + 331 + ], + "lines": [ + { + "bbox": [ + 78, + 313, + 94, + 331 + ], + "spans": [ + { + "bbox": [ + 78, + 313, + 94, + 331 + ], + "type": "text", + "content": "(c)" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 95, + 312, + 265, + 324 + ], + "lines": [ + { + "bbox": [ + 95, + 312, + 265, + 324 + ], + "spans": [ + { + "bbox": [ + 95, + 312, + 265, + 324 + ], + "type": "text", + "content": "Turbulent jet (Schlieren velocimetry)" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 95, + 324, + 232, + 391 + ], + "lines": [ + { + "bbox": [ + 95, + 324, + 232, + 391 + ], + "spans": [ + { + "bbox": [ + 95, + 324, + 232, + 391 + ], + "type": "image", + "image_path": "b8520abce2c4b102c3587cdadf9a75faceafd934ce44f67cb0d3a2e2c69cb107.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 68, + 403, + 524, + 465 + ], + "lines": [ 
+ { + "bbox": [ + 68, + 403, + 524, + 465 + ], + "spans": [ + { + "bbox": [ + 68, + 403, + 524, + 465 + ], + "type": "text", + "content": "Figure 11: HFS impact on gradient magnitude for different problems. (a) Subcooled pool boiling. (b) Kolmogorov flow. (c) Schlieren velocimetry of turbulent jet. For each case, the first column shows the sample and the chosen region with high frequency features (dashed boxes), the second column shows the gradient magnitude, the third column shows the gradient magnitude after applying HFS to the sample, and the fourth column shows the ratio of the HFS-enhanced gradient magnitude to the baseline gradient magnitude." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 237, + 327, + 290, + 387 + ], + "blocks": [ + { + "bbox": [ + 237, + 327, + 290, + 387 + ], + "lines": [ + { + "bbox": [ + 237, + 327, + 290, + 387 + ], + "spans": [ + { + "bbox": [ + 237, + 327, + 290, + 387 + ], + "type": "image", + "image_path": "b6f0c5dd6c47a2958767bffd09c994b0edb2e6ba4a6e8bf3fdcb4e17cb43447e.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 332, + 327, + 385, + 387 + ], + "blocks": [ + { + "bbox": [ + 332, + 327, + 385, + 387 + ], + "lines": [ + { + "bbox": [ + 332, + 327, + 385, + 387 + ], + "spans": [ + { + "bbox": [ + 332, + 327, + 385, + 387 + ], + "type": "image", + "image_path": "54d45f249993056243a6c7a732dd99069452cb3aadac4e89970dbe83138eb984.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 428, + 326, + 498, + 387 + ], + "blocks": [ + { + "bbox": [ + 428, + 326, + 498, + 387 + ], + "lines": [ + { + "bbox": [ + 428, + 326, + 498, + 387 + ], + "spans": [ + { + "bbox": [ + 428, + 326, + 498, + 387 + ], + "type": "image", + "image_path": 
"cd911c74dd595db620775568094d4217a9e0788b4a95e0be4c48f5928145f1f8.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "bbox": [ + 69, + 486, + 137, + 500 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 486, + 137, + 500 + ], + "spans": [ + { + "bbox": [ + 69, + 486, + 137, + 500 + ], + "type": "text", + "content": "5. Summary" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 68, + 509, + 525, + 768 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 509, + 525, + 768 + ], + "spans": [ + { + "bbox": [ + 68, + 509, + 525, + 768 + ], + "type": "text", + "content": "In this work, we proposed a new method named high-frequency scaling (HFS) to mitigate the spectral bias in convolutional-based neural operators. We demonstrated that integrating HFS with feature maps in the latent space of the neural operator reduces the prediction errors in two-phase flow problems and the Kolmogorov flow problem. Through spectral bias mitigation, HFS helps to better capture intricate features and sharp gradients commonly seen within the bubbles and induced vortices in subcooled pool boiling problem, and the small-scale features in the Kolmogorov flow. These high-frequency features are prone to over-smoothing when predicted with neural operators without HFS. HFS-enhanced neural operators can improve neural operator performance irrespective of the neural operator size. 
We showed that for different variants of ResUNet with number of parameters varying within " + }, + { + "bbox": [ + 68, + 509, + 525, + 768 + ], + "type": "inline_equation", + "content": "\\sim 2" + }, + { + "bbox": [ + 68, + 509, + 525, + 768 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 68, + 509, + 525, + 768 + ], + "type": "inline_equation", + "content": "\\sim 16" + }, + { + "bbox": [ + 68, + 509, + 525, + 768 + ], + "type": "text", + "content": " millions, HFS consistently reduces the prediction errors. Furthermore, a better energy spectra alignment is observed for the results of the neural operator with HFS. Additionally, we showed that the diffusion model predictions are strongly dependent on the quality of the prior neural operator predictions. Therefore, it is important to improve the neural operator prediction accuracy using HFS so that the diffusion model can further recover the missing high-frequencies in the solutions. Otherwise, the diffusion model can barely improve the erroneous large features or significantly over-smoothed predictions of the neural operator. The advantages of HFS are obtained with a negligible memory requirement and a small computational cost trade-off." 
+ } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 289, + 771, + 303, + 782 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 289, + 771, + 303, + 782 + ], + "spans": [ + { + "bbox": [ + 289, + 771, + 303, + 782 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 87, + 525, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 87, + 525, + 202 + ], + "spans": [ + { + "bbox": [ + 67, + 87, + 525, + 202 + ], + "type": "text", + "content": "Finally, we investigated the effectiveness criteria for HFS approach by visualizing the gradient magnitudes of high-frequency regions of three different problems. We showed that HFS works the best on the subcooled pool boiling dataset due to the more localized features, which result in a selective gradient enhancement near the edges and high-frequency features. The HFS approach effectiveness decreases in the Kolmogorov flow problem, and is negligible in the turbulent jet problem. The gradient magnitude is scaled more uniformly in the Kolmogorov flow data and almost completely uniform in the turbulent jet problem, hence explaining why HFS is ineffective for this problem." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 219, + 292, + 234 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 219, + 292, + 234 + ], + "spans": [ + { + "bbox": [ + 69, + 219, + 292, + 234 + ], + "type": "text", + "content": "CRediT authorship contribution statement" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 243, + 525, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 243, + 525, + 344 + ], + "spans": [ + { + "bbox": [ + 67, + 243, + 525, + 344 + ], + "type": "text", + "content": "Siavash Khodakarami: Writing - review & editing, Writing - original draft, Visualization, Validation, Software, Methodology, Investigation, Formal analysis, Data Curation, Conceptualization. Vivek Oommen: Writing - review & editing, Writing - original draft, Visualization, Validation, Methodology, Investigation, Formal analysis, Data curation. Aniruddha Bora: Writing - review & editing, Writing - original draft, Validation, Methodology, Investigation. George Em Karniadakis Writing - review & editing, Writing - original draft, Supervision, Funding acquisition, Conceptualization." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 361, + 244, + 375 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 361, + 244, + 375 + ], + "spans": [ + { + "bbox": [ + 69, + 361, + 244, + 375 + ], + "type": "text", + "content": "Declaration of competing interest" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 68, + 384, + 524, + 414 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 384, + 524, + 414 + ], + "spans": [ + { + "bbox": [ + 68, + 384, + 524, + 414 + ], + "type": "text", + "content": "The authors declare that they have no known competing financial interests or personal relationships that could have appeared to influence the work reported in this paper." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 429, + 168, + 444 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 429, + 168, + 444 + ], + "spans": [ + { + "bbox": [ + 69, + 429, + 168, + 444 + ], + "type": "text", + "content": "Acknowledgments" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 452, + 525, + 570 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 452, + 525, + 570 + ], + "spans": [ + { + "bbox": [ + 67, + 452, + 525, + 570 + ], + "type": "text", + "content": "We would like to acknowledge funding from the Office of Naval Research as part of MURI-METHODS project with grant number N00014242545. The authors would like to acknowledge the computational resources and services at the Center for Computation and Visualization (CCV), Brown University. The experiments were also partly conducted using the Delta AI computational resources at the National Center for Supercomputing Applications at the University of Illinois Urbana-Champaign through allocation CIS240932 from the Advanced Cyberinfrastructure Coordination Ecosystem: Services & Support (ACCESS) program, which is supported by the National Science Foundation." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 585, + 160, + 601 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 585, + 160, + 601 + ], + "spans": [ + { + "bbox": [ + 69, + 585, + 160, + 601 + ], + "type": "text", + "content": "Data Availability" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 68, + 608, + 524, + 638 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 608, + 524, + 638 + ], + "spans": [ + { + "bbox": [ + 68, + 608, + 524, + 638 + ], + "type": "text", + "content": "All codes and datasets will be made publicly available at https://github.com/SiaK4/HFS_ResUNet.git upon publication." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 654, + 129, + 667 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 654, + 129, + 667 + ], + "spans": [ + { + "bbox": [ + 69, + 654, + 129, + 667 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 74, + 675, + 524, + 771 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 74, + 675, + 524, + 699 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 675, + 524, + 699 + ], + "spans": [ + { + "bbox": [ + 74, + 675, + 524, + 699 + ], + "type": "text", + "content": "[1] S. K. Godunov, I. Bohachevsky, Finite difference method for numerical computation of discontinuous solutions of the equations of fluid dynamics, Matematicheskij Sbornik 47 (1959) 271-306." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 74, + 700, + 524, + 722 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 700, + 524, + 722 + ], + "spans": [ + { + "bbox": [ + 74, + 700, + 524, + 722 + ], + "type": "text", + "content": "[2] R. Eymard, T. Gallouet, R. Herbin, Finite volume methods, Handbook of Numerical Analysis 7 (2000) 713-1018." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 74, + 724, + 524, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 724, + 524, + 746 + ], + "spans": [ + { + "bbox": [ + 74, + 724, + 524, + 746 + ], + "type": "text", + "content": "[3] G. Karniadakis, S. J. Sherwin, Spectral/hp element methods for computational fluid dynamics, Oxford University Press, USA, 2005." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 74, + 748, + 524, + 771 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 748, + 524, + 771 + ], + "spans": [ + { + "bbox": [ + 74, + 748, + 524, + 771 + ], + "type": "text", + "content": "[4] T. J. 
Hughes, The Finite Element Method: Linear Static and Dynamic Finite Element Analysis, Courier Corporation, 2012." + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 290, + 772, + 304, + 782 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 772, + 304, + 782 + ], + "spans": [ + { + "bbox": [ + 290, + 772, + 304, + 782 + ], + "type": "text", + "content": "26" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 25 + }, + { + "para_blocks": [ + { + "bbox": [ + 70, + 74, + 524, + 767 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 74, + 74, + 524, + 110 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 74, + 524, + 110 + ], + "spans": [ + { + "bbox": [ + 74, + 74, + 524, + 110 + ], + "type": "text", + "content": "[5] L. Lu, P. Jin, G. Pang, Z. Zhang, G. E. Karniadakis, Learning nonlinear operators via deeponet based on the universal approximation theorem of operators, Nature Machine Intelligence 3 (2021) 218-229. URL: https://doi.org/10.1038/s42256-021-00302-5. doi:10.1038/s42256-021-00302-5." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 74, + 111, + 524, + 134 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 111, + 524, + 134 + ], + "spans": [ + { + "bbox": [ + 74, + 111, + 524, + 134 + ], + "type": "text", + "content": "[6] Z. Li, N. Kovachki, K. Azizzadenesheli, B. Liu, K. Bhattacharya, A. Stuart, A. Anandkumar, Fourier neural operator for parametric partial differential equations, arXiv preprint arXiv:2010.08895 (2020)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 75, + 134, + 524, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 134, + 524, + 158 + ], + "spans": [ + { + "bbox": [ + 75, + 134, + 524, + 158 + ], + "type": "text", + "content": "[7] Q. Cao, S. Goswami, G. E. 
Karniadakis, Laplace neural operator for solving differential equations, Nature Machine Intelligence 6 (2024) 631-640." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 74, + 158, + 524, + 192 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 158, + 524, + 192 + ], + "spans": [ + { + "bbox": [ + 74, + 158, + 524, + 192 + ], + "type": "text", + "content": "[8] T. Tripura, S. Chakraborty, Wavelet neural operator for solving parametric partial differential equations in computational mechanics problems, Computer Methods in Applied Mechanics and Engineering 404 (2023) 115783." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 74, + 193, + 524, + 229 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 193, + 524, + 229 + ], + "spans": [ + { + "bbox": [ + 74, + 193, + 524, + 229 + ], + "type": "text", + "content": "[9] O. Ovadia, V. Oommen, A. Kahana, A. Peyvan, E. Turkel, G. E. Karniadakis, Real-time inference and extrapolation via a diffusion-inspired temporal transformer operator (ditto), arXiv preprint arXiv:2307.09072 (2023)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 229, + 524, + 253 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 229, + 524, + 253 + ], + "spans": [ + { + "bbox": [ + 70, + 229, + 524, + 253 + ], + "type": "text", + "content": "[10] Z. Li, K. Meidani, A. B. Farimani, Transformer for partial differential equations' operator learning, arXiv preprint arXiv:2205.13671 (2022)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 253, + 524, + 277 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 253, + 524, + 277 + ], + "spans": [ + { + "bbox": [ + 70, + 253, + 524, + 277 + ], + "type": "text", + "content": "[11] A. Sharma, S. Singh, S. Ratna, Graph neural network operators: a review, Multimedia Tools and Applications 83 (2024) 23413-23436." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 277, + 524, + 312 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 277, + 524, + 312 + ], + "spans": [ + { + "bbox": [ + 70, + 277, + 524, + 312 + ], + "type": "text", + "content": "[12] T. Chen, H. Chen, Universal approximation to nonlinear operators by neural networks with arbitrary activation functions and its application to dynamical systems, IEEE Transactions on Neural Networks 6 (1995) 911-917. doi:10.1109/72.392253." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 312, + 524, + 349 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 312, + 524, + 349 + ], + "spans": [ + { + "bbox": [ + 70, + 312, + 524, + 349 + ], + "type": "text", + "content": "[13] R. Wan, E. Kharazmi, M. S. Triantafyllou, G. E. Karniadakis, Deepvivonet: Using deep neural operators to optimize sensor locations with application to vortex-induced vibrations, arXiv preprint arXiv:2501.04105 (2025)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 349, + 524, + 385 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 349, + 524, + 385 + ], + "spans": [ + { + "bbox": [ + 70, + 349, + 524, + 385 + ], + "type": "text", + "content": "[14] E. Kiyani, M. Manav, N. Kadivar, L. De Lorenzis, G. E. Karniadakis, Predicting crack nucleation and propagation in brittle materials using deep operator networks with diverse trunk architectures, arXiv preprint arXiv:2501.00016 (2024)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 385, + 524, + 408 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 385, + 524, + 408 + ], + "spans": [ + { + "bbox": [ + 70, + 385, + 524, + 408 + ], + "type": "text", + "content": "[15] A. Peyvan, V. Oommen, A. D. Jagtap, G. E. Karniadakis, Riemannonets: Interpretable neural operators for riemann problems, Computer Methods in Applied Mechanics and Engineering 426 (2024) 116996." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 70, + 408, + 524, + 432 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 408, + 524, + 432 + ], + "spans": [ + { + "bbox": [ + 70, + 408, + 524, + 432 + ], + "type": "text", + "content": "[16] Z. Li, W. Peng, Z. Yuan, J. Wang, Long-term predictions of turbulence by implicit u-net enhanced fourier neural operator, Physics of Fluids 35 (2023)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 70, + 432, + 524, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 432, + 524, + 456 + ], + "spans": [ + { + "bbox": [ + 70, + 432, + 524, + 456 + ], + "type": "text", + "content": "[17] Y. Jiang, Z. Li, Y. Wang, H. Yang, J. Wang, An implicit adaptive fourier neural operator for long-term predictions of three-dimensional turbulence, arXiv preprint arXiv:2501.12740 (2025)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 70, + 456, + 524, + 481 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 456, + 524, + 481 + ], + "spans": [ + { + "bbox": [ + 70, + 456, + 524, + 481 + ], + "type": "text", + "content": "[18] V. Gopakumar, S. Pamela, L. Zanisi, Z. Li, A. Anandkumar, M. Team, Fourier neural operator for plasma modelling, arXiv preprint arXiv:2302.06542 (2023)." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 70, + 481, + 524, + 515 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 481, + 524, + 515 + ], + "spans": [ + { + "bbox": [ + 70, + 481, + 524, + 515 + ], + "type": "text", + "content": "[19] D. Montes de Oca Zapiain, J. A. Stewart, R. Dingreville, Accelerating phase-field-based microstructure evolution predictions via surrogate models trained by machine learning methods, npj Computational Materials 7 (2021) 3." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 70, + 515, + 524, + 540 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 515, + 524, + 540 + ], + "spans": [ + { + "bbox": [ + 70, + 515, + 524, + 540 + ], + "type": "text", + "content": "[20] V. Oommen, K. Shukla, S. Goswami, R. Dingreville, G. E. Karniadakis, Learning two-phase microstructure evolution using neural operators and autoencoder architectures, npj Computational Materials 8 (2022) 190." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 70, + 540, + 524, + 565 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 540, + 524, + 565 + ], + "spans": [ + { + "bbox": [ + 70, + 540, + 524, + 565 + ], + "type": "text", + "content": "[21] V. Oommen, K. Shukla, S. Desai, R. Dingreville, G. E. Karniadakis, Rethinking materials simulations: Blending direct numerical simulations with neural operators, npj Computational Materials 10 (2024) 145." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 70, + 565, + 524, + 598 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 565, + 524, + 598 + ], + "spans": [ + { + "bbox": [ + 70, + 565, + 524, + 598 + ], + "type": "text", + "content": "[22] S. Khodakarami, Y. Suh, Y. Won, N. Miljkovic, An intelligent strategy for phase change heat and mass transfer: Application of machine learning, in: Advances in Heat Transfer, volume 56, Elsevier, 2023, pp. 113-168." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 70, + 599, + 524, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 599, + 524, + 647 + ], + "spans": [ + { + "bbox": [ + 70, + 599, + 524, + 647 + ], + "type": "text", + "content": "[23] N. Rahaman, A. Baratin, D. Arpit, F. Draxler, M. Lin, F. Hamprecht, Y. Bengio, A. Courville, On the spectral bias of neural networks, in: K. Chaudhuri, R. 
Salakhutdinov (Eds.), Proceedings of the 36th International Conference on Machine Learning, volume 97 of Proceedings of Machine Learning Research, PMLR, 2019, pp. 5301-5310. URL: https://proceedings.mlr.press/v97/rahaman19a.html." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 70, + 647, + 524, + 671 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 647, + 524, + 671 + ], + "spans": [ + { + "bbox": [ + 70, + 647, + 524, + 671 + ], + "type": "text", + "content": "[24] Z.-Q. J. Xu, Y. Zhang, T. Luo, Y. Xiao, Z. Ma, Frequency principle: Fourier analysis sheds light on deep neural networks, arXiv preprint arXiv:1901.06523 (2019)." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 70, + 671, + 524, + 696 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 671, + 524, + 696 + ], + "spans": [ + { + "bbox": [ + 70, + 671, + 524, + 696 + ], + "type": "text", + "content": "[25] Z.-Q. J. Xu, L. Zhang, W. Cai, On understanding and overcoming spectral biases of deep neural network learning methods for solving pdes, arXiv preprint arXiv:2501.09987 (2025)." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 70, + 696, + 524, + 719 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 696, + 524, + 719 + ], + "spans": [ + { + "bbox": [ + 70, + 696, + 524, + 719 + ], + "type": "text", + "content": "[26] C. Lin, Z. Li, L. Lu, S. Cai, M. Maxey, G. E. Karniadakis, Operator learning for predicting multiscale bubble growth dynamics, The Journal of Chemical Physics 154 (2021)." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 70, + 719, + 524, + 743 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 719, + 524, + 743 + ], + "spans": [ + { + "bbox": [ + 70, + 719, + 524, + 743 + ], + "type": "text", + "content": "[27] N. Jain, S. Roy, H. Kodamana, P. 
Nair, Scaling the predictions of multiphase flow through porous media using operator learning, Chemical Engineering Journal 503 (2025) 157671." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 70, + 743, + 524, + 767 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 743, + 524, + 767 + ], + "spans": [ + { + "bbox": [ + 70, + 743, + 524, + 767 + ], + "type": "text", + "content": "[28] O. Ronneberger, P. Fischer, T. Brox, U-net: Convolutional networks for biomedical image segmentation, in: Medical image computing and computer-assisted intervention-MICCAI 2015: 18th international confer" + } + ] + } + ], + "index": 23 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 290, + 771, + 303, + 781 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 771, + 303, + 781 + ], + "spans": [ + { + "bbox": [ + 290, + 771, + 303, + 781 + ], + "type": "text", + "content": "27" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 26 + }, + { + "para_blocks": [ + { + "bbox": [ + 70, + 74, + 524, + 766 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 92, + 74, + 487, + 85 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 92, + 74, + 487, + 85 + ], + "spans": [ + { + "bbox": [ + 92, + 74, + 487, + 85 + ], + "type": "text", + "content": "ence, Munich, Germany, October 5-9, 2015, proceedings, part III 18, Springer, 2015, pp. 234-241." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 86, + 523, + 121 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 86, + 523, + 121 + ], + "spans": [ + { + "bbox": [ + 70, + 86, + 523, + 121 + ], + "type": "text", + "content": "[29] S. Qin, F. Lyu, W. Peng, D. Geng, J. Wang, N. Gao, X. Liu, L. L. 
Wang, Toward a better understanding of fourier neural operators: Analysis and improvement from a spectral perspective, arXiv preprint arXiv:2404.07200 (2024)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 122, + 524, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 122, + 524, + 158 + ], + "spans": [ + { + "bbox": [ + 70, + 122, + 524, + 158 + ], + "type": "text", + "content": "[30] S. M. S. Hassan, A. Feeney, A. Dhruv, J. Kim, Y. Suh, J. Ryu, Y. Won, A. Chandramowlishwaran, Bubbleml: A multiphase multiphysics dataset and benchmarks for machine learning, Advances in Neural Information Processing Systems 36 (2024)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 158, + 524, + 181 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 158, + 524, + 181 + ], + "spans": [ + { + "bbox": [ + 70, + 158, + 524, + 181 + ], + "type": "text", + "content": "[31] A. Dubey, K. Weide, J. O'Neal, A. Dhruv, S. Couch, J. A. Harris, T. Klosterman, R. Jain, J. Rudi, B. Messer, et al., Flash-x: A multiphysics simulation software instrument, SoftwareX 19 (2022) 101168." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 182, + 524, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 182, + 524, + 205 + ], + "spans": [ + { + "bbox": [ + 70, + 182, + 524, + 205 + ], + "type": "text", + "content": "[32] X. Liu, B. Xu, S. Cao, L. Zhang, Mitigating spectral bias for the multiscale operator learning, Journal of Computational Physics 506 (2024) 112944." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 206, + 524, + 229 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 206, + 524, + 229 + ], + "spans": [ + { + "bbox": [ + 70, + 206, + 524, + 229 + ], + "type": "text", + "content": "[33] W. Cai, Z.-Q. J. Xu, Multi-scale deep neural networks for solving high dimensional pdes, arXiv preprint arXiv:1910.11710 (2019)." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 230, + 524, + 265 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 230, + 524, + 265 + ], + "spans": [ + { + "bbox": [ + 70, + 230, + 524, + 265 + ], + "type": "text", + "content": "[34] M. Tancik, P. Srinivasan, B. Mildenhall, S. Fridovich-Keil, N. Raghavan, U. Singhal, R. Ramamoorthi, J. Barron, R. Ng, Fourier features let networks learn high frequency functions in low dimensional domains, Advances in neural information processing systems 33 (2020) 7537-7547." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 266, + 524, + 301 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 266, + 524, + 301 + ], + "spans": [ + { + "bbox": [ + 70, + 266, + 524, + 301 + ], + "type": "text", + "content": "[35] S. Wang, H. Wang, P. Perdikaris, On the eigenvector bias of fourier feature networks: From regression to solving multi-scale pdes with physics-informed neural networks, Computer Methods in Applied Mechanics and Engineering 384 (2021) 113938." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 301, + 524, + 337 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 301, + 524, + 337 + ], + "spans": [ + { + "bbox": [ + 70, + 301, + 524, + 337 + ], + "type": "text", + "content": "[36] M. Raissi, P. Perdikaris, G. E. Karniadakis, Physics-informed neural networks: A deep learning framework for solving forward and inverse problems involving nonlinear partial differential equations, Journal of Computational physics 378 (2019) 686-707." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 338, + 524, + 371 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 338, + 524, + 371 + ], + "spans": [ + { + "bbox": [ + 70, + 338, + 524, + 371 + ], + "type": "text", + "content": "[37] J. D. Toscano, V. Oommen, A. J. Varghese, Z. Zou, N. A. Daryakenari, C. Wu, G. E. 
Karniadakis, From pinns to pikans: Recent advances in physics-informed machine learning, arXiv preprint arXiv:2410.13228 (2024)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 373, + 524, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 373, + 524, + 396 + ], + "spans": [ + { + "bbox": [ + 70, + 373, + 524, + 396 + ], + "type": "text", + "content": "[38] S. Liang, L. Lyu, C. Wang, H. Yang, Reproducing activation function for deep learning, arXiv preprint arXiv:2101.04844 (2021)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 70, + 397, + 524, + 420 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 397, + 524, + 420 + ], + "spans": [ + { + "bbox": [ + 70, + 397, + 524, + 420 + ], + "type": "text", + "content": "[39] A. D. Jagtap, K. Kawaguchi, G. E. Karniadakis, Adaptive activation functions accelerate convergence in deep and physics-informed neural networks, Journal of Computational Physics 404 (2020) 109136." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 70, + 421, + 524, + 444 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 421, + 524, + 444 + ], + "spans": [ + { + "bbox": [ + 70, + 421, + 524, + 444 + ], + "type": "text", + "content": "[40] W. Cai, X. Li, L. Liu, A phase shift deep neural network for high frequency approximation and wave problems, SIAM Journal on Scientific Computing 42 (2020) A3285-A3312." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 70, + 445, + 524, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 445, + 524, + 468 + ], + "spans": [ + { + "bbox": [ + 70, + 445, + 524, + 468 + ], + "type": "text", + "content": "[41] P. Lippe, B. Veeling, P. Perdikaris, R. Turner, J. Brandstetter, Pde-refiner: Achieving accurate long rollouts with neural pde solvers, Advances in Neural Information Processing Systems 36 (2023) 67398-67433." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 70, + 469, + 524, + 492 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 469, + 524, + 492 + ], + "spans": [ + { + "bbox": [ + 70, + 469, + 524, + 492 + ], + "type": "text", + "content": "[42] E. Zhang, A. Kahana, A. Kopaničáková, E. Turkel, R. Ranade, J. Pathak, G. E. Karniadakis, Blending neural operators and relaxation methods in pde numerical solvers, Nature Machine Intelligence (2024) 1-11." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 70, + 493, + 524, + 527 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 493, + 524, + 527 + ], + "spans": [ + { + "bbox": [ + 70, + 493, + 524, + 527 + ], + "type": "text", + "content": "[43] H. Wu, K. Zhang, D. Zhou, W.-L. Chen, Z. Han, Y. Cao, High-flexibility reconstruction of small-scale motions in wall turbulence using a generalized zero-shot learning, Journal of Fluid Mechanics 990 (2024) R1." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 70, + 528, + 524, + 552 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 528, + 524, + 552 + ], + "spans": [ + { + "bbox": [ + 70, + 528, + 524, + 552 + ], + "type": "text", + "content": "[44] Z. Wang, X. Li, L. Liu, X. Wu, P. Hao, X. Zhang, F. He, Deep-learning-based super-resolution reconstruction of high-speed imaging in fluids, Physics of Fluids 34 (2022)." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 70, + 553, + 524, + 587 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 553, + 524, + 587 + ], + "spans": [ + { + "bbox": [ + 70, + 553, + 524, + 587 + ], + "type": "text", + "content": "[45] R. Molinaro, S. Lanthaler, B. Raonic, T. Rohner, V. Armegioiu, Z. Y. Wan, F. Sha, S. Mishra, L. Zepeda-Nuñez, Generative ai for fast and accurate statistical computation of fluids, arXiv preprint arXiv:2409.18359 (2024)." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 70, + 588, + 524, + 623 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 588, + 524, + 623 + ], + "spans": [ + { + "bbox": [ + 70, + 588, + 524, + 623 + ], + "type": "text", + "content": "[46] J. W. Lockwood, A. Gori, P. Gentine, A generative super-resolution model for enhancing tropical cyclone wind field intensity and resolution, Journal of Geophysical Research: Machine Learning and Computation 1 (2024) e2024JH000375." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 70, + 624, + 524, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 624, + 524, + 647 + ], + "spans": [ + { + "bbox": [ + 70, + 624, + 524, + 647 + ], + "type": "text", + "content": "[47] V. Oommen, A. Bora, Z. Zhang, G. E. Karniadakis, Integrating neural operators with diffusion models improves spectral representation in turbulence modeling, arXiv preprint arXiv:2409.08477 (2024)." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 70, + 648, + 524, + 671 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 648, + 524, + 671 + ], + "spans": [ + { + "bbox": [ + 70, + 648, + 524, + 671 + ], + "type": "text", + "content": "[48] S. M. S. Hassan, A. Feeney, A. Dhruv, J. Kim, Y. Suh, J. Ryu, Y. Won, A. Chandramowlishwaran, Bubbleml: a multi-physics dataset and benchmarks for machine learning, arXiv preprint arXiv:2307.14623 (2023)." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 70, + 672, + 524, + 706 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 672, + 524, + 706 + ], + "spans": [ + { + "bbox": [ + 70, + 672, + 524, + 706 + ], + "type": "text", + "content": "[49] F. I. Diakogiannis, F. Waldner, P. Caccetta, C. Wu, Resunet-a: A deep learning framework for semantic segmentation of remotely sensed data, ISPRS Journal of Photogrammetry and Remote Sensing 162 (2020) 94-114." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 70, + 708, + 524, + 731 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 708, + 524, + 731 + ], + "spans": [ + { + "bbox": [ + 70, + 708, + 524, + 731 + ], + "type": "text", + "content": "[50] H. Li, Z. Xu, G. Taylor, C. Studer, T. Goldstein, Visualizing the loss landscape of neural nets, Advances in neural information processing systems 31 (2018)." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 70, + 732, + 524, + 766 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 732, + 524, + 766 + ], + "spans": [ + { + "bbox": [ + 70, + 732, + 524, + 766 + ], + "type": "text", + "content": "[51] X. Chen, C. Liang, D. Huang, E. Real, K. Wang, H. Pham, X. Dong, T. Luong, C.-J. Hsieh, Y. Lu, et al., Symbolic discovery of optimization algorithms, Advances in neural information processing systems 36 (2024)." + } + ] + } + ], + "index": 23 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 290, + 771, + 304, + 781 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 771, + 304, + 781 + ], + "spans": [ + { + "bbox": [ + 290, + 771, + 304, + 781 + ], + "type": "text", + "content": "28" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 27 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 74, + 523, + 396 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 69, + 74, + 497, + 85 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 74, + 497, + 85 + ], + "spans": [ + { + "bbox": [ + 69, + 74, + 497, + 85 + ], + "type": "text", + "content": "[52] D. P. Kingma, Adam: A method for stochastic optimization, arXiv preprint arXiv:1412.6980 (2014)." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 86, + 523, + 110 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 86, + 523, + 110 + ], + "spans": [ + { + "bbox": [ + 69, + 86, + 523, + 110 + ], + "type": "text", + "content": "[53] A. Dubey, K. Weide, J. O'Neal, A. Dhruv, S. Couch, J. A. Harris, T. Klosterman, R. Jain, J. Rudi, B. Messer, et al., Flash-x: A multiphysics simulation software instrument, SoftwareX 19 (2022) 101168." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 111, + 523, + 133 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 111, + 523, + 133 + ], + "spans": [ + { + "bbox": [ + 70, + 111, + 523, + 133 + ], + "type": "text", + "content": "[54] M. Wei, X. Zhang, Super-resolution neural operator, in: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2023, pp. 18247-18256." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 134, + 523, + 169 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 134, + 523, + 169 + ], + "spans": [ + { + "bbox": [ + 70, + 134, + 523, + 169 + ], + "type": "text", + "content": "[55] R. Wang, K. Kashinath, M. Mustafa, A. Albert, R. Yu, Towards physics-informed deep learning for turbulent flow prediction, in: Proceedings of the 26th ACM SIGKDD international conference on knowledge discovery & data mining, 2020, pp. 1457-1466." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 170, + 523, + 181 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 170, + 523, + 181 + ], + "spans": [ + { + "bbox": [ + 70, + 170, + 523, + 181 + ], + "type": "text", + "content": "[56] P. Chakrabarty, S. Maji, The spectral bias of the deep image prior, arXiv preprint arXiv:1912.08905 (2019)." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 182, + 523, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 182, + 523, + 205 + ], + "spans": [ + { + "bbox": [ + 70, + 182, + 523, + 205 + ], + "type": "text", + "content": "[57] A. M. Saxe, P. W. Koh, Z. Chen, M. Bhand, B. Suresh, A. Y. Ng, On random weights and unsupervised feature learning., in: Icml, volume 2, 2011, p. 6." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 206, + 523, + 229 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 206, + 523, + 229 + ], + "spans": [ + { + "bbox": [ + 70, + 206, + 523, + 229 + ], + "type": "text", + "content": "[58] P. Wang, W. Zheng, T. Chen, Z. Wang, Anti-oversmoothing in deep vision transformers via the fourier domain analysis: From theory to practice, arXiv preprint arXiv:2203.05962 (2022)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 230, + 523, + 252 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 230, + 523, + 252 + ], + "spans": [ + { + "bbox": [ + 70, + 230, + 523, + 252 + ], + "type": "text", + "content": "[59] J. Ho, A. Jain, P. Abbeel, Denoising diffusion probabilistic models, Advances in neural information processing systems 33 (2020) 6840-6851." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 253, + 523, + 277 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 253, + 523, + 277 + ], + "spans": [ + { + "bbox": [ + 70, + 253, + 523, + 277 + ], + "type": "text", + "content": "[60] Y. Song, J. Sohl-Dickstein, D. P. Kingma, A. Kumar, S. Ermon, B. Poole, Score-based generative modeling through stochastic differential equations, arXiv preprint arXiv:2011.13456 (2020)." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 278, + 523, + 301 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 278, + 523, + 301 + ], + "spans": [ + { + "bbox": [ + 70, + 278, + 523, + 301 + ], + "type": "text", + "content": "[61] Y. Song, S. Ermon, Generative modeling by estimating gradients of the data distribution, Advances in neural information processing systems 32 (2019)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 301, + 523, + 325 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 301, + 523, + 325 + ], + "spans": [ + { + "bbox": [ + 70, + 301, + 523, + 325 + ], + "type": "text", + "content": "[62] T. Karras, M. Aittala, T. Aila, S. Laine, Elucidating the design space of diffusion-based generative models, Advances in neural information processing systems 35 (2022) 26565-26577." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 70, + 326, + 523, + 371 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 326, + 523, + 371 + ], + "spans": [ + { + "bbox": [ + 70, + 326, + 523, + 371 + ], + "type": "text", + "content": "[63] V. Oommen, A. Bora, Z. Zhang, G. E. Karniadakis, Data for \"integrating neural operators with diffusion models improves spectral representation in turbulence modeling\" (kolmogorov flow case), 2025. URL: https://doi.org/10.6084/m9.figshare.28250960.v1. doi:10.6084/m9.figshare.28250960.v1." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 70, + 373, + 523, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 373, + 523, + 396 + ], + "spans": [ + { + "bbox": [ + 70, + 373, + 523, + 396 + ], + "type": "text", + "content": "[64] G. S. Settles, A. Liberzon, Schlieren and bos velocimetry of a round turbulent helium jet in air, Optics and Lasers in Engineering 156 (2022) 107104." 
+ } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 69, + 415, + 405, + 430 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 415, + 405, + 430 + ], + "spans": [ + { + "bbox": [ + 69, + 415, + 405, + 430 + ], + "type": "text", + "content": "Appendix A. Training strategies and ResUNet prediction results" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 68, + 439, + 524, + 599 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 439, + 524, + 599 + ], + "spans": [ + { + "bbox": [ + 68, + 439, + 524, + 599 + ], + "type": "text", + "content": "All the models were trained for " + }, + { + "bbox": [ + 68, + 439, + 524, + 599 + ], + "type": "inline_equation", + "content": "\\sim 1000" + }, + { + "bbox": [ + 68, + 439, + 524, + 599 + ], + "type": "text", + "content": " epochs (convergence typically happened earlier). The initial learning rate was set to " + }, + { + "bbox": [ + 68, + 439, + 524, + 599 + ], + "type": "inline_equation", + "content": "8\\times 10^{-4}" + }, + { + "bbox": [ + 68, + 439, + 524, + 599 + ], + "type": "text", + "content": " and it was reduced after the first 700 epochs using a linear step scheduler. We used GELU activation function and group normalization after convolutional layers. Lion optimizer with weight decay of 0.02 to 0.1 were used, depending on the neural operator size. Batch size of 4 or 8 was used, depending on the neural operator size. We found that gradient clipping at maximum gradient norm of 0.4 to 1 (depending on neural operator size) helps with the optimization. Our preliminary findings showed better results with the Lion optimizer compared to Adam and AdamW optimizers. Therefore, all the trainings for this work were conducted with the Lion optimizer. For all the neural operators, the number of layers in the encoder and decoder were kept constant and the number of parameters at each layer was modified to change the neural operator size." 
+ } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 290, + 771, + 304, + 782 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 771, + 304, + 782 + ], + "spans": [ + { + "bbox": [ + 290, + 771, + 304, + 782 + ], + "type": "text", + "content": "29" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 28 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 94, + 82, + 163, + 151 + ], + "blocks": [ + { + "bbox": [ + 82, + 110, + 94, + 126 + ], + "lines": [ + { + "bbox": [ + 82, + 110, + 94, + 126 + ], + "spans": [ + { + "bbox": [ + 82, + 110, + 94, + 126 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 94, + 82, + 163, + 151 + ], + "lines": [ + { + "bbox": [ + 94, + 82, + 163, + 151 + ], + "spans": [ + { + "bbox": [ + 94, + 82, + 163, + 151 + ], + "type": "image", + "image_path": "216db5d1060828f55173fc75cc3f6bdd527f94ae0d29cc7ad38e6bb2fa212e64.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 80, + 149, + 93, + 162 + ], + "lines": [ + { + "bbox": [ + 80, + 149, + 93, + 162 + ], + "spans": [ + { + "bbox": [ + 80, + 149, + 93, + 162 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 177, + 82, + 246, + 151 + ], + "blocks": [ + { + "bbox": [ + 177, + 82, + 246, + 151 + ], + "lines": [ + { + "bbox": [ + 177, + 82, + 246, + 151 + ], + "spans": [ + { + "bbox": [ + 177, + 82, + 246, + 151 + ], + "type": "image", + "image_path": "f9f5641b6c82f11306de0d3689768b2b13e8457b6f1cd3662c1b66e60e3ed261.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 261, + 82, + 330, + 151 + ], + "blocks": [ + { + "bbox": [ + 261, + 82, + 330, + 151 + ], 
+ "lines": [ + { + "bbox": [ + 261, + 82, + 330, + 151 + ], + "spans": [ + { + "bbox": [ + 261, + 82, + 330, + 151 + ], + "type": "image", + "image_path": "d3c67e83e17b5fd46d7760afc8200d37ecb7166597966030724c7e43b7db60ed.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 343, + 82, + 413, + 151 + ], + "blocks": [ + { + "bbox": [ + 343, + 82, + 413, + 151 + ], + "lines": [ + { + "bbox": [ + 343, + 82, + 413, + 151 + ], + "spans": [ + { + "bbox": [ + 343, + 82, + 413, + 151 + ], + "type": "image", + "image_path": "96e9fe952ba04a71427afb1dec43bde98adb15cd8c90862e8d75dc70147f8e70.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 427, + 81, + 508, + 151 + ], + "blocks": [ + { + "bbox": [ + 427, + 81, + 508, + 151 + ], + "lines": [ + { + "bbox": [ + 427, + 81, + 508, + 151 + ], + "spans": [ + { + "bbox": [ + 427, + 81, + 508, + 151 + ], + "type": "image", + "image_path": "f986092ac9f6ad1661f7ee50cf594bcb849746ce965b8ad4798f4b49976d2df6.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 94, + 157, + 163, + 226 + ], + "blocks": [ + { + "bbox": [ + 94, + 157, + 163, + 226 + ], + "lines": [ + { + "bbox": [ + 94, + 157, + 163, + 226 + ], + "spans": [ + { + "bbox": [ + 94, + 157, + 163, + 226 + ], + "type": "image", + "image_path": "9c5bc0ee49ccb11782a786fdf420a16404e092d08ea9b8b167f0def97ae4f912.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 79, + 225, + 92, + 238 + ], + "lines": [ + { + "bbox": [ + 79, + 225, + 92, + 238 + ], + "spans": [ + { + "bbox": [ + 79, + 225, + 92, + 238 + ], + "type": "text", + "content": "(c)" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 177, + 157, + 246, 
+ 226 + ], + "blocks": [ + { + "bbox": [ + 177, + 157, + 246, + 226 + ], + "lines": [ + { + "bbox": [ + 177, + 157, + 246, + 226 + ], + "spans": [ + { + "bbox": [ + 177, + 157, + 246, + 226 + ], + "type": "image", + "image_path": "32f06be7849e92231d12edb1f8aab5d30df58ae3d8707e22c1fe4771b3eb8a5e.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 261, + 157, + 329, + 226 + ], + "blocks": [ + { + "bbox": [ + 261, + 157, + 329, + 226 + ], + "lines": [ + { + "bbox": [ + 261, + 157, + 329, + 226 + ], + "spans": [ + { + "bbox": [ + 261, + 157, + 329, + 226 + ], + "type": "image", + "image_path": "cd6f9024ad18056f68340ec20642f220bc4cfe1c04907395459421d74c229cb8.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 343, + 157, + 413, + 226 + ], + "blocks": [ + { + "bbox": [ + 343, + 157, + 413, + 226 + ], + "lines": [ + { + "bbox": [ + 343, + 157, + 413, + 226 + ], + "spans": [ + { + "bbox": [ + 343, + 157, + 413, + 226 + ], + "type": "image", + "image_path": "24a5a142fba9df36a266ff532a6dd6f10514925e5ceb3121f29d52b03610d45f.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 427, + 157, + 508, + 226 + ], + "blocks": [ + { + "bbox": [ + 427, + 157, + 508, + 226 + ], + "lines": [ + { + "bbox": [ + 427, + 157, + 508, + 226 + ], + "spans": [ + { + "bbox": [ + 427, + 157, + 508, + 226 + ], + "type": "image", + "image_path": "59f496ffcbc52b4a9b0fa7ec0bf05bdff02cdd881eb5c24ff6271a98046d3169.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 95, + 232, + 163, + 301 + ], + "blocks": [ + { + "bbox": [ + 95, + 232, + 163, + 301 + ], + "lines": [ + { + "bbox": [ + 95, + 232, + 163, + 301 + ], + "spans": [ + { + "bbox": [ + 95, + 232, + 163, + 301 + 
], + "type": "image", + "image_path": "6af72eec91e07a50d37a4a08fa6586c52969c4291b266bc7426bfb1b0c1c374a.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 80, + 301, + 93, + 315 + ], + "lines": [ + { + "bbox": [ + 80, + 301, + 93, + 315 + ], + "spans": [ + { + "bbox": [ + 80, + 301, + 93, + 315 + ], + "type": "text", + "content": "(d)" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 177, + 232, + 246, + 301 + ], + "blocks": [ + { + "bbox": [ + 177, + 232, + 246, + 301 + ], + "lines": [ + { + "bbox": [ + 177, + 232, + 246, + 301 + ], + "spans": [ + { + "bbox": [ + 177, + 232, + 246, + 301 + ], + "type": "image", + "image_path": "68dddade02c2590e94e546d373570e8a15684914e477cd49190c23707cd2bc18.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 261, + 232, + 329, + 301 + ], + "blocks": [ + { + "bbox": [ + 261, + 232, + 329, + 301 + ], + "lines": [ + { + "bbox": [ + 261, + 232, + 329, + 301 + ], + "spans": [ + { + "bbox": [ + 261, + 232, + 329, + 301 + ], + "type": "image", + "image_path": "6c7b0c2e0c10c5a798fdcfa928b49ce73045efbd3b1cd8453688f7c99d17cd8a.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 343, + 232, + 413, + 301 + ], + "blocks": [ + { + "bbox": [ + 343, + 232, + 413, + 301 + ], + "lines": [ + { + "bbox": [ + 343, + 232, + 413, + 301 + ], + "spans": [ + { + "bbox": [ + 343, + 232, + 413, + 301 + ], + "type": "image", + "image_path": "3379c693ec231bdecec176e4940ec55fb41c91a3584394e5c718fa44b432e3e9.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 427, + 232, + 508, + 301 + ], + "blocks": [ + { + "bbox": [ + 427, + 232, + 508, + 301 + ], + "lines": [ + { + 
"bbox": [ + 427, + 232, + 508, + 301 + ], + "spans": [ + { + "bbox": [ + 427, + 232, + 508, + 301 + ], + "type": "image", + "image_path": "58537f9f0b24681dc7cbeaeca1b1a9b091ce3452f0c6cb59fec6e162e5eb03b3.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 95, + 306, + 163, + 375 + ], + "blocks": [ + { + "bbox": [ + 95, + 306, + 163, + 375 + ], + "lines": [ + { + "bbox": [ + 95, + 306, + 163, + 375 + ], + "spans": [ + { + "bbox": [ + 95, + 306, + 163, + 375 + ], + "type": "image", + "image_path": "7afd174a61dc3d1de47273ea402d57ec42d4b4855f6e00716ed25610f3bf2e99.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 177, + 306, + 246, + 375 + ], + "blocks": [ + { + "bbox": [ + 177, + 306, + 246, + 375 + ], + "lines": [ + { + "bbox": [ + 177, + 306, + 246, + 375 + ], + "spans": [ + { + "bbox": [ + 177, + 306, + 246, + 375 + ], + "type": "image", + "image_path": "bf303478b135b4702e050d5bc329eafb8ed3bee1cea9c893e56c2e3acfcdfe30.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 261, + 306, + 329, + 375 + ], + "blocks": [ + { + "bbox": [ + 261, + 306, + 329, + 375 + ], + "lines": [ + { + "bbox": [ + 261, + 306, + 329, + 375 + ], + "spans": [ + { + "bbox": [ + 261, + 306, + 329, + 375 + ], + "type": "image", + "image_path": "46f47561b47a1d8e82a51bf46d73c89d671b14142e75b748b48a9f23c4bc18e5.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 343, + 306, + 413, + 375 + ], + "blocks": [ + { + "bbox": [ + 343, + 306, + 413, + 375 + ], + "lines": [ + { + "bbox": [ + 343, + 306, + 413, + 375 + ], + "spans": [ + { + "bbox": [ + 343, + 306, + 413, + 375 + ], + "type": "image", + "image_path": 
"fe38d8420ef1f2b61297d2ff15a9cd6a35da167a630d84b91c2c48f27b959981.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 427, + 306, + 508, + 375 + ], + "blocks": [ + { + "bbox": [ + 427, + 306, + 508, + 375 + ], + "lines": [ + { + "bbox": [ + 427, + 306, + 508, + 375 + ], + "spans": [ + { + "bbox": [ + 427, + 306, + 508, + 375 + ], + "type": "image", + "image_path": "0a2207b92d4a876252be664d76541511a159339b9f752eac681e2c9f2f8745fa.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 95, + 380, + 163, + 449 + ], + "blocks": [ + { + "bbox": [ + 80, + 376, + 93, + 389 + ], + "lines": [ + { + "bbox": [ + 80, + 376, + 93, + 389 + ], + "spans": [ + { + "bbox": [ + 80, + 376, + 93, + 389 + ], + "type": "text", + "content": "(e)" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 95, + 380, + 163, + 449 + ], + "lines": [ + { + "bbox": [ + 95, + 380, + 163, + 449 + ], + "spans": [ + { + "bbox": [ + 95, + 380, + 163, + 449 + ], + "type": "image", + "image_path": "296cbe932f78eae1ce2bd146deed18f92446c6a21cc9c53a07e445e9df2d2011.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + } + ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 177, + 380, + 246, + 449 + ], + "blocks": [ + { + "bbox": [ + 177, + 380, + 246, + 449 + ], + "lines": [ + { + "bbox": [ + 177, + 380, + 246, + 449 + ], + "spans": [ + { + "bbox": [ + 177, + 380, + 246, + 449 + ], + "type": "image", + "image_path": "a758c825f19ab19d8b155d31e8b085f5d408ecd312bcde014754a5ce38a102a6.jpg" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_body" + } + ], + "index": 26 + }, + { + "type": "image", + "bbox": [ + 261, + 380, + 329, + 449 + ], + "blocks": [ + { + "bbox": [ + 261, + 380, + 329, + 449 + ], + "lines": [ + { + "bbox": [ + 261, + 380, + 329, + 449 + ], + 
"spans": [ + { + "bbox": [ + 261, + 380, + 329, + 449 + ], + "type": "image", + "image_path": "c33f2ead32bb0936798071bc0c00592261094bdf3e45f08d526a0fbf6f153427.jpg" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_body" + } + ], + "index": 27 + }, + { + "type": "image", + "bbox": [ + 343, + 380, + 413, + 449 + ], + "blocks": [ + { + "bbox": [ + 343, + 380, + 413, + 449 + ], + "lines": [ + { + "bbox": [ + 343, + 380, + 413, + 449 + ], + "spans": [ + { + "bbox": [ + 343, + 380, + 413, + 449 + ], + "type": "image", + "image_path": "0d8bcff3f5dad1dfd457e9f5dd4d81f799dc0b6bcee229617b3874b126d9c772.jpg" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_body" + } + ], + "index": 28 + }, + { + "type": "image", + "bbox": [ + 427, + 380, + 508, + 450 + ], + "blocks": [ + { + "bbox": [ + 427, + 380, + 508, + 450 + ], + "lines": [ + { + "bbox": [ + 427, + 380, + 508, + 450 + ], + "spans": [ + { + "bbox": [ + 427, + 380, + 508, + 450 + ], + "type": "image", + "image_path": "0255dc7ff44b63e60640617dbe202f47d5e43c9354551d69843c1a39ce725f0f.jpg" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 68, + 470, + 524, + 508 + ], + "lines": [ + { + "bbox": [ + 68, + 470, + 524, + 508 + ], + "spans": [ + { + "bbox": [ + 68, + 470, + 524, + 508 + ], + "type": "text", + "content": "Figure A.12: Example of subcooled pool boiling temperature prediction results by neural operators. (a) Ground truth results, (b) UNet prediction results, (c) ResUNet prediction results, (d) UNet prediction errors, (e) ResUNet prediction errors. The results show five time-step predictions from left to right." + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_caption" + } + ], + "index": 29 + }, + { + "bbox": [ + 69, + 526, + 384, + 554 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 526, + 384, + 554 + ], + "spans": [ + { + "bbox": [ + 69, + 526, + 384, + 554 + ], + "type": "text", + "content": "Appendix B. 
Boundary RMSE, Bubble RMSE, and Spectral Errors" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 68, + 561, + 524, + 591 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 561, + 524, + 591 + ], + "spans": [ + { + "bbox": [ + 68, + 561, + 524, + 591 + ], + "type": "text", + "content": "Boundary RMSE (BRMSE) for a single sample and time-step is defined by calculating the errors only at the boundaries of the domain:" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 213, + 595, + 523, + 632 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 213, + 595, + 523, + 632 + ], + "spans": [ + { + "bbox": [ + 213, + 595, + 523, + 632 + ], + "type": "interline_equation", + "content": "\\operatorname {B R M S E} = \\sqrt {\\frac {1}{| \\partial \\Omega |} \\sum_ {\\mathbf {x} _ {i} \\in \\partial \\Omega} \\left(\\hat {T} _ {i} - T _ {i}\\right) ^ {2}}, \\tag {B.1}", + "image_path": "fed505b0b3fb58ffeec21fa0ee18d356e35b414f57d217bb2c2bca7bb7f7715b.jpg" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 68, + 639, + 523, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 639, + 523, + 682 + ], + "spans": [ + { + "bbox": [ + 68, + 639, + 523, + 682 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 68, + 639, + 523, + 682 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_i\\in \\partial \\Omega" + }, + { + "bbox": [ + 68, + 639, + 523, + 682 + ], + "type": "text", + "content": " specifies the points at the boundaries, " + }, + { + "bbox": [ + 68, + 639, + 523, + 682 + ], + "type": "inline_equation", + "content": "\\hat{T}_i" + }, + { + "bbox": [ + 68, + 639, + 523, + 682 + ], + "type": "text", + "content": " is the predicted temperature, and " + }, + { + "bbox": [ + 68, + 639, + 523, + 682 + ], + "type": "inline_equation", + "content": "T_{i}" + }, + { + "bbox": [ + 68, + 639, + 523, + 682 + ], + "type": "text", + "content": " is the actual temperature. 
Similarly, bubble RMSE is defined by calculating the errors only within the bubble areas. These areas are specified through a level-set function in the simulations." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 143, + 687, + 523, + 724 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 687, + 523, + 724 + ], + "spans": [ + { + "bbox": [ + 143, + 687, + 523, + 724 + ], + "type": "interline_equation", + "content": "\\text {B u b b l e} = \\sqrt {\\frac {1}{| \\Omega_ {\\text {b u b b l e}} \\cup \\partial \\Omega_ {\\text {b u b b l e}} |} \\sum_ {\\mathbf {x} _ {i} \\in \\Omega_ {\\text {b u b b l e}} \\cup \\partial \\Omega_ {\\text {b u b b l e}}} (\\hat {y} _ {i} - y _ {i}) ^ {2}}, \\tag {B.2}", + "image_path": "5cbd1f6bf814e03424dbab09e92667104e5c0975fa72dd0b632444ed73b832f1.jpg" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 68, + 729, + 523, + 758 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 729, + 523, + 758 + ], + "spans": [ + { + "bbox": [ + 68, + 729, + 523, + 758 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 68, + 729, + 523, + 758 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_i\\in \\Omega_{\\mathrm{bubble}}" + }, + { + "bbox": [ + 68, + 729, + 523, + 758 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 68, + 729, + 523, + 758 + ], + "type": "inline_equation", + "content": "\\partial \\Omega_{\\mathrm{bubble}}" + }, + { + "bbox": [ + 68, + 729, + 523, + 758 + ], + "type": "text", + "content": " specify the points inside the bubble areas and at the interfaces, respectively." 
+ } + ] + } + ], + "index": 36 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 290, + 771, + 304, + 782 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 771, + 304, + 782 + ], + "spans": [ + { + "bbox": [ + 290, + 771, + 304, + 782 + ], + "type": "text", + "content": "30" + } + ] + } + ], + "index": 37 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 29 + }, + { + "para_blocks": [ + { + "bbox": [ + 86, + 73, + 523, + 88 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 73, + 523, + 88 + ], + "spans": [ + { + "bbox": [ + 86, + 73, + 523, + 88 + ], + "type": "text", + "content": "The spectral errors in each of the low, mid, and high-frequency bands are defined as follows:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 119, + 98, + 524, + 136 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 98, + 524, + 136 + ], + "spans": [ + { + "bbox": [ + 119, + 98, + 524, + 136 + ], + "type": "interline_equation", + "content": "F _ {\\text {b a n d}} = \\sqrt {\\frac {1}{N _ {\\text {b a n d}}} \\sum_ {k \\in \\text {b a n d}} \\left| \\mathcal {F} (T) (\\mathbf {k}) - \\mathcal {F} (\\hat {T}) (\\mathbf {k}) \\right| ^ {2}}, \\quad \\text {b a n d} \\in \\{\\text {l o w}, \\text {m i d}, \\text {h i g h} \\}, \\tag {B.3}", + "image_path": "750959e9badbced2a4d620b4cc1c1984ac361f39eab61640da800817a909a643.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 68, + 146, + 524, + 219 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 146, + 524, + 219 + ], + "spans": [ + { + "bbox": [ + 68, + 146, + 524, + 219 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 68, + 146, + 524, + 219 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 68, + 146, + 524, + 219 + ], + "type": "text", + "content": " is the spatial frequency component of the Fourier transformed solutions, " + }, + { + "bbox": [ + 68, + 146, + 
524, + 219 + ], + "type": "inline_equation", + "content": "\\mathcal{F}" + }, + { + "bbox": [ + 68, + 146, + 524, + 219 + ], + "type": "text", + "content": " denotes the Fourier transform, and " + }, + { + "bbox": [ + 68, + 146, + 524, + 219 + ], + "type": "inline_equation", + "content": "N_{\\mathrm{band}}" + }, + { + "bbox": [ + 68, + 146, + 524, + 219 + ], + "type": "text", + "content": " specifies the number of components at each frequency band. The low, mid, and high bands may be defined differently based on the underlying dataset and the amount of high-frequency components. In this work, these bands were set to the first 2%, the first 6.2% excluding the low band components, and the last 93.8% of the components." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 233, + 524, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 233, + 524, + 262 + ], + "spans": [ + { + "bbox": [ + 68, + 233, + 524, + 262 + ], + "type": "text", + "content": "Similarly, the energy spectrum error, showing the energy spectra misalignment at each frequency band is defined as follows:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 92, + 272, + 524, + 312 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 92, + 272, + 524, + 312 + ], + "spans": [ + { + "bbox": [ + 92, + 272, + 524, + 312 + ], + "type": "interline_equation", + "content": "\\mathcal {E} _ {F _ {\\mathrm {b a n d}}} = \\sqrt {\\frac {1}{N _ {\\mathrm {b a n d}}} \\sum_ {k \\in \\mathrm {b a n d}} \\left(\\left| \\mathcal {F} (T) (\\mathbf {k}) \\right| ^ {2} - \\left| \\mathcal {F} (\\hat {T}) (\\mathbf {k}) \\right| ^ {2}\\right) ^ {2}}, \\quad \\mathrm {b a n d} \\in \\{\\text {l o w , m i d , h i g h} \\}, \\tag {B.4}", + "image_path": "1f4a0d9406d73a62956b40c8971057b6fabc517067f9a1e18c104fd4f627edbb.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 86, + 321, + 298, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ 
+ 86, + 321, + 298, + 335 + ], + "spans": [ + { + "bbox": [ + 86, + 321, + 298, + 335 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 86, + 321, + 298, + 335 + ], + "type": "inline_equation", + "content": "\\mathcal{E}" + }, + { + "bbox": [ + 86, + 321, + 298, + 335 + ], + "type": "text", + "content": " denotes the energy spectrum error." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 353, + 524, + 381 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 353, + 524, + 381 + ], + "spans": [ + { + "bbox": [ + 69, + 353, + 524, + 381 + ], + "type": "text", + "content": "Appendix C. Summary of subcooled pool boiling prediction results with HFS-enhanced NO" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 68, + 391, + 525, + 522 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 391, + 525, + 522 + ], + "spans": [ + { + "bbox": [ + 68, + 391, + 525, + 522 + ], + "type": "text", + "content": "In this work, we tested different variants of ResUNet by varying number of parameters in the range of " + }, + { + "bbox": [ + 68, + 391, + 525, + 522 + ], + "type": "inline_equation", + "content": "\\sim 2" + }, + { + "bbox": [ + 68, + 391, + 525, + 522 + ], + "type": "text", + "content": " millions to " + }, + { + "bbox": [ + 68, + 391, + 525, + 522 + ], + "type": "inline_equation", + "content": "\\sim 16" + }, + { + "bbox": [ + 68, + 391, + 525, + 522 + ], + "type": "text", + "content": " millions. In the following table, we summarized the results of the two of the models (smallest and largest models), trained with optimal hyperparameters. Note that the same hyperparameters were used for training a neural operator with and without HFS. The parameters were first optimized for the NO without HFS and the same set of parameters were used for training the HFS-enhanced NO. The results of the other models are not included in this table for easier comparison and interpretation. 
We refer the reader to Figure 4 for observing the effect of HFS on all the tested models. Similar to the rest of the paper, the results are based on five time-step predictions." + } + ] + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 70, + 590, + 524, + 740 + ], + "blocks": [ + { + "bbox": [ + 68, + 533, + 524, + 581 + ], + "lines": [ + { + "bbox": [ + 68, + 533, + 524, + 581 + ], + "spans": [ + { + "bbox": [ + 68, + 533, + 524, + 581 + ], + "type": "text", + "content": "Table C.4: Subcooled pool boiling temperature prediction errors with neural operator (NO) with and without high-frequency scaling (HFS) The columns correspond to the metrics, NO with " + }, + { + "bbox": [ + 68, + 533, + 524, + 581 + ], + "type": "inline_equation", + "content": "\\sim 1.7" + }, + { + "bbox": [ + 68, + 533, + 524, + 581 + ], + "type": "text", + "content": " millions parameters, HFS-enhanced NO with " + }, + { + "bbox": [ + 68, + 533, + 524, + 581 + ], + "type": "inline_equation", + "content": "\\sim 1.7" + }, + { + "bbox": [ + 68, + 533, + 524, + 581 + ], + "type": "text", + "content": " millions parameters, NO with " + }, + { + "bbox": [ + 68, + 533, + 524, + 581 + ], + "type": "inline_equation", + "content": "\\sim 16.2" + }, + { + "bbox": [ + 68, + 533, + 524, + 581 + ], + "type": "text", + "content": " millions parameters, and HFS-enhanced NO with " + }, + { + "bbox": [ + 68, + 533, + 524, + 581 + ], + "type": "inline_equation", + "content": "\\sim 16.2" + }, + { + "bbox": [ + 68, + 533, + 524, + 581 + ], + "type": "text", + "content": " millions parameters." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 70, + 590, + 524, + 740 + ], + "lines": [ + { + "bbox": [ + 70, + 590, + 524, + 740 + ], + "spans": [ + { + "bbox": [ + 70, + 590, + 524, + 740 + ], + "type": "table", + "html": "
NO, 1.7 MNO+HFS, 1.7 MNO, 16.2 MNO+HFS, 16.2 M
Rel. Error0.04140.03330.02510.0238
RMSE0.04030.03240.02440.0232
BRMSE0.09730.07290.05620.0505
Bubble RMSE0.19240.1430.1090.0985
Maxmean1.0190.8970.6850.656
Flow0.3230.2370.2120.141
Fmid0.2820.2180.1850.148
Fhigh0.04760.04000.03920.0296
Parameters [Millions]1.7111.71216.26316.268
", + "image_path": "bf8c0afdc7ba0107544fd83710ce308cb2759d8c05cdf5f7306cec86bee95cb2.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 290, + 771, + 302, + 782 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 771, + 302, + 782 + ], + "spans": [ + { + "bbox": [ + 290, + 771, + 302, + 782 + ], + "type": "text", + "content": "31" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 30 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 73, + 352, + 87 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 73, + 352, + 87 + ], + "spans": [ + { + "bbox": [ + 69, + 73, + 352, + 87 + ], + "type": "text", + "content": "Appendix D. Saturated pool boiling prediction results" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 68, + 95, + 525, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 95, + 525, + 196 + ], + "spans": [ + { + "bbox": [ + 68, + 95, + 525, + 196 + ], + "type": "text", + "content": "Saturated pool boiling dataset involves less complexity due to lower high-frequency components and small scale features. Therefore, a well-optimized NO without HFS can successfully resolve the solutions. However, HFS still enhances the prediction accuracies, especially at bubble areas. The following figure demonstrates an example of predictions using NO and HFS-enhanced NO for saturated pool boiling dataset. Generally, the errors are much smaller than subcooled pool boiling predictions. However, it can be seen that the errors in the regions with departed bubbles are reduced with HFS-enhanced NO." 
+ } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 76, + 221, + 517, + 599 + ], + "blocks": [ + { + "bbox": [ + 76, + 221, + 517, + 599 + ], + "lines": [ + { + "bbox": [ + 76, + 221, + 517, + 599 + ], + "spans": [ + { + "bbox": [ + 76, + 221, + 517, + 599 + ], + "type": "image", + "image_path": "c875edc340d8e0893926ec11727820b8909097c64ebe04e21e7e0b12c6ba14a6.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 68, + 612, + 526, + 673 + ], + "lines": [ + { + "bbox": [ + 68, + 612, + 526, + 673 + ], + "spans": [ + { + "bbox": [ + 68, + 612, + 526, + 673 + ], + "type": "text", + "content": "Figure D.13: Examples of saturated pool boiling temperature prediction results by NO and HFS-enhanced NO (a) Ground truth (GT) results. (b) NO predictions. (c) " + }, + { + "bbox": [ + 68, + 612, + 526, + 673 + ], + "type": "inline_equation", + "content": "\\mathrm{NO} + \\mathrm{HFS}" + }, + { + "bbox": [ + 68, + 612, + 526, + 673 + ], + "type": "text", + "content": " predictions. (d) Absolute prediction errors of NO " + }, + { + "bbox": [ + 68, + 612, + 526, + 673 + ], + "type": "inline_equation", + "content": "(E_{\\mathrm{NO}})" + }, + { + "bbox": [ + 68, + 612, + 526, + 673 + ], + "type": "text", + "content": ". (e) Absolute prediction errors of " + }, + { + "bbox": [ + 68, + 612, + 526, + 673 + ], + "type": "inline_equation", + "content": "\\mathrm{NO} + \\mathrm{HFS}" + }, + { + "bbox": [ + 68, + 612, + 526, + 673 + ], + "type": "inline_equation", + "content": "(E_{\\mathrm{NO} + \\mathrm{HFS}})" + }, + { + "bbox": [ + 68, + 612, + 526, + 673 + ], + "type": "text", + "content": ". The results are shown for five time-step predictions from left to right. The departed bubbles areas are circled (dashed red circles) in error maps for easier interpretation and comparison. 
The results are based on a NO with " + }, + { + "bbox": [ + 68, + 612, + 526, + 673 + ], + "type": "inline_equation", + "content": "\\sim 3.5" + }, + { + "bbox": [ + 68, + 612, + 526, + 673 + ], + "type": "text", + "content": " millions parameter." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 691, + 526, + 762 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 691, + 526, + 762 + ], + "spans": [ + { + "bbox": [ + 67, + 691, + 526, + 762 + ], + "type": "text", + "content": "To further investigate if HFS can enhance the predictions with smaller NO on this simpler dataset, we trained another NO with the same structure (ResUNet) but with only " + }, + { + "bbox": [ + 67, + 691, + 526, + 762 + ], + "type": "inline_equation", + "content": "\\sim 0.6" + }, + { + "bbox": [ + 67, + 691, + 526, + 762 + ], + "type": "text", + "content": " millions parameters with and without HFS. Consistent with previous results, HFS enhanced the predictions by reducing the field errors such as RMSE and bubble RMSE as well the spectral errors. The prediction results of saturated pool boiling dataset using two different NOs with" + } + ] + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 290, + 771, + 304, + 782 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 771, + 304, + 782 + ], + "spans": [ + { + "bbox": [ + 290, + 771, + 304, + 782 + ], + "type": "text", + "content": "32" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 31 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 73, + 524, + 103 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 73, + 524, + 103 + ], + "spans": [ + { + "bbox": [ + 68, + 73, + 524, + 103 + ], + "type": "text", + "content": "and without HFS are summarized in the following table. 
Similar to the rest of the paper, the results are based on five time-step predictions." + } + ] + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 69, + 158, + 525, + 314 + ], + "blocks": [ + { + "bbox": [ + 68, + 114, + 525, + 152 + ], + "lines": [ + { + "bbox": [ + 68, + 114, + 525, + 152 + ], + "spans": [ + { + "bbox": [ + 68, + 114, + 525, + 152 + ], + "type": "text", + "content": "Table D.5: Saturated pool boiling temperature prediction errors of NO with and without HFS. The columns correspond to the metrics, NO with " + }, + { + "bbox": [ + 68, + 114, + 525, + 152 + ], + "type": "inline_equation", + "content": "\\sim 0.6" + }, + { + "bbox": [ + 68, + 114, + 525, + 152 + ], + "type": "text", + "content": " millions parameters, HFS-enhanced NO with " + }, + { + "bbox": [ + 68, + 114, + 525, + 152 + ], + "type": "inline_equation", + "content": "\\sim 0.6" + }, + { + "bbox": [ + 68, + 114, + 525, + 152 + ], + "type": "text", + "content": " millions parameters, NO with " + }, + { + "bbox": [ + 68, + 114, + 525, + 152 + ], + "type": "inline_equation", + "content": "\\sim 3.5" + }, + { + "bbox": [ + 68, + 114, + 525, + 152 + ], + "type": "text", + "content": " millions parameters, and HFS-enhanced NO with " + }, + { + "bbox": [ + 68, + 114, + 525, + 152 + ], + "type": "inline_equation", + "content": "\\sim 3.5" + }, + { + "bbox": [ + 68, + 114, + 525, + 152 + ], + "type": "text", + "content": " millions parameters." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 69, + 158, + 525, + 314 + ], + "lines": [ + { + "bbox": [ + 69, + 158, + 525, + 314 + ], + "spans": [ + { + "bbox": [ + 69, + 158, + 525, + 314 + ], + "type": "table", + "html": "
NO, 0.6 MNO+HFS, 0.6 MNO, 3.5 MNO+HFS, 3.5 M
Rel. Error0.01730.01650.01490.0145
RMSE0.01710.01640.01480.0144
BRMSE0.04620.04500.03640.0355
Bubble RMSE0.09180.08980.07260.0692
Maxmean0.5920.5950.5530.544
Flow0.09640.08350.07450.0736
Fmid0.10860.09980.09190.0855
Fhigh0.02090.02080.01820.0180
Parameters [Millions]0.6140.6153.4803.481
", + "image_path": "7336432f3f14053949b40efa42bad4fd0c39bbab6cbcf2075dc6987d42cba825.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 290, + 771, + 304, + 782 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 771, + 304, + 782 + ], + "spans": [ + { + "bbox": [ + 290, + 771, + 304, + 782 + ], + "type": "text", + "content": "33" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 32 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 91, + 145, + 299, + 584 + ], + "blocks": [ + { + "bbox": [ + 90, + 129, + 109, + 143 + ], + "lines": [ + { + "bbox": [ + 90, + 129, + 109, + 143 + ], + "spans": [ + { + "bbox": [ + 90, + 129, + 109, + 143 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 91, + 145, + 299, + 584 + ], + "lines": [ + { + "bbox": [ + 91, + 145, + 299, + 584 + ], + "spans": [ + { + "bbox": [ + 91, + 145, + 299, + 584 + ], + "type": "image", + "image_path": "109eabf1c2090326d7c86275a3cb013ee490da972d1d8047ce97ccfd130ab144.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 68, + 604, + 525, + 641 + ], + "lines": [ + { + "bbox": [ + 68, + 604, + 525, + 641 + ], + "spans": [ + { + "bbox": [ + 68, + 604, + 525, + 641 + ], + "type": "text", + "content": "Figure E.14: Effect of HFS on the latent space mean features. (a) Mean latent feature maps in decoder (downsampling) with five layers. (b) Mean latent feature maps in decoder (upsampling) with five layers. The results are based on a NO with " + }, + { + "bbox": [ + 68, + 604, + 525, + 641 + ], + "type": "inline_equation", + "content": "\\sim 16" + }, + { + "bbox": [ + 68, + 604, + 525, + 641 + ], + "type": "text", + "content": " millions parameters." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 305, + 143, + 511, + 584 + ], + "blocks": [ + { + "bbox": [ + 305, + 128, + 323, + 145 + ], + "lines": [ + { + "bbox": [ + 305, + 128, + 323, + 145 + ], + "spans": [ + { + "bbox": [ + 305, + 128, + 323, + 145 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 305, + 143, + 511, + 584 + ], + "lines": [ + { + "bbox": [ + 305, + 143, + 511, + 584 + ], + "spans": [ + { + "bbox": [ + 305, + 143, + 511, + 584 + ], + "type": "image", + "image_path": "f7993c093c7192f5cb3cbfb825bab433b9db09b4062eae2dea9b86fd1e4c6d5b.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 69, + 73, + 288, + 87 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 73, + 288, + 87 + ], + "spans": [ + { + "bbox": [ + 69, + 73, + 288, + 87 + ], + "type": "text", + "content": "Appendix E. Visualization of latent space" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 290, + 771, + 304, + 782 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 771, + 304, + 782 + ], + "spans": [ + { + "bbox": [ + 290, + 771, + 304, + 782 + ], + "type": "text", + "content": "34" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 33 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 478, + 88 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 72, + 478, + 88 + ], + "spans": [ + { + "bbox": [ + 69, + 72, + 478, + 88 + ], + "type": "text", + "content": "Appendix F. 
Additional visualizations of the subcooled pool boiling predictions" + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 76, + 112, + 515, + 523 + ], + "blocks": [ + { + "bbox": [ + 76, + 112, + 515, + 523 + ], + "lines": [ + { + "bbox": [ + 76, + 112, + 515, + 523 + ], + "spans": [ + { + "bbox": [ + 76, + 112, + 515, + 523 + ], + "type": "image", + "image_path": "818538e3c98e7ef1cb7ca02c7839b776ab683c489ef3ad9ddcfc9a7d6277cc8e.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 68, + 536, + 525, + 586 + ], + "lines": [ + { + "bbox": [ + 68, + 536, + 525, + 586 + ], + "spans": [ + { + "bbox": [ + 68, + 536, + 525, + 586 + ], + "type": "text", + "content": "Figure F.15: Examples of subcooled pool boiling prediction results by DM integrated with NO and HFS-enhanced NO. (a) Ground truth (GT) results. (b) NO predictions. (c) NO + DM predictions. (d) NO + HFS predictions. (e) NO + HFS + DM predictions. The results are shown for five time-step predictions from left to right." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 602, + 374, + 618 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 602, + 374, + 618 + ], + "spans": [ + { + "bbox": [ + 69, + 602, + 374, + 618 + ], + "type": "text", + "content": "Appendix G. 
Optimized scaling parameters, " + }, + { + "bbox": [ + 69, + 602, + 374, + 618 + ], + "type": "inline_equation", + "content": "\\lambda_{DC}" + }, + { + "bbox": [ + 69, + 602, + 374, + 618 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 69, + 602, + 374, + 618 + ], + "type": "inline_equation", + "content": "\\lambda_{HFC}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 68, + 626, + 525, + 671 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 626, + 525, + 671 + ], + "spans": [ + { + "bbox": [ + 68, + 626, + 525, + 671 + ], + "type": "text", + "content": "The following figure demonstrates the learned " + }, + { + "bbox": [ + 68, + 626, + 525, + 671 + ], + "type": "inline_equation", + "content": "\\lambda_{DC}" + }, + { + "bbox": [ + 68, + 626, + 525, + 671 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 68, + 626, + 525, + 671 + ], + "type": "inline_equation", + "content": "\\lambda_{HFC}" + }, + { + "bbox": [ + 68, + 626, + 525, + 671 + ], + "type": "text", + "content": " across all the feature maps in the latent space of the encoder and decoder. The results are based on the training of a HFS-enhanced NO with " + }, + { + "bbox": [ + 68, + 626, + 525, + 671 + ], + "type": "inline_equation", + "content": "\\sim 1.7" + }, + { + "bbox": [ + 68, + 626, + 525, + 671 + ], + "type": "text", + "content": " million parameters for the subcooled pool boiling problem." 
+ } + ] + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 290, + 771, + 303, + 782 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 771, + 303, + 782 + ], + "spans": [ + { + "bbox": [ + 290, + 771, + 303, + 782 + ], + "type": "text", + "content": "35" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 34 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 102, + 88, + 495, + 323 + ], + "blocks": [ + { + "bbox": [ + 102, + 88, + 495, + 323 + ], + "lines": [ + { + "bbox": [ + 102, + 88, + 495, + 323 + ], + "spans": [ + { + "bbox": [ + 102, + 88, + 495, + 323 + ], + "type": "image", + "image_path": "b32764dafa3d505c255f1f64f1257658a5fa838bf50794dd76059aea8719ee2a.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 101, + 317, + 495, + 561 + ], + "blocks": [ + { + "bbox": [ + 101, + 317, + 495, + 561 + ], + "lines": [ + { + "bbox": [ + 101, + 317, + 495, + 561 + ], + "spans": [ + { + "bbox": [ + 101, + 317, + 495, + 561 + ], + "type": "image", + "image_path": "a0784824075ff041ec54bce51f4a9f920414699bc99c6bfb71782ab60b664316.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 572, + 525, + 609 + ], + "lines": [ + { + "bbox": [ + 67, + 572, + 525, + 609 + ], + "spans": [ + { + "bbox": [ + 67, + 572, + 525, + 609 + ], + "type": "text", + "content": "Figure G.16: (a) learned values of " + }, + { + "bbox": [ + 67, + 572, + 525, + 609 + ], + "type": "inline_equation", + "content": "\\lambda_{DC}" + }, + { + "bbox": [ + 67, + 572, + 525, + 609 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 572, + 525, + 609 + ], + "type": "inline_equation", + "content": "\\lambda_{HFC}" + }, + { + "bbox": [ + 67, + 572, + 525, + 609 + ], + "type": "text", + "content": " in the encoder of the NO. 
(b) learned values of " + }, + { + "bbox": [ + 67, + 572, + 525, + 609 + ], + "type": "inline_equation", + "content": "\\lambda_{DC}" + }, + { + "bbox": [ + 67, + 572, + 525, + 609 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 572, + 525, + 609 + ], + "type": "inline_equation", + "content": "\\lambda_{HFC}" + }, + { + "bbox": [ + 67, + 572, + 525, + 609 + ], + "type": "text", + "content": " in the decoder of NO. Layers start from highest spatial resolution to the lowest in the encoder and vice versa for the decoder." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 630, + 327, + 645 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 630, + 327, + 645 + ], + "spans": [ + { + "bbox": [ + 69, + 630, + 327, + 645 + ], + "type": "text", + "content": "Appendix H. Kolmogorov flow prediction results" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 68, + 652, + 525, + 697 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 652, + 525, + 697 + ], + "spans": [ + { + "bbox": [ + 68, + 652, + 525, + 697 + ], + "type": "text", + "content": "The vorticity formulation of the unsteady 2D incompressible Navier-Stokes equation for a viscous and incompressible fluid with the Kolmogorov forcing term is given as follows, where " + }, + { + "bbox": [ + 68, + 652, + 525, + 697 + ], + "type": "inline_equation", + "content": "\\omega" + }, + { + "bbox": [ + 68, + 652, + 525, + 697 + ], + "type": "text", + "content": " is the vorticity, " + }, + { + "bbox": [ + 68, + 652, + 525, + 697 + ], + "type": "inline_equation", + "content": "u" + }, + { + "bbox": [ + 68, + 652, + 525, + 697 + ], + "type": "text", + "content": " is the velocity vector, and " + }, + { + "bbox": [ + 68, + 652, + 525, + 697 + ], + "type": "inline_equation", + "content": "\\nu" + }, + { + "bbox": [ + 68, + 652, + 525, + 697 + ], + "type": "text", + "content": " is the 
kinematic viscosity." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 95, + 714, + 524, + 781 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 714, + 524, + 781 + ], + "spans": [ + { + "bbox": [ + 95, + 714, + 524, + 781 + ], + "type": "interline_equation", + "content": "\\left\\{ \\begin{array}{l l} \\partial_ {t} \\omega + \\mathbf {u} \\cdot \\nabla \\omega = \\nu \\Delta \\omega + f (x, y), & (x, y) \\in (0, 2 \\pi) ^ {2}, t \\in (0, t _ {\\text {f i n a l}} ] \\\\ f (x, y) = \\chi (\\sin (2 \\pi (x + y)) + \\cos (2 \\pi (x + y))), & (x, y) \\in (0, 2 \\pi) ^ {2} \\\\ \\nabla \\cdot \\mathbf {u} = 0, & (x, y) \\in (0, 2 \\pi) ^ {2}, t \\in (0, t _ {\\text {f i n a l}} ] \\\\ \\omega (x, y, 0) = \\omega_ {0}, & (x, y) \\in (0, 2 \\pi) ^ {2} \\end{array} \\right. \\tag {H.1}", + "image_path": "3e6d7324658656d6206cc3e05f7607fad8522133a6e64cee125fd0325b76d3b3.jpg" + } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 35 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 72, + 525, + 132 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 72, + 525, + 132 + ], + "spans": [ + { + "bbox": [ + 68, + 72, + 525, + 132 + ], + "type": "text", + "content": "In this study, we used " + }, + { + "bbox": [ + 68, + 72, + 525, + 132 + ], + "type": "inline_equation", + "content": "\\chi = 0.1" + }, + { + "bbox": [ + 68, + 72, + 525, + 132 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 68, + 72, + 525, + 132 + ], + "type": "inline_equation", + "content": "\\nu = 10^{-5}" + }, + { + "bbox": [ + 68, + 72, + 525, + 132 + ], + "type": "text", + "content": ", and periodic boundary conditions. 
The vorticity initial condition was sampled from a Gaussian random field according to the distribution " + }, + { + "bbox": [ + 68, + 72, + 525, + 132 + ], + "type": "inline_equation", + "content": "\\mathcal{N}(0,14^{0.5}(-\\Delta +196I)^{-1.5})" + }, + { + "bbox": [ + 68, + 72, + 525, + 132 + ], + "type": "text", + "content": ". The following figure demonstrate an example of the prediction results of the neural operator with and without HFS." + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 76, + 147, + 521, + 340 + ], + "blocks": [ + { + "bbox": [ + 76, + 147, + 521, + 340 + ], + "lines": [ + { + "bbox": [ + 76, + 147, + 521, + 340 + ], + "spans": [ + { + "bbox": [ + 76, + 147, + 521, + 340 + ], + "type": "image", + "image_path": "1cfcf41b9fcb16a03911d047cd5dfb8c48cb6eb5ef8522a0e365588ab7ffadcb.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 76, + 340, + 517, + 451 + ], + "blocks": [ + { + "bbox": [ + 76, + 340, + 517, + 451 + ], + "lines": [ + { + "bbox": [ + 76, + 340, + 517, + 451 + ], + "spans": [ + { + "bbox": [ + 76, + 340, + 517, + 451 + ], + "type": "image", + "image_path": "f2e000dac344778458cfa4d7d28377618422c7e3854b30636a302d1144e056ab.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 76, + 454, + 517, + 599 + ], + "blocks": [ + { + "bbox": [ + 76, + 454, + 517, + 599 + ], + "lines": [ + { + "bbox": [ + 76, + 454, + 517, + 599 + ], + "spans": [ + { + "bbox": [ + 76, + 454, + 517, + 599 + ], + "type": "image", + "image_path": "dfc42718a5b15fa76af837f1cf09d44379ea5e84068c7dfccc29da3080f16166.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 68, + 609, + 525, + 659 + ], + "lines": [ + { + "bbox": [ + 68, + 609, + 525, + 659 + ], + "spans": [ + { + "bbox": [ + 68, + 609, + 525, + 659 + ], + "type": "text", + 
"content": "Figure H.17: 2D Kolmogorov flow prediction results. (a) Ground truth solutions. (b) NO predictions. (c) HFS-enhanced NO predictions. (d) The corresponding energy spectra " + }, + { + "bbox": [ + 68, + 609, + 525, + 659 + ], + "type": "inline_equation", + "content": "((p(k))" + }, + { + "bbox": [ + 68, + 609, + 525, + 659 + ], + "type": "text", + "content": " for predictions at each time-step. (e) Zoomed-in view of energy spectra showing only the high wavenumbers for better visualization of the differences. The legends in (d) are applicable to (e) as well." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 290, + 771, + 303, + 781 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 771, + 303, + 781 + ], + "spans": [ + { + "bbox": [ + 290, + 771, + 303, + 781 + ], + "type": "text", + "content": "37" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 36 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2503_13xxx/2503.13709/a5bb8084-4bf7-4b72-9901-fbf46d3fc4b9_content_list.json b/data/2025/2503_13xxx/2503.13709/a5bb8084-4bf7-4b72-9901-fbf46d3fc4b9_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..6ae88be98bc56fac14a10c008e6521e382d4ad3a --- /dev/null +++ b/data/2025/2503_13xxx/2503.13709/a5bb8084-4bf7-4b72-9901-fbf46d3fc4b9_content_list.json @@ -0,0 +1,1646 @@ +[ + { + "type": "text", + "text": "Multi-modal Time Series Analysis: A Tutorial and Survey", + "text_level": 1, + "bbox": [ + 133, + 101, + 861, + 126 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yushan Jiang $^{1*}$ , Kanghui Ning $^{1*}$ , Zijie Pan $^{1*}$ , Xuyang Shen $^{1}$ , Jingchao Ni $^{2}$ , Wenchao Yu $^{4}$ , Anderson Schneider $^{3}$ , Haifeng Chen $^{4\\dagger}$ , Yuriy Nevmyvaka $^{3\\dagger}$ , Dongjin Song 
$^{1\\dagger}$", + "bbox": [ + 104, + 137, + 890, + 174 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1University of Connecticut 2University of Houston", + "bbox": [ + 323, + 172, + 674, + 188 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{3}$ Morgan Stanley $^{4}$ NEC Laboratories America", + "bbox": [ + 341, + 188, + 656, + 204 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 83, + 214, + 156, + 227 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Multi-modal time series analysis has recently emerged as a prominent research area in data mining, driven by the increasing availability of diverse data modalities, such as text, images, and structured tabular data from real-world sources. However, effective analysis of multi-modal time series is hindered by data heterogeneity, modality gap, misalignment, and inherent noise. Recent advancements in multi-modal time series methods have exploited the multi-modal context via cross-modal interactions based on deep learning methods, significantly enhancing various downstream tasks. In this tutorial and survey, we present a systematic and up-to-date overview of multi-modal time series datasets and methods. We first state the existing challenges of multi-modal time series analysis and our motivations, with a brief introduction of preliminaries. Then, we summarize the general pipeline and categorize existing methods through a unified cross-modal interaction framework encompassing fusion, alignment, and transference at different levels (i.e., input, intermediate, output), where key concepts and ideas are highlighted. We also discuss the real-world applications of multi-modal analysis for both standard and spatial time series, tailored to general and specific domains. Finally, we discuss future research directions to help practitioners explore and exploit multi-modal time series. 
The up-to-date resources are provided in the GitHub repository1.", + "bbox": [ + 81, + 232, + 483, + 537 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Keywords", + "text_level": 1, + "bbox": [ + 83, + 549, + 169, + 565 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Multi-modal Time Series Analysis, Foundation Model, Large Language Model, Deep Learning", + "bbox": [ + 81, + 566, + 482, + 595 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 83, + 609, + 218, + 625 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Time series analysis is a fundamental task in data mining, driven by the proliferation of sequential data exhibiting rich temporal dynamics across diverse real-world systems. With the advent of deep learning, various methods have been proposed to effectively model complex temporal relationships within time series [9, 58, 61, 63, 78, 80, 88], facilitating downstream tasks in diverse domains, including healthcare [20, 32, 94], finance [64, 92], transportation [23, 29, 93] and environmental sciences [7, 8].", + "bbox": [ + 81, + 628, + 482, + 738 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In practice, time series are often associated with external contexts beyond their temporal dynamics [6, 77]. Such contexts are multi-modal, encompassing a variety of representations, such as texts [41, 73], images [18, 70], tables [6], and graphs [67], which carry rich semantic information for time series analysis. 
As such, incorporating the multi-modal contexts allows models to have a comprehensive view of underlying systems, capture subtle dependencies, and explain complex temporal behaviors more accurately.", + "bbox": [ + 81, + 739, + 482, + 849 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Multi-modal Time Series Analysis", + "text_level": 1, + "bbox": [ + 593, + 213, + 841, + 229 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Background", + "text_level": 1, + "bbox": [ + 673, + 229, + 756, + 243 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Challenges, Our Motivations, Preliminaries, etc.", + "bbox": [ + 578, + 247, + 852, + 260 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Data, Methods & Applications", + "text_level": 1, + "bbox": [ + 614, + 270, + 820, + 284 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Multi-modal Time Series Datas", + "bbox": [ + 619, + 291, + 797, + 303 + ], + "page_idx": 0 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Modalities: Time Series, Text, Image, Tabular, Graph, etc.", + "2. Scope, Existing Datasets, Characteristics, Domain, etc." + ], + "bbox": [ + 539, + 305, + 862, + 328 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Taxonomy of Multi-modal Time Series Methods", + "text_level": 1, + "bbox": [ + 580, + 339, + 852, + 351 + ], + "page_idx": 0 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Interaction Stage (Input, Intermediate, Output)", + "2. Interaction Strategy (Fusion, Alignment, Transference)", + "3. Specific Methods (Concatenate, Attention, Contrastive, Gating, etc.)" + ], + "bbox": [ + 539, + 353, + 890, + 388 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Domains & Tasks", + "text_level": 1, + "bbox": [ + 666, + 400, + 766, + 411 + ], + "page_idx": 0 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. General, Finance, Healthcare, Traffic, Environment, etc.", + "2. 
Forecasting, Classification, Causal Discovery, Retrieval, etc." + ], + "bbox": [ + 539, + 412, + 872, + 436 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Future Research Directions", + "text_level": 1, + "bbox": [ + 622, + 449, + 805, + 462 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Reasoning, Decision Making, Generalization, Contextual Noise,", + "bbox": [ + 532, + 464, + 893, + 476 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Bias & Ethics", + "bbox": [ + 674, + 477, + 750, + 488 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Figure 1: The framework of our tutorial and survey.", + "bbox": [ + 539, + 511, + 885, + 525 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Effective analysis of multi-modal time series, however, is hindered by several key challenges in terms of data heterogeneity, modality gap and contextual relevance. First, different modalities exhibit distinct statistical properties, structures, and dimensionalities, leading to discrepancies in feature distributions and semantic meanings. For instance, while time series data is sequentially ordered with temporal dependencies, textual and image data contains rich contextual semantics and correlations. Aligning these heterogeneous data into a unified representation space is non-trivial. Second, the textual, tabular, or visual contexts may appear at different timesteps or granularities. Such temporal misalignment may impede meaningful cross-modal interactions. Third, real-world data is inevitably noisy with irrelevant information that may mislead correlation learning, resulting in suboptimal performance. For example, in finance, news articles related to stock market prediction often contain much redundant or speculative narratives that does not reflect actual market conditions. 
Therefore, the focus of multi-modal time series analysis is to effectively capture complementary and relevant information from multi-modal context and leverage it for predictive or analytical tasks.", + "bbox": [ + 511, + 549, + 915, + 825 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "More recently, an increasing number of multi-modal methods have shown promise in exploiting contextual information from diverse data sources, which boosts performance in wide tasks ranging from forecasting [41, 51], classification [42, 45], anomaly detection [82] to retrieval [3] and causal discovery [68, 97]. Despite", + "bbox": [ + 511, + 825, + 913, + 896 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2503.13709v1 [cs.LG] 17 Mar 2025", + "bbox": [ + 22, + 260, + 57, + 705 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "* equal contribution.† Yuriy Nevmyvaka, Haifeng Chen and Dongjin Song are the corresponding authors.", + "bbox": [ + 81, + 861, + 482, + 883 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "$^{1}$ https://github.com/UCconn-DSIS/Multi-modal-Time-Series-Analysis", + "bbox": [ + 81, + 883, + 406, + 895 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "the promising results of multi-modal time series methods, they are tailored for their own tasks with domain-specific applications. The existing literature lacks a comprehensive and systematic review that provides a unified perspective on the underlying principles and pipelines for multi-modal time series learning. In this survey, we provide a systematic and up-to-date overview of existing methods for multi-modal time series analysis. As shown in Figure 1, we discuss the challenges, motivations, and preliminaries of multi-modal time series. 
Then we introduce the general pipeline for multi-modal time series analysis and propose three types of interactions for cross-modal modeling between time series and other modalities - fusion, alignment, and transference - at the input, intermediate and output level, respectively. We also discuss the applications of multi-modal time series across multiple domains. Furthermore, we provide Table 2 to comprehensively summarize representative methods, encapsulating the modalities, fine-grained cross-modal interactions, real-world domains and tasks. Finally, we highlight potential future research opportunities to further advance time series analysis with multi-modal data. In summary, the major contributions of our survey are:", + "bbox": [ + 81, + 107, + 480, + 383 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We systematically catalog over 40 multi-modal time series methods with the corresponding open-source datasets.", + "- We uniquely categorize the existing methods into a unified cross-modal interaction framework, highlighting fusion, alignment, and transference at the input/intermediate/output levels.", + "- We discuss real-world applications of multi-modal time series and identify promising future directions, encouraging researchers and practitioners to explore and exploit multi-modal time series." + ], + "bbox": [ + 83, + 386, + 482, + 496 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 Background and Our Scope", + "text_level": 1, + "bbox": [ + 81, + 508, + 334, + 525 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1 Multi-modal Machine Learning", + "text_level": 1, + "bbox": [ + 81, + 527, + 382, + 544 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Recent advancements in multi-modal machine learning have significantly enhanced models' ability to process and integrate data from diverse modalities, such as language, acoustic, vision, and tabular data [25, 66, 91]. 
With the development of deep learning architectures and sophisticated interaction designs, models are able to learn, infer, and reason by integrating multiple communicative modalities. Current research in multi-modal machine learning spans multiple key areas, including (1) representing multi-modal data to encode joint and individual characteristics, (2) identifying interconnections between modality elements, (3) transferring knowledge across modalities, and (4) theoretically and empirically analyzing the underlying learning process in a quantitative manner. We refer the audiences to the recent surveys [2, 49] for a more detailed overview of general multi-modal machine learning research. Building upon these advancements, we investigate multi-modal time series analysis with a focus on modeling temporal dependencies and leveraging the data interactions across heterogeneous modalities for predictive and analytical tasks.", + "bbox": [ + 81, + 547, + 482, + 796 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.2 Multi-modal Time Series Analysis", + "text_level": 1, + "bbox": [ + 81, + 808, + 403, + 824 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Multi-modal time series analysis aims to model time series data in combination with other complementary modalities. By leveraging cross-modal interactions, this approach yields deeper insights and more robust solutions for a wide range of predictive and analytical tasks across diverse real-world contexts.", + "bbox": [ + 81, + 825, + 480, + 895 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "This survey aims to provide a unique and systematic perspective on effectively leveraging cross-modal interactions from relevant real-world contexts to advance multi-modal time series analysis, addressing both foundational principles and practical solutions. 
Our assessment is threefold: (1) reviewing multi-modal time series data (Section 3), (2) analyzing cross-modal interactions between time series and other modalities (Section 4), and (3) revealing the impact of multi-modal time series analysis in applications across diverse domains (Section 5).", + "bbox": [ + 511, + 107, + 913, + 231 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To resolve ambiguities, we define the scope of our survey by clarifying the types of time series considered and the criteria for multi-modal time series methods. First, we mainly consider standard time series and spatial time series. For the latter, spatial structures (often represented as graphs) are inherently paired with temporal data rather than treated as a separate modality. Second, we focus on methods that leverage multi-modal inputs from real-world contexts to provide complementary information, but for generation and retrieval tasks, the focus is more on transforming the input modality to another output modality. We acknowledge recent research on representing time series as a single modality (e.g., time series as images [15, 46, 62, 99, 100], time series as tabular data [27]) for downstream tasks. However, as these approaches are less relevant to our scope, we refer readers to their respective works.", + "bbox": [ + 511, + 232, + 913, + 425 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Besides, we would like to highlight the difference between our survey and recent related survey and position papers. Ni et al. [57] focuses on imaging-based transformations of time series and subsequent visual modeling techniques, where the discussion on multi-modal models is limited to those involving vision modalities. Kong et al. [39] concentrates on the use of multi-modal large language models (LLMs) for enhancing reasoning capabilities (e.g., causal reasoning, QA, planning, etc.) with multi-modal context. 
In contrast, our survey provides a broader and structured framework by delivering a systematic and unified perspective of multi-modal time series analysis, not limited to a specific modality or task type.", + "bbox": [ + 511, + 426, + 913, + 578 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3 Multi-modal Time Series Data", + "text_level": 1, + "bbox": [ + 513, + 608, + 790, + 622 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3.1 Modalities in Multi-modal Time Series Data", + "text_level": 1, + "bbox": [ + 513, + 628, + 911, + 642 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Multi-modal time series data often originate from diverse sources, each exhibiting unique characteristics that influence how they are processed and analyzed. Besides Time Series, i.e., continuous or discrete measurements recorded over time, such as sensor readings, financial metrics, or physiological signals, their modalities often include: 1) Tabular: Time-indexed records that are inherently organized in a tabular format, such as event logs, transaction records, or demographic information. 2) Text: Time-stamped or domain-specific textual information – like clinical notes, financial reports, news articles, or social media posts – that provides contextual or interpretative insights. 3) Image: Visual data acquired as images over time, such as photographs, medical images (e.g., X-rays, MRI), satellite imagery, or visual representations generated from time series data. 4) Graph: Relational data representing interactions or structural dependencies among entities that evolve. They are typically modeled as networks or graphs, where the connections may change dynamically. 
Although audio is widely studied as an independent modality in multi-modal research, we consider it a special", + "bbox": [ + 511, + 646, + 913, + 896 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Jiang, et al.", + "bbox": [ + 856, + 75, + 911, + 85 + ], + "page_idx": 1 + }, + { + "type": "table", + "img_path": "images/bf8f5117d91e3da11dba5893a8b9ac5948fa50f19246cf4dd41c242bc923a774.jpg", + "table_caption": [ + "Table 1: Representative open-source multi-modal time series datasets and across domains." + ], + "table_footnote": [], + "table_body": "
DomainDataset (Superscripts include the URLs to the datasets)Modalities
HealthcareMIMIC-III [35][1], MIMIC-IV [34][2]TS, Text, Tabular
ICBHI [65][3], Coswara [4][4], KAUH [21][5], PTB-XL [71][6], ZuCo [14, 26][7]TS, Text
Image-EEG [22][8]TS, Image
FinanceFNSPID [17][9], ACL18 [84][10], CIKM18 [79][11], DOW30 [11][12]TS, Text
Multi-domainTime-MMD [53][13], TimeCAP [42][14], NewsForecast [73][15], TTC [37][16], CiK [77][17], TSQA [38][18]TS, Text
RetailVISUELLE [70][19]TS, Image, Text
IoTLEMMA-RCA [40][20]TS, Text
SpeechLRS3 [1][21], VoxCeleb2 [13][22]TS (Audio), Image
TrafficNYC-taxi, NYC-bike [48][23]ST, Text
EnvironmentTerra [10][24]ST, Text
", + "bbox": [ + 161, + 131, + 836, + 303 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "form of time series in this survey and briefly discuss representative works within this scope.", + "bbox": [ + 81, + 323, + 480, + 351 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2 Common Datasets and Benchmarks", + "text_level": 1, + "bbox": [ + 83, + 369, + 416, + 383 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Multi-modal time series datasets vary a lot and are domain-dependent, each with unique data characteristics and modalities. In Table 1 we provide representative datasets categorized by domain, along with their respective modalities:", + "bbox": [ + 81, + 388, + 490, + 443 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Healthcare: In this domain, physiological signals (e.g., ECG, EEG) are extensively analyzed alongside textual data such as clinical notes, patient demographics, and tabular data including vital signs and laboratory results. Common datasets include MIMIC-III [35], a comprehensive dataset containing electronic health records (EHRs) of ICU patients with physiological measurements, clinical notes, and diagnostic information, widely used for tasks like patient monitoring, mortality prediction, and clinical decision support. MIMIC-IV [34] is an extension of MIMIC-III, which provide detailed physiological signals, clinical narratives, medication records, and demographic data from a large population of critically ill patients, frequently utilized for predictive modeling, clinical outcome analysis, and health informatics research. 
Other notable healthcare datasets include ICBHI [65], which contains respiratory sound recordings paired with clinical annotations for respiratory disease classification; Coswara [4], which provides respiratory audio samples and rich metadata for COVID-19 detection tasks; KAUH [21], which comprises audio records and corresponding annotations for healthcare analytics; PTB-XL [71], a large-scale ECG dataset annotated with diagnostic labels for cardiac monitoring and diagnosis; ZuCo [14, 26], which consists of simultaneous EEG and textual data from reading comprehension tasks, being useful for cognitive neuroscience studies; and Image-EEG [22], which pairs EEG signals with images of objects on a natural background, aiding studies in visual neuroscience and computer vision.", + "bbox": [ + 81, + 443, + 482, + 789 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Finance: Datasets that combine time series data with financial news and reports are instrumental in financial analysis and modeling. Notable examples include ACL18 [84], CIKM18 [79], and DOW30 [11]. These datasets focus on high-trade-volume stocks from the U.S. stock markets, providing historical stock price data, such as opening, high, low, and closing prices; alongside related textual information, including tweets or financial news. Another", + "bbox": [ + 81, + 797, + 482, + 896 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "large-scale dataset, FNSPID [17], consists of stock prices and time-aligned financial news records, covering over 4,000 companies from 1999 to 2023.", + "bbox": [ + 513, + 323, + 913, + 364 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Multi-domain: Datasets featuring general-purpose numerical time series combined with textual data are suitable for broad analytical applications. 
Examples include Time-MMD [53], which encompasses nine primary data domains: Agriculture, Climate, Economy, Energy, Environment, Health, Security, Social Good, and Traffic, while ensuring fine-grained alignment between time series and textual data; TimeCAP [42] compiles seven real-world time series datasets across three domains: weather, finance, and healthcare. To generate textual descriptions for each time series, a large language model (LLM) agent is employed, leveraging contextual information and domain-specific knowledge. NewsForecast [73] integrates task-specific time series data with verified public news reports across various domains, including finance, energy, traffic, and cryptocurrency; TTC [37] is a meticulously curated, time-aligned dataset designed for multimodal forecasting. It consists of paired time series and text data synchronized to timestamps, spanning two distinct domains: climate science and healthcare; CiK [77] is a dataset comprising 71 forecasting tasks across seven real-world domains. Each task necessitates the integration of both numerical data and textual information. The covered domains include Climatology, Economics, Energy, Mechanics, Public Safety, Transportation, and Retail. The TSQA [38] dataset consists of 200k question-answer pairs derived from time series data across 12 domains: healthcare, finance, energy, traffic, environment, IoT, nature, transport, human activities, machine sensors, AIOps, and the web. These QA pairs are designed to support five key tasks: forecasting, imputation, anomaly detection, classification, and open-ended reasoning.", + "bbox": [ + 511, + 390, + 913, + 763 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Other domains: Beyond the previously discussed major sectors, multi-modal time series analysis extends to various other domains. 
In Retail, datasets such as VISUELLE [70] integrate numerical sales data with product images and textual descriptions, facilitating thorough analyses of consumer behavior and inventory management. The Internet of Things (IoT) domain benefits from datasets such as LEMMA-RCA [40], which combine time series sensor data with textual metadata, enabling enhanced monitoring and more robust and secure methodologies that ensure the high performance of modern", + "bbox": [ + 511, + 770, + 913, + 896 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Multi-modal Time Series Analysis: A Tutorial and Survey", + "bbox": [ + 84, + 74, + 354, + 87 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "systems. In the Speech domain, datasets like LRS3 [1] and VoxCeleb2 [13] integrate audio recordings with corresponding visual data, supporting advancements in speech recognition and speaker identification technologies. In the Traffic domain, datasets like NYC-Taxi, NYC-Bike [48] contain spatial-temporal (ST) data alongside associated textual metadata. These integrations allow LLMs to effectively capture and utilize spatial-temporal contextual signals. In the Environment domain, Terra [10] collect 45 years of global geographic spatial-temporal data, supplemented with textual descriptions.", + "bbox": [ + 81, + 106, + 482, + 244 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4 Cross-modal Interactions with Time Series", + "text_level": 1, + "bbox": [ + 81, + 261, + 460, + 275 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In this section, we conduct a detailed review of existing research on multi-modal time series analysis by thoroughly analyzing cross-modal interactions. We also elaborate how existing multi-modal methods are tailored for domain-specific applications in Section 5. 
The detailed taxonomy is provided in Table 2.", + "bbox": [ + 81, + 279, + 482, + 348 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We define three fundamental types of interactions between time series and other modalities, including fusion, alignment, and transference, which occur at different stages within a framework - input, intermediate (i.e., representations or intermediate outputs), and output. The representative examples are provided in Figure 2.", + "bbox": [ + 81, + 349, + 482, + 417 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.1 Fusion", + "text_level": 1, + "bbox": [ + 83, + 434, + 184, + 448 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Fusion refers to the process of integrating heterogeneous modalities in a way that captures complementary information across diverse sources to improve time series modeling. To fuse multi-modal inputs, a common practice is to directly integrate time series, tabular data and texts into a unified textual prompt, then use it to query LLMs for downstream tasks. This is typically facilitated by instruction fine-tuning for task-oriented analysis [19, 24, 38, 48, 56, 73, 90]. Some works also leverage the zero-shot reasoning and inference capability of pretrained LLMs (e.g., GPT-4 and its variants) [72, 81, 89]. Recent research efforts like TaTS [47] attempt to integrate paired text embedding as an additional variable of time series for temporal modeling, yielding competitive task performance.", + "bbox": [ + 81, + 452, + 482, + 618 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Most existing methods perform cross-modal fusion at the intermediate stage, such as adding and concatenating multi-modal representations, where each individual modal encoder first maps the raw data into a shared latent space. 
Addition combines time series and other modalities by summing up encoded representations, effectively blending shared information while preserving their interconnections in the latent space [6, 30, 51, 85, 95, 97]. On the other hand, concatenation stacks multi-modal representations along the same dimension, retaining modality-specific characteristics and allowing models to capture joint relationships between the modalities [16, 36, 37]. To effectively leverage cross-modal information, existing methods often incorporate alignment designs after concatenating representations [5, 11, 12, 18, 31, 33, 41, 42, 44, 54, 55, 60, 69, 70, 74, 85, 96]. Alignment is also used in the aforementioned additions, which will be detailed in Section 4.2.",
In the second stage, an offline synthesis via MLP is applied to dynamically fuse different components, yielding the final forecast based on their relative contributions. Beyond fusing outputs from a single model, TimeCAP [42] enhances performance by combining predictions from both a multi-modal predictor and a pretrained LLM, which synergizes the gradient-based method and LLM agents reasoning on real-world contexts. Output fusion gains advantage of design flexibility and robustness, but it may not fully utilize the complementary relationship between modalities without additional countermeasures.", + "bbox": [ + 511, + 338, + 913, + 518 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Cross-modal fusion relies on well-aligned multi-modal data for effective exploitation of the contextual information. However, ideally-aligned data may not be given in real-world scenarios. As such, existing methods also leverage alignment mechanisms to mitigate the challenge.", + "bbox": [ + 513, + 518, + 918, + 588 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.2 Alignment", + "text_level": 1, + "bbox": [ + 514, + 599, + 648, + 614 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Alignment ensures that the relationships between different modalities are preserved and semantically coherent when integrated into a unified learning framework. At the input level, we primarily refer alignment to data preprocessing techniques that aim at mitigating temporal misalignment caused by missing values, irregular sampling intervals, and differing granularities across modalities. This process is crucial for ensuring that data from multiple sources are properly synchronized before fusion, where domain knowledge is usually needed to handle such inconsistencies [10, 53, 73]. In addition, none of the existing methods we reviewed explicitly perform output alignment. 
However, the aforementioned output fusion can be easily adapted to alignment through the incorporation of a gating or attention mechanism that we will introduce shortly.", + "bbox": [ + 511, + 618, + 913, + 797 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Alignment at the intermediate stage plays a crucial role in multimodal interactions. We first introduce the alignment of multi-modal representations, spanning a range of techniques from model component design to learning objectives. The common component designs include self-attention [5, 12, 30, 33, 41, 42, 44, 54, 55, 69], cross-attention [6, 18, 51, 60, 70, 82, 85, 98] and gating mechanisms [82, 98]. Self-attention is often used to fuse multi-modal representations. It", + "bbox": [ + 511, + 799, + 913, + 896 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Jiang, et al.", + "bbox": [ + 856, + 75, + 911, + 85 + ], + "page_idx": 3 + }, + { + "type": "table", + "img_path": "images/3c674ad0e2e84929c9f56d3334e69ed89be4011b598a814f2e99ea5de4015cb1.jpg", + "table_caption": [ + "Table 2: Taxonomy of representative multi-modal time series methods. Modality refers to the different data modalities involved in each method. TS represents standard time series, $ST$ denotes spatial time series. The Method column lists the techniques used for each interaction, separated by semicolons, where each interaction may include one or more techniques, separated by commas. Superscripts in the Code column include the URLs to Github repositories." + ], + "table_footnote": [], + "table_body": "
MethodModalityDomainTaskCross-Modal InteractionLarge ModelYearCode
StageFusionAlign.Trans.Method
Time-MMD [53]TS, TextGeneralForecastingOutputXXAdditionMultiple2024Yes[1]
Wang et al. [73]TS, TextGeneralForecastingInput IntermediateXXPrompt Prompt; LLM ReasoningLLaMa2 GPT-4 Turbo2024Yes[2]
GPT4MTS [30]TS, TextGeneralForecastingIntermediateXAddition; Self-attentionGPT-22024No
TimeCMA [51]TS, TextGeneralForecastingInput IntermediateXXMeta-description Addition; Cross-attentionGPT-22025Yes[3]
MOAT [41]TS, TextGeneralForecastingIntermediate OutputXConcat.; Self-attention Offline Synthesis (MLP)S-Bert2024No
TimeCAP [42]TS, TextGeneralClassificationInput Intermediate OutputXXLLM Generation Concat.; Self-attention, Retrieval AdditionBert, GPT-42024No
TimeXL [31]TS, TextGeneralClassification ForecastingIntermediate OutputXConcat., Prompt; LLM Reasoning AdditionBert, S-Bert GPT-4o2025No
Hybrid-MMF [37]TS, TextGeneralForecastingIntermediateXXConcat.GPT-4o2024Yes[4]
Time-LLM [33]TS, TextGeneralForecastingInput IntermediateXXMeta-description Concat.; Self-attentionLLaMA, GPT-22024Yes[5]
Time-VLM [98]TS, Text, ImageGeneralForecastingInput IntermediateXXFeat. Imaging, Meta-description Addition; Gating, Cross-attentionViLT, CLIP BLIP-22025No
Unitime [55]TS, TextGeneralForecastingInput IntermediateXXMeta-description Concat.; Self-attentionGPT-22024Yes[6]
TESSA [50]TS, TextGeneralAnnotationIntermediatePrompt; RL; LLM GenerationGPT-4o2024No
InstruTime [12]TS, TextGeneralClassificationIntermediateXConcat.; Self-attentionGPT-22025Yes[7]
MATMCD [68]TS, Text, GraphGeneralCausal DiscoveryIntermediatePrompt; LLM Reasoning; SupervisionMultiple2025No
STG-LLM [54]ST, TextGeneralForecastingIntermediateXConcat.; Self-attentionGPT-22024No
TableTime [72]TS, TextGeneralClassificationInputXPrompt; ReformulateMultiple2024Yes[8]
ContextFormer [6]TS, TabularGeneralForecastingIntermediateXAddition; Cross-attentionNo2025No
Time-MQA [38]TS, TextGeneralMultipleInputXXPromptMultiple2025Yes[9]
MAN-SF [67]TS, Text, GraphFinanceClassificationIntermediateXBilinear; Graph ConvolutionUSE2020No
Bamford et al. [3]TS, Text, TS, ImageFinanceRetrievalIntermediate OutputXXSupervisionS-Bert2024No
Chen et al. [11]TS, Text, GraphFinanceClassificationIntermediateXXLLM Generation Concat.; Graph ConvolutionChatGPT2023No
Xie et al. [81]TS, TextFinanceClassificationInputXXPromptChatGPT2023No
Yu et al. [89]TS, TextFinanceForecastingInputXXPromptGPT-4, Open LLaMA2023No
MedTsLLM [5]TS, Text, TabularHealthcareMultipleIntermediateXConcat.; Self-attentionLlama22024Yes[10]
RespLLM [95]TS (Audio), TextHealthcareClassificationIntermediateXAddition, Self-attentionOpenBioLLM-8B2024No
METS [45]TS, TextHealthcareClassificationOutputXXContrastiveClinicalBert2023No
Wang et al. [75]TS, TextHealthcareClassificationIntermediateXXSupervisionBart, Bert, Roberta2021No
EEG2TEXT [52]TS, TextHealthcareGenerationOutputXXSelf-supervision, SupervisionBart2024No
MEDHMP [74]TS, TextHealthcareClassificationIntermediateXConcat.; Self-attention, ContrastiveClinicalT52023Yes[11]
Deznabi et al. [16]TS, TextHealthcareClassificationIntermediateXXConcat.Bio+Clinical Bert2021Yes[12]
Niu et al. [60]TS, TextHealthcareClassificationIntermediateXConcat.; Cross-attentionBioBERT2023No
Yang et al. [85]TS, TextHealthcareClassificationIntermediateXConcat.; Addition; GatingClinicalBERT2021Yes[13]
Liu et al. [56]TS, TextHealthcareClassification RegressionInputXXPromptPaLM2023Yes[14]
xTP-LLM [24]ST, TextTraffic ForecastingInputXPrompt; Meta-descriptionLlama2-7B-chat2024Yes[15]
UrbanGPT [48]ST, TextTraffic ForecastingInputXPrompt; Meta-descriptionVicuna-7B2024Yes[16]
CityGPT [19]ST, TextMobilityInputXXPromptMultiple2025Yes[17]
MULAN [97]TS, Text, GraphIoT Causal DiscoveryIntermediateAddition; Contrastive; SupervisionNo2024No
MIA [82]TS, ImageIoT Anomaly DetectionIntermediateXAddition; Cross-attention, GatingNo2023No
Ekambaram et al. [18]TS, Image, TextRetail ForecastingIntermediateXConcat.; Self & Cross-attentionNo2020Yes[18]
Skenderi et al. [70]TS, Image, TextRetail ForecastingIntermediateXConcat.; Cross-attentionNo2024Yes[19]
VIMTS [96]ST, Image EnvironmentImputationIntermediateXConcat.; SupervisionNo2022No
LITE [44]ST, Text, Image EnvironmentForecastingIntermediateXConcat.; Self-attentionLLaMA-2-7B2024Yes[20]
AV-HuBERT [69]TS (Audio), Image SpeechClassificationIntermediateXConcat.; Self-attentionHuBert2022Yes[21]
SpeechGPT [90]TS(Audio), TextGeneration intermediateXConcat.; Self-attentionLLaMA-13B2023Yes[22]
LA-GCN [83]ST, TextClassificationIntermediateXSupervisionBert2023Yes[23]
", + "bbox": [ + 84, + 143, + 926, + 878 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Multi-modal Time Series Analysis: A Tutorial and Survey", + "bbox": [ + 84, + 75, + 352, + 85 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "enables a joint and undirected alignment across all modalities by dynamically attending to important features. Given multi-modal embeddings $E_{\\mathrm{mm}} \\in \\mathbb{R}^{n \\times d}$ , where $n$ is the total number of modality tokens and $d$ is the embedding dimension, self-attention is computed as follows:", + "bbox": [ + 81, + 106, + 480, + 176 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {A t t e n t i o n} \\left(E _ {\\mathrm {m m}}\\right) = \\operatorname {s o f t m a x} \\left(\\frac {Q K ^ {\\top}}{\\sqrt {d _ {k}}}\\right) V\n$$\n", + "text_format": "latex", + "bbox": [ + 163, + 181, + 401, + 219 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where the queries $Q$ , keys $K$ , and values $V$ are linear projections of $E_{\\mathrm{mm}}$ : $Q = E_{\\mathrm{mm}}W_{Q}$ , $K = E_{\\mathrm{mm}}W_{K}$ , $V = E_{\\mathrm{mm}}W_{V}$ with learnable weights of dimensionality $d_{k}$ : $W_{Q}, W_{K}, W_{V} \\in \\mathbb{R}^{d \\times d_{k}}$ .", + "bbox": [ + 81, + 223, + 480, + 267 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In cross-attention, time series serves as the query modality to get contextualized by other modalities, providing a directed alignment that ensures auxiliary modalities contribute relevant contextual information while preserving the temporal structure of time series. 
Given a query embedding $E_{\\mathrm{ts}} \\in \\mathbb{R}^{n \\times d}$ and that of auxiliary modalities $E_{c} \\in \\mathbb{R}^{n \\times d}$ as keys and values:", + "bbox": [ + 81, + 267, + 482, + 351 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\text {C r o s s A t t e n t i o n} \\left(E _ {\\mathrm {t s}}, E _ {\\mathrm {c}}\\right) = \\operatorname {s o f t m a x} \\left(\\frac {Q _ {\\mathrm {t s}} K _ {\\mathrm {c}} ^ {\\top}}{\\sqrt {d _ {k}}}\\right) V _ {\\mathrm {c}}\n$$\n", + "text_format": "latex", + "bbox": [ + 138, + 356, + 426, + 393 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where the query, key and value are denoted as $Q_{\\mathrm{ts}} = E_{\\mathrm{ts}}W_{Q}$ , $K_{\\mathrm{c}} = E_{\\mathrm{c}}W_{K}$ , $V_{\\mathrm{c}} = E_{\\mathrm{c}}W_{V}$ . Note that existing methods adopt multi-head attentions, which is omitted here for simplicity.", + "bbox": [ + 81, + 397, + 480, + 438 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Similarly, the gating mechanism is a parametric filtering operation that explicitly regulates the influence of time series and other modalities on the fused embeddings in $E$ :", + "bbox": [ + 81, + 439, + 482, + 479 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nG = \\sigma \\left(W _ {g} \\left[ E _ {\\mathrm {t s}}; E _ {c} \\right] + b _ {g}\\right), \\quad E = G \\odot E _ {\\mathrm {t s}} + (1 - G) \\odot E _ {c}\n$$\n", + "text_format": "latex", + "bbox": [ + 117, + 486, + 444, + 502 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $\\sigma (\\cdot)$ denotes the sigmoid function, the learnable weight and bias are denoted as $W_{g}\\in \\mathbb{R}^{2d\\times d}$ and $b_{g}\\in \\mathbb{R}^{d}$ , respectively.", + "bbox": [ + 81, + 506, + 480, + 536 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "When a graph modality is available from external contexts, the underlying topological insights can be leveraged for 
graph-based alignment [11, 67]. Unlike the above methods that rely solely on feature interactions, it explicitly aligns multi-modal representations with relational structures through graph convolution, enabling context-aware feature propagation across modalities.", + "bbox": [ + 81, + 536, + 482, + 619 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Representation alignments can also be achieved by learning objectives [3, 45, 83, 96, 97]. For example, MULAN [97] extracts modality-invariant and modality-specific representations from multi-modal time series. It employs contrastive learning to enhance cross-modal alignment by maximizing the similarity between invariant representations across modalities while minimizing the similarity between invariant and specific representations of the same modality. Moreover, Bamford et al. [3] align cross-modal representations by using the mean of uni-modal cosine similarities as the target similarity and optimizing cross-modal similarity via cross-entropy loss, which effectively connects both modalities in a shared latent space for time series retrieval tasks. In general, this branch of methods is effective as it directly integrates the alignment objective into the optimization process, ensuring that meaningful representations are explicitly learned.", + "bbox": [ + 81, + 619, + 485, + 825 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Lastly, we introduce the intermediate alignment of component outputs within a framework, extending beyond representation alignment within a model. The most recent studies explore the synergy between time series models and LLM agents, leveraging the strong reasoning capabilities of pretrained LLMs to provide", + "bbox": [ + 81, + 825, + 480, + 896 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "contextual understanding and calibration in real-world scenarios [31, 42, 50, 68, 73]. We briefly discuss a few representative examples for demonstration. 
TimeCAP [42] utilizes the embedding space of a trained multi-modal encoder to retrieve in-context examples with the highest cosine similarity. These retrieved examples with ground truth labels are then fed, along with the query text, into an LLM to provide contextual guidance and improve outcome prediction. TimeXL [31] incorporates a multi-modal prototype-based encoder to generate explainable case-based rationales for both time series and texts, integrating three LLM agents, where prediction, reflection, and refinement LLMs collaborate to iteratively enhance prediction accuracy, identify textual inconsistencies or noise, and calibrate textual contexts, yielding more accurate predictions and explanations. NewsForecast [73] also employs reflection in language agents to iteratively select relevant news from a large database, enhancing alignment of textual information for text-based forecasting. Similarly, MATMCD [68] ensures alignment between statistical causal discovery on time series and LLM reasoning on textual context by leveraging iterative self-reflective tool-calling to structure textual context, which is then used to explain and refine causal constraints.",
This concept plays a crucial role across different stages of multi-modal time series analysis. The input-level transference typically serves for modality augmentation. It helps introduce contextual priors, enrich training samples, and provide alternative representations. This is particularly useful in scenarios of data scarcity and imbalance. In existing literature, a common practice is to use meta information to describe the narrative of real-world contexts (e.g., domain, data statistics and granularity, variable descriptions, other co-variates, etc.) [19, 24, 33, 48, 51, 55, 72, 98] or leverage pretrained LLMs to generate fine-grained textual contexts [42] or graphs [11] for real-world time series, serving as an augmented modality. In addition to texts, time series can also be transformed into high-dimensional images via feature imaging, such as stacking the original data with frequency and periodicity features [98]. Alternatively, time series can be represented in tabular form, transforming time series analysis into a table understanding task [72]. Note that the aforementioned uni-modal methods for transforming time series into other single modalities can also be integrated into multi-modal time series frameworks [15, 27, 46, 62, 99, 100]. The exploitation of input-level transference is two-fold. First, the embedding of generated modality can serve as semantic anchors that guides time series modeling via representation alignment, improving downstream supervised tasks [33, 51, 55, 98]. Second, it provides additional contextual guidance for pretrained LLMs through input fusion and prompting [19, 24, 48, 72].", + "bbox": [ + 511, + 521, + 913, + 896 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Jiang, et al.", + "bbox": [ + 856, + 75, + 911, + 85 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "At the intermediate [11, 50, 68, 75, 97] and output [3, 52] levels, transference are more task-oriented. 
The output-level transference typically refers to the end-to-end generation of new modalities, such as text-based and image-based time series retrieval, where users provide textual descriptions or sketched trends to query relevant time series data [3]. This also includes EEG-to-text conversion, enabling direct transformation from physiological signals to human-readable narratives [52].", + "bbox": [ + 86, + 107, + 480, + 217 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The output of intermediate transference typically serves as an initial solution to be refined for modality generation tasks [50, 68, 97] or a medium to be inferred for predictive tasks [75], facilitating downstream reasoning and further alignment within the multimodal framework. MATMCD [68] generates an initial causal graph from time series, achieving modality transference in the intermediate level. Subsequently, it incorporates textual modality to refine the causal graph, ensuring improved alignment and interpretability. Moreover, Wang et al. 
[75] adopt a two-stage mechanism for sentiment classification based on EEG data, where the model first converts EEG signals into reading texts and then employs a pretrained LLM based on texts for classification, achieving impressive zero-shot results.", + "bbox": [ + 86, + 218, + 480, + 396 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5 Applications of Multi-modal Time Series Analysis", + "text_level": 1, + "bbox": [ + 86, + 409, + 442, + 440 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In this section, we review the existing applications of multi-modal time series analysis for both standard and spatial time series, covering diverse domains such as healthcare, finance, transportation, environment, retail, and the Internet of Things (IoT).", + "bbox": [ + 86, + 444, + 480, + 498 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Standard Time Series", + "text_level": 1, + "bbox": [ + 86, + 511, + 256, + 525 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.1 Healthcare", + "text_level": 1, + "bbox": [ + 86, + 531, + 215, + 544 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Recent studies in healthcare highlight the multi-modal analysis of diverse medical data sources, such as EHRs (Electronic Health Records, containing lab values and clinical reports, etc.), audio, EEG (Electroencephalogram), ECG (Electrocardiogram), and other wearable and medical sensor recordings, for better disease diagnosis and patient monitoring. For multi-modal analysis on EHR data, a common modeling strategy involves the interaction between lab values and clinical reports, including the concatenation [16] and attention mechanisms [60, 85] on modality embeddings. Moreover, existing methods explore different modeling techniques to better exploit the clinical notes, via domain-specific text encoders (e.g., ClinicalBERT [28, 85] and BioBERT [43, 60]) and different processing strategies. 
For example, text embeddings can be modeled separately based on patient groups [86] or through a decaying mechanism based on time intervals [36] before interacting with time series embeddings, which leads to improved mortality prediction.", + "bbox": [ + 86, + 549, + 480, + 770 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In addition to EHRs, multi-modal modeling methods have been tailored for audio [95], ECG [45], and EEG [75]. Zhang et al. [95] focus on a respiratory health classification task by integrating both audio and textual descriptions. Li et al. [45] propose a multi-modal contrastive learning framework, constructing positive and negative samples by pairing patients' report texts with corresponding ECG signals for self-supervised pretraining. The classification task is then performed by computing the cosine similarity between different text representations and the target ECG representation. Wang", + "bbox": [ + 86, + 771, + 480, + 895 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "et al. [75] propose a two-stage method for zero-shot EEG-based sentiment classification. First, a pretrained BART model is used for EEG-to-text decoding, followed by a trained text sentiment classifier that converts the generated text into sentiment categories.", + "bbox": [ + 517, + 107, + 911, + 161 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Similarly, Liu et al. [56] fuses physiological and behavioral time-series sensor data with real-world contextual information to effectively harness LLMs for wellness assessment. 
By fine-tuning the models with few-shot question-answer pairs that include contextual details, they improve performance on various healthcare tasks—such as cardiac signal analysis, physical activity recognition, and calorie-burn estimation, which outperform both supervised feedforward neural networks and zero-shot LLM baselines.", + "bbox": [ + 517, + 162, + 911, + 271 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.2 Finance", + "text_level": 1, + "bbox": [ + 517, + 289, + 622, + 301 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Recently, multi-modal time series analysis has received increasing attention in financial applications. Yu et al. [89] and Xie et al. [81] focus on stock prediction by integrating stock price movements, company profiles, and news directly into structured LLM prompts, enabling models to perform reasoning over multiple modalities. Yu et al. [89] applies GPT-4 and Open LLaMA to forecast NASDAQ-100 stock returns through instruction-based prompting and fine-tuning, demonstrating that structured LLM-driven inference can outperform traditional econometric models. Meanwhile, Xie et al. [81] conducts a zero-shot analysis of ChatGPT's capabilities for multimodal stock movement prediction, incorporating CoT prompting to assess the impact of social media sentiment on stock trends. Chen et al.[11] and Sawhneyet al.[67] also incorporate graph structures for stock movement prediction. For instance, Chen et al.[11] uses ChatGPT to infer dynamic stock relationship graphs from news, which reflects market conditions and enhances prediction accuracy.", + "bbox": [ + 517, + 306, + 911, + 527 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Beyond the predictive tasks, Bamford et al. [3] proposes a multimodal retrieval framework, where the model aligns both modalities in a shared latent space through contrastive learning. 
This framework allows users to search for financial time series through textual descriptions or sketched trends, offering greater flexibility. It also significantly improves retrieval speed and accuracy compared to traditional SQL-based search methods.", + "bbox": [ + 517, + 529, + 911, + 625 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.3 Others", + "text_level": 1, + "bbox": [ + 517, + 642, + 612, + 655 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Multi-modal time series analysis also exists in other domains, such as retail, IoT, computer vision and audio. In the retail sector, Ekambaram et al. [18] utilizes product images and textual descriptions, including attributes like color, pattern, and sleeve style, while incorporating temporal and exogenous features for new product sales forecasting. More recently, Skenderi et al. [70] integrates additional modality data, including product images and text descriptions, along with Google Trends data for sales forecasting. In IoT applications, MIA [82] enhances power transformer fault diagnosis by integrating multi-modal data, including dissolved gas analysis (DGA) and infrared images, to improve accuracy and efficiency. MULAN [97] converts log sequences into time-series data using a log-tailored language model and employs contrastive learning to leverage multi-modal data, facilitating root-cause discovery for system failures. In computer vision, LA-GCN [83] utilizes textual embeddings of joint names and action labels to generate faithful structural priors, enhancing skeleton-based action modeling and", + "bbox": [ + 517, + 660, + 911, + 895 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Multi-modal Time Series Analysis: A Tutorial and Survey", + "bbox": [ + 84, + 75, + 354, + 85 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "improving recognition tasks. 
In speech applications, AV-HuBERT [69] employs a self-supervised representation learning framework to leverage correlated audio and visual information [90], while SpeechGPT [90] integrates audio and text to enhance generation performance.",
VIMTS [96] utilizes a structured variational approximation technique to impute missing high-dimensional modalities (stream image data) by transforming them into low-dimensional features derived from simpler, related modalities (meteorological time series records), ensuring cross-modal correlations and interpretability of the imputation process. Additionally, LITE [44] addresses the imputation of missing features through a sparse Mixture of Experts framework. It integrates and encodes various environmental variables through a unified encoder. Directed by domain-specific instructions, a language model is utilized to merge these multi-modal representations, thereby improving the accuracy of environmental spatial-temporal predictions.", + "bbox": [ + 86, + 468, + 480, + 661 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6 Future Research Directions", + "text_level": 1, + "bbox": [ + 84, + 676, + 334, + 690 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this section, we outline several underexplored research directions that open up opportunities for future advancements.", + "bbox": [ + 84, + 694, + 480, + 722 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Reasoning with Multi-modal Time Series. Enhancing reasoning with multi-modal time series is pivotal for the development of intelligent systems. Future research should focus on creating a unified framework that can seamlessly integrate temporal reasoning with contextual understanding, enabling models to handle multiple time series tasks with interpretability. One potential path is to incorporate external knowledge bases and real-world context, such as developing a retrieval-augmented generation (RAG) [59] system, to enhance the reasoning process and allow models to make informed inferences beyond the immediate data. 
It is also promising to synergize time series models and language agents to provide more faithful and reliable reasoning on real-world contexts [31, 68].",
Additionally, disentangling the effects of each modality is essential to better understand their individual contributions and mitigate cross-modal interference.", + "bbox": [ + 517, + 252, + 913, + 431 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Robustness to Missing and Noisy Modalities. Multi-modal time series analysis often frequently encounters messy real-world contexts with incomplete or noisy data. Existing methods employ an iterative context-refinement algorithm [31] that filters out less relevant information, thereby enhancing the predictive insights derived from multi-modal time series. Nonetheless, effectively dealing with missing and noisy modalities still demands further exploration. In particular, developing strategies for modality-specific imputation, noise reduction, and relevance quantification will be crucial to improving the real-world applicability of existing multi-modal time series methods.", + "bbox": [ + 517, + 435, + 913, + 585 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Ethical Considerations and Bias Mitigation. In light of potential biases in multi-modal time series datasets, future research should integrate fairness-aware techniques, such as fairness constraints, counterfactual analysis, and adversarial debiasing. These methods should be combined with robust bias assessment frameworks to systematically detect and mitigate inequities, ensuring outcomes that are both equitable and socially responsible.", + "bbox": [ + 517, + 590, + 911, + 686 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "7 Conclusion", + "text_level": 1, + "bbox": [ + 517, + 704, + 637, + 718 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this survey, we provide a comprehensive overview of existing multi-modal time series methods. We first discuss the multi-modal time series used in existing methods. Then, we propose a taxonomy based on cross-modal interactions between time series and other modalities. 
The existing methods are categorized and summarized accordingly. We also discuss the real-world applications and highlight future research directions in this promising area.", + "bbox": [ + 517, + 722, + 913, + 819 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgments", + "text_level": 1, + "bbox": [ + 517, + 835, + 668, + 851 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This research was supported in part by the National Science Foundation (NSF) CAREER IIS-2338878, as well as by generous research gifts from NEC Labs America Inc. and Morgan Stanley.", + "bbox": [ + 517, + 854, + 913, + 895 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Jiang, et al.", + "bbox": [ + 856, + 75, + 911, + 85 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 84, + 104, + 176, + 119 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Triantafyllos Afouras, Joon Son Chung, and Andrew Zisserman. 2018. LRS3-TED: a large-scale dataset for visual speech recognition. arXiv:1809.00496 [cs.CV] https://arxiv.org/abs/1809.00496", + "[2] Tadas Baltrusaitis, Chaitanya Ahuja, and Louis-Philippe Morency. 2018. Multimodal machine learning: A survey and taxonomy. IEEE transactions on pattern analysis and machine intelligence 41, 2 (2018), 423-443.", + "[3] Tom Bamford, Andrea Coletta, Elizabeth Fons, Sriram Gopalakrishnan, Svitlana Vyetrenko, Tucker Balch, and Manuela Veloso. 2023. Multi-Modal Financial Time-Series Retrieval Through Latent Space Projections. In Proceedings of the Fourth ACM International Conference on AI in Finance (Brooklyn, NY, USA) (ICAIF '23). Association for Computing Machinery, New York, NY, USA, 498-506. doi:10.1145/3604237.3626901", + "[4] Dhananjay Bhattacharya, Nayan K Sharma, Debottam Dutta, Srikanth R Chetupalli, Prashant Mote, Sriram Ganapathy, Jyothi Bhat, Shreyas Ramoji, Pravin Ghosh, Aswin Subramanian, et al. 2023. 
Coswara: a respiratory sounds and symptoms dataset for remote screening of SARS-CoV-2 infection. Scientific Data 10, 1 (2023), 397.", + "[5] Nimeesha Chan, Felix Parker, William Bennett, Tianyi Wu, Mung Yao Jia, James Fackler, and Kimia Ghobadi. 2024. Medtsllm: Leveraging llms for multimodal medical time series analysis. arXiv preprint arXiv:2408.07773 (2024).", + "[6] Sameep Chattopadhyay, Pulkit Paliwal, Sai Shankar Narasimhan, Shubhankar Agarwal, and Sandeep P. Chinchali. 2025. Context Matters: Leveraging Contextual Features for Time Series Forecasting. arXiv:2410.12672 [cs.LG] https://arxiv.org/abs/2410.12672", + "[7] Shengyu Chen, Yiqun Xie, Xiang Li, Xu Liang, and Xiaowei Jia. 2023. Physics-guided meta-learning method in baseflow prediction over large regions. In Proceedings of the 2023 SIAM International Conference on Data Mining (SDM). SIAM, 217-225.", + "[8] Shengyu Chen, Jacob A Zwart, and Xiaowei Jia. 2022. Physics-guided graph meta learning for predicting water temperature and streamflow in stream networks. In Proceedings of the 28th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 2752-2761.", + "[9] Si-An Chen, Chun-Liang Li, Sercan O Arik, Nathanael Christian Yoder, and Tomas Pfister. 2023. TSMixer: An All-MLP Architecture for Time Series Forecasting. Transactions on Machine Learning Research (2023).", + "[10] Wei Chen, Xixuan Hao, Yuankai Wu, and Yuxuan Liang. 2024. Terra: A Multimodal Spatio-Temporal Dataset Spanning the Earth. In Advances in Neural Information Processing Systems, A. Globerson, L. Mackey, D. Belgrave, A. Fan, U. Paquet, J. Tomczak, and C. Zhang (Eds.), Vol. 37. Curran Associates, Inc., 66329-66356. https://proceedings.neurips.cc/paper_files/paper/2024/file/7a6a7fbd1ee0c9684b3f919f79d129ef-Paper-Datasets_and_Benchmarks_Track.pdf", + "[11] Zihan Chen, Lei Nico Zheng, Cheng Lu, Jialu Yuan, and Di Zhu. 2023. ChatGPT Informed Graph Neural Network for Stock Movement Prediction. 
Available at SSRN 4464002 (2023).", + "[12] Mingyue Cheng, Yiheng Chen, Qi Liu, Zhiding Liu, Yuong Luo, and Enhong Chen. 2025. InstrucTime: Advancing Time Series Classification with Multimodal Language Modeling. In Proceedings of the Eighteenth ACM International Conference on Web Search and Data Mining (Hannover, Germany) (WSDM '25). Association for Computing Machinery, New York, NY, USA, 792-800. doi:10.1145/3701551.3703499", + "[13] Joon Son Chung, Arsha Nagrani, and Andrew Zisserman. 2018. VoxCeleb2: Deep Speaker Recognition. In Interspeech 2018 (Interspeech Proceedings). ISCA. doi:10.21437/Interspeech.2018-1929", + "[14] Helena Cousijn, Patricia Feeney, Daan Lowenberg, Elisa Presani, and Natasha Simons. 2019. A data citation roadmap for scholarly data repositories. *Scientific Data* 6, 1 (2019), 28.", + "[15] Mayank Daswani, Mathias MJ Bellaiche, Marc Wilson, Desislav Ivanov, Mikhail Papkov, Eva Schnider, Jing Tang, Kay Lamerigts, Gabriela Botea, Michael A Sanchez, et al. 2024. Plots Unlock Time-Series Understanding in Multimodal Models. arXiv preprint arXiv:2410.02637 (2024).", + "[16] Iman Deznabi, Mohit Iyyer, and Madalina Fiterau. 2021. Predicting in-hospital mortality by combining clinical notes with time-series data. In Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021, Chengqing Zong, Fei Xia, Wenjie Li, and Roberto Navigli (Eds.). Association for Computational Linguistics, Online, 4026-4031. doi:10.18653/v1/2021-findings-acl.352", + "[17] Zihan Dong, Xinyu Fan, and Zhiyuan Peng. 2024. FNSPID: A Comprehensive Financial News Dataset in Time Series. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining (Barcelona, Spain) (KDD '24). Association for Computing Machinery, New York, NY, USA, 4918-4927. doi:10.1145/3637528.3671629", + "[18] Vijay Ekambaram, Kushagra Manglik, Sumanta Mukherjee, Surya Shravan Kumar Sajja, Satyam Dwivedi, and Vikas Raykar. 2020. 
Attention based multimodal new product sales time-series forecasting. In Proceedings of the 26th ACM SIGKDD international conference on knowledge discovery & data mining. 3110-3118." + ], + "bbox": [ + 91, + 123, + 482, + 887 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[19] Jie Feng, Yuwei Du, Tianhui Liu, Siqi Guo, Yuming Lin, and Yong Li. 2024. Citygpt: Empowering urban spatial cognition of large language models. arXiv preprint arXiv:2406.13948 (2024).", + "[20] Stefan Feuerriegel, Dennis Frauen, Valentyn Melnychuk, Jonas Schweisthal, Konstantin Hess, Alicia Curth, Stefan Bauer, Niki Kilbertus, Isaac S Kohane, and Mihaela van der Schaar. 2024. Causal machine learning for predicting treatment outcomes. Nature Medicine 30, 4 (2024), 958-968.", + "[21] Mohammad Fraiwan, Luay Fraiwan, Basheer Khassawneh, and Ali Ibnian. 2021. A dataset of lung sounds recorded from the chest wall using an electronic stethoscope. Data in Brief 35, 106913. doi:10.1016/j.dib.2021.106913", + "[22] Alessandro T. Gifford, Kshitij Dwivedi, Gemma Roig, and Radoslaw M. Cichy. 2022. A large and rich EEG dataset for modeling human visual object recognition. NeuroImage 264 (2022), 119754. doi:10.1016/j.neuroimage.2022.119754", + "[23] Shengnan Guo, Youfang Lin, Ning Feng, Chao Song, and Huaiyu Wan. 2019. Attention based spatial-temporal graph convolutional networks for traffic flow forecasting. In Proceedings of the AAAI conference on artificial intelligence, Vol. 33, 922-929.", + "[24] Xusen Guo, Qiming Zhang, Junyue Jiang, Mingxing Peng, Meixin Zhu, and Hao Frank Yang. 2024. Towards explainable traffic flow prediction with large language models. Communications in Transportation Research 4 (2024), 100150.", + "[25] Paul Hager, Martin J Menten, and Daniel Rueckert. 2023. Best of both worlds: Multimodal contrastive learning with tabular and imaging data. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 
23924-23935.", + "[26] Nora Hollenstein, Marius Troendle, Ce Zhang, and Nicolas Langer. 2020. ZuCo 2.0: A Dataset of Physiological Recordings During Natural Reading and Annotation. In Proceedings of the Twelfth Language Resources and Evaluation Conference, Nicoletta Calzolari, Frédéric Béchet, Philippe Blache, Khalid Choukri, Christopher Cieri, Thierry Declerck, Sara Goggi, Hitoshi Isahara, Bente Maegaard, Joseph Mariani, Hélène Mazo, Asuncion Moreno, Jan Odijk, and Stelios Piperidis (Eds.). European Language Resources Association, Marseille, France, 138-146. https://aclanthology.org/2020.lrec-1.18/", + "[27] Shi Bin Hoo, Samuel Müller, David Salinas, and Frank Hutter. 2025. The tabular foundation model TabPFN outperforms specialized time series forecasting models based on simple features. arXiv preprint arXiv:2501.02945 (2025).", + "[28] Kexin Huang, Jaan Altsoaar, and Rajesh Ranganath. 2020. Clinical-BERT: Modeling Clinical Notes and Predicting Hospital Readmission. arXiv:1904.05342 [cs.CL] https://arxiv.org/abs/1904.05342", + "[29] Jiahao Ji, Jingyuan Wang, Chao Huang, Junjie Wu, Boren Xu, Zhenhe Wu, Junbo Zhang, and Yu Zheng. 2023. Spatio-temporal self-supervised learning for traffic flow prediction. In Proceedings of the AAAI conference on artificial intelligence, Vol. 37. 4356-4364.", + "[30] Furong Jia, Kevin Wang, Yixiang Zheng, Defu Cao, and Yan Liu. 2024. GPT4MTS: Prompt-based Large Language Model for Multimodal Time-series Forecasting. Proceedings of the AAI Conference on Artificial Intelligence 38, 21 (Mar. 2024), 23343-23351. doi:10.1609/aaaai.v38i21.30383", + "[31] Yushan Jiang, Wenchao Yu, Geon Lee, Dongjin Song, Kijung Shin, Wei Cheng, Yanchi Liu, and Haifeng Chen. 2025. Explainable Multi-modal Time Series Prediction with LLM-in-the-Loop. arXiv:2503.01013 [cs.LG] https://arxiv.org/abs/2503.01013", + "[32] Bo Jin, Haoyu Yang, Leilei Sun, Chuanren Liu, Yue Qu, and Jianing Tong. 2018. A treatment engine by predicting next-period prescriptions. 
In Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining. 1608-1616.", + "[33] Ming Jin, Shiyu Wang, Lintao Ma, Zhixuan Chu, James Y. Zhang, Xiaoming Shi, Pin-Yu Chen, Yuxuan Liang, Yuan-Fang Li, Shirui Pan, and Qingsong Wen. 2024. Time-LLM: Time Series Forecasting by Reprogramming Large Language Models. In The Twelfth International Conference on Learning Representations. https://openreview.net/forum?id=Unb5CVPtae", + "[34] Alistair Johnson, Lucas Bulgarelli, Tom Pollard, Steven Horng, Leo Anthony Celi, and Roger Mark. 2021. MIMIC-IV (version 1.0). doi:10.13026/s6n6-xd98", + "[35] Alistair E. W. Johnson, Tom J. Pollard, Lu Shen, Li-wei H. Lehman, Mengling Feng, Mohammad Ghassemi, Benjamin Moody, Peter Szolovits, Leo Anthony Celi, and Roger G. Mark. 2016. MIMIC-III, a freely accessible critical care database. Scientific Data 3 (2016), 160035.", + "[36] Swaraj Khadanga, Karan Aggarwal, Shafiq Joty, and Jaideep Srivastava. 2019. Using Clinical Notes with Time Series Data for ICU Management. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IFCNLP). Association for Computational Linguistics, Hong Kong, China, 6432-6437. doi:10.18653/v1/D19-1678", + "[37] Kai Kim, Howard Tsai, Rajat Sen, Abhimanyu Das, Zihao Zhou, Abhishek Tanpure, Mathew Luo, and Rose Yu. 2024. Multi-Modal Forecaster: Jointly Predicting Time Series and Textual Data. arXiv:2411.06735 [cs.AI] https://arxiv.org/abs/2411.06735", + "[38] Yaxuan Kong, Yiyuan Yang, Yoontae Hwang, Wenjie Du, Stefan Zohren, Zhangyang Wang, Ming Jin, and Qingsong Wen. 2025. Time-MQA: Time Series Multi-Task Question Answering with Context Enhancement." 
+ ], + "bbox": [ + 522, + 109, + 913, + 893 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Multi-modal Time Series Analysis: A Tutorial and Survey", + "bbox": [ + 84, + 75, + 354, + 85 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "arXiv:2503.01875 [cs.CL] https://arxiv.org/abs/2503.01875", + "[39] Yaxuan Kong, Yiyuan Yang, Shiyu Wang, Chenghao Liu, Yuxuan Liang, Ming Jin, Stefan Zohren, Dan Pei, Yan Liu, and Qingsong Wen. 2025. Position: Empowering Time Series Reasoning with Multimodal LLMs. arXiv preprint arXiv:2502.01477 (2025).", + "[40] Dongjie Wang Chengyuan Deng Reon Matsuoka Lecheng Zheng, Zhengzhang Chen and Haifeng Chen. 2024. LEMMA-RCA: A Large Multi-modal Multi-domain Dataset for Root Cause Analysis. arXiv:2406.05375 [cs.AI]", + "[41] Geon Lee, Wenchao Yu, Wei Cheng, and Haifeng Chen. 2024. MoAT: Multi-Modal Augmented Time Series Forecasting. https://openreview.net/forum?id=uRXxnoqDHH", + "[42] Geon Lee, Wenchao Yu, Kijung Shin, Wei Cheng, and Haifeng Chen. 2025. TimeCAP: Learning to Contextualize, Augment, and Predict Time Series Events with Large Language Model Agents. In AAAI.", + "[43] Jinhyuk Lee, Wonjin Yoon, Sungdong Kim, Donghyeon Kim, Sunkyu Kim, Chan So, and Jaewoo Kang. 2019. BioBERT: a pre-trained biomedical language representation model for biomedical text mining. Bioinformatics (Oxford, England) 36 (09 2019). doi:10.1093/bioinformatics/btz682", + "[44] Haoran Li, Junqi Liu, Zexian Wang, Shiuyuan Luo, Xiaowei Jia, and Huaxiu Yao. 2024. LITE: Modeling Environmental Ecosystems with Multimodal Large Language Models. arXiv preprint arXiv:2404.01165 (2024).", + "[45] Jun Li, Che Liu, Sibo Cheng, Rossella Arcucci, and Shenda Hong. 2023. Frozen Language Model Helps ECG Zero-Shot Learning. In Medical Imaging with Deep Learning.", + "[46] Zekun Li, Shiyang Li, and Xifeng Yan. 2023. Time series as images: Vision transformer for irregularly sampled time series. 
Advances in Neural Information Processing Systems 36 (2023), 49187-49204.", + "[47] Zihao Li, Xiao Lin, Zhining Liu, Jiaru Zou, Ziwei Wu, Lecheng Zheng, Dongqi Fu, Yada Zhu, Hendrik Hamann, Hanghang Tong, and Jingrui He. 2025. Language in the Flow of Time: Time-Series-Paired Texts Weaved into a Unified Temporal Narrative. arXiv:2502.08942 [cs.LG] https://arxiv.org/abs/2502.08942", + "[48] Zhonghang Li, Lianghao Xia, Jiabin Tang, Yong Xu, Lei Shi, Long Xia, Dawei Yin, and Chao Huang. 2024. UrbanGPT: Spatio-Temporal Large Language Models. ArXiv abs/2403.00813 (2024). https://api(semanticscholar.org/CorpusID:268230972", + "[49] Paul Pu Liang, Amir Zadeh, and Louis-Philippe Morency. 2024. Foundations & trends in multimodal machine learning: Principles, challenges, and open questions. Comput. Surveys 56, 10 (2024), 1-42.", + "[50] Minhua Lin, Zhengzhang Chen, Yanchi Liu, Xujiang Zhao, Zongyu Wu, Junxiang Wang, Xiang Zhang, Suhang Wang, and Haifeng Chen. 2024. Decoding Time Series with LLMs: A Multi-Agent Framework for Cross-Domain Annotation. arXiv:2410.17462 [cs.AI] https://arxiv.org/abs/2410.17462", + "[51] Chenxi Liu, Qianxiong Xu, Hao Miao, Sun Yang, Lingzheng Zhang, Cheng Long, Ziyue Li, and Rui Zhao. 2025. TimeCMA: Towards LLM-Empowered Multivariate Time Series Forecasting via Cross-Modality Alignment. In AAAI.", + "[52] Hanwen Liu, Daniel Hajialigol, Benny Antony, Aiguo Han, and Xuan Wang. 2024. EEG2Text: Open Vocabulary EEG-to-Text Translation with Multi-View Transformer. In 2024 IEEE International Conference on Big Data (BigData). IEEE Computer Society, Los Alamitos, CA, USA, 1824-1833. doi:10.1109/ BigData62323.2024.10825980", + "[53] Haoxin Liu, Shangqing Xu, Zhiyuan Zhao, Lingkai Kong, Harshavardhan Kamarthi, Aditya B. Sasanur, Megha Sharma, Jiaming Cui, Qingsong Wen, Chao Zhang, and B. Aditya Prakash. 2024. Time-MMD: Multi-Domain Multimodal Dataset for Time Series Analysis. 
In The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track. https://openreview.net/forum?id=fuD0h4R1IL", + "[54] Lei Liu, Shuo Yu, Runze Wang, Zhenxun Ma, and Yanming Shen. 2024. How can large language models understand spatial-temporal data? arXiv preprint arXiv:2401.14192 (2024).", + "[55] Xu Liu, Junfeng Hu, Yuan Li, Shizhe Diao, Yuxuan Liang, Bryan Hooi, and Roger Zimmermann. 2024. UniTime: A Language-Empowered Unified Model for Cross-Domain Time Series Forecasting. In Proceedings of the ACM Web Conference 2024 (Singapore, Singapore) (WWW'24). Association for Computing Machinery, New York, NY, USA, 4095-4106. doi:10.1145/3589334.3645434", + "[56] Xin Liu, Daniel McDuff, Geza Kovacs, Isaac Galatzer-Levy, Jacob Sunshine, Jiening Zhan, Ming-Zher Poh, Shun Liao, Paolo Di Achille, and Shwetak Patel. 2023. Large Language Models are Few-Shot Health Learners. arXiv preprint arXiv:2305.15525 (2023). https://arxiv.org/abs/2305.15525", + "[57] Jingchao Ni, Ziming Zhao, ChengAo Shen, Hanghang Tong, Dongjin Song, Wei Cheng, Dongsheng Luo, and Haifeng Chen. 2025. Harnessing Vision Models for Time Series Analysis: A Survey. arXiv preprint arXiv:2502.08869 (2025).", + "[58] Yuqi Nie, Nam H Nguyen, Phanwadee Sinthong, and Jayant Kalagnanam. 2023. A Time Series is Worth 64 Words: Long-term Forecasting with Transformers. In The Eleventh International Conference on Learning Representations.", + "[59] Kanghui Ning, Zijie Pan, Yu Liu, Yushan Jiang, James Y. Zhang, Kashif Rasul, Anderson Schneider, Lintao Ma, Yuriy Nevmvaka, and Dongjin Song. 2025. TS-RAG: Retrieval-Augmented Generation based Time Series Foundation Models are Stronger Zero-Shot Forecaster. arXiv:2503.07649 [cs.LG] https://arxiv.org/" + ], + "bbox": [ + 91, + 109, + 480, + 895 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "abs/2503.07649", + "[60] K. Niu, K. Zhang, X. Peng, Y. Pan, and N. Xiao. 2023. 
Deep Multi-Modal Intermediate Fusion of Clinical Record and Time Series Data in Mortality Prediction. Frontiers in Molecular Biosciences 10 (2023), 1136071. doi:10.3389/fmolb.2023.1136071", + "[61] Zijie Pan, Yushan Jiang, Sahil Garg, Anderson Schneider, Yuriy Nevmyvaka, and Dongjin Song. 2024. $S^2$ IP-LLM: Semantic Space Informed Prompt Learning with LLM for Time Series Forecasting. In Forty-first International Conference on Machine Learning.", + "[62] Vinay Prithyani, Mohsin Mohammed, Richa Gadgil, Ricardo Buitrago, Vinija Jain, and Aman Chadha. 2024. On the Feasibility of Vision-Language Models for Time-Series Classification. arXiv preprint arXiv:2412.17304 (2024).", + "[63] Yao Qin, Dongjin Song, Haifeng Cheng, Wei Cheng, Guofei Jiang, and Garrison W Cottrell. 2017. A dual-stage attention-based recurrent neural network for time series prediction. In Proceedings of the 26th International Joint Conference on Artificial Intelligence, 2627-2633.", + "[64] Hadi Rezaei, Hamidreza Faaljou, and Gholamreza Mansourfar. 2021. Stock price prediction using deep learning and frequency decomposition. Expert Systems with Applications 169 (2021), 114332.", + "[65] Bruno M Rocha, Dimitris Filos, Luis Mendes, Gorkem Serbes, Sezer Ulukaya, Yasemin P Kahya, Niksa Jakovljevic, Tatjana L Turukalo, Ioannis M Vogiatzis, Eleni Perantoni, Evangelos Kaimakamis, Pantelis Natsivas, Ana Oliveira, Cristina Jacome, Alda Marques, Nicos Maglaveras, Rui Pedro Paiva, Ioanna Chouvarda, and Paulo de Carvalho. 2019. An open access database for the evaluation of respiratory sound classification algorithms. Physiological Measurement 40, 3 (2019), 035001.", + "[66] Ludan Ruan, Yiyang Ma, Huan Yang, Huiguo He, Bei Liu, Jianlong Fu, Nicholas Jing Yuan, Qin Jin, and Baining Guo. 2023. Mm-diffusion: Learning multi-modal diffusion models for joint audio and video generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 
10219-10228.", + "[67] Ramit Sawhney, Shivam Agarwal, Arnav Wadhwa, and Rajiv Ratn Shah. 2020. Deep Attentive Learning for Stock Movement Prediction From Social Media Text and Company Correlations. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), Bonnie Webber, Trevor Cohn, Yulan He, and Yang Liu (Eds.). Association for Computational Linguistics, Online, 8415-8426. doi:10.18653/v1/2020.emnlp-main.676", + "[68] ChengAo Shen, Zhengzhang Chen, Dongsheng Luo, Dongkuan Xu, Haifeng Chen, and Jingchao Ni. 2024. Exploring Multi-Modal Integration with Tool-Augmented LLM Agents for Precise Causal Discovery. arXiv:2412.13667 [cs.LG] https://arxiv.org/abs/2412.13667", + "[69] Bowen Shi, Wei-Ning Hsu, Kushal Lakhotia, and Abdelrahman Mohamed. 2022. Learning Audio-Visual Speech Representation by Masked Multimodal Cluster Prediction. In International Conference on Learning Representations. https://openreview.net/forum?id=Z1Qlm11uOM", + "[70] Geri Skenderi, Christian Joppi, Matteo Denitto, and Marco Cristani. 2024. Well googled is half done: Multimodal forecasting of new fashion product sales with image-based google trends. Journal of Forecasting 43, 6 (2024), 1982-1997.", + "[71] Patrick Wagner, Nils Strothhoff, Ralf-Dieter Bousseljot, Dieter Kreiseler, Fatima I Lunze, Wojciech Samek, and Tobias Schaeffer. 2020. PTB-XL, a large publicly available electrocardiography dataset. Scientific Data 7, 1 (2020), 154.", + "[72] Jiahao Wang, Mingyue Cheng, Qingyang Mao, Yitong Zhou, Feiyang Xu, and Xin Li. 2025. TableTime: Reformulating Time Series Classification as Training-Free Table Understanding with Large Language Models. arXiv:2411.15737 [cs.AI] https://arxiv.org/abs/2411.15737", + "[73] Xinlei Wang, Maike Feng, Jing Qiu, JINJIN GU, and Junhua Zhao. 2024. From News to Forecast: Integrating Event Analysis in LLM-Based Time Series Forecasting with Reflection. In Advances in Neural Information Processing Systems, A. Globerson, L. 
Mackey, D. Belgrave, A. Fan, U. Paquet, J. Tomczak, and C. Zhang (Eds.), Vol. 37. Curran Associates, Inc., 58118-58153. https://proceedings.neurips.cc/paper_files/paper/2024/file/6aef8bffb372096ee73d98da30119f89-Paper-Conference.pdf", + "[74] Xiaochen Wang, Junyu Luo, Jiaqi Wang, Ziyi Yin, Suhan Cui, Yuan Zhong, Yaqing Wang, and Fenglong Ma. 2023. Hierarchical Pretraining on Multimodal Electronic Health Records. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, Houda Bouamor, Juan Pino, and Kalika Bali (Eds.). Association for Computational Linguistics, Singapore, 2839-2852. doi:10.18653/v1/2023.emnlp-main.171", + "[75] Zhenhailong Wang and Heng Ji. 2021. Open Vocabulary Electroencephalography-To-Text Decoding and Zero-shot Sentiment Classification. In AAAI Conference on Artificial Intelligence. https://apisemantic scholar.org/CorpusID:244909027", + "[76] Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Brian Ichter, Fei Xia, Ed H. Chi, Quoc V. Le, and Denny Zhou. 2022. Chain-of-thought prompting elicits reasoning in large language models (NIPS '22). Curran Associates Inc., Red Hook, NY, USA, Article 1800, 14 pages." + ], + "bbox": [ + 522, + 109, + 911, + 875 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Jiang, et al.", + "bbox": [ + 856, + 75, + 911, + 85 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[77] Andrew Robert Williams, Arjun Ashok, Étienne Marcotte, Valentina Zantedeschi, Jithendarraa Subramanian, Roland Riachi, James Requeima, Alexandre Lacoste, Irina Rish, Nicolas Chapados, et al. 2024. Context is key: A benchmark for forecasting with essential textual information. arXiv preprint arXiv:2410.18959 (2024).", + "[78] Haixu Wu, Tengge Hu, Yong Liu, Hang Zhou, Jianmin Wang, and Mingsheng Long. 2023. TimesNet: Temporal 2D-Variation Modeling for General Time Series Analysis. 
In The Eleventh International Conference on Learning Representations.", + "[79] Huizhe Wu, Wei Zhang, Weiwei Shen, and Jun Wang. 2018. Hybrid Deep Sequential Modeling for Social Text-Driven Stock Prediction. In Proceedings of the 27th ACM International Conference on Information and Knowledge Management (Torino, Italy) (CIKM '18). Association for Computing Machinery, New York, NY, USA, 1627–1630. doi:10.1145/3269206.3269290", + "[80] Zonghan Wu, Shirui Pan, Guodong Long, Jing Jiang, Xiaojun Chang, and Chengqi Zhang. 2020. Connecting the dots: Multivariate time series forecasting with graph neural networks. In Proceedings of the 26th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining, 753-763.", + "[81] Qianqian Xie, Weiguang Han, Yanzhao Lai, Min Peng, and Jimin Huang. 2023. The wall street neophyte: A zero-shot analysis of chatgpt over multimodal stock movement prediction challenges. arXiv preprint arXiv:2304.05351 (2023).", + "[82] Zhikai Xing and Yigang He. 2023. Multi-modal information analysis for fault diagnosis with time-series data from power transformer. International Journal of Electrical Power & Energy Systems 144 (2023), 108567. doi:10.1016/j.ijepes.2022.108567", + "[83] Haojun Xu, Yan Gao, Zheng Hui, Jie Li, and Xinbo Gao. 2023. Language Knowledge-Assisted Representation Learning for Skeleton-Based Action Recognition. arXiv:2305.12398 [cs.CV] https://arxiv.org/abs/2305.12398", + "[84] Yumo Xu and Shay B. Cohen. 2018. Stock Movement Prediction from Tweets and Historical Prices. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), Iryna Gurevych and Yusuke Miyao (Eds.). Association for Computational Linguistics, Melbourne, Australia, 1970-1979. doi:10.18653/v1/P18-1183", + "[85] Bo Yang and Lijun Wu. 2021. How to Leverage the Multimodal EHR Data for Better Medical Prediction?. 
In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing (EMNLP). Association for Computational Linguistics, 4029-4038. doi:10.18653/v1/2021.emnlp-main.329", + "[86] Haiyang Yang, Li Kuang, and FengQiang Xia. 2021. Multimodal Temporal-Clinical Note Network for Mortality Prediction. Journal of Biomedical Semantics 12, 1 (2021), 1-14. doi:10.1186/s13326-021-00235-3", + "[87] Shunyu Yao, Dian Yu, Jeffrey Zhao, Izhak Shafran, Thomas L. Griffiths, Yuan Cao, and Karthik R Narasimhan. 2023. Tree of Thoughts: Deliberate Problem Solving with Large Language Models. In Thirty-seventh Conference on Neural Information Processing Systems. https://openreview.net/forum?id=5Xc1ecxO1h", + "[88] Kun Yi, Qi Zhang, Wei Fan, Shoujin Wang, Pengyang Wang, Hui He, Ning An, Defu Lian, Longbing Cao, and Zhendong Niu. 2024. Frequency-domain MLPs are more effective learners in time series forecasting. Advances in Neural Information Processing Systems 36 (2024).", + "[89] Xinli Yu, Zheng Chen, and Yanbin Lu. 2023. Harnessing LLMs for Temporal Data - A Study on Explainable Financial Time Series Forecasting. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing: Industry Track, Mingxuan Wang and Imed Zitouni (Eds.). Association for Computational Linguistics, Singapore, 739-753. doi:10.18653/v1/2023.emnlp-industry.69", + "[90] Dong Zhang, Shimin Li, Xin Zhang, Jun Zhan, Pengyu Wang, Yaqian Zhou, and Xipeng Qiu. 2023. SpeechGPT: Empowering Large Language Models with Intrinsic Cross-Modal Conversational Abilities. arXiv:2305.11000 [cs.CL] https://arxiv.org/abs/2305.11000", + "[91] Jingyi Zhang, Jiaxing Huang, Sheng Jin, and Shijian Lu. 2024. Vision-language models for vision tasks: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence (2024).", + "[92] Liheng Zhang, Charu Aggarwal, and Guo-Jun Qi. 2017. Stock price prediction via discovering multi-frequency trading patterns. 
In Proceedings of the 23rd ACM SIGKDD international conference on knowledge discovery and data mining. 2141-2149.", + "[93] Xiyue Zhang, Chao Huang, Yong Xu, Lianghao Xia, Peng Dai, Liefeng Bo, Junbo Zhang, and Yu Zheng. 2021. Traffic flow forecasting with spatial-temporal graph diffusion network. In Proceedings of the AAAI conference on artificial intelligence, Vol. 35. 15008-15015.", + "[94] Xiang Zhang, Lina Yao, Manqing Dong, Zhe Liu, Yu Zhang, and Yong Li. 2020. Adversarial representation learning for robust patient-independent epileptic seizure detection. IEEE journal of biomedical and health informatics 24, 10 (2020), 2852-2859.", + "[95] Yuwei Zhang, Tong Xia, Aaqib Saeed, and Cecilia Mascolo. 2024. RespLLM: Unifying Audio and Text with Multimodal LLMs for Generalized Respiratory Health Prediction. arXiv:2410.05361 [cs.LG] https://arxiv.org/abs/2410.05361", + "[96] Xiaohu Zhao, Kebin Jia, Benjamin Letcher, Jennifer Fair, Yiqun Xie, and Xiaowei Jia. 2022. VIMTS: Variational-based Imputation for Multi-modal Time Series. In 2022 IEEE International Conference on Big Data (Big Data). IEEE, 349-358." + ], + "bbox": [ + 89, + 108, + 483, + 883 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[97] Lecheng Zheng, Zhengzhang Chen, Jingrui He, and Haifeng Chen. 2024. MU-LAN: multi-modal causal structure learning and root cause analysis for microservice systems. In Proceedings of the ACM Web Conference 2024. 4107-4116.", + "[98] Siru Zhong, Weilin Ruan, Ming Jin, Huan Li, Qingsong Wen, and Yuxuan Liang. 2025. Time-VLM: Exploring Multimodal Vision-Language Models for Augmented Time Series Forecasting. arXiv:2502.04395 [cs.CV] https://arxiv.org/abs/2502.04395", + "[99] Zihao Zhou and Rose Yu. 2024. Can LLMs Understand Time Series Anomalies? arXiv preprint arXiv:2410.05440 (2024).", + "[100] Jiaxin Zhuang, Leon Yan, Zhenwei Zhang, Ruiqi Wang, Jiawei Zhang, and Yuantao Gu. 2024. 
See it, Think it, Sorted: Large Multimodal Models are Few-shot Time Series Anomaly Analyzers. arXiv preprint arXiv:2411.02465 (2024)." + ], + "bbox": [ + 519, + 108, + 913, + 231 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Multi-modal Time Series Analysis: A Tutorial and Survey", + "bbox": [ + 84, + 74, + 354, + 85 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/data/2025/2503_13xxx/2503.13709/a5bb8084-4bf7-4b72-9901-fbf46d3fc4b9_model.json b/data/2025/2503_13xxx/2503.13709/a5bb8084-4bf7-4b72-9901-fbf46d3fc4b9_model.json new file mode 100644 index 0000000000000000000000000000000000000000..8577248436f74ccf805e904ca3f7801fe1d52dc4 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13709/a5bb8084-4bf7-4b72-9901-fbf46d3fc4b9_model.json @@ -0,0 +1,2763 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.261, + 0.058, + 0.707 + ], + "angle": 270, + "content": "arXiv:2503.13709v1 [cs.LG] 17 Mar 2025" + }, + { + "type": "title", + "bbox": [ + 0.135, + 0.102, + 0.862, + 0.127 + ], + "angle": 0, + "content": "Multi-modal Time Series Analysis: A Tutorial and Survey" + }, + { + "type": "text", + "bbox": [ + 0.106, + 0.138, + 0.892, + 0.175 + ], + "angle": 0, + "content": "Yushan Jiang\\(^{1*}\\), Kanghui Ning\\(^{1*}\\), Zijie Pan\\(^{1*}\\), Xuyang Shen\\(^{1}\\), Jingchao Ni\\(^{2}\\), Wenchao Yu\\(^{4}\\), Anderson Schneider\\(^{3}\\), Haifeng Chen\\(^{4\\dagger}\\), Yuriy Nevmyvaka\\(^{3\\dagger}\\), Dongjin Song\\(^{1\\dagger}\\)" + }, + { + "type": "text", + "bbox": [ + 0.325, + 0.174, + 0.675, + 0.189 + ], + "angle": 0, + "content": "1University of Connecticut 2University of Houston" + }, + { + "type": "text", + "bbox": [ + 0.342, + 0.189, + 0.657, + 0.205 + ], + "angle": 0, + "content": "\\(^{3}\\)Morgan Stanley \\(^{4}\\)NEC Laboratories America" + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.215, + 0.158, + 0.228 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.233, + 
0.484, + 0.538 + ], + "angle": 0, + "content": "Multi-modal time series analysis has recently emerged as a prominent research area in data mining, driven by the increasing availability of diverse data modalities, such as text, images, and structured tabular data from real-world sources. However, effective analysis of multi-modal time series is hindered by data heterogeneity, modality gap, misalignment, and inherent noise. Recent advancements in multi-modal time series methods have exploited the multi-modal context via cross-modal interactions based on deep learning methods, significantly enhancing various downstream tasks. In this tutorial and survey, we present a systematic and up-to-date overview of multi-modal time series datasets and methods. We first state the existing challenges of multi-modal time series analysis and our motivations, with a brief introduction of preliminaries. Then, we summarize the general pipeline and categorize existing methods through a unified cross-modal interaction framework encompassing fusion, alignment, and transference at different levels (i.e., input, intermediate, output), where key concepts and ideas are highlighted. We also discuss the real-world applications of multi-modal analysis for both standard and spatial time series, tailored to general and specific domains. Finally, we discuss future research directions to help practitioners explore and exploit multi-modal time series. The up-to-date resources are provided in the GitHub repository1." 
+ }, + { + "type": "title", + "bbox": [ + 0.084, + 0.55, + 0.17, + 0.566 + ], + "angle": 0, + "content": "Keywords" + }, + { + "type": "text", + "bbox": [ + 0.083, + 0.568, + 0.483, + 0.597 + ], + "angle": 0, + "content": "Multi-modal Time Series Analysis, Foundation Model, Large Language Model, Deep Learning" + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.611, + 0.219, + 0.625 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.629, + 0.483, + 0.739 + ], + "angle": 0, + "content": "Time series analysis is a fundamental task in data mining, driven by the proliferation of sequential data exhibiting rich temporal dynamics across diverse real-world systems. With the advent of deep learning, various methods have been proposed to effectively model complex temporal relationships within time series [9, 58, 61, 63, 78, 80, 88], facilitating downstream tasks in diverse domains, including healthcare [20, 32, 94], finance [64, 92], transportation [23, 29, 93] and environmental sciences [7, 8]." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.74, + 0.483, + 0.851 + ], + "angle": 0, + "content": "In practice, time series are often associated with external contexts beyond their temporal dynamics [6, 77]. Such contexts are multi-modal, encompassing a variety of representations, such as texts [41, 73], images [18, 70], tables [6], and graphs [67], which carry rich semantic information for time series analysis. As such, incorporating the multi-modal contexts allows models to have a comprehensive view of underlying systems, capture subtle dependencies, and explain complex temporal behaviors more accurately." 
+ }, + { + "type": "title", + "bbox": [ + 0.594, + 0.214, + 0.842, + 0.23 + ], + "angle": 0, + "content": "Multi-modal Time Series Analysis" + }, + { + "type": "title", + "bbox": [ + 0.674, + 0.231, + 0.758, + 0.244 + ], + "angle": 0, + "content": "Background" + }, + { + "type": "text", + "bbox": [ + 0.58, + 0.248, + 0.853, + 0.261 + ], + "angle": 0, + "content": "Challenges, Our Motivations, Preliminaries, etc." + }, + { + "type": "title", + "bbox": [ + 0.616, + 0.271, + 0.821, + 0.285 + ], + "angle": 0, + "content": "Data, Methods & Applications" + }, + { + "type": "text", + "bbox": [ + 0.62, + 0.292, + 0.799, + 0.304 + ], + "angle": 0, + "content": "Multi-modal Time Series Datas" + }, + { + "type": "text", + "bbox": [ + 0.541, + 0.306, + 0.864, + 0.317 + ], + "angle": 0, + "content": "1. Modalities: Time Series, Text, Image, Tabular, Graph, etc." + }, + { + "type": "text", + "bbox": [ + 0.54, + 0.318, + 0.848, + 0.329 + ], + "angle": 0, + "content": "2. Scope, Existing Datasets, Characteristics, Domain, etc." + }, + { + "type": "list", + "bbox": [ + 0.54, + 0.306, + 0.864, + 0.329 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.581, + 0.34, + 0.854, + 0.352 + ], + "angle": 0, + "content": "Taxonomy of Multi-modal Time Series Methods" + }, + { + "type": "text", + "bbox": [ + 0.541, + 0.354, + 0.794, + 0.366 + ], + "angle": 0, + "content": "1. Interaction Stage (Input, Intermediate, Output)" + }, + { + "type": "text", + "bbox": [ + 0.541, + 0.367, + 0.828, + 0.378 + ], + "angle": 0, + "content": "2. Interaction Strategy (Fusion, Alignment, Transference)" + }, + { + "type": "text", + "bbox": [ + 0.54, + 0.378, + 0.891, + 0.39 + ], + "angle": 0, + "content": "3. 
Specific Methods (Concatenate, Attention, Contrastive, Gating, etc.)" + }, + { + "type": "list", + "bbox": [ + 0.54, + 0.354, + 0.891, + 0.39 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.667, + 0.401, + 0.767, + 0.412 + ], + "angle": 0, + "content": "Domains & Tasks" + }, + { + "type": "text", + "bbox": [ + 0.541, + 0.414, + 0.856, + 0.425 + ], + "angle": 0, + "content": "1. General, Finance, Healthcare, Traffic, Environment, etc." + }, + { + "type": "text", + "bbox": [ + 0.54, + 0.426, + 0.874, + 0.438 + ], + "angle": 0, + "content": "2. Forecasting, Classification, Causal Discovery, Retrieval, etc." + }, + { + "type": "list", + "bbox": [ + 0.54, + 0.414, + 0.874, + 0.438 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.624, + 0.45, + 0.806, + 0.463 + ], + "angle": 0, + "content": "Future Research Directions" + }, + { + "type": "text", + "bbox": [ + 0.534, + 0.465, + 0.895, + 0.477 + ], + "angle": 0, + "content": "Reasoning, Decision Making, Generalization, Contextual Noise," + }, + { + "type": "text", + "bbox": [ + 0.676, + 0.478, + 0.751, + 0.489 + ], + "angle": 0, + "content": "Bias & Ethics" + }, + { + "type": "image_caption", + "bbox": [ + 0.54, + 0.512, + 0.887, + 0.526 + ], + "angle": 0, + "content": "Figure 1: The framework of our tutorial and survey." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.55, + 0.916, + 0.827 + ], + "angle": 0, + "content": "Effective analysis of multi-modal time series, however, is hindered by several key challenges in terms of data heterogeneity, modality gap and contextual relevance. First, different modalities exhibit distinct statistical properties, structures, and dimensionalities, leading to discrepancies in feature distributions and semantic meanings. For instance, while time series data is sequentially ordered with temporal dependencies, textual and image data contains rich contextual semantics and correlations. 
Aligning these heterogeneous data into a unified representation space is non-trivial. Second, the textual, tabular, or visual contexts may appear at different timesteps or granularities. Such temporal misalignment may impede meaningful cross-modal interactions. Third, real-world data is inevitably noisy with irrelevant information that may mislead correlation learning, resulting in suboptimal performance. For example, in finance, news articles related to stock market prediction often contain much redundant or speculative narratives that does not reflect actual market conditions. Therefore, the focus of multi-modal time series analysis is to effectively capture complementary and relevant information from multi-modal context and leverage it for predictive or analytical tasks." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.827, + 0.915, + 0.897 + ], + "angle": 0, + "content": "More recently, an increasing number of multi-modal methods have shown promise in exploiting contextual information from diverse data sources, which boosts performance in wide tasks ranging from forecasting [41, 51], classification [42, 45], anomaly detection [82] to retrieval [3] and causal discovery [68, 97]. Despite" + }, + { + "type": "page_footnote", + "bbox": [ + 0.082, + 0.862, + 0.483, + 0.884 + ], + "angle": 0, + "content": "* equal contribution.† Yuriy Nevmyvaka, Haifeng Chen and Dongjin Song are the corresponding authors." + }, + { + "type": "page_footnote", + "bbox": [ + 0.082, + 0.884, + 0.407, + 0.896 + ], + "angle": 0, + "content": "\\(^{1}\\)https://github.com/UCconn-DSIS/Multi-modal-Time-Series-Analysis" + }, + { + "type": "list", + "bbox": [ + 0.082, + 0.862, + 0.483, + 0.896 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.857, + 0.076, + 0.912, + 0.087 + ], + "angle": 0, + "content": "Jiang, et al." 
+ }, + { + "type": "text", + "bbox": [ + 0.082, + 0.108, + 0.482, + 0.384 + ], + "angle": 0, + "content": "the promising results of multi-modal time series methods, they are tailored for their own tasks with domain-specific applications. The existing literature lacks a comprehensive and systematic review that provides a unified perspective on the underlying principles and pipelines for multi-modal time series learning. In this survey, we provide a systematic and up-to-date overview of existing methods for multi-modal time series analysis. As shown in Figure 1, we discuss the challenges, motivations, and preliminaries of multi-modal time series. Then we introduce the general pipeline for multi-modal time series analysis and propose three types of interactions for cross-modal modeling between time series and other modalities - fusion, alignment, and transference - at the input, intermediate and output level, respectively. We also discuss the applications of multi-modal time series across multiple domains. Furthermore, we provide Table 2 to comprehensively summarize representative methods, encapsulating the modalities, fine-grained cross-modal interactions, real-world domains and tasks. Finally, we highlight potential future research opportunities to further advance time series analysis with multi-modal data. In summary, the major contributions of our survey are:" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.387, + 0.483, + 0.414 + ], + "angle": 0, + "content": "- We systematically catalog over 40 multi-modal time series methods with the corresponding open-source datasets." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.415, + 0.483, + 0.455 + ], + "angle": 0, + "content": "- We uniquely categorize the existing methods into a unified cross-modal interaction framework, highlighting fusion, alignment, and transference at the input/intermediate/output levels." 
+ }, + { + "type": "text", + "bbox": [ + 0.084, + 0.457, + 0.482, + 0.497 + ], + "angle": 0, + "content": "- We discuss real-world applications of multi-modal time series and identify promising future directions, encouraging researchers and practitioners to explore and exploit multi-modal time series." + }, + { + "type": "list", + "bbox": [ + 0.084, + 0.387, + 0.483, + 0.497 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.082, + 0.51, + 0.336, + 0.526 + ], + "angle": 0, + "content": "2 Background and Our Scope" + }, + { + "type": "title", + "bbox": [ + 0.082, + 0.529, + 0.383, + 0.545 + ], + "angle": 0, + "content": "2.1 Multi-modal Machine Learning" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.548, + 0.483, + 0.797 + ], + "angle": 0, + "content": "Recent advancements in multi-modal machine learning have significantly enhanced models' ability to process and integrate data from diverse modalities, such as language, acoustic, vision, and tabular data [25, 66, 91]. With the development of deep learning architectures and sophisticated interaction designs, models are able to learn, infer, and reason by integrating multiple communicative modalities. Current research in multi-modal machine learning spans multiple key areas, including (1) representing multi-modal data to encode joint and individual characteristics, (2) identifying interconnections between modality elements, (3) transferring knowledge across modalities, and (4) theoretically and empirically analyzing the underlying learning process in a quantitative manner. We refer the audiences to the recent surveys [2, 49] for a more detailed overview of general multi-modal machine learning research. Building upon these advancements, we investigate multi-modal time series analysis with a focus on modeling temporal dependencies and leveraging the data interactions across heterogeneous modalities for predictive and analytical tasks." 
+ }, + { + "type": "title", + "bbox": [ + 0.082, + 0.809, + 0.405, + 0.825 + ], + "angle": 0, + "content": "2.2 Multi-modal Time Series Analysis" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.827, + 0.482, + 0.896 + ], + "angle": 0, + "content": "Multi-modal time series analysis aims to model time series data in combination with other complementary modalities. By leveraging cross-modal interactions, this approach yields deeper insights and more robust solutions for a wide range of predictive and analytical tasks across diverse real-world contexts." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.108, + 0.915, + 0.232 + ], + "angle": 0, + "content": "This survey aims to provide a unique and systematic perspective on effectively leveraging cross-modal interactions from relevant real-world contexts to advance multi-modal time series analysis, addressing both foundational principles and practical solutions. Our assessment is threefold: (1) reviewing multi-modal time series data (Section 3), (2) analyzing cross-modal interactions between time series and other modalities (Section 4), and (3) revealing the impact of multi-modal time series analysis in applications across diverse domains (Section 5)." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.233, + 0.915, + 0.426 + ], + "angle": 0, + "content": "To resolve ambiguities, we define the scope of our survey by clarifying the types of time series considered and the criteria for multi-modal time series methods. First, we mainly consider standard time series and spatial time series. For the latter, spatial structures (often represented as graphs) are inherently paired with temporal data rather than treated as a separate modality. Second, we focus on methods that leverage multi-modal inputs from real-world contexts to provide complementary information, but for generation and retrieval tasks, the focus is more on transforming the input modality to another output modality. 
We acknowledge recent research on representing time series as a single modality (e.g., time series as images [15, 46, 62, 99, 100], time series as tabular data [27]) for downstream tasks. However, as these approaches are less relevant to our scope, we refer readers to their respective works." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.427, + 0.915, + 0.579 + ], + "angle": 0, + "content": "Besides, we would like to highlight the difference between our survey and recent related survey and position papers. Ni et al. [57] focuses on imaging-based transformations of time series and subsequent visual modeling techniques, where the discussion on multi-modal models is limited to those involving vision modalities. Kong et al. [39] concentrates on the use of multi-modal large language models (LLMs) for enhancing reasoning capabilities (e.g., causal reasoning, QA, planning, etc.) with multi-modal context. In contrast, our survey provides a broader and structured framework by delivering a systematic and unified perspective of multi-modal time series analysis, not limited to a specific modality or task type." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.609, + 0.791, + 0.623 + ], + "angle": 0, + "content": "3 Multi-modal Time Series Data" + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.629, + 0.912, + 0.643 + ], + "angle": 0, + "content": "3.1 Modalities in Multi-modal Time Series Data" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.647, + 0.915, + 0.897 + ], + "angle": 0, + "content": "Multi-modal time series data often originate from diverse sources, each exhibiting unique characteristics that influence how they are processed and analyzed. 
Besides Time Series, i.e., continuous or discrete measurements recorded over time, such as sensor readings, financial metrics, or physiological signals, their modalities often include: 1) Tabular: Time-indexed records that are inherently organized in a tabular format, such as event logs, transaction records, or demographic information. 2) Text: Time-stamped or domain-specific textual information – like clinical notes, financial reports, news articles, or social media posts – that provides contextual or interpretative insights. 3) Image: Visual data acquired as images over time, such as photographs, medical images (e.g., X-rays, MRI), satellite imagery, or visual representations generated from time series data. 4) Graph: Relational data representing interactions or structural dependencies among entities that evolve. They are typically modeled as networks or graphs, where the connections may change dynamically. Although audio is widely studied as an independent modality in multi-modal research, we consider it a special" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.075, + 0.355, + 0.088 + ], + "angle": 0, + "content": "Multi-modal Time Series Analysis: A Tutorial and Survey" + }, + { + "type": "table_caption", + "bbox": [ + 0.198, + 0.105, + 0.798, + 0.119 + ], + "angle": 0, + "content": "Table 1: Representative open-source multi-modal time series datasets and across domains." + }, + { + "type": "table", + "bbox": [ + 0.162, + 0.132, + 0.837, + 0.304 + ], + "angle": 0, + "content": "
DomainDataset (Superscripts include the URLs to the datasets)Modalities
HealthcareMIMIC-III [35][1], MIMIC-IV [34][2]TS, Text, Tabular
ICBHI [65][3], Coswara [4][4], KAUH [21][5], PTB-XL [71][6], ZuCo [14, 26][7]TS, Text
Image-EEG [22][8]TS, Image
FinanceFNSPID [17][9], ACL18 [84][10], CIKM18 [79][11], DOW30 [11][12]TS, Text
Multi-domainTime-MMD [53][13], TimeCAP [42][14], NewsForecast [73][15], TTC [37][16], CiK [77][17], TSQA [38][18]TS, Text
RetailVISUELLE [70][19]TS, Image, Text
IoTLEMMA-RCA [40][20]TS, Text
SpeechLRS3 [1][21], VoxCeleb2 [13][22]TS (Audio), Image
TrafficNYC-taxi, NYC-bike [48][23]ST, Text
EnvironmentTerra [10][24]ST, Text
" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.324, + 0.482, + 0.352 + ], + "angle": 0, + "content": "form of time series in this survey and briefly discuss representative works within this scope." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.37, + 0.418, + 0.384 + ], + "angle": 0, + "content": "3.2 Common Datasets and Benchmarks" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.389, + 0.491, + 0.444 + ], + "angle": 0, + "content": "Multi-modal time series datasets vary a lot and are domain-dependent, each with unique data characteristics and modalities. In Table 1 we provide representative datasets categorized by domain, along with their respective modalities:" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.444, + 0.483, + 0.79 + ], + "angle": 0, + "content": "Healthcare: In this domain, physiological signals (e.g., ECG, EEG) are extensively analyzed alongside textual data such as clinical notes, patient demographics, and tabular data including vital signs and laboratory results. Common datasets include MIMIC-III [35], a comprehensive dataset containing electronic health records (EHRs) of ICU patients with physiological measurements, clinical notes, and diagnostic information, widely used for tasks like patient monitoring, mortality prediction, and clinical decision support. MIMIC-IV [34] is an extension of MIMIC-III, which provide detailed physiological signals, clinical narratives, medication records, and demographic data from a large population of critically ill patients, frequently utilized for predictive modeling, clinical outcome analysis, and health informatics research. 
Other notable healthcare datasets include ICBHI [65], which contains respiratory sound recordings paired with clinical annotations for respiratory disease classification; Coswara [4], which provides respiratory audio samples and rich metadata for COVID-19 detection tasks; KAUH [21], which comprises audio records and corresponding annotations for healthcare analytics; PTB-XL [71], a large-scale ECG dataset annotated with diagnostic labels for cardiac monitoring and diagnosis; ZuCo [14, 26], which consists of simultaneous EEG and textual data from reading comprehension tasks, being useful for cognitive neuroscience studies; and Image-EEG [22], which pairs EEG signals with images of objects on a natural background, aiding studies in visual neuroscience and computer vision." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.799, + 0.483, + 0.897 + ], + "angle": 0, + "content": "Finance: Datasets that combine time series data with financial news and reports are instrumental in financial analysis and modeling. Notable examples include ACL18 [84], CIKM18 [79], and DOW30 [11]. These datasets focus on high-trade-volume stocks from the U.S. stock markets, providing historical stock price data, such as opening, high, low, and closing prices; alongside related textual information, including tweets or financial news. Another" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.324, + 0.915, + 0.365 + ], + "angle": 0, + "content": "large-scale dataset, FNSPID [17], consists of stock prices and time-aligned financial news records, covering over 4,000 companies from 1999 to 2023." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.391, + 0.915, + 0.765 + ], + "angle": 0, + "content": "Multi-domain: Datasets featuring general-purpose numerical time series combined with textual data are suitable for broad analytical applications. 
Examples include Time-MMD [53], which encompasses nine primary data domains: Agriculture, Climate, Economy, Energy, Environment, Health, Security, Social Good, and Traffic, while ensuring fine-grained alignment between time series and textual data; TimeCAP [42] compiles seven real-world time series datasets across three domains: weather, finance, and healthcare. To generate textual descriptions for each time series, a large language model (LLM) agent is employed, leveraging contextual information and domain-specific knowledge. NewsForecast [73] integrates task-specific time series data with verified public news reports across various domains, including finance, energy, traffic, and cryptocurrency; TTC [37] is a meticulously curated, time-aligned dataset designed for multimodal forecasting. It consists of paired time series and text data synchronized to timestamps, spanning two distinct domains: climate science and healthcare; CiK [77] is a dataset comprising 71 forecasting tasks across seven real-world domains. Each task necessitates the integration of both numerical data and textual information. The covered domains include Climatology, Economics, Energy, Mechanics, Public Safety, Transportation, and Retail. The TSQA [38] dataset consists of 200k question-answer pairs derived from time series data across 12 domains: healthcare, finance, energy, traffic, environment, IoT, nature, transport, human activities, machine sensors, AIOps, and the web. These QA pairs are designed to support five key tasks: forecasting, imputation, anomaly detection, classification, and open-ended reasoning." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.771, + 0.915, + 0.897 + ], + "angle": 0, + "content": "Other domains: Beyond the previously discussed major sectors, multi-modal time series analysis extends to various other domains. 
In Retail, datasets such as VISUELLE [70] integrate numerical sales data with product images and textual descriptions, facilitating thorough analyses of consumer behavior and inventory management. The Internet of Things (IoT) domain benefits from datasets such as LEMMA-RCA [40], which combine time series sensor data with textual metadata, enabling enhanced monitoring and more robust and secure methodologies that ensure the high performance of modern" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.857, + 0.076, + 0.912, + 0.087 + ], + "angle": 0, + "content": "Jiang, et al." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.107, + 0.483, + 0.246 + ], + "angle": 0, + "content": "systems. In the Speech domain, datasets like LRS3 [1] and VoxCeleb2 [13] integrate audio recordings with corresponding visual data, supporting advancements in speech recognition and speaker identification technologies. In the Traffic domain, datasets like NYC-Taxi, NYC-Bike [48] contain spatial-temporal (ST) data alongside associated textual metadata. These integrations allow LLMs to effectively capture and utilize spatial-temporal contextual signals. In the Environment domain, Terra [10] collect 45 years of global geographic spatial-temporal data, supplemented with textual descriptions." + }, + { + "type": "title", + "bbox": [ + 0.082, + 0.262, + 0.461, + 0.276 + ], + "angle": 0, + "content": "4 Cross-modal Interactions with Time Series" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.28, + 0.483, + 0.349 + ], + "angle": 0, + "content": "In this section, we conduct a detailed review of existing research on multi-modal time series analysis by thoroughly analyzing cross-modal interactions. We also elaborate how existing multi-modal methods are tailored for domain-specific applications in Section 5. The detailed taxonomy is provided in Table 2." 
+ }, + { + "type": "text", + "bbox": [ + 0.082, + 0.35, + 0.483, + 0.419 + ], + "angle": 0, + "content": "We define three fundamental types of interactions between time series and other modalities, including fusion, alignment, and transference, which occur at different stages within a framework - input, intermediate (i.e., representations or intermediate outputs), and output. The representative examples are provided in Figure 2." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.435, + 0.185, + 0.449 + ], + "angle": 0, + "content": "4.1 Fusion" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.453, + 0.483, + 0.619 + ], + "angle": 0, + "content": "Fusion refers to the process of integrating heterogeneous modalities in a way that captures complementary information across diverse sources to improve time series modeling. To fuse multi-modal inputs, a common practice is to directly integrate time series, tabular data and texts into a unified textual prompt, then use it to query LLMs for downstream tasks. This is typically facilitated by instruction fine-tuning for task-oriented analysis [19, 24, 38, 48, 56, 73, 90]. Some works also leverage the zero-shot reasoning and inference capability of pretrained LLMs (e.g., GPT-4 and its variants) [72, 81, 89]. Recent research efforts like TaTS [47] attempt to integrate paired text embedding as an additional variable of time series for temporal modeling, yielding competitive task performance." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.62, + 0.483, + 0.827 + ], + "angle": 0, + "content": "Most existing methods perform cross-modal fusion at the intermediate stage, such as adding and concatenating multi-modal representations, where each individual modal encoder first maps the raw data into a shared latent space. 
Addition combines time series and other modalities by summing up encoded representations, effectively blending shared information while preserving their interconnections in the latent space [6, 30, 51, 85, 95, 97]. On the other hand, concatenation stacks multi-modal representations along the same dimension, retaining modality-specific characteristics and allowing models to capture joint relationships between the modalities [16, 36, 36, 37]. To effectively leverage cross-modal information, existing methods often incorporate alignment designs after concatenating representations [5, 11, 12, 18, 31, 33, 41, 42, 44, 54, 55, 60, 69, 70, 74, 85, 96]. Alignment is also used in the aforementioned additions, which will be detailed in Section 4.2." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.827, + 0.483, + 0.897 + ], + "angle": 0, + "content": "When fusion is performed at the output level, different modalities contribute separately to the final output, allowing each modality to retain its unique predictive signal [31, 41, 42, 53]. Time-MMD [53] provides a paradigm that fuses predictions from both state-of-the-art forecasters and a pretrained language model with a projection" + }, + { + "type": "image", + "bbox": [ + 0.52, + 0.106, + 0.912, + 0.279 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.514, + 0.294, + 0.915, + 0.323 + ], + "angle": 0, + "content": "Figure 2: Categorization of cross-modal interaction methods and representative examples." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.339, + 0.915, + 0.519 + ], + "angle": 0, + "content": "layer, in an end-to-end manner. MOAT [41] introduces a two-stage framework for multi-modal time series forecasting. In the first stage, the model is optimized to generate forecasts from decomposed time series and text embeddings. 
In the second stage, an offline synthesis via MLP is applied to dynamically fuse different components, yielding the final forecast based on their relative contributions. Beyond fusing outputs from a single model, TimeCAP [42] enhances performance by combining predictions from both a multi-modal predictor and a pretrained LLM, which synergizes the gradient-based method and LLM agents reasoning on real-world contexts. Output fusion gains advantage of design flexibility and robustness, but it may not fully utilize the complementary relationship between modalities without additional countermeasures." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.519, + 0.919, + 0.589 + ], + "angle": 0, + "content": "Cross-modal fusion relies on well-aligned multi-modal data for effective exploitation of the contextual information. However, ideally-aligned data may not be given in real-world scenarios. As such, existing methods also leverage alignment mechanisms to mitigate the challenge." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.601, + 0.649, + 0.616 + ], + "angle": 0, + "content": "4.2 Alignment" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.619, + 0.915, + 0.799 + ], + "angle": 0, + "content": "Alignment ensures that the relationships between different modalities are preserved and semantically coherent when integrated into a unified learning framework. At the input level, we primarily refer alignment to data preprocessing techniques that aim at mitigating temporal misalignment caused by missing values, irregular sampling intervals, and differing granularities across modalities. This process is crucial for ensuring that data from multiple sources are properly synchronized before fusion, where domain knowledge is usually needed to handle such inconsistencies [10, 53, 73]. In addition, none of the existing methods we reviewed explicitly perform output alignment. 
However, the aforementioned output fusion can be easily adapted to alignment through the incorporation of a gating or attention mechanism that we will introduce shortly." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.8, + 0.915, + 0.897 + ], + "angle": 0, + "content": "Alignment at the intermediate stage plays a crucial role in multimodal interactions. We first introduce the alignment of multi-modal representations, spanning a range of techniques from model component design to learning objectives. The common component designs include self-attention [5, 12, 30, 33, 41, 42, 44, 54, 55, 69], cross-attention [6, 18, 51, 60, 70, 82, 85, 98] and gating mechanisms [82, 98]. Self-attention is often used to fuse multi-modal representations. It" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.076, + 0.354, + 0.087 + ], + "angle": 0, + "content": "Multi-modal Time Series Analysis: A Tutorial and Survey" + }, + { + "type": "table_caption", + "bbox": [ + 0.086, + 0.107, + 0.913, + 0.139 + ], + "angle": 0, + "content": "Table 2: Taxonomy of representative multi-modal time series methods. Modality refers to the different data modalities involved in each method. TS represents standard time series, \\(ST\\) denotes spatial time series. The Method column lists the techniques used for each interaction, separated by semicolons, where each interaction may include one or more techniques, separated by commas. Superscripts in the Code column include the URLs to Github repositories." + }, + { + "type": "table", + "bbox": [ + 0.085, + 0.144, + 0.927, + 0.88 + ], + "angle": 0, + "content": "
MethodModalityDomainTaskCross-Modal InteractionLarge ModelYearCode
StageFusionAlign.Trans.Method
Time-MMD [53]TS, TextGeneralForecastingOutputXXAdditionMultiple2024Yes[1]
Wang et al. [73]TS, TextGeneralForecastingInput IntermediateXXPrompt Prompt; LLM ReasoningLLaMa2 GPT-4 Turbo2024Yes[2]
GPT4MTS [30]TS, TextGeneralForecastingIntermediateXAddition; Self-attentionGPT-22024No
TimeCMA [51]TS, TextGeneralForecastingInput IntermediateXXMeta-description Addition; Cross-attentionGPT-22025Yes[3]
MOAT [41]TS, TextGeneralForecastingIntermediate OutputXConcat.; Self-attention Offline Synthesis (MLP)S-Bert2024No
TimeCAP [42]TS, TextGeneralClassificationInput Intermediate OutputXXLLM Generation Concat.; Self-attention, Retrieval AdditionBert, GPT-42024No
TimeXL [31]TS, TextGeneralClassification ForecastingIntermediate OutputXConcat., Prompt; LLM Reasoning AdditionBert, S-Bert GPT-4o2025No
Hybrid-MMF [37]TS, TextGeneralForecastingIntermediateXXConcat.GPT-4o2024Yes[4]
Time-LLM [33]TS, TextGeneralForecastingInput IntermediateXXMeta-description Concat.; Self-attentionLLaMA, GPT-22024Yes[5]
Time-VLM [98]TS, Text, ImageGeneralForecastingInput IntermediateXXFeat. Imaging, Meta-description Addition; Gating, Cross-attentionViLT, CLIP BLIP-22025No
Unitime [55]TS, TextGeneralForecastingInput IntermediateXXMeta-description Concat.; Self-attentionGPT-22024Yes[6]
TESSA [50]TS, TextGeneralAnnotationIntermediatePrompt; RL; LLM GenerationGPT-4o2024No
InstruTime [12]TS, TextGeneralClassificationIntermediateXConcat.; Self-attentionGPT-22025Yes[7]
MATMCD [68]TS, Text, GraphGeneralCausal DiscoveryIntermediatePrompt; LLM Reasoning; SupervisionMultiple2025No
STG-LLM [54]ST, TextGeneralForecastingIntermediateXConcat.; Self-attentionGPT-22024No
TableTime [72]TS, TextGeneralClassificationInputXPrompt; ReformulateMultiple2024Yes[8]
ContextFormer [6]TS, TabularGeneralForecastingIntermediateXAddition; Cross-attentionNo2025No
Time-MQA [38]TS, TextGeneralMultipleInputXXPromptMultiple2025Yes[9]
MAN-SF [67]TS, Text, GraphFinanceClassificationIntermediateXBilinear; Graph ConvolutionUSE2020No
Bamford et al. [3]TS, Text, TS, ImageFinanceRetrievalIntermediate OutputXXSupervisionS-Bert2024No
Chen et al. [11]TS, Text, GraphFinanceClassificationIntermediateXXLLM Generation Concat.; Graph ConvolutionChatGPT2023No
Xie et al. [81]TS, TextFinanceClassificationInputXXPromptChatGPT2023No
Yu et al. [89]TS, TextFinanceForecastingInputXXPromptGPT-4, Open LLaMA2023No
MedTsLLM [5]TS, Text, TabularHealthcareMultipleIntermediateXConcat.; Self-attentionLlama22024Yes[10]
RespLLM [95]TS (Audio), TextHealthcareClassificationIntermediateXAddition, Self-attentionOpenBioLLM-8B2024No
METS [45]TS, TextHealthcareClassificationOutputXXContrastiveClinicalBert2023No
Wang et al. [75]TS, TextHealthcareClassificationIntermediateXXSupervisionBart, Bert, Roberta2021No
EEG2TEXT [52]TS, TextHealthcareGenerationOutputXXSelf-supervision, SupervisionBart2024No
MEDHMP [74]TS, TextHealthcareClassificationIntermediateXConcat.; Self-attention, ContrastiveClinicalT52023Yes[11]
Deznabi et al. [16]TS, TextHealthcareClassificationIntermediateXXConcat.Bio+Clinical Bert2021Yes[12]
Niu et al. [60]TS, TextHealthcareClassificationIntermediateXConcat.; Cross-attentionBioBERT2023No
Yang et al. [85]TS, TextHealthcareClassificationIntermediateXConcat.; Addition; GatingClinicalBERT2021Yes[13]
Liu et al. [56]TS, TextHealthcareClassification RegressionInputXXPromptPaLM2023Yes[14]
xTP-LLM [24]ST, TextTraffic ForecastingInputXPrompt; Meta-descriptionLlama2-7B-chat2024Yes[15]
UrbanGPT [48]ST, TextTraffic ForecastingInputXPrompt; Meta-descriptionVicuna-7B2024Yes[16]
CityGPT [19]ST, TextMobilityInputXXPromptMultiple2025Yes[17]
MULAN [97]TS, Text, GraphIoT Causal DiscoveryIntermediateAddition; Contrastive; SupervisionNo2024No
MIA [82]TS, ImageIoT Anomaly DetectionIntermediateXAddition; Cross-attention, GatingNo2023No
Ekambaram et al. [18]TS, Image, TextRetail ForecastingIntermediateXConcat.; Self & Cross-attentionNo2020Yes[18]
Skenderi et al. [70]TS, Image, TextRetail ForecastingIntermediateXConcat.; Cross-attentionNo2024Yes[19]
VIMTS [96]ST, Image EnvironmentImputationIntermediateXConcat.; SupervisionNo2022No
LITE [44]ST, Text, Image EnvironmentForecastingIntermediateXConcat.; Self-attentionLLaMA-2-7B2024Yes[20]
AV-HuBERT [69]TS (Audio), Image SpeechClassificationIntermediateXConcat.; Self-attentionHuBERT2022Yes[21]
SpeechGPT [90]TS (Audio), TextGeneration IntermediateXConcat.; Self-attentionLLaMA-13B2023Yes[22]
LA-GCN [83]ST, TextClassificationIntermediateXSupervisionBert2023Yes[23]
" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.857, + 0.076, + 0.912, + 0.087 + ], + "angle": 0, + "content": "Jiang, et al." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.107, + 0.482, + 0.178 + ], + "angle": 0, + "content": "enables a joint and undirected alignment across all modalities by dynamically attending to important features. Given multi-modal embeddings \\( E_{\\mathrm{mm}} \\in \\mathbb{R}^{n \\times d} \\), where \\( n \\) is the total number of modality tokens and \\( d \\) is the embedding dimension, self-attention is computed as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.164, + 0.183, + 0.403, + 0.22 + ], + "angle": 0, + "content": "\\[\n\\operatorname {A t t e n t i o n} \\left(E _ {\\mathrm {m m}}\\right) = \\operatorname {s o f t m a x} \\left(\\frac {Q K ^ {\\top}}{\\sqrt {d _ {k}}}\\right) V\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.224, + 0.482, + 0.268 + ], + "angle": 0, + "content": "where the queries \\(Q\\), keys \\(K\\), and values \\(V\\) are linear projections of \\(E_{\\mathrm{mm}}\\): \\(Q = E_{\\mathrm{mm}}W_{Q}\\), \\(K = E_{\\mathrm{mm}}W_{K}\\), \\(V = E_{\\mathrm{mm}}W_{V}\\) with learnable weights of dimensionality \\(d_{k}\\): \\(W_{Q}, W_{K}, W_{V} \\in \\mathbb{R}^{d \\times d_{k}}\\)." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.268, + 0.483, + 0.352 + ], + "angle": 0, + "content": "In cross-attention, time series serves as the query modality to get contextualized by other modalities, providing a directed alignment that ensures auxiliary modalities contribute relevant contextual information while preserving the temporal structure of time series. 
Given a query embedding \\( E_{\\mathrm{ts}} \\in \\mathbb{R}^{n \\times d} \\) and that of auxiliary modalities \\( E_{c} \\in \\mathbb{R}^{n \\times d} \\) as keys and values:" + }, + { + "type": "equation", + "bbox": [ + 0.139, + 0.357, + 0.427, + 0.394 + ], + "angle": 0, + "content": "\\[\n\\text {C r o s s A t t e n t i o n} \\left(E _ {\\mathrm {t s}}, E _ {\\mathrm {c}}\\right) = \\operatorname {s o f t m a x} \\left(\\frac {Q _ {\\mathrm {t s}} K _ {\\mathrm {c}} ^ {\\top}}{\\sqrt {d _ {k}}}\\right) V _ {\\mathrm {c}}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.398, + 0.482, + 0.439 + ], + "angle": 0, + "content": "where the query, key and value are denoted as \\( Q_{\\mathrm{ts}} = E_{\\mathrm{ts}}W_{Q} \\), \\( K_{\\mathrm{c}} = E_{\\mathrm{c}}W_{K} \\), \\( V_{\\mathrm{c}} = E_{\\mathrm{c}}W_{V} \\). Note that existing methods adopt multi-head attentions, which is omitted here for simplicity." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.44, + 0.483, + 0.481 + ], + "angle": 0, + "content": "Similarly, the gating mechanism is a parametric filtering operation that explicitly regulates the influence of time series and other modalities on the fused embeddings in \\( E \\):" + }, + { + "type": "equation", + "bbox": [ + 0.118, + 0.487, + 0.446, + 0.503 + ], + "angle": 0, + "content": "\\[\nG = \\sigma \\left(W _ {g} \\left[ E _ {\\mathrm {t s}}; E _ {c} \\right] + b _ {g}\\right), \\quad E = G \\odot E _ {\\mathrm {t s}} + (1 - G) \\odot E _ {c}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.507, + 0.482, + 0.537 + ], + "angle": 0, + "content": "where \\(\\sigma (\\cdot)\\) denotes the sigmoid function, the learnable weight and bias are denoted as \\(W_{g}\\in \\mathbb{R}^{2d\\times d}\\) and \\(b_{g}\\in \\mathbb{R}^{d}\\), respectively." 
+ }, + { + "type": "text", + "bbox": [ + 0.082, + 0.537, + 0.483, + 0.62 + ], + "angle": 0, + "content": "When a graph modality is available from external contexts, the underlying topological insights can be leveraged for graph-based alignment [11, 67]. Unlike the above methods that rely solely on feature interactions, it explicitly aligns multi-modal representations with relational structures through graph convolution, enabling context-aware feature propagation across modalities." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.62, + 0.486, + 0.827 + ], + "angle": 0, + "content": "Representation alignments can also be achieved by learning objectives [3, 45, 83, 96, 97]. For example, MULAN [97] extracts modality-invariant and modality-specific representations from multi-modal time series. It employs contrastive learning to enhance cross-modal alignment by maximizing the similarity between invariant representations across modalities while minimizing the similarity between invariant and specific representations of the same modality. Moreover, Bamford et al. [3] align cross-modal representations by using the mean of uni-modal cosine similarities as the target similarity and optimizing cross-modal similarity via cross-entropy loss, which effectively connects both modalities in a shared latent space for time series retrieval tasks. In general, this branch of methods is effective as it directly integrates the alignment objective into the optimization process, ensuring that meaningful representations are explicitly learned." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.827, + 0.482, + 0.897 + ], + "angle": 0, + "content": "Lastly, we introduce the intermediate alignment of component outputs within a framework, extending beyond representation alignment within a model. 
The most recent studies explore the synergy between time series models and LLM agents, leveraging the strong reasoning capabilities of pretrained LLMs to provide" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.107, + 0.915, + 0.397 + ], + "angle": 0, + "content": "contextual understanding and calibration in real-world scenarios [31, 42, 50, 68, 73]. We briefly discuss a few representative examples for demonstration. TimeCAP [42] utilizes the embedding space of a trained multi-modal encoder to retrieve in-context examples with the highest cosine similarity. These retrieved examples with ground truth labels are then fed, along with the query text, into an LLM to provide contextual guidance and improve outcome prediction. TimeXL [31] incorporates a multi-modal prototype-based encoder to generate explainable case-based rationales for both time series and texts, integrating three LLM agents, where prediction, reflection, and refinement LLMs collaborate to iteratively enhance prediction accuracy, identify textual inconsistencies or noise, and calibrate textual contexts, yielding more accurate predictions and explanations. NewsForecast [73] also employs reflection in language agents to iteratively select relevant news from a large database, enhancing alignment of textual information for text-based forecasting. Similarly, MATLAB [68] ensures alignment between statistical causal discovery on time series and LLM reasoning on textual context by leveraging iterative self-reflective tool-calling to structure textual context, which is then used to explain and refine causal constraints." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.398, + 0.915, + 0.468 + ], + "angle": 0, + "content": "In a nutshell, alignment aims to calibrate real-world contexts and effectively capture relevant multi-modal elements for a semantically coherent time series modeling. 
It enhances task performance, robustness and explanation, ensuring that models leverage meaningful contextual information for improved decision-making." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.504, + 0.667, + 0.518 + ], + "angle": 0, + "content": "4.3 Transference" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.522, + 0.915, + 0.897 + ], + "angle": 0, + "content": "Transference refers to the process of mapping between different modalities. It allows one modality to be inferred, translated, or synthesized from another. This concept plays a crucial role across different stages of multi-modal time series analysis. The input-level transference typically serves for modality augmentation. It helps introduce contextual priors, enrich training samples, and provide alternative representations. This is particularly useful in scenarios of data scarcity and imbalance. In existing literature, a common practice is to use meta information to describe the narrative of real-world contexts (e.g., domain, data statistics and granularity, variable descriptions, other co-variates, etc.) [19, 24, 33, 48, 51, 55, 72, 98] or leverage pretrained LLMs to generate fine-grained textual contexts [42] or graphs [11] for real-world time series, serving as an augmented modality. In addition to texts, time series can also be transformed into high-dimensional images via feature imaging, such as stacking the original data with frequency and periodicity features [98]. Alternatively, time series can be represented in tabular form, transforming time series analysis into a table understanding task [72]. Note that the aforementioned uni-modal methods for transforming time series into other single modalities can also be integrated into multi-modal time series frameworks [15, 27, 46, 62, 99, 100]. The exploitation of input-level transference is two-fold. 
First, the embedding of generated modality can serve as semantic anchors that guides time series modeling via representation alignment, improving downstream supervised tasks [33, 51, 55, 98]. Second, it provides additional contextual guidance for pretrained LLMs through input fusion and prompting [19, 24, 48, 72]." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.076, + 0.355, + 0.087 + ], + "angle": 0, + "content": "Multi-modal Time Series Analysis: A Tutorial and Survey" + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.108, + 0.482, + 0.218 + ], + "angle": 0, + "content": "At the intermediate [11, 50, 68, 75, 97] and output [3, 52] levels, transference are more task-oriented. The output-level transference typically refers to the end-to-end generation of new modalities, such as text-based and image-based time series retrieval, where users provide textual descriptions or sketched trends to query relevant time series data [3]. This also includes EEG-to-text conversion, enabling direct transformation from physiological signals to human-readable narratives [52]." + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.219, + 0.482, + 0.397 + ], + "angle": 0, + "content": "The output of intermediate transference typically serves as an initial solution to be refined for modality generation tasks [50, 68, 97] or a medium to be inferred for predictive tasks [75], facilitating downstream reasoning and further alignment within the multimodal framework. MATMCD [68] generates an initial causal graph from time series, achieving modality transference in the intermediate level. Subsequently, it incorporates textual modality to refine the causal graph, ensuring improved alignment and interpretability. Moreover, Wang et al. 
[75] adopt a two-stage mechanism for sentiment classification based on EEG data, where the model first converts EEG signals into reading texts and then employs a pretrained LLM based on texts for classification, achieving impressive zero-shot results." + }, + { + "type": "title", + "bbox": [ + 0.087, + 0.41, + 0.443, + 0.441 + ], + "angle": 0, + "content": "5 Applications of Multi-modal Time Series Analysis" + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.445, + 0.482, + 0.499 + ], + "angle": 0, + "content": "In this section, we review the existing applications of multi-modal time series analysis for both standard and spatial time series, covering diverse domains such as healthcare, finance, transportation, environment, retail, and the Internet of Things (IoT)." + }, + { + "type": "title", + "bbox": [ + 0.087, + 0.512, + 0.257, + 0.526 + ], + "angle": 0, + "content": "Standard Time Series" + }, + { + "type": "title", + "bbox": [ + 0.087, + 0.532, + 0.217, + 0.545 + ], + "angle": 0, + "content": "5.1 Healthcare" + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.55, + 0.482, + 0.771 + ], + "angle": 0, + "content": "Recent studies in healthcare highlight the multi-modal analysis of diverse medical data sources, such as EHRs (Electronic Health Records, containing lab values and clinical reports, etc.), audio, EEG (Electroencephalogram), ECG (Electrocardiogram), and other wearable and medical sensor recordings, for better disease diagnosis and patient monitoring. For multi-modal analysis on EHR data, a common modeling strategy involves the interaction between lab values and clinical reports, including the concatenation [16] and attention mechanisms [60, 85] on modality embeddings. Moreover, existing methods explore different modeling techniques to better exploit the clinical notes, via domain-specific text encoders (e.g., ClinicalBERT [28, 85] and BioBERT [43, 60]) and different processing strategies. 
For example, text embeddings can be modeled separately based on patient groups [86] or through a decaying mechanism based on time intervals [36] before interacting with time series embeddings, which leads to improved mortality prediction." + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.772, + 0.482, + 0.896 + ], + "angle": 0, + "content": "In addition to EHRs, multi-modal modeling methods have been tailored for audio [95], ECG [45], and EEG [75]. Zhang et al. [95] focus on a respiratory health classification task by integrating both audio and textual descriptions. Li et al. [45] propose a multi-modal contrastive learning framework, constructing positive and negative samples by pairing patients' report texts with corresponding ECG signals for self-supervised pretraining. The classification task is then performed by computing the cosine similarity between different text representations and the target ECG representation. Wang" + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.108, + 0.913, + 0.162 + ], + "angle": 0, + "content": "et al. [75] propose a two-stage method for zero-shot EEG-based sentiment classification. First, a pretrained BART model is used for EEG-to-text decoding, followed by a trained text sentiment classifier that converts the generated text into sentiment categories." + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.163, + 0.913, + 0.272 + ], + "angle": 0, + "content": "Similarly, Liu et al. [56] fuses physiological and behavioral time-series sensor data with real-world contextual information to effectively harness LLMs for wellness assessment. By fine-tuning the models with few-shot question-answer pairs that include contextual details, they improve performance on various healthcare tasks—such as cardiac signal analysis, physical activity recognition, and calorie-burn estimation, which outperform both supervised feedforward neural networks and zero-shot LLM baselines." 
+ }, + { + "type": "title", + "bbox": [ + 0.519, + 0.29, + 0.623, + 0.303 + ], + "angle": 0, + "content": "5.2 Finance" + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.308, + 0.913, + 0.528 + ], + "angle": 0, + "content": "Recently, multi-modal time series analysis has received increasing attention in financial applications. Yu et al. [89] and Xie et al. [81] focus on stock prediction by integrating stock price movements, company profiles, and news directly into structured LLM prompts, enabling models to perform reasoning over multiple modalities. Yu et al. [89] applies GPT-4 and Open LLaMA to forecast NASDAQ-100 stock returns through instruction-based prompting and fine-tuning, demonstrating that structured LLM-driven inference can outperform traditional econometric models. Meanwhile, Xie et al. [81] conducts a zero-shot analysis of ChatGPT's capabilities for multimodal stock movement prediction, incorporating CoT prompting to assess the impact of social media sentiment on stock trends. Chen et al.[11] and Sawhneyet al.[67] also incorporate graph structures for stock movement prediction. For instance, Chen et al.[11] uses ChatGPT to infer dynamic stock relationship graphs from news, which reflects market conditions and enhances prediction accuracy." + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.53, + 0.913, + 0.625 + ], + "angle": 0, + "content": "Beyond the predictive tasks, Bamford et al. [3] proposes a multimodal retrieval framework, where the model aligns both modalities in a shared latent space through contrastive learning. This framework allows users to search for financial time series through textual descriptions or sketched trends, offering greater flexibility. It also significantly improves retrieval speed and accuracy compared to traditional SQL-based search methods." 
+ }, + { + "type": "title", + "bbox": [ + 0.519, + 0.643, + 0.614, + 0.656 + ], + "angle": 0, + "content": "5.3 Others" + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.661, + 0.913, + 0.896 + ], + "angle": 0, + "content": "Multi-modal time series analysis also exists in other domains, such as retail, IoT, computer vision and audio. In the retail sector, Ekambaram et al. [18] utilizes product images and textual descriptions, including attributes like color, pattern, and sleeve style, while incorporating temporal and exogenous features for new product sales forecasting. More recently, Skenderi et al. [70] integrates additional modality data, including product images and text descriptions, along with Google Trends data for sales forecasting. In IoT applications, MIA [82] enhances power transformer fault diagnosis by integrating multi-modal data, including dissolved gas analysis (DGA) and infrared images, to improve accuracy and efficiency. MULAN [97] converts log sequences into time-series data using a log-tailored language model and employs contrastive learning to leverage multi-modal data, facilitating root-cause discovery for system failures. In computer vision, LA-GCN [83] utilizes textual embeddings of joint names and action labels to generate faithful structural priors, enhancing skeleton-based action modeling and" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.857, + 0.076, + 0.912, + 0.086 + ], + "angle": 0, + "content": "Jiang, et al." + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.108, + 0.482, + 0.176 + ], + "angle": 0, + "content": "improving recognition tasks. In speech applications, AV-HuBERT [69] employs a self-supervised representation learning framework to leverage correlated audio and visual information [90], while SpeechGPT [69, 90] integrates audio and text to enhance generation performance." 
+ }, + { + "type": "title", + "bbox": [ + 0.086, + 0.191, + 0.241, + 0.206 + ], + "angle": 0, + "content": "Spatial Time Series" + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.21, + 0.358, + 0.226 + ], + "angle": 0, + "content": "5.4 Transportation and Mobility" + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.229, + 0.482, + 0.436 + ], + "angle": 0, + "content": "Several recent studies on traffic prediction highlight the importance of multi-modal contexts to enhance forecasting accuracy. Guo et al. [24] transforms California traffic data into structured LLM prompts. The method uses LLaMA models and instruction fine-tuning to improve spatial-temporal learning. Meanwhile, Li et al. [48] employs a spatial-temporal dependency encoder to align numerical New York City traffic data with LLMs, incorporating weather, geographic context, and historical flow patterns to refine predictions. Similarly, Feng et al. [19] proposes CityGPT, enhancing LLMs' spatial cognition for urban reasoning, mobility prediction, and navigation by integrating urban mobility data, road networks, and human behavior through instruction tuning. These studies demonstrate that LLM-based multi-modal fusion not only enhances traffic forecasting but also improves model interpretability and adaptability across diverse urban scenarios." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.451, + 0.236, + 0.465 + ], + "angle": 0, + "content": "5.5 Environment" + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.469, + 0.482, + 0.662 + ], + "angle": 0, + "content": "Integrating multi-modal information benefits environmental studies, particularly by addressing the prevalent challenge of missing values. 
VIMTS [96] utilizes a structured variational approximation technique to impute missing high-dimensional modalities (stream image data) by transforming them into low-dimensional features derived from simpler, related modalities (meteorological time series records), ensuring cross-modal correlations and interpretability of the imputation process. Additionally, LITE [44] addresses the imputation of missing features through a sparse Mixture of Experts framework. It integrates and encodes various environmental variables through a unified encoder. Directed by domain-specific instructions, a language model is utilized to merge these multi-modal representations, thereby improving the accuracy of environmental spatial-temporal predictions." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.677, + 0.335, + 0.691 + ], + "angle": 0, + "content": "6 Future Research Directions" + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.695, + 0.482, + 0.723 + ], + "angle": 0, + "content": "In this section, we outline several underexplored research directions that open up opportunities for future advancements." + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.73, + 0.482, + 0.896 + ], + "angle": 0, + "content": "Reasoning with Multi-modal Time Series. Enhancing reasoning with multi-modal time series is pivotal for the development of intelligent systems. Future research should focus on creating a unified framework that can seamlessly integrate temporal reasoning with contextual understanding, enabling models to handle multiple time series tasks with interpretability. One potential path is to incorporate external knowledge bases and real-world context, such as developing a retrieval-augmented generation (RAG) [59] system, to enhance the reasoning process and allow models to make informed inferences beyond the immediate data. It is also promising to synergize time series model and language agents to provide more faithful and reliable reasoning on real-world contexts [31, 68]." 
+ }, + { + "type": "text", + "bbox": [ + 0.518, + 0.108, + 0.912, + 0.15 + ], + "angle": 0, + "content": "The recent development of LLM reasoning models, such as chain of thoughts [76] and tree of thoughts [87], also offers potential solutions to improve reasoning quality." + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.152, + 0.914, + 0.249 + ], + "angle": 0, + "content": "Decision Making. Multi-modal time series analysis presents a promising future direction to enhance decision-making processes, which is crucial in high-stakes applications. By leveraging predictive signals and explanations from multi-modal contexts, future research can develop more adaptive, interpretable, and reliable decision-support systems, to facilitate the downstream optimization tasks such as resource allocation and risk management." + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.253, + 0.914, + 0.432 + ], + "angle": 0, + "content": "Domain Generalization. One key challenge in multi-modal time series analysis is domain generalization, which enables a model trained on one or more source domains to effectively generalize to unseen target domains, ensuring robustness against distribution shifts. In multi-modal time series, distribution shifts can be multifaceted, stemming not only from time series, but also from other modalities. Therefore, it is crucial to develop specialized domain generalization methods for effective multi-modal time series analysis, including strategies to identify and preserve domain-invariant components across modalities while capturing modality-specific variations for rapid adaptation. Additionally, disentangling the effects of each modality is essential to better understand their individual contributions and mitigate cross-modal interference." + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.436, + 0.914, + 0.587 + ], + "angle": 0, + "content": "Robustness to Missing and Noisy Modalities. 
Multi-modal time series analysis often frequently encounters messy real-world contexts with incomplete or noisy data. Existing methods employ an iterative context-refinement algorithm [31] that filters out less relevant information, thereby enhancing the predictive insights derived from multi-modal time series. Nonetheless, effectively dealing with missing and noisy modalities still demands further exploration. In particular, developing strategies for modality-specific imputation, noise reduction, and relevance quantification will be crucial to improving the real-world applicability of existing multi-modal time series methods." + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.592, + 0.912, + 0.688 + ], + "angle": 0, + "content": "Ethical Considerations and Bias Mitigation. In light of potential biases in multi-modal time series datasets, future research should integrate fairness-aware techniques, such as fairness constraints, counterfactual analysis, and adversarial debiasing. These methods should be combined with robust bias assessment frameworks to systematically detect and mitigate inequities, ensuring outcomes that are both equitable and socially responsible." + }, + { + "type": "title", + "bbox": [ + 0.518, + 0.705, + 0.638, + 0.719 + ], + "angle": 0, + "content": "7 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.723, + 0.914, + 0.82 + ], + "angle": 0, + "content": "In this survey, we provide a comprehensive overview of existing multi-modal time series methods. We first discuss the multi-modal time series used in existing methods. Then, we propose a taxonomy based on cross-modal interactions between time series and other modalities. The existing methods are categorized and summarized accordingly. We also discuss the real-world applications and highlight future research directions in this promising area." 
+ }, + { + "type": "title", + "bbox": [ + 0.518, + 0.836, + 0.669, + 0.852 + ], + "angle": 0, + "content": "Acknowledgments" + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.855, + 0.914, + 0.896 + ], + "angle": 0, + "content": "This research was supported in part by the National Science Foundation (NSF) CAREER IIS-2338878, as well as by generous research gifts from NEC Labs America Inc. and Morgan Stanley." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.076, + 0.355, + 0.087 + ], + "angle": 0, + "content": "Multi-modal Time Series Analysis: A Tutorial and Survey" + }, + { + "type": "title", + "bbox": [ + 0.085, + 0.106, + 0.178, + 0.12 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.096, + 0.124, + 0.483, + 0.154 + ], + "angle": 0, + "content": "[1] Triantafyllos Afouras, Joon Son Chung, and Andrew Zisserman. 2018. LRS3-TED: a large-scale dataset for visual speech recognition. arXiv:1809.00496 [cs.CV] https://arxiv.org/abs/1809.00496" + }, + { + "type": "ref_text", + "bbox": [ + 0.096, + 0.155, + 0.483, + 0.184 + ], + "angle": 0, + "content": "[2] Tadas Baltrusaitis, Chaitanya Ahuja, and Louis-Philippe Morency. 2018. Multimodal machine learning: A survey and taxonomy. IEEE transactions on pattern analysis and machine intelligence 41, 2 (2018), 423-443." + }, + { + "type": "ref_text", + "bbox": [ + 0.096, + 0.185, + 0.482, + 0.243 + ], + "angle": 0, + "content": "[3] Tom Bamford, Andrea Coletta, Elizabeth Fons, Sriram Gopalakrishnan, Svitlana Vyetrenko, Tucker Balch, and Manuela Veloso. 2023. Multi-Modal Financial Time-Series Retrieval Through Latent Space Projections. In Proceedings of the Fourth ACM International Conference on AI in Finance (Brooklyn, NY, USA) (ICAIF '23). Association for Computing Machinery, New York, NY, USA, 498-506. 
doi:10.1145/3604237.3626901" + }, + { + "type": "ref_text", + "bbox": [ + 0.096, + 0.245, + 0.482, + 0.294 + ], + "angle": 0, + "content": "[4] Dhananjay Bhattacharya, Nayan K Sharma, Debottam Dutta, Srikanth R Chetupalli, Prashant Mote, Sriram Ganapathy, Jyothi Bhat, Shreyas Ramoji, Pravin Ghosh, Aswin Subramanian, et al. 2023. Coswara: a respiratory sounds and symptoms dataset for remote screening of SARS-CoV-2 infection. Scientific Data 10, 1 (2023), 397." + }, + { + "type": "ref_text", + "bbox": [ + 0.096, + 0.295, + 0.482, + 0.325 + ], + "angle": 0, + "content": "[5] Nimeesha Chan, Felix Parker, William Bennett, Tianyi Wu, Mung Yao Jia, James Fackler, and Kimia Ghobadi. 2024. Medtsllm: Leveraging llms for multimodal medical time series analysis. arXiv preprint arXiv:2408.07773 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.096, + 0.326, + 0.482, + 0.365 + ], + "angle": 0, + "content": "[6] Sameep Chattopadhyay, Pulkit Paliwal, Sai Shankar Narasimhan, Shubhankar Agarwal, and Sandeep P. Chinchali. 2025. Context Matters: Leveraging Contextual Features for Time Series Forecasting. arXiv:2410.12672 [cs.LG] https://arxiv.org/abs/2410.12672" + }, + { + "type": "ref_text", + "bbox": [ + 0.096, + 0.366, + 0.482, + 0.405 + ], + "angle": 0, + "content": "[7] Shengyu Chen, Yiqun Xie, Xiang Li, Xu Liang, and Xiaowei Jia. 2023. Physics-guided meta-learning method in baseflow prediction over large regions. In Proceedings of the 2023 SIAM International Conference on Data Mining (SDM). SIAM, 217-225." + }, + { + "type": "ref_text", + "bbox": [ + 0.096, + 0.406, + 0.482, + 0.445 + ], + "angle": 0, + "content": "[8] Shengyu Chen, Jacob A Zwart, and Xiaowei Jia. 2022. Physics-guided graph meta learning for predicting water temperature and streamflow in stream networks. In Proceedings of the 28th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 2752-2761." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.096, + 0.446, + 0.482, + 0.476 + ], + "angle": 0, + "content": "[9] Si-An Chen, Chun-Liang Li, Sercan O Arik, Nathanael Christian Yoder, and Tomas Pfister. 2023. TSMixer: An All-MLP Architecture for Time Series Forecasting. Transactions on Machine Learning Research (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.477, + 0.482, + 0.547 + ], + "angle": 0, + "content": "[10] Wei Chen, Xixuan Hao, Yuankai Wu, and Yuxuan Liang. 2024. Terra: A Multimodal Spatio-Temporal Dataset Spanning the Earth. In Advances in Neural Information Processing Systems, A. Globerson, L. Mackey, D. Belgrave, A. Fan, U. Paquet, J. Tomczak, and C. Zhang (Eds.), Vol. 37. Curran Associates, Inc., 66329-66356. https://proceedings.neurips.cc/paper_files/paper/2024/file/7a6a7fbd1ee0c9684b3f919f79d129ef-Paper-Datasets_and_Benchmarks_Track.pdf" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.548, + 0.482, + 0.576 + ], + "angle": 0, + "content": "[11] Zihan Chen, Lei Nico Zheng, Cheng Lu, Jialu Yuan, and Di Zhu. 2023. ChatGPT Informed Graph Neural Network for Stock Movement Prediction. Available at SSRN 4464002 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.577, + 0.482, + 0.636 + ], + "angle": 0, + "content": "[12] Mingyue Cheng, Yiheng Chen, Qi Liu, Zhiding Liu, Yuong Luo, and Enhong Chen. 2025. InstrucTime: Advancing Time Series Classification with Multimodal Language Modeling. In Proceedings of the Eighteenth ACM International Conference on Web Search and Data Mining (Hannover, Germany) (WSDM '25). Association for Computing Machinery, New York, NY, USA, 792-800. doi:10.1145/3701551.3703499" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.637, + 0.482, + 0.667 + ], + "angle": 0, + "content": "[13] Joon Son Chung, Arsha Nagrani, and Andrew Zisserman. 2018. VoxCeleb2: Deep Speaker Recognition. In Interspeech 2018 (Interspeech Proceedings). ISCA. 
doi:10.21437/Interspeech.2018-1929" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.667, + 0.482, + 0.697 + ], + "angle": 0, + "content": "[14] Helena Cousijn, Patricia Feeney, Daan Lowenberg, Elisa Presani, and Natasha Simons. 2019. A data citation roadmap for scholarly data repositories. *Scientific Data* 6, 1 (2019), 28." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.698, + 0.482, + 0.737 + ], + "angle": 0, + "content": "[15] Mayank Daswani, Mathias MJ Bellaiche, Marc Wilson, Desislav Ivanov, Mikhail Papkov, Eva Schnider, Jing Tang, Kay Lamerigts, Gabriela Botea, Michael A Sanchez, et al. 2024. Plots Unlock Time-Series Understanding in Multimodal Models. arXiv preprint arXiv:2410.02637 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.738, + 0.482, + 0.788 + ], + "angle": 0, + "content": "[16] Iman Deznabi, Mohit Iyyer, and Madalina Fiterau. 2021. Predicting in-hospital mortality by combining clinical notes with time-series data. In Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021, Chengqing Zong, Fei Xia, Wenjie Li, and Roberto Navigli (Eds.). Association for Computational Linguistics, Online, 4026-4031. doi:10.18653/v1/2021-findings-acl.352" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.789, + 0.482, + 0.838 + ], + "angle": 0, + "content": "[17] Zihan Dong, Xinyu Fan, and Zhiyuan Peng. 2024. FNSPID: A Comprehensive Financial News Dataset in Time Series. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining (Barcelona, Spain) (KDD '24). Association for Computing Machinery, New York, NY, USA, 4918-4927. doi:10.1145/3637528.3671629" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.839, + 0.482, + 0.888 + ], + "angle": 0, + "content": "[18] Vijay Ekambaram, Kushagra Manglik, Sumanta Mukherjee, Surya Shravan Kumar Sajja, Satyam Dwivedi, and Vikas Raykar. 2020. Attention based multimodal new product sales time-series forecasting. 
In Proceedings of the 26th ACM SIGKDD international conference on knowledge discovery & data mining. 3110-3118." + }, + { + "type": "list", + "bbox": [ + 0.092, + 0.124, + 0.483, + 0.888 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.11, + 0.914, + 0.14 + ], + "angle": 0, + "content": "[19] Jie Feng, Yuwei Du, Tianhui Liu, Siqi Guo, Yuming Lin, and Yong Li. 2024. Citygpt: Empowering urban spatial cognition of large language models. arXiv preprint arXiv:2406.13948 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.141, + 0.914, + 0.18 + ], + "angle": 0, + "content": "[20] Stefan Feuerriegel, Dennis Frauen, Valentyn Melnychuk, Jonas Schweisthal, Konstantin Hess, Alicia Curth, Stefan Bauer, Niki Kilbertus, Isaac S Kohane, and Mihaela van der Schaar. 2024. Causal machine learning for predicting treatment outcomes. Nature Medicine 30, 4 (2024), 958-968." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.181, + 0.914, + 0.211 + ], + "angle": 0, + "content": "[21] Mohammad Fraiwan, Luay Fraiwan, Basheer Khassawneh, and Ali Ibnian. 2021. A dataset of lung sounds recorded from the chest wall using an electronic stethoscope. Data in Brief 35, 106913. doi:10.1016/j.dib.2021.106913" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.212, + 0.914, + 0.242 + ], + "angle": 0, + "content": "[22] Alessandro T. Gifford, Kshitij Dwivedi, Gemma Roig, and Radoslaw M. Cichy. 2022. A large and rich EEG dataset for modeling human visual object recognition. NeuroImage 264 (2022), 119754. doi:10.1016/j.neuroimage.2022.119754" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.242, + 0.914, + 0.28 + ], + "angle": 0, + "content": "[23] Shengnan Guo, Youfang Lin, Ning Feng, Chao Song, and Huaiyu Wan. 2019. Attention based spatial-temporal graph convolutional networks for traffic flow forecasting. In Proceedings of the AAAI conference on artificial intelligence, Vol. 33, 922-929." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.281, + 0.914, + 0.312 + ], + "angle": 0, + "content": "[24] Xusen Guo, Qiming Zhang, Junyue Jiang, Mingxing Peng, Meixin Zhu, and Hao Frank Yang. 2024. Towards explainable traffic flow prediction with large language models. Communications in Transportation Research 4 (2024), 100150." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.313, + 0.914, + 0.35 + ], + "angle": 0, + "content": "[25] Paul Hager, Martin J Menten, and Daniel Rueckert. 2023. Best of both worlds: Multimodal contrastive learning with tabular and imaging data. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 23924-23935." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.351, + 0.914, + 0.432 + ], + "angle": 0, + "content": "[26] Nora Hollenstein, Marius Troendle, Ce Zhang, and Nicolas Langer. 2020. ZuCo 2.0: A Dataset of Physiological Recordings During Natural Reading and Annotation. In Proceedings of the Twelfth Language Resources and Evaluation Conference, Nicoletta Calzolari, Frédéric Béchet, Philippe Blache, Khalid Choukri, Christopher Cieri, Thierry Declerck, Sara Goggi, Hitoshi Isahara, Bente Maegaard, Joseph Mariani, Hélène Mazo, Asuncion Moreno, Jan Odijk, and Stelios Piperidis (Eds.). European Language Resources Association, Marseille, France, 138-146. https://aclanthology.org/2020.lrec-1.18/" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.433, + 0.914, + 0.462 + ], + "angle": 0, + "content": "[27] Shi Bin Hoo, Samuel Müller, David Salinas, and Frank Hutter. 2025. The tabular foundation model TabPFN outperforms specialized time series forecasting models based on simple features. arXiv preprint arXiv:2501.02945 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.463, + 0.914, + 0.493 + ], + "angle": 0, + "content": "[28] Kexin Huang, Jaan Altsoaar, and Rajesh Ranganath. 2020. Clinical-BERT: Modeling Clinical Notes and Predicting Hospital Readmission. 
arXiv:1904.05342 [cs.CL] https://arxiv.org/abs/1904.05342" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.494, + 0.914, + 0.533 + ], + "angle": 0, + "content": "[29] Jiahao Ji, Jingyuan Wang, Chao Huang, Junjie Wu, Boren Xu, Zhenhe Wu, Junbo Zhang, and Yu Zheng. 2023. Spatio-temporal self-supervised learning for traffic flow prediction. In Proceedings of the AAAI conference on artificial intelligence, Vol. 37. 4356-4364." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.534, + 0.914, + 0.573 + ], + "angle": 0, + "content": "[30] Furong Jia, Kevin Wang, Yixiang Zheng, Defu Cao, and Yan Liu. 2024. GPT4MTS: Prompt-based Large Language Model for Multimodal Time-series Forecasting. Proceedings of the AAI Conference on Artificial Intelligence 38, 21 (Mar. 2024), 23343-23351. doi:10.1609/aaaai.v38i21.30383" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.574, + 0.914, + 0.613 + ], + "angle": 0, + "content": "[31] Yushan Jiang, Wenchao Yu, Geon Lee, Dongjin Song, Kijung Shin, Wei Cheng, Yanchi Liu, and Haifeng Chen. 2025. Explainable Multi-modal Time Series Prediction with LLM-in-the-Loop. arXiv:2503.01013 [cs.LG] https://arxiv.org/abs/2503.01013" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.614, + 0.914, + 0.654 + ], + "angle": 0, + "content": "[32] Bo Jin, Haoyu Yang, Leilei Sun, Chuanren Liu, Yue Qu, and Jianing Tong. 2018. A treatment engine by predicting next-period prescriptions. In Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining. 1608-1616." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.655, + 0.914, + 0.703 + ], + "angle": 0, + "content": "[33] Ming Jin, Shiyu Wang, Lintao Ma, Zhixuan Chu, James Y. Zhang, Xiaoming Shi, Pin-Yu Chen, Yuxuan Liang, Yuan-Fang Li, Shirui Pan, and Qingsong Wen. 2024. Time-LLM: Time Series Forecasting by Reprogramming Large Language Models. In The Twelfth International Conference on Learning Representations. 
https://openreview.net/forum?id=Unb5CVPtae" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.704, + 0.914, + 0.723 + ], + "angle": 0, + "content": "[34] Alistair Johnson, Lucas Bulgarelli, Tom Pollard, Steven Horng, Leo Anthony Celi, and Roger Mark. 2021. MIMIC-IV (version 1.0). doi:10.13026/s6n6-xd98" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.725, + 0.914, + 0.764 + ], + "angle": 0, + "content": "[35] Alistair E. W. Johnson, Tom J. Pollard, Lu Shen, Li-wei H. Lehman, Mengling Feng, Mohammad Ghassemi, Benjamin Moody, Peter Szolovits, Leo Anthony Celi, and Roger G. Mark. 2016. MIMIC-III, a freely accessible critical care database. Scientific Data 3 (2016), 160035." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.765, + 0.914, + 0.824 + ], + "angle": 0, + "content": "[36] Swaraj Khadanga, Karan Aggarwal, Shafiq Joty, and Jaideep Srivastava. 2019. Using Clinical Notes with Time Series Data for ICU Management. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IFCNLP). Association for Computational Linguistics, Hong Kong, China, 6432-6437. doi:10.18653/v1/D19-1678" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.825, + 0.914, + 0.865 + ], + "angle": 0, + "content": "[37] Kai Kim, Howard Tsai, Rajat Sen, Abhimanyu Das, Zihao Zhou, Abhishek Tanpure, Mathew Luo, and Rose Yu. 2024. Multi-Modal Forecaster: Jointly Predicting Time Series and Textual Data. arXiv:2411.06735 [cs.AI] https://arxiv.org/abs/2411.06735" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.866, + 0.914, + 0.895 + ], + "angle": 0, + "content": "[38] Yaxuan Kong, Yiyuan Yang, Yoontae Hwang, Wenjie Du, Stefan Zohren, Zhangyang Wang, Ming Jin, and Qingsong Wen. 2025. Time-MQA: Time Series Multi-Task Question Answering with Context Enhancement." 
+ }, + { + "type": "list", + "bbox": [ + 0.523, + 0.11, + 0.914, + 0.895 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.857, + 0.076, + 0.912, + 0.087 + ], + "angle": 0, + "content": "Jiang, et al." + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.11, + 0.389, + 0.12 + ], + "angle": 0, + "content": "arXiv:2503.01875 [cs.CL] https://arxiv.org/abs/2503.01875" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.12, + 0.482, + 0.16 + ], + "angle": 0, + "content": "[39] Yaxuan Kong, Yiyuan Yang, Shiyu Wang, Chenghao Liu, Yuxuan Liang, Ming Jin, Stefan Zohren, Dan Pei, Yan Liu, and Qingsong Wen. 2025. Position: Empowering Time Series Reasoning with Multimodal LLMs. arXiv preprint arXiv:2502.01477 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.16, + 0.482, + 0.19 + ], + "angle": 0, + "content": "[40] Dongjie Wang Chengyuan Deng Reon Matsuoka Lecheng Zheng, Zhengzhang Chen and Haifeng Chen. 2024. LEMMA-RCA: A Large Multi-modal Multi-domain Dataset for Root Cause Analysis. arXiv:2406.05375 [cs.AI]" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.19, + 0.482, + 0.221 + ], + "angle": 0, + "content": "[41] Geon Lee, Wenchao Yu, Wei Cheng, and Haifeng Chen. 2024. MoAT: Multi-Modal Augmented Time Series Forecasting. https://openreview.net/forum?id=uRXxnoqDHH" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.221, + 0.482, + 0.251 + ], + "angle": 0, + "content": "[42] Geon Lee, Wenchao Yu, Kijung Shin, Wei Cheng, and Haifeng Chen. 2025. TimeCAP: Learning to Contextualize, Augment, and Predict Time Series Events with Large Language Model Agents. In AAAI." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.251, + 0.482, + 0.291 + ], + "angle": 0, + "content": "[43] Jinhyuk Lee, Wonjin Yoon, Sungdong Kim, Donghyeon Kim, Sunkyu Kim, Chan So, and Jaewoo Kang. 2019. BioBERT: a pre-trained biomedical language representation model for biomedical text mining. 
Bioinformatics (Oxford, England) 36 (09 2019). doi:10.1093/bioinformatics/btz682" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.291, + 0.482, + 0.322 + ], + "angle": 0, + "content": "[44] Haoran Li, Junqi Liu, Zexian Wang, Shiuyuan Luo, Xiaowei Jia, and Huaxiu Yao. 2024. LITE: Modeling Environmental Ecosystems with Multimodal Large Language Models. arXiv preprint arXiv:2404.01165 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.322, + 0.482, + 0.352 + ], + "angle": 0, + "content": "[45] Jun Li, Che Liu, Sibo Cheng, Rossella Arcucci, and Shenda Hong. 2023. Frozen Language Model Helps ECG Zero-Shot Learning. In Medical Imaging with Deep Learning." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.352, + 0.482, + 0.382 + ], + "angle": 0, + "content": "[46] Zekun Li, Shiyang Li, and Xifeng Yan. 2023. Time series as images: Vision transformer for irregularly sampled time series. Advances in Neural Information Processing Systems 36 (2023), 49187-49204." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.382, + 0.482, + 0.422 + ], + "angle": 0, + "content": "[47] Zihao Li, Xiao Lin, Zhining Liu, Jiaru Zou, Ziwei Wu, Lecheng Zheng, Dongqi Fu, Yada Zhu, Hendrik Hamann, Hanghang Tong, and Jingrui He. 2025. Language in the Flow of Time: Time-Series-Paired Texts Weaved into a Unified Temporal Narrative. arXiv:2502.08942 [cs.LG] https://arxiv.org/abs/2502.08942" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.422, + 0.482, + 0.462 + ], + "angle": 0, + "content": "[48] Zhonghang Li, Lianghao Xia, Jiabin Tang, Yong Xu, Lei Shi, Long Xia, Dawei Yin, and Chao Huang. 2024. UrbanGPT: Spatio-Temporal Large Language Models. ArXiv abs/2403.00813 (2024). https://api(semanticscholar.org/CorpusID:268230972" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.462, + 0.482, + 0.493 + ], + "angle": 0, + "content": "[49] Paul Pu Liang, Amir Zadeh, and Louis-Philippe Morency. 2024. 
Foundations & trends in multimodal machine learning: Principles, challenges, and open questions. Comput. Surveys 56, 10 (2024), 1-42." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.493, + 0.482, + 0.533 + ], + "angle": 0, + "content": "[50] Minhua Lin, Zhengzhang Chen, Yanchi Liu, Xujiang Zhao, Zongyu Wu, Junxiang Wang, Xiang Zhang, Suhang Wang, and Haifeng Chen. 2024. Decoding Time Series with LLMs: A Multi-Agent Framework for Cross-Domain Annotation. arXiv:2410.17462 [cs.AI] https://arxiv.org/abs/2410.17462" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.533, + 0.482, + 0.563 + ], + "angle": 0, + "content": "[51] Chenxi Liu, Qianxiong Xu, Hao Miao, Sun Yang, Lingzheng Zhang, Cheng Long, Ziyue Li, and Rui Zhao. 2025. TimeCMA: Towards LLM-Empowered Multivariate Time Series Forecasting via Cross-Modality Alignment. In AAAI." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.563, + 0.482, + 0.613 + ], + "angle": 0, + "content": "[52] Hanwen Liu, Daniel Hajialigol, Benny Antony, Aiguo Han, and Xuan Wang. 2024. EEG2Text: Open Vocabulary EEG-to-Text Translation with Multi-View Transformer. In 2024 IEEE International Conference on Big Data (BigData). IEEE Computer Society, Los Alamitos, CA, USA, 1824-1833. doi:10.1109/ BigData62323.2024.10825980" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.613, + 0.482, + 0.674 + ], + "angle": 0, + "content": "[53] Haoxin Liu, Shangqing Xu, Zhiyuan Zhao, Lingkai Kong, Harshavardhan Kamarthi, Aditya B. Sasanur, Megha Sharma, Jiaming Cui, Qingsong Wen, Chao Zhang, and B. Aditya Prakash. 2024. Time-MMD: Multi-Domain Multimodal Dataset for Time Series Analysis. In The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track. https://openreview.net/forum?id=fuD0h4R1IL" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.674, + 0.482, + 0.703 + ], + "angle": 0, + "content": "[54] Lei Liu, Shuo Yu, Runze Wang, Zhenxun Ma, and Yanming Shen. 2024. 
How can large language models understand spatial-temporal data? arXiv preprint arXiv:2401.14192 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.703, + 0.482, + 0.755 + ], + "angle": 0, + "content": "[55] Xu Liu, Junfeng Hu, Yuan Li, Shizhe Diao, Yuxuan Liang, Bryan Hooi, and Roger Zimmermann. 2024. UniTime: A Language-Empowered Unified Model for Cross-Domain Time Series Forecasting. In Proceedings of the ACM Web Conference 2024 (Singapore, Singapore) (WWW'24). Association for Computing Machinery, New York, NY, USA, 4095-4106. doi:10.1145/3589334.3645434" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.755, + 0.482, + 0.794 + ], + "angle": 0, + "content": "[56] Xin Liu, Daniel McDuff, Geza Kovacs, Isaac Galatzer-Levy, Jacob Sunshine, Jiening Zhan, Ming-Zher Poh, Shun Liao, Paolo Di Achille, and Shwetak Patel. 2023. Large Language Models are Few-Shot Health Learners. arXiv preprint arXiv:2305.15525 (2023). https://arxiv.org/abs/2305.15525" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.794, + 0.482, + 0.825 + ], + "angle": 0, + "content": "[57] Jingchao Ni, Ziming Zhao, ChengAo Shen, Hanghang Tong, Dongjin Song, Wei Cheng, Dongsheng Luo, and Haifeng Chen. 2025. Harnessing Vision Models for Time Series Analysis: A Survey. arXiv preprint arXiv:2502.08869 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.825, + 0.482, + 0.855 + ], + "angle": 0, + "content": "[58] Yuqi Nie, Nam H Nguyen, Phanwadee Sinthong, and Jayant Kalagnanam. 2023. A Time Series is Worth 64 Words: Long-term Forecasting with Transformers. In The Eleventh International Conference on Learning Representations." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.855, + 0.482, + 0.896 + ], + "angle": 0, + "content": "[59] Kanghui Ning, Zijie Pan, Yu Liu, Yushan Jiang, James Y. Zhang, Kashif Rasul, Anderson Schneider, Lintao Ma, Yuriy Nevmvaka, and Dongjin Song. 2025. 
TS-RAG: Retrieval-Augmented Generation based Time Series Foundation Models are Stronger Zero-Shot Forecaster. arXiv:2503.07649 [cs.LG] https://arxiv.org/" + }, + { + "type": "list", + "bbox": [ + 0.092, + 0.11, + 0.482, + 0.896 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.55, + 0.11, + 0.622, + 0.12 + ], + "angle": 0, + "content": "abs/2503.07649" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.12, + 0.913, + 0.16 + ], + "angle": 0, + "content": "[60] K. Niu, K. Zhang, X. Peng, Y. Pan, and N. Xiao. 2023. Deep Multi-Modal Intermediate Fusion of Clinical Record and Time Series Data in Mortality Prediction. Frontiers in Molecular Biosciences 10 (2023), 1136071. doi:10.3389/fmolb.2023.1136071" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.16, + 0.913, + 0.202 + ], + "angle": 0, + "content": "[61] Zijie Pan, Yushan Jiang, Sahil Garg, Anderson Schneider, Yuriy Nevmyvaka, and Dongjin Song. 2024. \\( S^2 \\)IP-LLM: Semantic Space Informed Prompt Learning with LLM for Time Series Forecasting. In Forty-first International Conference on Machine Learning." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.202, + 0.913, + 0.232 + ], + "angle": 0, + "content": "[62] Vinay Prithyani, Mohsin Mohammed, Richa Gadgil, Ricardo Buitrago, Vinija Jain, and Aman Chadha. 2024. On the Feasibility of Vision-Language Models for Time-Series Classification. arXiv preprint arXiv:2412.17304 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.232, + 0.913, + 0.272 + ], + "angle": 0, + "content": "[63] Yao Qin, Dongjin Song, Haifeng Cheng, Wei Cheng, Guofei Jiang, and Garrison W Cottrell. 2017. A dual-stage attention-based recurrent neural network for time series prediction. In Proceedings of the 26th International Joint Conference on Artificial Intelligence, 2627-2633." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.272, + 0.913, + 0.302 + ], + "angle": 0, + "content": "[64] Hadi Rezaei, Hamidreza Faaljou, and Gholamreza Mansourfar. 2021. Stock price prediction using deep learning and frequency decomposition. Expert Systems with Applications 169 (2021), 114332." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.302, + 0.913, + 0.373 + ], + "angle": 0, + "content": "[65] Bruno M Rocha, Dimitris Filos, Luis Mendes, Gorkem Serbes, Sezer Ulukaya, Yasemin P Kahya, Niksa Jakovljevic, Tatjana L Turukalo, Ioannis M Vogiatzis, Eleni Perantoni, Evangelos Kaimakamis, Pantelis Natsivas, Ana Oliveira, Cristina Jacome, Alda Marques, Nicos Maglaveras, Rui Pedro Paiva, Ioanna Chouvarda, and Paulo de Carvalho. 2019. An open access database for the evaluation of respiratory sound classification algorithms. Physiological Measurement 40, 3 (2019), 035001." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.373, + 0.913, + 0.422 + ], + "angle": 0, + "content": "[66] Ludan Ruan, Yiyang Ma, Huan Yang, Huiguo He, Bei Liu, Jianlong Fu, Nicholas Jing Yuan, Qin Jin, and Baining Guo. 2023. Mm-diffusion: Learning multi-modal diffusion models for joint audio and video generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 10219-10228." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.422, + 0.913, + 0.483 + ], + "angle": 0, + "content": "[67] Ramit Sawhney, Shivam Agarwal, Arnav Wadhwa, and Rajiv Ratn Shah. 2020. Deep Attentive Learning for Stock Movement Prediction From Social Media Text and Company Correlations. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), Bonnie Webber, Trevor Cohn, Yulan He, and Yang Liu (Eds.). Association for Computational Linguistics, Online, 8415-8426. 
doi:10.18653/v1/2020.emnlp-main.676" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.483, + 0.913, + 0.524 + ], + "angle": 0, + "content": "[68] ChengAo Shen, Zhengzhang Chen, Dongsheng Luo, Dongkuan Xu, Haifeng Chen, and Jingchao Ni. 2024. Exploring Multi-Modal Integration with Tool-Augmented LLM Agents for Precise Causal Discovery. arXiv:2412.13667 [cs.LG] https://arxiv.org/abs/2412.13667" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.524, + 0.913, + 0.564 + ], + "angle": 0, + "content": "[69] Bowen Shi, Wei-Ning Hsu, Kushal Lakhotia, and Abdelrahman Mohamed. 2022. Learning Audio-Visual Speech Representation by Masked Multimodal Cluster Prediction. In International Conference on Learning Representations. https://openreview.net/forum?id=Z1Qlm11uOM" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.564, + 0.913, + 0.595 + ], + "angle": 0, + "content": "[70] Geri Skenderi, Christian Joppi, Matteo Denitto, and Marco Cristani. 2024. Well googled is half done: Multimodal forecasting of new fashion product sales with image-based google trends. Journal of Forecasting 43, 6 (2024), 1982-1997." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.595, + 0.913, + 0.624 + ], + "angle": 0, + "content": "[71] Patrick Wagner, Nils Strothhoff, Ralf-Dieter Bousseljot, Dieter Kreiseler, Fatima I Lunze, Wojciech Samek, and Tobias Schaeffer. 2020. PTB-XL, a large publicly available electrocardiography dataset. Scientific Data 7, 1 (2020), 154." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.624, + 0.913, + 0.665 + ], + "angle": 0, + "content": "[72] Jiahao Wang, Mingyue Cheng, Qingyang Mao, Yitong Zhou, Feiyang Xu, and Xin Li. 2025. TableTime: Reformulating Time Series Classification as Training-Free Table Understanding with Large Language Models. 
arXiv:2411.15737 [cs.AI] https://arxiv.org/abs/2411.15737" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.664, + 0.913, + 0.735 + ], + "angle": 0, + "content": "[73] Xinlei Wang, Maike Feng, Jing Qiu, JINJIN GU, and Junhua Zhao. 2024. From News to Forecast: Integrating Event Analysis in LLM-Based Time Series Forecasting with Reflection. In Advances in Neural Information Processing Systems, A. Globerson, L. Mackey, D. Belgrave, A. Fan, U. Paquet, J. Tomczak, and C. Zhang (Eds.), Vol. 37. Curran Associates, Inc., 58118-58153. https://proceedings.neurips.cc/paper_files/paper/2024/file/6aef8bffb372096ee73d98da30119f89-Paper-Conference.pdf" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.735, + 0.913, + 0.795 + ], + "angle": 0, + "content": "[74] Xiaochen Wang, Junyu Luo, Jiaqi Wang, Ziyi Yin, Suhan Cui, Yuan Zhong, Yaqing Wang, and Fenglong Ma. 2023. Hierarchical Pretraining on Multimodal Electronic Health Records. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, Houda Bouamor, Juan Pino, and Kalika Bali (Eds.). Association for Computational Linguistics, Singapore, 2839-2852. doi:10.18653/v1/2023.emnlp-main.171" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.795, + 0.913, + 0.836 + ], + "angle": 0, + "content": "[75] Zhenhailong Wang and Heng Ji. 2021. Open Vocabulary Electroencephalography-To-Text Decoding and Zero-shot Sentiment Classification. In AAAI Conference on Artificial Intelligence. https://apisemantic scholar.org/CorpusID:244909027" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.836, + 0.913, + 0.876 + ], + "angle": 0, + "content": "[76] Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Brian Ichter, Fei Xia, Ed H. Chi, Quoc V. Le, and Denny Zhou. 2022. Chain-of-thought prompting elicits reasoning in large language models (NIPS '22). Curran Associates Inc., Red Hook, NY, USA, Article 1800, 14 pages." 
+ }, + { + "type": "list", + "bbox": [ + 0.523, + 0.11, + 0.913, + 0.876 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.075, + 0.355, + 0.087 + ], + "angle": 0, + "content": "Multi-modal Time Series Analysis: A Tutorial and Survey" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.109, + 0.484, + 0.16 + ], + "angle": 0, + "content": "[77] Andrew Robert Williams, Arjun Ashok, Étienne Marcotte, Valentina Zantedeschi, Jithendarraa Subramanian, Roland Riachi, James Requeima, Alexandre Lacoste, Irina Rish, Nicolas Chapados, et al. 2024. Context is key: A benchmark for forecasting with essential textual information. arXiv preprint arXiv:2410.18959 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.16, + 0.482, + 0.191 + ], + "angle": 0, + "content": "[78] Haixu Wu, Tengge Hu, Yong Liu, Hang Zhou, Jianmin Wang, and Mingsheng Long. 2023. TimesNet: Temporal 2D-Variation Modeling for General Time Series Analysis. In The Eleventh International Conference on Learning Representations." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.191, + 0.482, + 0.24 + ], + "angle": 0, + "content": "[79] Huizhe Wu, Wei Zhang, Weiwei Shen, and Jun Wang. 2018. Hybrid Deep Sequential Modeling for Social Text-Driven Stock Prediction. In Proceedings of the 27th ACM International Conference on Information and Knowledge Management (Torino, Italy) (CIKM '18). Association for Computing Machinery, New York, NY, USA, 1627–1630. doi:10.1145/3269206.3269290" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.241, + 0.482, + 0.281 + ], + "angle": 0, + "content": "[80] Zonghan Wu, Shirui Pan, Guodong Long, Jing Jiang, Xiaojun Chang, and Chengqi Zhang. 2020. Connecting the dots: Multivariate time series forecasting with graph neural networks. In Proceedings of the 26th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining, 753-763." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.281, + 0.482, + 0.312 + ], + "angle": 0, + "content": "[81] Qianqian Xie, Weiguang Han, Yanzhao Lai, Min Peng, and Jimin Huang. 2023. The wall street neophyte: A zero-shot analysis of chatgpt over multimodal stock movement prediction challenges. arXiv preprint arXiv:2304.05351 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.312, + 0.482, + 0.352 + ], + "angle": 0, + "content": "[82] Zhikai Xing and Yigang He. 2023. Multi-modal information analysis for fault diagnosis with time-series data from power transformer. International Journal of Electrical Power & Energy Systems 144 (2023), 108567. doi:10.1016/j.ijepes.2022.108567" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.352, + 0.482, + 0.383 + ], + "angle": 0, + "content": "[83] Haojun Xu, Yan Gao, Zheng Hui, Jie Li, and Xinbo Gao. 2023. Language Knowledge-Assisted Representation Learning for Skeleton-Based Action Recognition. arXiv:2305.12398 [cs.CV] https://arxiv.org/abs/2305.12398" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.383, + 0.482, + 0.432 + ], + "angle": 0, + "content": "[84] Yumo Xu and Shay B. Cohen. 2018. Stock Movement Prediction from Tweets and Historical Prices. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), Iryna Gurevych and Yusuke Miyao (Eds.). Association for Computational Linguistics, Melbourne, Australia, 1970-1979. doi:10.18653/v1/P18-1183" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.433, + 0.482, + 0.472 + ], + "angle": 0, + "content": "[85] Bo Yang and Lijun Wu. 2021. How to Leverage the Multimodal EHR Data for Better Medical Prediction?. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing (EMNLP). Association for Computational Linguistics, 4029-4038. 
doi:10.18653/v1/2021.emnlp-main.329" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.472, + 0.482, + 0.502 + ], + "angle": 0, + "content": "[86] Haiyang Yang, Li Kuang, and FengQiang Xia. 2021. Multimodal Temporal-Clinical Note Network for Mortality Prediction. Journal of Biomedical Semantics 12, 1 (2021), 1-14. doi:10.1186/s13326-021-00235-3" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.503, + 0.482, + 0.543 + ], + "angle": 0, + "content": "[87] Shunyu Yao, Dian Yu, Jeffrey Zhao, Izhak Shafran, Thomas L. Griffiths, Yuan Cao, and Karthik R Narasimhan. 2023. Tree of Thoughts: Deliberate Problem Solving with Large Language Models. In Thirty-seventh Conference on Neural Information Processing Systems. https://openreview.net/forum?id=5Xc1ecxO1h" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.543, + 0.482, + 0.583 + ], + "angle": 0, + "content": "[88] Kun Yi, Qi Zhang, Wei Fan, Shoujin Wang, Pengyang Wang, Hui He, Ning An, Defu Lian, Longbing Cao, and Zhendong Niu. 2024. Frequency-domain MLPs are more effective learners in time series forecasting. Advances in Neural Information Processing Systems 36 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.583, + 0.482, + 0.633 + ], + "angle": 0, + "content": "[89] Xinli Yu, Zheng Chen, and Yanbin Lu. 2023. Harnessing LLMs for Temporal Data - A Study on Explainable Financial Time Series Forecasting. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing: Industry Track, Mingxuan Wang and Imed Zitouni (Eds.). Association for Computational Linguistics, Singapore, 739-753. doi:10.18653/v1/2023.emnlp-industry.69" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.633, + 0.482, + 0.673 + ], + "angle": 0, + "content": "[90] Dong Zhang, Shimin Li, Xin Zhang, Jun Zhan, Pengyu Wang, Yaqian Zhou, and Xipeng Qiu. 2023. SpeechGPT: Empowering Large Language Models with Intrinsic Cross-Modal Conversational Abilities. 
arXiv:2305.11000 [cs.CL] https://arxiv.org/abs/2305.11000" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.673, + 0.482, + 0.703 + ], + "angle": 0, + "content": "[91] Jingyi Zhang, Jiaxing Huang, Sheng Jin, and Shijian Lu. 2024. Vision-language models for vision tasks: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.703, + 0.482, + 0.743 + ], + "angle": 0, + "content": "[92] Liheng Zhang, Charu Aggarwal, and Guo-Jun Qi. 2017. Stock price prediction via discovering multi-frequency trading patterns. In Proceedings of the 23rd ACM SIGKDD international conference on knowledge discovery and data mining. 2141-2149." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.744, + 0.482, + 0.784 + ], + "angle": 0, + "content": "[93] Xiyue Zhang, Chao Huang, Yong Xu, Lianghao Xia, Peng Dai, Liefeng Bo, Junbo Zhang, and Yu Zheng. 2021. Traffic flow forecasting with spatial-temporal graph diffusion network. In Proceedings of the AAAI conference on artificial intelligence, Vol. 35. 15008-15015." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.784, + 0.482, + 0.824 + ], + "angle": 0, + "content": "[94] Xiang Zhang, Lina Yao, Manqing Dong, Zhe Liu, Yu Zhang, and Yong Li. 2020. Adversarial representation learning for robust patient-independent epileptic seizure detection. IEEE journal of biomedical and health informatics 24, 10 (2020), 2852-2859." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.824, + 0.482, + 0.855 + ], + "angle": 0, + "content": "[95] Yuwei Zhang, Tong Xia, Aaqib Saeed, and Cecilia Mascolo. 2024. RespLLM: Unifying Audio and Text with Multimodal LLMs for Generalized Respiratory Health Prediction. arXiv:2410.05361 [cs.LG] https://arxiv.org/abs/2410.05361" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.855, + 0.482, + 0.885 + ], + "angle": 0, + "content": "[96] Xiaohu Zhao, Kebin Jia, Benjamin Letcher, Jennifer Fair, Yiqun Xie, and Xiaowei Jia. 
2022. VIMTS: Variational-based Imputation for Multi-modal Time Series. In 2022 IEEE International Conference on Big Data (Big Data). IEEE, 349-358." + }, + { + "type": "list", + "bbox": [ + 0.091, + 0.109, + 0.484, + 0.885 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.109, + 0.915, + 0.14 + ], + "angle": 0, + "content": "[97] Lecheng Zheng, Zhengzhang Chen, Jingrui He, and Haifeng Chen. 2024. MU-LAN: multi-modal causal structure learning and root cause analysis for microservice systems. In Proceedings of the ACM Web Conference 2024. 4107-4116." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.141, + 0.914, + 0.18 + ], + "angle": 0, + "content": "[98] Siru Zhong, Weilin Ruan, Ming Jin, Huan Li, Qingsong Wen, and Yuxuan Liang. 2025. Time-VLM: Exploring Multimodal Vision-Language Models for Augmented Time Series Forecasting. arXiv:2502.04395 [cs.CV] https://arxiv.org/abs/2502.04395" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.18, + 0.914, + 0.201 + ], + "angle": 0, + "content": "[99] Zihao Zhou and Rose Yu. 2024. Can LLMs Understand Time Series Anomalies? arXiv preprint arXiv:2410.05440 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.52, + 0.201, + 0.914, + 0.232 + ], + "angle": 0, + "content": "[100] Jiaxin Zhuang, Leon Yan, Zhenwei Zhang, Ruiqi Wang, Jiawei Zhang, and Yuantao Gu. 2024. See it, Think it, Sorted: Large Multimodal Models are Few-shot Time Series Anomaly Analyzers. arXiv preprint arXiv:2411.02465 (2024)." 
+ }, + { + "type": "list", + "bbox": [ + 0.52, + 0.109, + 0.915, + 0.232 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/data/2025/2503_13xxx/2503.13709/a5bb8084-4bf7-4b72-9901-fbf46d3fc4b9_origin.pdf b/data/2025/2503_13xxx/2503.13709/a5bb8084-4bf7-4b72-9901-fbf46d3fc4b9_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..d6fe27aa196e1d70427bed19f6c771dc3a863e54 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13709/a5bb8084-4bf7-4b72-9901-fbf46d3fc4b9_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a6900cb576a40e7648986449cbfdf1928060520eeb3993c42a22e72b494e9d04 +size 743913 diff --git a/data/2025/2503_13xxx/2503.13709/full.md b/data/2025/2503_13xxx/2503.13709/full.md new file mode 100644 index 0000000000000000000000000000000000000000..5df27738988ef5b148d8211a1f5deb8543ed7942 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13709/full.md @@ -0,0 +1,352 @@ +# Multi-modal Time Series Analysis: A Tutorial and Survey + +Yushan Jiang $^{1*}$ , Kanghui Ning $^{1*}$ , Zijie Pan $^{1*}$ , Xuyang Shen $^{1}$ , Jingchao Ni $^{2}$ , Wenchao Yu $^{4}$ , Anderson Schneider $^{3}$ , Haifeng Chen $^{4\dagger}$ , Yuriy Nevmyvaka $^{3\dagger}$ , Dongjin Song $^{1\dagger}$ + +1University of Connecticut 2University of Houston + +$^{3}$ Morgan Stanley $^{4}$ NEC Laboratories America + +# Abstract + +Multi-modal time series analysis has recently emerged as a prominent research area in data mining, driven by the increasing availability of diverse data modalities, such as text, images, and structured tabular data from real-world sources. However, effective analysis of multi-modal time series is hindered by data heterogeneity, modality gap, misalignment, and inherent noise. Recent advancements in multi-modal time series methods have exploited the multi-modal context via cross-modal interactions based on deep learning methods, significantly enhancing various downstream tasks. 
In this tutorial and survey, we present a systematic and up-to-date overview of multi-modal time series datasets and methods. We first state the existing challenges of multi-modal time series analysis and our motivations, with a brief introduction of preliminaries. Then, we summarize the general pipeline and categorize existing methods through a unified cross-modal interaction framework encompassing fusion, alignment, and transference at different levels (i.e., input, intermediate, output), where key concepts and ideas are highlighted. We also discuss the real-world applications of multi-modal analysis for both standard and spatial time series, tailored to general and specific domains. Finally, we discuss future research directions to help practitioners explore and exploit multi-modal time series. The up-to-date resources are provided in the GitHub repository1. + +# Keywords + +Multi-modal Time Series Analysis, Foundation Model, Large Language Model, Deep Learning + +# 1 Introduction + +Time series analysis is a fundamental task in data mining, driven by the proliferation of sequential data exhibiting rich temporal dynamics across diverse real-world systems. With the advent of deep learning, various methods have been proposed to effectively model complex temporal relationships within time series [9, 58, 61, 63, 78, 80, 88], facilitating downstream tasks in diverse domains, including healthcare [20, 32, 94], finance [64, 92], transportation [23, 29, 93] and environmental sciences [7, 8]. + +In practice, time series are often associated with external contexts beyond their temporal dynamics [6, 77]. Such contexts are multi-modal, encompassing a variety of representations, such as texts [41, 73], images [18, 70], tables [6], and graphs [67], which carry rich semantic information for time series analysis. 
As such, incorporating the multi-modal contexts allows models to have a comprehensive view of underlying systems, capture subtle dependencies, and explain complex temporal behaviors more accurately. + +# Multi-modal Time Series Analysis + +# Background + +Challenges, Our Motivations, Preliminaries, etc. + +# Data, Methods & Applications + +Multi-modal Time Series Datas + +1. Modalities: Time Series, Text, Image, Tabular, Graph, etc. +2. Scope, Existing Datasets, Characteristics, Domain, etc. + +# Taxonomy of Multi-modal Time Series Methods + +1. Interaction Stage (Input, Intermediate, Output) +2. Interaction Strategy (Fusion, Alignment, Transference) +3. Specific Methods (Concatenate, Attention, Contrastive, Gating, etc.) + +# Domains & Tasks + +1. General, Finance, Healthcare, Traffic, Environment, etc. +2. Forecasting, Classification, Causal Discovery, Retrieval, etc. + +# Future Research Directions + +Reasoning, Decision Making, Generalization, Contextual Noise, + +Bias & Ethics + +Figure 1: The framework of our tutorial and survey. + +Effective analysis of multi-modal time series, however, is hindered by several key challenges in terms of data heterogeneity, modality gap and contextual relevance. First, different modalities exhibit distinct statistical properties, structures, and dimensionalities, leading to discrepancies in feature distributions and semantic meanings. For instance, while time series data is sequentially ordered with temporal dependencies, textual and image data contains rich contextual semantics and correlations. Aligning these heterogeneous data into a unified representation space is non-trivial. Second, the textual, tabular, or visual contexts may appear at different timesteps or granularities. Such temporal misalignment may impede meaningful cross-modal interactions. Third, real-world data is inevitably noisy with irrelevant information that may mislead correlation learning, resulting in suboptimal performance. 
For example, in finance, news articles related to stock market prediction often contain much redundant or speculative narratives that does not reflect actual market conditions. Therefore, the focus of multi-modal time series analysis is to effectively capture complementary and relevant information from multi-modal context and leverage it for predictive or analytical tasks. + +More recently, an increasing number of multi-modal methods have shown promise in exploiting contextual information from diverse data sources, which boosts performance in wide tasks ranging from forecasting [41, 51], classification [42, 45], anomaly detection [82] to retrieval [3] and causal discovery [68, 97]. Despite + +the promising results of multi-modal time series methods, they are tailored for their own tasks with domain-specific applications. The existing literature lacks a comprehensive and systematic review that provides a unified perspective on the underlying principles and pipelines for multi-modal time series learning. In this survey, we provide a systematic and up-to-date overview of existing methods for multi-modal time series analysis. As shown in Figure 1, we discuss the challenges, motivations, and preliminaries of multi-modal time series. Then we introduce the general pipeline for multi-modal time series analysis and propose three types of interactions for cross-modal modeling between time series and other modalities - fusion, alignment, and transference - at the input, intermediate and output level, respectively. We also discuss the applications of multi-modal time series across multiple domains. Furthermore, we provide Table 2 to comprehensively summarize representative methods, encapsulating the modalities, fine-grained cross-modal interactions, real-world domains and tasks. Finally, we highlight potential future research opportunities to further advance time series analysis with multi-modal data. 
In summary, the major contributions of our survey are: + +- We systematically catalog over 40 multi-modal time series methods with the corresponding open-source datasets. +- We uniquely categorize the existing methods into a unified cross-modal interaction framework, highlighting fusion, alignment, and transference at the input/intermediate/output levels. +- We discuss real-world applications of multi-modal time series and identify promising future directions, encouraging researchers and practitioners to explore and exploit multi-modal time series. + +# 2 Background and Our Scope + +# 2.1 Multi-modal Machine Learning + +Recent advancements in multi-modal machine learning have significantly enhanced models' ability to process and integrate data from diverse modalities, such as language, acoustic, vision, and tabular data [25, 66, 91]. With the development of deep learning architectures and sophisticated interaction designs, models are able to learn, infer, and reason by integrating multiple communicative modalities. Current research in multi-modal machine learning spans multiple key areas, including (1) representing multi-modal data to encode joint and individual characteristics, (2) identifying interconnections between modality elements, (3) transferring knowledge across modalities, and (4) theoretically and empirically analyzing the underlying learning process in a quantitative manner. We refer the audiences to the recent surveys [2, 49] for a more detailed overview of general multi-modal machine learning research. Building upon these advancements, we investigate multi-modal time series analysis with a focus on modeling temporal dependencies and leveraging the data interactions across heterogeneous modalities for predictive and analytical tasks. + +# 2.2 Multi-modal Time Series Analysis + +Multi-modal time series analysis aims to model time series data in combination with other complementary modalities. 
By leveraging cross-modal interactions, this approach yields deeper insights and more robust solutions for a wide range of predictive and analytical tasks across diverse real-world contexts. + +This survey aims to provide a unique and systematic perspective on effectively leveraging cross-modal interactions from relevant real-world contexts to advance multi-modal time series analysis, addressing both foundational principles and practical solutions. Our assessment is threefold: (1) reviewing multi-modal time series data (Section 3), (2) analyzing cross-modal interactions between time series and other modalities (Section 4), and (3) revealing the impact of multi-modal time series analysis in applications across diverse domains (Section 5). + +To resolve ambiguities, we define the scope of our survey by clarifying the types of time series considered and the criteria for multi-modal time series methods. First, we mainly consider standard time series and spatial time series. For the latter, spatial structures (often represented as graphs) are inherently paired with temporal data rather than treated as a separate modality. Second, we focus on methods that leverage multi-modal inputs from real-world contexts to provide complementary information, but for generation and retrieval tasks, the focus is more on transforming the input modality to another output modality. We acknowledge recent research on representing time series as a single modality (e.g., time series as images [15, 46, 62, 99, 100], time series as tabular data [27]) for downstream tasks. However, as these approaches are less relevant to our scope, we refer readers to their respective works. + +Besides, we would like to highlight the difference between our survey and recent related survey and position papers. Ni et al. [57] focuses on imaging-based transformations of time series and subsequent visual modeling techniques, where the discussion on multi-modal models is limited to those involving vision modalities. 
Kong et al. [39] concentrates on the use of multi-modal large language models (LLMs) for enhancing reasoning capabilities (e.g., causal reasoning, QA, planning, etc.) with multi-modal context. In contrast, our survey provides a broader and structured framework by delivering a systematic and unified perspective of multi-modal time series analysis, not limited to a specific modality or task type. + +# 3 Multi-modal Time Series Data + +# 3.1 Modalities in Multi-modal Time Series Data + +Multi-modal time series data often originate from diverse sources, each exhibiting unique characteristics that influence how they are processed and analyzed. Besides Time Series, i.e., continuous or discrete measurements recorded over time, such as sensor readings, financial metrics, or physiological signals, their modalities often include: 1) Tabular: Time-indexed records that are inherently organized in a tabular format, such as event logs, transaction records, or demographic information. 2) Text: Time-stamped or domain-specific textual information – like clinical notes, financial reports, news articles, or social media posts – that provides contextual or interpretative insights. 3) Image: Visual data acquired as images over time, such as photographs, medical images (e.g., X-rays, MRI), satellite imagery, or visual representations generated from time series data. 4) Graph: Relational data representing interactions or structural dependencies among entities that evolve. They are typically modeled as networks or graphs, where the connections may change dynamically. Although audio is widely studied as an independent modality in multi-modal research, we consider it a special + +Table 1: Representative open-source multi-modal time series datasets and across domains. + +
DomainDataset (Superscripts include the URLs to the datasets)Modalities
HealthcareMIMIC-III [35][1], MIMIC-IV [34][2]TS, Text, Tabular
ICBHI [65][3], Coswara [4][4], KAUH [21][5], PTB-XL [71][6], ZuCo [14, 26][7]TS, Text
Image-EEG [22][8]TS, Image
FinanceFNSPID [17][9], ACL18 [84][10], CIKM18 [79][11], DOW30 [11][12]TS, Text
Multi-domainTime-MMD [53][13], TimeCAP [42][14], NewsForecast [73][15], TTC [37][16], CiK [77][17], TSQA [38][18]TS, Text
RetailVISUELLE [70][19]TS, Image, Text
IoTLEMMA-RCA [40][20]TS, Text
SpeechLRS3 [1][21], VoxCeleb2 [13][22]TS (Audio), Image
TrafficNYC-taxi, NYC-bike [48][23]ST, Text
EnvironmentTerra [10][24]ST, Text
+ +form of time series in this survey and briefly discuss representative works within this scope. + +# 3.2 Common Datasets and Benchmarks + +Multi-modal time series datasets vary a lot and are domain-dependent, each with unique data characteristics and modalities. In Table 1 we provide representative datasets categorized by domain, along with their respective modalities: + +Healthcare: In this domain, physiological signals (e.g., ECG, EEG) are extensively analyzed alongside textual data such as clinical notes, patient demographics, and tabular data including vital signs and laboratory results. Common datasets include MIMIC-III [35], a comprehensive dataset containing electronic health records (EHRs) of ICU patients with physiological measurements, clinical notes, and diagnostic information, widely used for tasks like patient monitoring, mortality prediction, and clinical decision support. MIMIC-IV [34] is an extension of MIMIC-III, which provide detailed physiological signals, clinical narratives, medication records, and demographic data from a large population of critically ill patients, frequently utilized for predictive modeling, clinical outcome analysis, and health informatics research. Other notable healthcare datasets include ICBHI [65], which contains respiratory sound recordings paired with clinical annotations for respiratory disease classification; Coswara [4], which provides respiratory audio samples and rich metadata for COVID-19 detection tasks; KAUH [21], which comprises audio records and corresponding annotations for healthcare analytics; PTB-XL [71], a large-scale ECG dataset annotated with diagnostic labels for cardiac monitoring and diagnosis; ZuCo [14, 26], which consists of simultaneous EEG and textual data from reading comprehension tasks, being useful for cognitive neuroscience studies; and Image-EEG [22], which pairs EEG signals with images of objects on a natural background, aiding studies in visual neuroscience and computer vision. 
+ +Finance: Datasets that combine time series data with financial news and reports are instrumental in financial analysis and modeling. Notable examples include ACL18 [84], CIKM18 [79], and DOW30 [11]. These datasets focus on high-trade-volume stocks from the U.S. stock markets, providing historical stock price data, such as opening, high, low, and closing prices; alongside related textual information, including tweets or financial news. Another + +large-scale dataset, FNSPID [17], consists of stock prices and time-aligned financial news records, covering over 4,000 companies from 1999 to 2023. + +Multi-domain: Datasets featuring general-purpose numerical time series combined with textual data are suitable for broad analytical applications. Examples include Time-MMD [53], which encompasses nine primary data domains: Agriculture, Climate, Economy, Energy, Environment, Health, Security, Social Good, and Traffic, while ensuring fine-grained alignment between time series and textual data; TimeCAP [42] compiles seven real-world time series datasets across three domains: weather, finance, and healthcare. To generate textual descriptions for each time series, a large language model (LLM) agent is employed, leveraging contextual information and domain-specific knowledge. NewsForecast [73] integrates task-specific time series data with verified public news reports across various domains, including finance, energy, traffic, and cryptocurrency; TTC [37] is a meticulously curated, time-aligned dataset designed for multimodal forecasting. It consists of paired time series and text data synchronized to timestamps, spanning two distinct domains: climate science and healthcare; CiK [77] is a dataset comprising 71 forecasting tasks across seven real-world domains. Each task necessitates the integration of both numerical data and textual information. The covered domains include Climatology, Economics, Energy, Mechanics, Public Safety, Transportation, and Retail. 
The TSQA [38] dataset consists of 200k question-answer pairs derived from time series data across 12 domains: healthcare, finance, energy, traffic, environment, IoT, nature, transport, human activities, machine sensors, AIOps, and the web. These QA pairs are designed to support five key tasks: forecasting, imputation, anomaly detection, classification, and open-ended reasoning. + +Other domains: Beyond the previously discussed major sectors, multi-modal time series analysis extends to various other domains. In Retail, datasets such as VISUELLE [70] integrate numerical sales data with product images and textual descriptions, facilitating thorough analyses of consumer behavior and inventory management. The Internet of Things (IoT) domain benefits from datasets such as LEMMA-RCA [40], which combine time series sensor data with textual metadata, enabling enhanced monitoring and more robust and secure methodologies that ensure the high performance of modern + +systems. In the Speech domain, datasets like LRS3 [1] and VoxCeleb2 [13] integrate audio recordings with corresponding visual data, supporting advancements in speech recognition and speaker identification technologies. In the Traffic domain, datasets like NYC-Taxi, NYC-Bike [48] contain spatial-temporal (ST) data alongside associated textual metadata. These integrations allow LLMs to effectively capture and utilize spatial-temporal contextual signals. In the Environment domain, Terra [10] collect 45 years of global geographic spatial-temporal data, supplemented with textual descriptions. + +# 4 Cross-modal Interactions with Time Series + +In this section, we conduct a detailed review of existing research on multi-modal time series analysis by thoroughly analyzing cross-modal interactions. We also elaborate how existing multi-modal methods are tailored for domain-specific applications in Section 5. The detailed taxonomy is provided in Table 2. 
+ +We define three fundamental types of interactions between time series and other modalities, including fusion, alignment, and transference, which occur at different stages within a framework - input, intermediate (i.e., representations or intermediate outputs), and output. The representative examples are provided in Figure 2. + +# 4.1 Fusion + +Fusion refers to the process of integrating heterogeneous modalities in a way that captures complementary information across diverse sources to improve time series modeling. To fuse multi-modal inputs, a common practice is to directly integrate time series, tabular data and texts into a unified textual prompt, then use it to query LLMs for downstream tasks. This is typically facilitated by instruction fine-tuning for task-oriented analysis [19, 24, 38, 48, 56, 73, 90]. Some works also leverage the zero-shot reasoning and inference capability of pretrained LLMs (e.g., GPT-4 and its variants) [72, 81, 89]. Recent research efforts like TaTS [47] attempt to integrate paired text embedding as an additional variable of time series for temporal modeling, yielding competitive task performance. + +Most existing methods perform cross-modal fusion at the intermediate stage, such as adding and concatenating multi-modal representations, where each individual modal encoder first maps the raw data into a shared latent space. Addition combines time series and other modalities by summing up encoded representations, effectively blending shared information while preserving their interconnections in the latent space [6, 30, 51, 85, 95, 97]. On the other hand, concatenation stacks multi-modal representations along the same dimension, retaining modality-specific characteristics and allowing models to capture joint relationships between the modalities [16, 36, 36, 37]. 
To effectively leverage cross-modal information, existing methods often incorporate alignment designs after concatenating representations [5, 11, 12, 18, 31, 33, 41, 42, 44, 54, 55, 60, 69, 70, 74, 85, 96]. Alignment is also used in the aforementioned additions, which will be detailed in Section 4.2. + +When fusion is performed at the output level, different modalities contribute separately to the final output, allowing each modality to retain its unique predictive signal [31, 41, 42, 53]. Time-MMD [53] provides a paradigm that fuses predictions from both state-of-the-art forecasters and a pretrained language model with a projection + +![](images/dfc75126255eb54797d7990d9a20668b0a1b9ee7743a4acf0e3af423c98e1049.jpg) +Figure 2: Categorization of cross-modal interaction methods and representative examples. + +layer, in an end-to-end manner. MOAT [41] introduces a two-stage framework for multi-modal time series forecasting. In the first stage, the model is optimized to generate forecasts from decomposed time series and text embeddings. In the second stage, an offline synthesis via MLP is applied to dynamically fuse different components, yielding the final forecast based on their relative contributions. Beyond fusing outputs from a single model, TimeCAP [42] enhances performance by combining predictions from both a multi-modal predictor and a pretrained LLM, which synergizes the gradient-based method and LLM agents reasoning on real-world contexts. Output fusion gains advantage of design flexibility and robustness, but it may not fully utilize the complementary relationship between modalities without additional countermeasures. + +Cross-modal fusion relies on well-aligned multi-modal data for effective exploitation of the contextual information. However, ideally-aligned data may not be given in real-world scenarios. As such, existing methods also leverage alignment mechanisms to mitigate the challenge. 
+ +# 4.2 Alignment + +Alignment ensures that the relationships between different modalities are preserved and semantically coherent when integrated into a unified learning framework. At the input level, we primarily refer alignment to data preprocessing techniques that aim at mitigating temporal misalignment caused by missing values, irregular sampling intervals, and differing granularities across modalities. This process is crucial for ensuring that data from multiple sources are properly synchronized before fusion, where domain knowledge is usually needed to handle such inconsistencies [10, 53, 73]. In addition, none of the existing methods we reviewed explicitly perform output alignment. However, the aforementioned output fusion can be easily adapted to alignment through the incorporation of a gating or attention mechanism that we will introduce shortly. + +Alignment at the intermediate stage plays a crucial role in multimodal interactions. We first introduce the alignment of multi-modal representations, spanning a range of techniques from model component design to learning objectives. The common component designs include self-attention [5, 12, 30, 33, 41, 42, 44, 54, 55, 69], cross-attention [6, 18, 51, 60, 70, 82, 85, 98] and gating mechanisms [82, 98]. Self-attention is often used to fuse multi-modal representations. It + +Table 2: Taxonomy of representative multi-modal time series methods. Modality refers to the different data modalities involved in each method. TS represents standard time series, $ST$ denotes spatial time series. The Method column lists the techniques used for each interaction, separated by semicolons, where each interaction may include one or more techniques, separated by commas. Superscripts in the Code column include the URLs to Github repositories. + +
MethodModalityDomainTaskCross-Modal InteractionLarge ModelYearCode
StageFusionAlign.Trans.Method
Time-MMD [53]TS, TextGeneralForecastingOutputXXAdditionMultiple2024Yes[1]
Wang et al. [73]TS, TextGeneralForecastingInput IntermediateXXPrompt Prompt; LLM ReasoningLLaMa2 GPT-4 Turbo2024Yes[2]
GPT4MTS [30]TS, TextGeneralForecastingIntermediateXAddition; Self-attentionGPT-22024No
TimeCMA [51]TS, TextGeneralForecastingInput IntermediateXXMeta-description Addition; Cross-attentionGPT-22025Yes[3]
MOAT [41]TS, TextGeneralForecastingIntermediate OutputXConcat.; Self-attention Offline Synthesis (MLP)S-Bert2024No
TimeCAP [42]TS, TextGeneralClassificationInput Intermediate OutputXXLLM Generation Concat.; Self-attention, Retrieval AdditionBert, GPT-42024No
TimeXL [31]TS, TextGeneralClassification ForecastingIntermediate OutputXConcat., Prompt; LLM Reasoning AdditionBert, S-Bert GPT-4o2025No
Hybrid-MMF [37]TS, TextGeneralForecastingIntermediateXXConcat.GPT-4o2024Yes[4]
Time-LLM [33]TS, TextGeneralForecastingInput IntermediateXXMeta-description Concat.; Self-attentionLLaMA, GPT-22024Yes[5]
Time-VLM [98]TS, Text, ImageGeneralForecastingInput IntermediateXXFeat. Imaging, Meta-description Addition; Gating, Cross-attentionViLT, CLIP BLIP-22025No
Unitime [55]TS, TextGeneralForecastingInput IntermediateXXMeta-description Concat.; Self-attentionGPT-22024Yes[6]
TESSA [50]TS, TextGeneralAnnotationIntermediatePrompt; RL; LLM GenerationGPT-4o2024No
InstruTime [12]TS, TextGeneralClassificationIntermediateXConcat.; Self-attentionGPT-22025Yes[7]
MATMCD [68]TS, Text, GraphGeneralCausal DiscoveryIntermediatePrompt; LLM Reasoning; SupervisionMultiple2025No
STG-LLM [54]ST, TextGeneralForecastingIntermediateXConcat.; Self-attentionGPT-22024No
TableTime [72]TS, TextGeneralClassificationInputXPrompt; ReformulateMultiple2024Yes[8]
ContextFormer [6]TS, TabularGeneralForecastingIntermediateXAddition; Cross-attentionNo2025No
Time-MQA [38]TS, TextGeneralMultipleInputXXPromptMultiple2025Yes[9]
MAN-SF [67]TS, Text, GraphFinanceClassificationIntermediateXBilinear; Graph ConvolutionUSE2020No
Bamford et al. [3]TS, Text, TS, ImageFinanceRetrievalIntermediate OutputXXSupervisionS-Bert2024No
Chen et al. [11]TS, Text, GraphFinanceClassificationIntermediateXXLLM Generation Concat.; Graph ConvolutionChatGPT2023No
Xie et al. [81]TS, TextFinanceClassificationInputXXPromptChatGPT2023No
Yu et al. [89]TS, TextFinanceForecastingInputXXPromptGPT-4, Open LLaMA2023No
MedTsLLM [5]TS, Text, TabularHealthcareMultipleIntermediateXConcat.; Self-attentionLlama22024Yes[10]
RespLLM [95]TS (Audio), TextHealthcareClassificationIntermediateXAddition, Self-attentionOpenBioLLM-8B2024No
METS [45]TS, TextHealthcareClassificationOutputXXContrastiveClinicalBert2023No
Wang et al. [75]TS, TextHealthcareClassificationIntermediateXXSupervisionBart, Bert, Roberta2021No
EEG2TEXT [52]TS, TextHealthcareGenerationOutputXXSelf-supervision, SupervisionBart2024No
MEDHMP [74]TS, TextHealthcareClassificationIntermediateXConcat.; Self-attention, ContrastiveClinicalT52023Yes[11]
Deznabi et al. [16]TS, TextHealthcareClassificationIntermediateXXConcat.Bio+Clinical Bert2021Yes[12]
Niu et al. [60]TS, TextHealthcareClassificationIntermediateXConcat.; Cross-attentionBioBERT2023No
Yang et al. [85]TS, TextHealthcareClassificationIntermediateXConcat.; Addition; GatingClinicalBERT2021Yes[13]
Liu et al. [56]TS, TextHealthcareClassification RegressionInputXXPromptPaLM2023Yes[14]
xTP-LLM [24]ST, TextTraffic ForecastingInputXPrompt; Meta-descriptionLlama2-7B-chat2024Yes[15]
UrbanGPT [48]ST, TextTraffic ForecastingInputXPrompt; Meta-descriptionVicuna-7B2024Yes[16]
CityGPT [19]ST, TextMobilityInputXXPromptMultiple2025Yes[17]
MULAN [97]TS, Text, GraphIoT Causal DiscoveryIntermediateAddition; Contrastive; SupervisionNo2024No
MIA [82]TS, ImageIoT Anomaly DetectionIntermediateXAddition; Cross-attention, GatingNo2023No
Ekambaram et al. [18]TS, Image, TextRetail ForecastingIntermediateXConcat.; Self & Cross-attentionNo2020Yes[18]
Skenderi et al. [70]TS, Image, TextRetail ForecastingIntermediateXConcat.; Cross-attentionNo2024Yes[19]
VIMTS [96]ST, Image EnvironmentImputationIntermediateXConcat.; SupervisionNo2022No
LITE [44]ST, Text, Image EnvironmentForecastingIntermediateXConcat.; Self-attentionLLaMA-2-7B2024Yes[20]
AV-HubERT [69]TS (Audio), Image SpeechClassificationIntermediateXConcat.; Self-attentionHuBert2022Yes[21]
SpeechGPT [90]TS (Audio), Text SpeechGenerationIntermediateXConcat.; Self-attentionLLaMA-13B2023Yes[22]
LA-GCN [83]ST, TextClassificationIntermediateXSupervisionBert2023Yes[23]
+ +enables a joint and undirected alignment across all modalities by dynamically attending to important features. Given multi-modal embeddings $E_{\mathrm{mm}} \in \mathbb{R}^{n \times d}$ , where $n$ is the total number of modality tokens and $d$ is the embedding dimension, self-attention is computed as follows: + +$$ +\operatorname {A t t e n t i o n} \left(E _ {\mathrm {m m}}\right) = \operatorname {s o f t m a x} \left(\frac {Q K ^ {\top}}{\sqrt {d _ {k}}}\right) V +$$ + +where the queries $Q$ , keys $K$ , and values $V$ are linear projections of $E_{\mathrm{mm}}$ : $Q = E_{\mathrm{mm}}W_{Q}$ , $K = E_{\mathrm{mm}}W_{K}$ , $V = E_{\mathrm{mm}}W_{V}$ with learnable weights of dimensionality $d_{k}$ : $W_{Q}, W_{K}, W_{V} \in \mathbb{R}^{d \times d_{k}}$ . + +In cross-attention, time series serves as the query modality to get contextualized by other modalities, providing a directed alignment that ensures auxiliary modalities contribute relevant contextual information while preserving the temporal structure of time series. Given a query embedding $E_{\mathrm{ts}} \in \mathbb{R}^{n \times d}$ and that of auxiliary modalities $E_{c} \in \mathbb{R}^{n \times d}$ as keys and values: + +$$ +\text {C r o s s A t t e n t i o n} \left(E _ {\mathrm {t s}}, E _ {\mathrm {c}}\right) = \operatorname {s o f t m a x} \left(\frac {Q _ {\mathrm {t s}} K _ {\mathrm {c}} ^ {\top}}{\sqrt {d _ {k}}}\right) V _ {\mathrm {c}} +$$ + +where the query, key and value are denoted as $Q_{\mathrm{ts}} = E_{\mathrm{ts}}W_{Q}$ , $K_{\mathrm{c}} = E_{\mathrm{c}}W_{K}$ , $V_{\mathrm{c}} = E_{\mathrm{c}}W_{V}$ . Note that existing methods adopt multi-head attentions, which is omitted here for simplicity. 
+ +Similarly, the gating mechanism is a parametric filtering operation that explicitly regulates the influence of time series and other modalities on the fused embeddings in $E$ : + +$$ +G = \sigma \left(W _ {g} \left[ E _ {\mathrm {t s}}; E _ {c} \right] + b _ {g}\right), \quad E = G \odot E _ {\mathrm {t s}} + (1 - G) \odot E _ {c} +$$ + +where $\sigma (\cdot)$ denotes the sigmoid function, the learnable weight and bias are denoted as $W_{g}\in \mathbb{R}^{2d\times d}$ and $b_{g}\in \mathbb{R}^{d}$ , respectively. + +When a graph modality is available from external contexts, the underlying topological insights can be leveraged for graph-based alignment [11, 67]. Unlike the above methods that rely solely on feature interactions, it explicitly aligns multi-modal representations with relational structures through graph convolution, enabling context-aware feature propagation across modalities. + +Representation alignments can also be achieved by learning objectives [3, 45, 83, 96, 97]. For example, MULAN [97] extracts modality-invariant and modality-specific representations from multi-modal time series. It employs contrastive learning to enhance cross-modal alignment by maximizing the similarity between invariant representations across modalities while minimizing the similarity between invariant and specific representations of the same modality. Moreover, Bamford et al. [3] align cross-modal representations by using the mean of uni-modal cosine similarities as the target similarity and optimizing cross-modal similarity via cross-entropy loss, which effectively connects both modalities in a shared latent space for time series retrieval tasks. In general, this branch of methods is effective as it directly integrates the alignment objective into the optimization process, ensuring that meaningful representations are explicitly learned. 
+ +Lastly, we introduce the intermediate alignment of component outputs within a framework, extending beyond representation alignment within a model. The most recent studies explore the synergy between time series models and LLM agents, leveraging the strong reasoning capabilities of pretrained LLMs to provide + +contextual understanding and calibration in real-world scenarios [31, 42, 50, 68, 73]. We briefly discuss a few representative examples for demonstration. TimeCAP [42] utilizes the embedding space of a trained multi-modal encoder to retrieve in-context examples with the highest cosine similarity. These retrieved examples with ground truth labels are then fed, along with the query text, into an LLM to provide contextual guidance and improve outcome prediction. TimeXL [31] incorporates a multi-modal prototype-based encoder to generate explainable case-based rationales for both time series and texts, integrating three LLM agents, where prediction, reflection, and refinement LLMs collaborate to iteratively enhance prediction accuracy, identify textual inconsistencies or noise, and calibrate textual contexts, yielding more accurate predictions and explanations. NewsForecast [73] also employs reflection in language agents to iteratively select relevant news from a large database, enhancing alignment of textual information for text-based forecasting. Similarly, MATMCD [68] ensures alignment between statistical causal discovery on time series and LLM reasoning on textual context by leveraging iterative self-reflective tool-calling to structure textual context, which is then used to explain and refine causal constraints. + +In a nutshell, alignment aims to calibrate real-world contexts and effectively capture relevant multi-modal elements for a semantically coherent time series modeling. It enhances task performance, robustness and explanation, ensuring that models leverage meaningful contextual information for improved decision-making. 
+ +# 4.3 Transference + +Transference refers to the process of mapping between different modalities. It allows one modality to be inferred, translated, or synthesized from another. This concept plays a crucial role across different stages of multi-modal time series analysis. The input-level transference typically serves for modality augmentation. It helps introduce contextual priors, enrich training samples, and provide alternative representations. This is particularly useful in scenarios of data scarcity and imbalance. In existing literature, a common practice is to use meta information to describe the narrative of real-world contexts (e.g., domain, data statistics and granularity, variable descriptions, other co-variates, etc.) [19, 24, 33, 48, 51, 55, 72, 98] or leverage pretrained LLMs to generate fine-grained textual contexts [42] or graphs [11] for real-world time series, serving as an augmented modality. In addition to texts, time series can also be transformed into high-dimensional images via feature imaging, such as stacking the original data with frequency and periodicity features [98]. Alternatively, time series can be represented in tabular form, transforming time series analysis into a table understanding task [72]. Note that the aforementioned uni-modal methods for transforming time series into other single modalities can also be integrated into multi-modal time series frameworks [15, 27, 46, 62, 99, 100]. The exploitation of input-level transference is two-fold. First, the embeddings of the generated modality can serve as semantic anchors that guide time series modeling via representation alignment, improving downstream supervised tasks [33, 51, 55, 98]. Second, it provides additional contextual guidance for pretrained LLMs through input fusion and prompting [19, 24, 48, 72]. + +At the intermediate [11, 50, 68, 75, 97] and output [3, 52] levels, transference is more task-oriented. 
The output-level transference typically refers to the end-to-end generation of new modalities, such as text-based and image-based time series retrieval, where users provide textual descriptions or sketched trends to query relevant time series data [3]. This also includes EEG-to-text conversion, enabling direct transformation from physiological signals to human-readable narratives [52]. + +The output of intermediate transference typically serves as an initial solution to be refined for modality generation tasks [50, 68, 97] or a medium to be inferred for predictive tasks [75], facilitating downstream reasoning and further alignment within the multimodal framework. MATMCD [68] generates an initial causal graph from time series, achieving modality transference in the intermediate level. Subsequently, it incorporates textual modality to refine the causal graph, ensuring improved alignment and interpretability. Moreover, Wang et al. [75] adopt a two-stage mechanism for sentiment classification based on EEG data, where the model first converts EEG signals into reading texts and then employs a pretrained LLM based on texts for classification, achieving impressive zero-shot results. + +# 5 Applications of Multi-modal Time Series Analysis + +In this section, we review the existing applications of multi-modal time series analysis for both standard and spatial time series, covering diverse domains such as healthcare, finance, transportation, environment, retail, and the Internet of Things (IoT). + +# Standard Time Series + +# 5.1 Healthcare + +Recent studies in healthcare highlight the multi-modal analysis of diverse medical data sources, such as EHRs (Electronic Health Records, containing lab values and clinical reports, etc.), audio, EEG (Electroencephalogram), ECG (Electrocardiogram), and other wearable and medical sensor recordings, for better disease diagnosis and patient monitoring. 
For multi-modal analysis on EHR data, a common modeling strategy involves the interaction between lab values and clinical reports, including the concatenation [16] and attention mechanisms [60, 85] on modality embeddings. Moreover, existing methods explore different modeling techniques to better exploit the clinical notes, via domain-specific text encoders (e.g., ClinicalBERT [28, 85] and BioBERT [43, 60]) and different processing strategies. For example, text embeddings can be modeled separately based on patient groups [86] or through a decaying mechanism based on time intervals [36] before interacting with time series embeddings, which leads to improved mortality prediction. + +In addition to EHRs, multi-modal modeling methods have been tailored for audio [95], ECG [45], and EEG [75]. Zhang et al. [95] focus on a respiratory health classification task by integrating both audio and textual descriptions. Li et al. [45] propose a multi-modal contrastive learning framework, constructing positive and negative samples by pairing patients' report texts with corresponding ECG signals for self-supervised pretraining. The classification task is then performed by computing the cosine similarity between different text representations and the target ECG representation. Wang + +et al. [75] propose a two-stage method for zero-shot EEG-based sentiment classification. First, a pretrained BART model is used for EEG-to-text decoding, followed by a trained text sentiment classifier that converts the generated text into sentiment categories. + +Similarly, Liu et al. [56] fuses physiological and behavioral time-series sensor data with real-world contextual information to effectively harness LLMs for wellness assessment. 
By fine-tuning the models with few-shot question-answer pairs that include contextual details, they improve performance on various healthcare tasks—such as cardiac signal analysis, physical activity recognition, and calorie-burn estimation, which outperform both supervised feedforward neural networks and zero-shot LLM baselines. + +# 5.2 Finance + +Recently, multi-modal time series analysis has received increasing attention in financial applications. Yu et al. [89] and Xie et al. [81] focus on stock prediction by integrating stock price movements, company profiles, and news directly into structured LLM prompts, enabling models to perform reasoning over multiple modalities. Yu et al. [89] applies GPT-4 and Open LLaMA to forecast NASDAQ-100 stock returns through instruction-based prompting and fine-tuning, demonstrating that structured LLM-driven inference can outperform traditional econometric models. Meanwhile, Xie et al. [81] conducts a zero-shot analysis of ChatGPT's capabilities for multimodal stock movement prediction, incorporating CoT prompting to assess the impact of social media sentiment on stock trends. Chen et al. [11] and Sawhney et al. [67] also incorporate graph structures for stock movement prediction. For instance, Chen et al. [11] uses ChatGPT to infer dynamic stock relationship graphs from news, which reflects market conditions and enhances prediction accuracy. + +Beyond the predictive tasks, Bamford et al. [3] proposes a multimodal retrieval framework, where the model aligns both modalities in a shared latent space through contrastive learning. This framework allows users to search for financial time series through textual descriptions or sketched trends, offering greater flexibility. It also significantly improves retrieval speed and accuracy compared to traditional SQL-based search methods. + +# 5.3 Others + +Multi-modal time series analysis also exists in other domains, such as retail, IoT, computer vision and audio. 
In the retail sector, Ekambaram et al. [18] utilizes product images and textual descriptions, including attributes like color, pattern, and sleeve style, while incorporating temporal and exogenous features for new product sales forecasting. More recently, Skenderi et al. [70] integrates additional modality data, including product images and text descriptions, along with Google Trends data for sales forecasting. In IoT applications, MIA [82] enhances power transformer fault diagnosis by integrating multi-modal data, including dissolved gas analysis (DGA) and infrared images, to improve accuracy and efficiency. MULAN [97] converts log sequences into time-series data using a log-tailored language model and employs contrastive learning to leverage multi-modal data, facilitating root-cause discovery for system failures. In computer vision, LA-GCN [83] utilizes textual embeddings of joint names and action labels to generate faithful structural priors, enhancing skeleton-based action modeling and + +improving recognition tasks. In speech applications, AV-HuBERT [69] employs a self-supervised representation learning framework to leverage correlated audio and visual information [90], while SpeechGPT [69, 90] integrates audio and text to enhance generation performance. + +# Spatial Time Series + +# 5.4 Transportation and Mobility + +Several recent studies on traffic prediction highlight the importance of multi-modal contexts to enhance forecasting accuracy. Guo et al. [24] transforms California traffic data into structured LLM prompts. The method uses LLaMA models and instruction fine-tuning to improve spatial-temporal learning. Meanwhile, Li et al. [48] employs a spatial-temporal dependency encoder to align numerical New York City traffic data with LLMs, incorporating weather, geographic context, and historical flow patterns to refine predictions. Similarly, Feng et al. 
[19] proposes CityGPT, enhancing LLMs' spatial cognition for urban reasoning, mobility prediction, and navigation by integrating urban mobility data, road networks, and human behavior through instruction tuning. These studies demonstrate that LLM-based multi-modal fusion not only enhances traffic forecasting but also improves model interpretability and adaptability across diverse urban scenarios. + +# 5.5 Environment + +Integrating multi-modal information benefits environmental studies, particularly by addressing the prevalent challenge of missing values. VIMTS [96] utilizes a structured variational approximation technique to impute missing high-dimensional modalities (stream image data) by transforming them into low-dimensional features derived from simpler, related modalities (meteorological time series records), ensuring cross-modal correlations and interpretability of the imputation process. Additionally, LITE [44] addresses the imputation of missing features through a sparse Mixture of Experts framework. It integrates and encodes various environmental variables through a unified encoder. Directed by domain-specific instructions, a language model is utilized to merge these multi-modal representations, thereby improving the accuracy of environmental spatial-temporal predictions. + +# 6 Future Research Directions + +In this section, we outline several underexplored research directions that open up opportunities for future advancements. + +Reasoning with Multi-modal Time Series. Enhancing reasoning with multi-modal time series is pivotal for the development of intelligent systems. Future research should focus on creating a unified framework that can seamlessly integrate temporal reasoning with contextual understanding, enabling models to handle multiple time series tasks with interpretability. 
One potential path is to incorporate external knowledge bases and real-world context, such as developing a retrieval-augmented generation (RAG) [59] system, to enhance the reasoning process and allow models to make informed inferences beyond the immediate data. It is also promising to synergize time series model and language agents to provide more faithful and reliable reasoning on real-world contexts [31, 68]. + +The recent development of LLM reasoning models, such as chain of thoughts [76] and tree of thoughts [87], also offers potential solutions to improve reasoning quality. + +Decision Making. Multi-modal time series analysis presents a promising future direction to enhance decision-making processes, which is crucial in high-stakes applications. By leveraging predictive signals and explanations from multi-modal contexts, future research can develop more adaptive, interpretable, and reliable decision-support systems, to facilitate the downstream optimization tasks such as resource allocation and risk management. + +Domain Generalization. One key challenge in multi-modal time series analysis is domain generalization, which enables a model trained on one or more source domains to effectively generalize to unseen target domains, ensuring robustness against distribution shifts. In multi-modal time series, distribution shifts can be multifaceted, stemming not only from time series, but also from other modalities. Therefore, it is crucial to develop specialized domain generalization methods for effective multi-modal time series analysis, including strategies to identify and preserve domain-invariant components across modalities while capturing modality-specific variations for rapid adaptation. Additionally, disentangling the effects of each modality is essential to better understand their individual contributions and mitigate cross-modal interference. + +Robustness to Missing and Noisy Modalities. 
Multi-modal time series analysis frequently encounters messy real-world contexts with incomplete or noisy data. Existing methods employ an iterative context-refinement algorithm [31] that filters out less relevant information, thereby enhancing the predictive insights derived from multi-modal time series. Nonetheless, effectively dealing with missing and noisy modalities still demands further exploration. In particular, developing strategies for modality-specific imputation, noise reduction, and relevance quantification will be crucial to improving the real-world applicability of existing multi-modal time series methods. + +Ethical Considerations and Bias Mitigation. In light of potential biases in multi-modal time series datasets, future research should integrate fairness-aware techniques, such as fairness constraints, counterfactual analysis, and adversarial debiasing. These methods should be combined with robust bias assessment frameworks to systematically detect and mitigate inequities, ensuring outcomes that are both equitable and socially responsible. + +# 7 Conclusion + +In this survey, we provide a comprehensive overview of existing multi-modal time series methods. We first discuss the multi-modal time series used in existing methods. Then, we propose a taxonomy based on cross-modal interactions between time series and other modalities. The existing methods are categorized and summarized accordingly. We also discuss the real-world applications and highlight future research directions in this promising area. + +# Acknowledgments + +This research was supported in part by the National Science Foundation (NSF) CAREER IIS-2338878, as well as by generous research gifts from NEC Labs America Inc. and Morgan Stanley. + +# References + +[1] Triantafyllos Afouras, Joon Son Chung, and Andrew Zisserman. 2018. LRS3-TED: a large-scale dataset for visual speech recognition. 
arXiv:1809.00496 [cs.CV] https://arxiv.org/abs/1809.00496 +[2] Tadas Baltrusaitis, Chaitanya Ahuja, and Louis-Philippe Morency. 2018. Multimodal machine learning: A survey and taxonomy. IEEE transactions on pattern analysis and machine intelligence 41, 2 (2018), 423-443. +[3] Tom Bamford, Andrea Coletta, Elizabeth Fons, Sriram Gopalakrishnan, Svitlana Vyetrenko, Tucker Balch, and Manuela Veloso. 2023. Multi-Modal Financial Time-Series Retrieval Through Latent Space Projections. In Proceedings of the Fourth ACM International Conference on AI in Finance (Brooklyn, NY, USA) (ICAIF '23). Association for Computing Machinery, New York, NY, USA, 498-506. doi:10.1145/3604237.3626901 +[4] Dhananjay Bhattacharya, Nayan K Sharma, Debottam Dutta, Srikanth R Chetupalli, Prashant Mote, Sriram Ganapathy, Jyothi Bhat, Shreyas Ramoji, Pravin Ghosh, Aswin Subramanian, et al. 2023. Coswara: a respiratory sounds and symptoms dataset for remote screening of SARS-CoV-2 infection. Scientific Data 10, 1 (2023), 397. +[5] Nimeesha Chan, Felix Parker, William Bennett, Tianyi Wu, Mung Yao Jia, James Fackler, and Kimia Ghobadi. 2024. Medtsllm: Leveraging llms for multimodal medical time series analysis. arXiv preprint arXiv:2408.07773 (2024). +[6] Sameep Chattopadhyay, Pulkit Paliwal, Sai Shankar Narasimhan, Shubhankar Agarwal, and Sandeep P. Chinchali. 2025. Context Matters: Leveraging Contextual Features for Time Series Forecasting. arXiv:2410.12672 [cs.LG] https://arxiv.org/abs/2410.12672 +[7] Shengyu Chen, Yiqun Xie, Xiang Li, Xu Liang, and Xiaowei Jia. 2023. Physics-guided meta-learning method in baseflow prediction over large regions. In Proceedings of the 2023 SIAM International Conference on Data Mining (SDM). SIAM, 217-225. +[8] Shengyu Chen, Jacob A Zwart, and Xiaowei Jia. 2022. Physics-guided graph meta learning for predicting water temperature and streamflow in stream networks. In Proceedings of the 28th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 2752-2761. 
+[9] Si-An Chen, Chun-Liang Li, Sercan O Arik, Nathanael Christian Yoder, and Tomas Pfister. 2023. TSMixer: An All-MLP Architecture for Time Series Forecasting. Transactions on Machine Learning Research (2023). +[10] Wei Chen, Xixuan Hao, Yuankai Wu, and Yuxuan Liang. 2024. Terra: A Multimodal Spatio-Temporal Dataset Spanning the Earth. In Advances in Neural Information Processing Systems, A. Globerson, L. Mackey, D. Belgrave, A. Fan, U. Paquet, J. Tomczak, and C. Zhang (Eds.), Vol. 37. Curran Associates, Inc., 66329-66356. https://proceedings.neurips.cc/paper_files/paper/2024/file/7a6a7fbd1ee0c9684b3f919f79d129ef-Paper-Datasets_and_Benchmarks_Track.pdf +[11] Zihan Chen, Lei Nico Zheng, Cheng Lu, Jialu Yuan, and Di Zhu. 2023. ChatGPT Informed Graph Neural Network for Stock Movement Prediction. Available at SSRN 4464002 (2023). +[12] Mingyue Cheng, Yiheng Chen, Qi Liu, Zhiding Liu, Yuong Luo, and Enhong Chen. 2025. InstrucTime: Advancing Time Series Classification with Multimodal Language Modeling. In Proceedings of the Eighteenth ACM International Conference on Web Search and Data Mining (Hannover, Germany) (WSDM '25). Association for Computing Machinery, New York, NY, USA, 792-800. doi:10.1145/3701551.3703499 +[13] Joon Son Chung, Arsha Nagrani, and Andrew Zisserman. 2018. VoxCeleb2: Deep Speaker Recognition. In Interspeech 2018 (Interspeech Proceedings). ISCA. doi:10.21437/Interspeech.2018-1929 +[14] Helena Cousijn, Patricia Feeney, Daan Lowenberg, Elisa Presani, and Natasha Simons. 2019. A data citation roadmap for scholarly data repositories. *Scientific Data* 6, 1 (2019), 28. +[15] Mayank Daswani, Mathias MJ Bellaiche, Marc Wilson, Desislav Ivanov, Mikhail Papkov, Eva Schnider, Jing Tang, Kay Lamerigts, Gabriela Botea, Michael A Sanchez, et al. 2024. Plots Unlock Time-Series Understanding in Multimodal Models. arXiv preprint arXiv:2410.02637 (2024). +[16] Iman Deznabi, Mohit Iyyer, and Madalina Fiterau. 2021. 
Predicting in-hospital mortality by combining clinical notes with time-series data. In Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021, Chengqing Zong, Fei Xia, Wenjie Li, and Roberto Navigli (Eds.). Association for Computational Linguistics, Online, 4026-4031. doi:10.18653/v1/2021-findings-acl.352 +[17] Zihan Dong, Xinyu Fan, and Zhiyuan Peng. 2024. FNSPID: A Comprehensive Financial News Dataset in Time Series. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining (Barcelona, Spain) (KDD '24). Association for Computing Machinery, New York, NY, USA, 4918-4927. doi:10.1145/3637528.3671629 +[18] Vijay Ekambaram, Kushagra Manglik, Sumanta Mukherjee, Surya Shravan Kumar Sajja, Satyam Dwivedi, and Vikas Raykar. 2020. Attention based multimodal new product sales time-series forecasting. In Proceedings of the 26th ACM SIGKDD international conference on knowledge discovery & data mining. 3110-3118. + +[19] Jie Feng, Yuwei Du, Tianhui Liu, Siqi Guo, Yuming Lin, and Yong Li. 2024. Citygpt: Empowering urban spatial cognition of large language models. arXiv preprint arXiv:2406.13948 (2024). +[20] Stefan Feuerriegel, Dennis Frauen, Valentyn Melnychuk, Jonas Schweisthal, Konstantin Hess, Alicia Curth, Stefan Bauer, Niki Kilbertus, Isaac S Kohane, and Mihaela van der Schaar. 2024. Causal machine learning for predicting treatment outcomes. Nature Medicine 30, 4 (2024), 958-968. +[21] Mohammad Fraiwan, Luay Fraiwan, Basheer Khassawneh, and Ali Ibnian. 2021. A dataset of lung sounds recorded from the chest wall using an electronic stethoscope. Data in Brief 35, 106913. doi:10.1016/j.dib.2021.106913 +[22] Alessandro T. Gifford, Kshitij Dwivedi, Gemma Roig, and Radoslaw M. Cichy. 2022. A large and rich EEG dataset for modeling human visual object recognition. NeuroImage 264 (2022), 119754. doi:10.1016/j.neuroimage.2022.119754 +[23] Shengnan Guo, Youfang Lin, Ning Feng, Chao Song, and Huaiyu Wan. 2019. 
Attention based spatial-temporal graph convolutional networks for traffic flow forecasting. In Proceedings of the AAAI conference on artificial intelligence, Vol. 33, 922-929. +[24] Xusen Guo, Qiming Zhang, Junyue Jiang, Mingxing Peng, Meixin Zhu, and Hao Frank Yang. 2024. Towards explainable traffic flow prediction with large language models. Communications in Transportation Research 4 (2024), 100150. +[25] Paul Hager, Martin J Menten, and Daniel Rueckert. 2023. Best of both worlds: Multimodal contrastive learning with tabular and imaging data. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 23924-23935. +[26] Nora Hollenstein, Marius Troendle, Ce Zhang, and Nicolas Langer. 2020. ZuCo 2.0: A Dataset of Physiological Recordings During Natural Reading and Annotation. In Proceedings of the Twelfth Language Resources and Evaluation Conference, Nicoletta Calzolari, Frédéric Béchet, Philippe Blache, Khalid Choukri, Christopher Cieri, Thierry Declerck, Sara Goggi, Hitoshi Isahara, Bente Maegaard, Joseph Mariani, Hélène Mazo, Asuncion Moreno, Jan Odijk, and Stelios Piperidis (Eds.). European Language Resources Association, Marseille, France, 138-146. https://aclanthology.org/2020.lrec-1.18/ +[27] Shi Bin Hoo, Samuel Müller, David Salinas, and Frank Hutter. 2025. The tabular foundation model TabPFN outperforms specialized time series forecasting models based on simple features. arXiv preprint arXiv:2501.02945 (2025). +[28] Kexin Huang, Jaan Altsoaar, and Rajesh Ranganath. 2020. Clinical-BERT: Modeling Clinical Notes and Predicting Hospital Readmission. arXiv:1904.05342 [cs.CL] https://arxiv.org/abs/1904.05342 +[29] Jiahao Ji, Jingyuan Wang, Chao Huang, Junjie Wu, Boren Xu, Zhenhe Wu, Junbo Zhang, and Yu Zheng. 2023. Spatio-temporal self-supervised learning for traffic flow prediction. In Proceedings of the AAAI conference on artificial intelligence, Vol. 37. 4356-4364. +[30] Furong Jia, Kevin Wang, Yixiang Zheng, Defu Cao, and Yan Liu. 
2024. GPT4MTS: Prompt-based Large Language Model for Multimodal Time-series Forecasting. Proceedings of the AAI Conference on Artificial Intelligence 38, 21 (Mar. 2024), 23343-23351. doi:10.1609/aaaai.v38i21.30383 +[31] Yushan Jiang, Wenchao Yu, Geon Lee, Dongjin Song, Kijung Shin, Wei Cheng, Yanchi Liu, and Haifeng Chen. 2025. Explainable Multi-modal Time Series Prediction with LLM-in-the-Loop. arXiv:2503.01013 [cs.LG] https://arxiv.org/abs/2503.01013 +[32] Bo Jin, Haoyu Yang, Leilei Sun, Chuanren Liu, Yue Qu, and Jianing Tong. 2018. A treatment engine by predicting next-period prescriptions. In Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining. 1608-1616. +[33] Ming Jin, Shiyu Wang, Lintao Ma, Zhixuan Chu, James Y. Zhang, Xiaoming Shi, Pin-Yu Chen, Yuxuan Liang, Yuan-Fang Li, Shirui Pan, and Qingsong Wen. 2024. Time-LLM: Time Series Forecasting by Reprogramming Large Language Models. In The Twelfth International Conference on Learning Representations. https://openreview.net/forum?id=Unb5CVPtae +[34] Alistair Johnson, Lucas Bulgarelli, Tom Pollard, Steven Horng, Leo Anthony Celi, and Roger Mark. 2021. MIMIC-IV (version 1.0). doi:10.13026/s6n6-xd98 +[35] Alistair E. W. Johnson, Tom J. Pollard, Lu Shen, Li-wei H. Lehman, Mengling Feng, Mohammad Ghassemi, Benjamin Moody, Peter Szolovits, Leo Anthony Celi, and Roger G. Mark. 2016. MIMIC-III, a freely accessible critical care database. Scientific Data 3 (2016), 160035. +[36] Swaraj Khadanga, Karan Aggarwal, Shafiq Joty, and Jaideep Srivastava. 2019. Using Clinical Notes with Time Series Data for ICU Management. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IFCNLP). Association for Computational Linguistics, Hong Kong, China, 6432-6437. 
doi:10.18653/v1/D19-1678 +[37] Kai Kim, Howard Tsai, Rajat Sen, Abhimanyu Das, Zihao Zhou, Abhishek Tanpure, Mathew Luo, and Rose Yu. 2024. Multi-Modal Forecaster: Jointly Predicting Time Series and Textual Data. arXiv:2411.06735 [cs.AI] https://arxiv.org/abs/2411.06735 +[38] Yaxuan Kong, Yiyuan Yang, Yoontae Hwang, Wenjie Du, Stefan Zohren, Zhangyang Wang, Ming Jin, and Qingsong Wen. 2025. Time-MQA: Time Series Multi-Task Question Answering with Context Enhancement. + +arXiv:2503.01875 [cs.CL] https://arxiv.org/abs/2503.01875 +[39] Yaxuan Kong, Yiyuan Yang, Shiyu Wang, Chenghao Liu, Yuxuan Liang, Ming Jin, Stefan Zohren, Dan Pei, Yan Liu, and Qingsong Wen. 2025. Position: Empowering Time Series Reasoning with Multimodal LLMs. arXiv preprint arXiv:2502.01477 (2025). +[40] Dongjie Wang Chengyuan Deng Reon Matsuoka Lecheng Zheng, Zhengzhang Chen and Haifeng Chen. 2024. LEMMA-RCA: A Large Multi-modal Multi-domain Dataset for Root Cause Analysis. arXiv:2406.05375 [cs.AI] +[41] Geon Lee, Wenchao Yu, Wei Cheng, and Haifeng Chen. 2024. MoAT: Multi-Modal Augmented Time Series Forecasting. https://openreview.net/forum?id=uRXxnoqDHH +[42] Geon Lee, Wenchao Yu, Kijung Shin, Wei Cheng, and Haifeng Chen. 2025. TimeCAP: Learning to Contextualize, Augment, and Predict Time Series Events with Large Language Model Agents. In AAAI. +[43] Jinhyuk Lee, Wonjin Yoon, Sungdong Kim, Donghyeon Kim, Sunkyu Kim, Chan So, and Jaewoo Kang. 2019. BioBERT: a pre-trained biomedical language representation model for biomedical text mining. Bioinformatics (Oxford, England) 36 (09 2019). doi:10.1093/bioinformatics/btz682 +[44] Haoran Li, Junqi Liu, Zexian Wang, Shiuyuan Luo, Xiaowei Jia, and Huaxiu Yao. 2024. LITE: Modeling Environmental Ecosystems with Multimodal Large Language Models. arXiv preprint arXiv:2404.01165 (2024). +[45] Jun Li, Che Liu, Sibo Cheng, Rossella Arcucci, and Shenda Hong. 2023. Frozen Language Model Helps ECG Zero-Shot Learning. In Medical Imaging with Deep Learning. 
+[46] Zekun Li, Shiyang Li, and Xifeng Yan. 2023. Time series as images: Vision transformer for irregularly sampled time series. Advances in Neural Information Processing Systems 36 (2023), 49187-49204. +[47] Zihao Li, Xiao Lin, Zhining Liu, Jiaru Zou, Ziwei Wu, Lecheng Zheng, Dongqi Fu, Yada Zhu, Hendrik Hamann, Hanghang Tong, and Jingrui He. 2025. Language in the Flow of Time: Time-Series-Paired Texts Weaved into a Unified Temporal Narrative. arXiv:2502.08942 [cs.LG] https://arxiv.org/abs/2502.08942 +[48] Zhonghang Li, Lianghao Xia, Jiabin Tang, Yong Xu, Lei Shi, Long Xia, Dawei Yin, and Chao Huang. 2024. UrbanGPT: Spatio-Temporal Large Language Models. ArXiv abs/2403.00813 (2024). https://api(semanticscholar.org/CorpusID:268230972 +[49] Paul Pu Liang, Amir Zadeh, and Louis-Philippe Morency. 2024. Foundations & trends in multimodal machine learning: Principles, challenges, and open questions. Comput. Surveys 56, 10 (2024), 1-42. +[50] Minhua Lin, Zhengzhang Chen, Yanchi Liu, Xujiang Zhao, Zongyu Wu, Junxiang Wang, Xiang Zhang, Suhang Wang, and Haifeng Chen. 2024. Decoding Time Series with LLMs: A Multi-Agent Framework for Cross-Domain Annotation. arXiv:2410.17462 [cs.AI] https://arxiv.org/abs/2410.17462 +[51] Chenxi Liu, Qianxiong Xu, Hao Miao, Sun Yang, Lingzheng Zhang, Cheng Long, Ziyue Li, and Rui Zhao. 2025. TimeCMA: Towards LLM-Empowered Multivariate Time Series Forecasting via Cross-Modality Alignment. In AAAI. +[52] Hanwen Liu, Daniel Hajialigol, Benny Antony, Aiguo Han, and Xuan Wang. 2024. EEG2Text: Open Vocabulary EEG-to-Text Translation with Multi-View Transformer. In 2024 IEEE International Conference on Big Data (BigData). IEEE Computer Society, Los Alamitos, CA, USA, 1824-1833. doi:10.1109/ BigData62323.2024.10825980 +[53] Haoxin Liu, Shangqing Xu, Zhiyuan Zhao, Lingkai Kong, Harshavardhan Kamarthi, Aditya B. Sasanur, Megha Sharma, Jiaming Cui, Qingsong Wen, Chao Zhang, and B. Aditya Prakash. 2024. 
Time-MMD: Multi-Domain Multimodal Dataset for Time Series Analysis. In The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track. https://openreview.net/forum?id=fuD0h4R1IL +[54] Lei Liu, Shuo Yu, Runze Wang, Zhenxun Ma, and Yanming Shen. 2024. How can large language models understand spatial-temporal data? arXiv preprint arXiv:2401.14192 (2024). +[55] Xu Liu, Junfeng Hu, Yuan Li, Shizhe Diao, Yuxuan Liang, Bryan Hooi, and Roger Zimmermann. 2024. UniTime: A Language-Empowered Unified Model for Cross-Domain Time Series Forecasting. In Proceedings of the ACM Web Conference 2024 (Singapore, Singapore) (WWW'24). Association for Computing Machinery, New York, NY, USA, 4095-4106. doi:10.1145/3589334.3645434 +[56] Xin Liu, Daniel McDuff, Geza Kovacs, Isaac Galatzer-Levy, Jacob Sunshine, Jiening Zhan, Ming-Zher Poh, Shun Liao, Paolo Di Achille, and Shwetak Patel. 2023. Large Language Models are Few-Shot Health Learners. arXiv preprint arXiv:2305.15525 (2023). https://arxiv.org/abs/2305.15525 +[57] Jingchao Ni, Ziming Zhao, ChengAo Shen, Hanghang Tong, Dongjin Song, Wei Cheng, Dongsheng Luo, and Haifeng Chen. 2025. Harnessing Vision Models for Time Series Analysis: A Survey. arXiv preprint arXiv:2502.08869 (2025). +[58] Yuqi Nie, Nam H Nguyen, Phanwadee Sinthong, and Jayant Kalagnanam. 2023. A Time Series is Worth 64 Words: Long-term Forecasting with Transformers. In The Eleventh International Conference on Learning Representations. +[59] Kanghui Ning, Zijie Pan, Yu Liu, Yushan Jiang, James Y. Zhang, Kashif Rasul, Anderson Schneider, Lintao Ma, Yuriy Nevmvaka, and Dongjin Song. 2025. TS-RAG: Retrieval-Augmented Generation based Time Series Foundation Models are Stronger Zero-Shot Forecaster. arXiv:2503.07649 [cs.LG] https://arxiv.org/ + +abs/2503.07649 +[60] K. Niu, K. Zhang, X. Peng, Y. Pan, and N. Xiao. 2023. Deep Multi-Modal Intermediate Fusion of Clinical Record and Time Series Data in Mortality Prediction. 
Frontiers in Molecular Biosciences 10 (2023), 1136071. doi:10.3389/fmolb.2023.1136071 +[61] Zijie Pan, Yushan Jiang, Sahil Garg, Anderson Schneider, Yuriy Nevmyvaka, and Dongjin Song. 2024. $S^2$ IP-LLM: Semantic Space Informed Prompt Learning with LLM for Time Series Forecasting. In Forty-first International Conference on Machine Learning. +[62] Vinay Prithyani, Mohsin Mohammed, Richa Gadgil, Ricardo Buitrago, Vinija Jain, and Aman Chadha. 2024. On the Feasibility of Vision-Language Models for Time-Series Classification. arXiv preprint arXiv:2412.17304 (2024). +[63] Yao Qin, Dongjin Song, Haifeng Cheng, Wei Cheng, Guofei Jiang, and Garrison W Cottrell. 2017. A dual-stage attention-based recurrent neural network for time series prediction. In Proceedings of the 26th International Joint Conference on Artificial Intelligence, 2627-2633. +[64] Hadi Rezaei, Hamidreza Faaljou, and Gholamreza Mansourfar. 2021. Stock price prediction using deep learning and frequency decomposition. Expert Systems with Applications 169 (2021), 114332. +[65] Bruno M Rocha, Dimitris Filos, Luis Mendes, Gorkem Serbes, Sezer Ulukaya, Yasemin P Kahya, Niksa Jakovljevic, Tatjana L Turukalo, Ioannis M Vogiatzis, Eleni Perantoni, Evangelos Kaimakamis, Pantelis Natsivas, Ana Oliveira, Cristina Jacome, Alda Marques, Nicos Maglaveras, Rui Pedro Paiva, Ioanna Chouvarda, and Paulo de Carvalho. 2019. An open access database for the evaluation of respiratory sound classification algorithms. Physiological Measurement 40, 3 (2019), 035001. +[66] Ludan Ruan, Yiyang Ma, Huan Yang, Huiguo He, Bei Liu, Jianlong Fu, Nicholas Jing Yuan, Qin Jin, and Baining Guo. 2023. Mm-diffusion: Learning multi-modal diffusion models for joint audio and video generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 10219-10228. +[67] Ramit Sawhney, Shivam Agarwal, Arnav Wadhwa, and Rajiv Ratn Shah. 2020. 
Deep Attentive Learning for Stock Movement Prediction From Social Media Text and Company Correlations. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), Bonnie Webber, Trevor Cohn, Yulan He, and Yang Liu (Eds.). Association for Computational Linguistics, Online, 8415-8426. doi:10.18653/v1/2020.emnlp-main.676 +[68] ChengAo Shen, Zhengzhang Chen, Dongsheng Luo, Dongkuan Xu, Haifeng Chen, and Jingchao Ni. 2024. Exploring Multi-Modal Integration with Tool-Augmented LLM Agents for Precise Causal Discovery. arXiv:2412.13667 [cs.LG] https://arxiv.org/abs/2412.13667 +[69] Bowen Shi, Wei-Ning Hsu, Kushal Lakhotia, and Abdelrahman Mohamed. 2022. Learning Audio-Visual Speech Representation by Masked Multimodal Cluster Prediction. In International Conference on Learning Representations. https://openreview.net/forum?id=Z1Qlm11uOM +[70] Geri Skenderi, Christian Joppi, Matteo Denitto, and Marco Cristani. 2024. Well googled is half done: Multimodal forecasting of new fashion product sales with image-based google trends. Journal of Forecasting 43, 6 (2024), 1982-1997. +[71] Patrick Wagner, Nils Strothhoff, Ralf-Dieter Bousseljot, Dieter Kreiseler, Fatima I Lunze, Wojciech Samek, and Tobias Schaeffer. 2020. PTB-XL, a large publicly available electrocardiography dataset. Scientific Data 7, 1 (2020), 154. +[72] Jiahao Wang, Mingyue Cheng, Qingyang Mao, Yitong Zhou, Feiyang Xu, and Xin Li. 2025. TableTime: Reformulating Time Series Classification as Training-Free Table Understanding with Large Language Models. arXiv:2411.15737 [cs.AI] https://arxiv.org/abs/2411.15737 +[73] Xinlei Wang, Maike Feng, Jing Qiu, JINJIN GU, and Junhua Zhao. 2024. From News to Forecast: Integrating Event Analysis in LLM-Based Time Series Forecasting with Reflection. In Advances in Neural Information Processing Systems, A. Globerson, L. Mackey, D. Belgrave, A. Fan, U. Paquet, J. Tomczak, and C. Zhang (Eds.), Vol. 37. Curran Associates, Inc., 58118-58153. 
https://proceedings.neurips.cc/paper_files/paper/2024/file/6aef8bffb372096ee73d98da30119f89-Paper-Conference.pdf +[74] Xiaochen Wang, Junyu Luo, Jiaqi Wang, Ziyi Yin, Suhan Cui, Yuan Zhong, Yaqing Wang, and Fenglong Ma. 2023. Hierarchical Pretraining on Multimodal Electronic Health Records. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, Houda Bouamor, Juan Pino, and Kalika Bali (Eds.). Association for Computational Linguistics, Singapore, 2839-2852. doi:10.18653/v1/2023.emnlp-main.171 +[75] Zhenhailong Wang and Heng Ji. 2021. Open Vocabulary Electroencephalography-To-Text Decoding and Zero-shot Sentiment Classification. In AAAI Conference on Artificial Intelligence. https://apisemantic scholar.org/CorpusID:244909027 +[76] Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Brian Ichter, Fei Xia, Ed H. Chi, Quoc V. Le, and Denny Zhou. 2022. Chain-of-thought prompting elicits reasoning in large language models (NIPS '22). Curran Associates Inc., Red Hook, NY, USA, Article 1800, 14 pages. + +[77] Andrew Robert Williams, Arjun Ashok, Étienne Marcotte, Valentina Zantedeschi, Jithendarraa Subramanian, Roland Riachi, James Requeima, Alexandre Lacoste, Irina Rish, Nicolas Chapados, et al. 2024. Context is key: A benchmark for forecasting with essential textual information. arXiv preprint arXiv:2410.18959 (2024). +[78] Haixu Wu, Tengge Hu, Yong Liu, Hang Zhou, Jianmin Wang, and Mingsheng Long. 2023. TimesNet: Temporal 2D-Variation Modeling for General Time Series Analysis. In The Eleventh International Conference on Learning Representations. +[79] Huizhe Wu, Wei Zhang, Weiwei Shen, and Jun Wang. 2018. Hybrid Deep Sequential Modeling for Social Text-Driven Stock Prediction. In Proceedings of the 27th ACM International Conference on Information and Knowledge Management (Torino, Italy) (CIKM '18). Association for Computing Machinery, New York, NY, USA, 1627–1630. 
doi:10.1145/3269206.3269290 +[80] Zonghan Wu, Shirui Pan, Guodong Long, Jing Jiang, Xiaojun Chang, and Chengqi Zhang. 2020. Connecting the dots: Multivariate time series forecasting with graph neural networks. In Proceedings of the 26th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining, 753-763. +[81] Qianqian Xie, Weiguang Han, Yanzhao Lai, Min Peng, and Jimin Huang. 2023. The wall street neophyte: A zero-shot analysis of chatgpt over multimodal stock movement prediction challenges. arXiv preprint arXiv:2304.05351 (2023). +[82] Zhikai Xing and Yigang He. 2023. Multi-modal information analysis for fault diagnosis with time-series data from power transformer. International Journal of Electrical Power & Energy Systems 144 (2023), 108567. doi:10.1016/j.ijepes.2022.108567 +[83] Haojun Xu, Yan Gao, Zheng Hui, Jie Li, and Xinbo Gao. 2023. Language Knowledge-Assisted Representation Learning for Skeleton-Based Action Recognition. arXiv:2305.12398 [cs.CV] https://arxiv.org/abs/2305.12398 +[84] Yumo Xu and Shay B. Cohen. 2018. Stock Movement Prediction from Tweets and Historical Prices. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), Iryna Gurevych and Yusuke Miyao (Eds.). Association for Computational Linguistics, Melbourne, Australia, 1970-1979. doi:10.18653/v1/P18-1183 +[85] Bo Yang and Lijun Wu. 2021. How to Leverage the Multimodal EHR Data for Better Medical Prediction?. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing (EMNLP). Association for Computational Linguistics, 4029-4038. doi:10.18653/v1/2021.emnlp-main.329 +[86] Haiyang Yang, Li Kuang, and FengQiang Xia. 2021. Multimodal Temporal-Clinical Note Network for Mortality Prediction. Journal of Biomedical Semantics 12, 1 (2021), 1-14. doi:10.1186/s13326-021-00235-3 +[87] Shunyu Yao, Dian Yu, Jeffrey Zhao, Izhak Shafran, Thomas L. Griffiths, Yuan Cao, and Karthik R Narasimhan. 2023. 
Tree of Thoughts: Deliberate Problem Solving with Large Language Models. In Thirty-seventh Conference on Neural Information Processing Systems. https://openreview.net/forum?id=5Xc1ecxO1h +[88] Kun Yi, Qi Zhang, Wei Fan, Shoujin Wang, Pengyang Wang, Hui He, Ning An, Defu Lian, Longbing Cao, and Zhendong Niu. 2024. Frequency-domain MLPs are more effective learners in time series forecasting. Advances in Neural Information Processing Systems 36 (2024). +[89] Xinli Yu, Zheng Chen, and Yanbin Lu. 2023. Harnessing LLMs for Temporal Data - A Study on Explainable Financial Time Series Forecasting. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing: Industry Track, Mingxuan Wang and Imed Zitouni (Eds.). Association for Computational Linguistics, Singapore, 739-753. doi:10.18653/v1/2023.emnlp-industry.69 +[90] Dong Zhang, Shimin Li, Xin Zhang, Jun Zhan, Pengyu Wang, Yaqian Zhou, and Xipeng Qiu. 2023. SpeechGPT: Empowering Large Language Models with Intrinsic Cross-Modal Conversational Abilities. arXiv:2305.11000 [cs.CL] https://arxiv.org/abs/2305.11000 +[91] Jingyi Zhang, Jiaxing Huang, Sheng Jin, and Shijian Lu. 2024. Vision-language models for vision tasks: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence (2024). +[92] Liheng Zhang, Charu Aggarwal, and Guo-Jun Qi. 2017. Stock price prediction via discovering multi-frequency trading patterns. In Proceedings of the 23rd ACM SIGKDD international conference on knowledge discovery and data mining. 2141-2149. +[93] Xiyue Zhang, Chao Huang, Yong Xu, Lianghao Xia, Peng Dai, Liefeng Bo, Junbo Zhang, and Yu Zheng. 2021. Traffic flow forecasting with spatial-temporal graph diffusion network. In Proceedings of the AAAI conference on artificial intelligence, Vol. 35. 15008-15015. +[94] Xiang Zhang, Lina Yao, Manqing Dong, Zhe Liu, Yu Zhang, and Yong Li. 2020. Adversarial representation learning for robust patient-independent epileptic seizure detection. 
IEEE journal of biomedical and health informatics 24, 10 (2020), 2852-2859. +[95] Yuwei Zhang, Tong Xia, Aaqib Saeed, and Cecilia Mascolo. 2024. RespLLM: Unifying Audio and Text with Multimodal LLMs for Generalized Respiratory Health Prediction. arXiv:2410.05361 [cs.LG] https://arxiv.org/abs/2410.05361 +[96] Xiaohu Zhao, Kebin Jia, Benjamin Letcher, Jennifer Fair, Yiqun Xie, and Xiaowei Jia. 2022. VIMTS: Variational-based Imputation for Multi-modal Time Series. In 2022 IEEE International Conference on Big Data (Big Data). IEEE, 349-358. + +[97] Lecheng Zheng, Zhengzhang Chen, Jingrui He, and Haifeng Chen. 2024. MU-LAN: multi-modal causal structure learning and root cause analysis for microservice systems. In Proceedings of the ACM Web Conference 2024. 4107-4116. +[98] Siru Zhong, Weilin Ruan, Ming Jin, Huan Li, Qingsong Wen, and Yuxuan Liang. 2025. Time-VLM: Exploring Multimodal Vision-Language Models for Augmented Time Series Forecasting. arXiv:2502.04395 [cs.CV] https://arxiv.org/abs/2502.04395 +[99] Zihao Zhou and Rose Yu. 2024. Can LLMs Understand Time Series Anomalies? arXiv preprint arXiv:2410.05440 (2024). +[100] Jiaxin Zhuang, Leon Yan, Zhenwei Zhang, Ruiqi Wang, Jiawei Zhang, and Yuantao Gu. 2024. See it, Think it, Sorted: Large Multimodal Models are Few-shot Time Series Anomaly Analyzers. arXiv preprint arXiv:2411.02465 (2024). 
\ No newline at end of file diff --git a/data/2025/2503_13xxx/2503.13709/images/363c4647bba76cf7a9718c32981fbfa35c8c6001ff5bccdcad97a7bdb5afc4a9.jpg b/data/2025/2503_13xxx/2503.13709/images/363c4647bba76cf7a9718c32981fbfa35c8c6001ff5bccdcad97a7bdb5afc4a9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..17f1c04d29f791e299781fcf277c498d2c422f69 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13709/images/363c4647bba76cf7a9718c32981fbfa35c8c6001ff5bccdcad97a7bdb5afc4a9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:459ce356d398dbe9949bfc59f6a1289f5af4e13d00e54d7c1a600a8ce0861b3a +size 5787 diff --git a/data/2025/2503_13xxx/2503.13709/images/3c674ad0e2e84929c9f56d3334e69ed89be4011b598a814f2e99ea5de4015cb1.jpg b/data/2025/2503_13xxx/2503.13709/images/3c674ad0e2e84929c9f56d3334e69ed89be4011b598a814f2e99ea5de4015cb1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ac0635fbd2ec7c88e82b17a42a8ca2a183207aa7 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13709/images/3c674ad0e2e84929c9f56d3334e69ed89be4011b598a814f2e99ea5de4015cb1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:43c4a68148132c111361c17159793de28a2889b395d3f3b0683b42054f9e0796 +size 458164 diff --git a/data/2025/2503_13xxx/2503.13709/images/3d0c0748a3d73486d41680250f0cee30ae17188b7f3582d6184ca022935cf2a5.jpg b/data/2025/2503_13xxx/2503.13709/images/3d0c0748a3d73486d41680250f0cee30ae17188b7f3582d6184ca022935cf2a5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a87451da8644a8ab1307f4fa65f5db2ae3cc7f91 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13709/images/3d0c0748a3d73486d41680250f0cee30ae17188b7f3582d6184ca022935cf2a5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:33e2a2b1bbad9886bdee2bbdf9f2ba71213e1ac0170f77e41bfc929fdddfbccc +size 6546 diff --git 
a/data/2025/2503_13xxx/2503.13709/images/bf8f5117d91e3da11dba5893a8b9ac5948fa50f19246cf4dd41c242bc923a774.jpg b/data/2025/2503_13xxx/2503.13709/images/bf8f5117d91e3da11dba5893a8b9ac5948fa50f19246cf4dd41c242bc923a774.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9c494691778eb4e14fc6b3196d6d230ea0a0b986 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13709/images/bf8f5117d91e3da11dba5893a8b9ac5948fa50f19246cf4dd41c242bc923a774.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c6f62ee0310d6250661af0a2f24506a652be98ec82cd545a162b0bc1661a0f38 +size 76769 diff --git a/data/2025/2503_13xxx/2503.13709/images/d787f6fd60aa56bc27145664a56d4930b471531c8fa3b48352b56a19ae232274.jpg b/data/2025/2503_13xxx/2503.13709/images/d787f6fd60aa56bc27145664a56d4930b471531c8fa3b48352b56a19ae232274.jpg new file mode 100644 index 0000000000000000000000000000000000000000..88f3a3562141a2f91f6c048621cf7f8289171103 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13709/images/d787f6fd60aa56bc27145664a56d4930b471531c8fa3b48352b56a19ae232274.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b25848506d56a7b04604a76d81d3565d0b2ede9a87017cbf3d1ff2d3115893a +size 5376 diff --git a/data/2025/2503_13xxx/2503.13709/images/dfc75126255eb54797d7990d9a20668b0a1b9ee7743a4acf0e3af423c98e1049.jpg b/data/2025/2503_13xxx/2503.13709/images/dfc75126255eb54797d7990d9a20668b0a1b9ee7743a4acf0e3af423c98e1049.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7898e5f0546d9e4c216fbd2d9d8c515273a565f8 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13709/images/dfc75126255eb54797d7990d9a20668b0a1b9ee7743a4acf0e3af423c98e1049.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7f827855343e7706a539a140175749ad2087a1f82908c31cff84a74fac50b610 +size 65565 diff --git a/data/2025/2503_13xxx/2503.13709/layout.json b/data/2025/2503_13xxx/2503.13709/layout.json new file mode 100644 index 
0000000000000000000000000000000000000000..ff2275638eb8eab4cae4c7b54d56443e9167acd0 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13709/layout.json @@ -0,0 +1,8852 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 82, + 80, + 527, + 100 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 80, + 527, + 100 + ], + "spans": [ + { + "bbox": [ + 82, + 80, + 527, + 100 + ], + "type": "text", + "content": "Multi-modal Time Series Analysis: A Tutorial and Survey" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 64, + 109, + 545, + 138 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 109, + 545, + 138 + ], + "spans": [ + { + "bbox": [ + 64, + 109, + 545, + 138 + ], + "type": "text", + "content": "Yushan Jiang" + }, + { + "bbox": [ + 64, + 109, + 545, + 138 + ], + "type": "inline_equation", + "content": "^{1*}" + }, + { + "bbox": [ + 64, + 109, + 545, + 138 + ], + "type": "text", + "content": ", Kanghui Ning" + }, + { + "bbox": [ + 64, + 109, + 545, + 138 + ], + "type": "inline_equation", + "content": "^{1*}" + }, + { + "bbox": [ + 64, + 109, + 545, + 138 + ], + "type": "text", + "content": ", Zijie Pan" + }, + { + "bbox": [ + 64, + 109, + 545, + 138 + ], + "type": "inline_equation", + "content": "^{1*}" + }, + { + "bbox": [ + 64, + 109, + 545, + 138 + ], + "type": "text", + "content": ", Xuyang Shen" + }, + { + "bbox": [ + 64, + 109, + 545, + 138 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 64, + 109, + 545, + 138 + ], + "type": "text", + "content": ", Jingchao Ni" + }, + { + "bbox": [ + 64, + 109, + 545, + 138 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 64, + 109, + 545, + 138 + ], + "type": "text", + "content": ", Wenchao Yu" + }, + { + "bbox": [ + 64, + 109, + 545, + 138 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 64, + 109, + 545, + 138 + ], + "type": "text", + "content": ", Anderson Schneider" + }, + { + 
"bbox": [ + 64, + 109, + 545, + 138 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 64, + 109, + 545, + 138 + ], + "type": "text", + "content": ", Haifeng Chen" + }, + { + "bbox": [ + 64, + 109, + 545, + 138 + ], + "type": "inline_equation", + "content": "^{4\\dagger}" + }, + { + "bbox": [ + 64, + 109, + 545, + 138 + ], + "type": "text", + "content": ", Yuriy Nevmyvaka" + }, + { + "bbox": [ + 64, + 109, + 545, + 138 + ], + "type": "inline_equation", + "content": "^{3\\dagger}" + }, + { + "bbox": [ + 64, + 109, + 545, + 138 + ], + "type": "text", + "content": ", Dongjin Song" + }, + { + "bbox": [ + 64, + 109, + 545, + 138 + ], + "type": "inline_equation", + "content": "^{1\\dagger}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 198, + 137, + 413, + 149 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 198, + 137, + 413, + 149 + ], + "spans": [ + { + "bbox": [ + 198, + 137, + 413, + 149 + ], + "type": "text", + "content": "1University of Connecticut 2University of Houston" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 209, + 149, + 402, + 162 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 209, + 149, + 402, + 162 + ], + "spans": [ + { + "bbox": [ + 209, + 149, + 402, + 162 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 209, + 149, + 402, + 162 + ], + "type": "text", + "content": "Morgan Stanley " + }, + { + "bbox": [ + 209, + 149, + 402, + 162 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 209, + 149, + 402, + 162 + ], + "type": "text", + "content": "NEC Laboratories America" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 51, + 170, + 96, + 180 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 170, + 96, + 180 + ], + "spans": [ + { + "bbox": [ + 51, + 170, + 96, + 180 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 184, + 296, + 426 + ], + 
"type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 184, + 296, + 426 + ], + "spans": [ + { + "bbox": [ + 50, + 184, + 296, + 426 + ], + "type": "text", + "content": "Multi-modal time series analysis has recently emerged as a prominent research area in data mining, driven by the increasing availability of diverse data modalities, such as text, images, and structured tabular data from real-world sources. However, effective analysis of multi-modal time series is hindered by data heterogeneity, modality gap, misalignment, and inherent noise. Recent advancements in multi-modal time series methods have exploited the multi-modal context via cross-modal interactions based on deep learning methods, significantly enhancing various downstream tasks. In this tutorial and survey, we present a systematic and up-to-date overview of multi-modal time series datasets and methods. We first state the existing challenges of multi-modal time series analysis and our motivations, with a brief introduction of preliminaries. Then, we summarize the general pipeline and categorize existing methods through a unified cross-modal interaction framework encompassing fusion, alignment, and transference at different levels (i.e., input, intermediate, output), where key concepts and ideas are highlighted. We also discuss the real-world applications of multi-modal analysis for both standard and spatial time series, tailored to general and specific domains. Finally, we discuss future research directions to help practitioners explore and exploit multi-modal time series. The up-to-date resources are provided in the GitHub repository1." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 51, + 435, + 104, + 448 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 435, + 104, + 448 + ], + "spans": [ + { + "bbox": [ + 51, + 435, + 104, + 448 + ], + "type": "text", + "content": "Keywords" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 449, + 295, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 449, + 295, + 472 + ], + "spans": [ + { + "bbox": [ + 50, + 449, + 295, + 472 + ], + "type": "text", + "content": "Multi-modal Time Series Analysis, Foundation Model, Large Language Model, Deep Learning" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 51, + 483, + 134, + 495 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 483, + 134, + 495 + ], + "spans": [ + { + "bbox": [ + 51, + 483, + 134, + 495 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 50, + 498, + 295, + 585 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 498, + 295, + 585 + ], + "spans": [ + { + "bbox": [ + 50, + 498, + 295, + 585 + ], + "type": "text", + "content": "Time series analysis is a fundamental task in data mining, driven by the proliferation of sequential data exhibiting rich temporal dynamics across diverse real-world systems. With the advent of deep learning, various methods have been proposed to effectively model complex temporal relationships within time series [9, 58, 61, 63, 78, 80, 88], facilitating downstream tasks in diverse domains, including healthcare [20, 32, 94], finance [64, 92], transportation [23, 29, 93] and environmental sciences [7, 8]." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 50, + 586, + 295, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 586, + 295, + 673 + ], + "spans": [ + { + "bbox": [ + 50, + 586, + 295, + 673 + ], + "type": "text", + "content": "In practice, time series are often associated with external contexts beyond their temporal dynamics [6, 77]. Such contexts are multi-modal, encompassing a variety of representations, such as texts [41, 73], images [18, 70], tables [6], and graphs [67], which carry rich semantic information for time series analysis. As such, incorporating the multi-modal contexts allows models to have a comprehensive view of underlying systems, capture subtle dependencies, and explain complex temporal behaviors more accurately." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 363, + 169, + 515, + 182 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 363, + 169, + 515, + 182 + ], + "spans": [ + { + "bbox": [ + 363, + 169, + 515, + 182 + ], + "type": "text", + "content": "Multi-modal Time Series Analysis" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 412, + 182, + 463, + 193 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 412, + 182, + 463, + 193 + ], + "spans": [ + { + "bbox": [ + 412, + 182, + 463, + 193 + ], + "type": "text", + "content": "Background" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 354, + 196, + 522, + 206 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 354, + 196, + 522, + 206 + ], + "spans": [ + { + "bbox": [ + 354, + 196, + 522, + 206 + ], + "type": "text", + "content": "Challenges, Our Motivations, Preliminaries, etc." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 376, + 214, + 502, + 225 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 376, + 214, + 502, + 225 + ], + "spans": [ + { + "bbox": [ + 376, + 214, + 502, + 225 + ], + "type": "text", + "content": "Data, Methods & Applications" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 379, + 231, + 488, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 379, + 231, + 488, + 240 + ], + "spans": [ + { + "bbox": [ + 379, + 231, + 488, + 240 + ], + "type": "text", + "content": "Multi-modal Time Series Datas" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 330, + 242, + 528, + 260 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 331, + 242, + 528, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 242, + 528, + 251 + ], + "spans": [ + { + "bbox": [ + 331, + 242, + 528, + 251 + ], + "type": "text", + "content": "1. Modalities: Time Series, Text, Image, Tabular, Graph, etc." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 330, + 251, + 518, + 260 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 330, + 251, + 518, + 260 + ], + "spans": [ + { + "bbox": [ + 330, + 251, + 518, + 260 + ], + "type": "text", + "content": "2. Scope, Existing Datasets, Characteristics, Domain, etc." 
+ } + ] + } + ], + "index": 18 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 355, + 269, + 522, + 278 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 355, + 269, + 522, + 278 + ], + "spans": [ + { + "bbox": [ + 355, + 269, + 522, + 278 + ], + "type": "text", + "content": "Taxonomy of Multi-modal Time Series Methods" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 330, + 280, + 545, + 308 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 331, + 280, + 485, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 280, + 485, + 289 + ], + "spans": [ + { + "bbox": [ + 331, + 280, + 485, + 289 + ], + "type": "text", + "content": "1. Interaction Stage (Input, Intermediate, Output)" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 331, + 290, + 506, + 299 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 290, + 506, + 299 + ], + "spans": [ + { + "bbox": [ + 331, + 290, + 506, + 299 + ], + "type": "text", + "content": "2. Interaction Strategy (Fusion, Alignment, Transference)" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 330, + 299, + 545, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 330, + 299, + 545, + 308 + ], + "spans": [ + { + "bbox": [ + 330, + 299, + 545, + 308 + ], + "type": "text", + "content": "3. 
Specific Methods (Concatenate, Attention, Contrastive, Gating, etc.)" + } + ] + } + ], + "index": 23 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 408, + 317, + 469, + 326 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 408, + 317, + 469, + 326 + ], + "spans": [ + { + "bbox": [ + 408, + 317, + 469, + 326 + ], + "type": "text", + "content": "Domains & Tasks" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 330, + 327, + 534, + 346 + ], + "type": "list", + "angle": 0, + "index": 28, + "blocks": [ + { + "bbox": [ + 331, + 327, + 523, + 336 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 327, + 523, + 336 + ], + "spans": [ + { + "bbox": [ + 331, + 327, + 523, + 336 + ], + "type": "text", + "content": "1. General, Finance, Healthcare, Traffic, Environment, etc." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 330, + 337, + 534, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 330, + 337, + 534, + 346 + ], + "spans": [ + { + "bbox": [ + 330, + 337, + 534, + 346 + ], + "type": "text", + "content": "2. Forecasting, Classification, Causal Discovery, Retrieval, etc." 
+ } + ] + } + ], + "index": 27 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 381, + 356, + 493, + 366 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 381, + 356, + 493, + 366 + ], + "spans": [ + { + "bbox": [ + 381, + 356, + 493, + 366 + ], + "type": "text", + "content": "Future Research Directions" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 326, + 368, + 547, + 377 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 368, + 547, + 377 + ], + "spans": [ + { + "bbox": [ + 326, + 368, + 547, + 377 + ], + "type": "text", + "content": "Reasoning, Decision Making, Generalization, Contextual Noise," + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 413, + 378, + 459, + 387 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 413, + 378, + 459, + 387 + ], + "spans": [ + { + "bbox": [ + 413, + 378, + 459, + 387 + ], + "type": "text", + "content": "Bias & Ethics" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 330, + 405, + 542, + 416 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 330, + 405, + 542, + 416 + ], + "spans": [ + { + "bbox": [ + 330, + 405, + 542, + 416 + ], + "type": "text", + "content": "Figure 1: The framework of our tutorial and survey." + } + ] + } + ], + "index": 32, + "type": "text" + }, + { + "bbox": [ + 313, + 435, + 560, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 435, + 560, + 654 + ], + "spans": [ + { + "bbox": [ + 313, + 435, + 560, + 654 + ], + "type": "text", + "content": "Effective analysis of multi-modal time series, however, is hindered by several key challenges in terms of data heterogeneity, modality gap and contextual relevance. First, different modalities exhibit distinct statistical properties, structures, and dimensionalities, leading to discrepancies in feature distributions and semantic meanings. 
For instance, while time series data is sequentially ordered with temporal dependencies, textual and image data contains rich contextual semantics and correlations. Aligning these heterogeneous data into a unified representation space is non-trivial. Second, the textual, tabular, or visual contexts may appear at different timesteps or granularities. Such temporal misalignment may impede meaningful cross-modal interactions. Third, real-world data is inevitably noisy with irrelevant information that may mislead correlation learning, resulting in suboptimal performance. For example, in finance, news articles related to stock market prediction often contain much redundant or speculative narratives that does not reflect actual market conditions. Therefore, the focus of multi-modal time series analysis is to effectively capture complementary and relevant information from multi-modal context and leverage it for predictive or analytical tasks." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 313, + 654, + 559, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 654, + 559, + 710 + ], + "spans": [ + { + "bbox": [ + 313, + 654, + 559, + 710 + ], + "type": "text", + "content": "More recently, an increasing number of multi-modal methods have shown promise in exploiting contextual information from diverse data sources, which boosts performance in wide tasks ranging from forecasting [41, 51], classification [42, 45], anomaly detection [82] to retrieval [3] and causal discovery [68, 97]. 
Despite" + } + ] + } + ], + "index": 34 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 206, + 35, + 559 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 206, + 35, + 559 + ], + "spans": [ + { + "bbox": [ + 14, + 206, + 35, + 559 + ], + "type": "text", + "content": "arXiv:2503.13709v1 [cs.LG] 17 Mar 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 50, + 682, + 295, + 700 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 682, + 295, + 700 + ], + "spans": [ + { + "bbox": [ + 50, + 682, + 295, + 700 + ], + "type": "text", + "content": "* equal contribution.† Yuriy Nevmyvaka, Haifeng Chen and Dongjin Song are the corresponding authors." + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 50, + 700, + 249, + 709 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 700, + 249, + 709 + ], + "spans": [ + { + "bbox": [ + 50, + 700, + 249, + 709 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 50, + 700, + 249, + 709 + ], + "type": "text", + "content": "https://github.com/UCconn-DSIS/Multi-modal-Time-Series-Analysis" + } + ] + } + ], + "index": 36 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 50, + 85, + 294, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 85, + 294, + 304 + ], + "spans": [ + { + "bbox": [ + 50, + 85, + 294, + 304 + ], + "type": "text", + "content": "the promising results of multi-modal time series methods, they are tailored for their own tasks with domain-specific applications. The existing literature lacks a comprehensive and systematic review that provides a unified perspective on the underlying principles and pipelines for multi-modal time series learning. In this survey, we provide a systematic and up-to-date overview of existing methods for multi-modal time series analysis. 
As shown in Figure 1, we discuss the challenges, motivations, and preliminaries of multi-modal time series. Then we introduce the general pipeline for multi-modal time series analysis and propose three types of interactions for cross-modal modeling between time series and other modalities - fusion, alignment, and transference - at the input, intermediate and output level, respectively. We also discuss the applications of multi-modal time series across multiple domains. Furthermore, we provide Table 2 to comprehensively summarize representative methods, encapsulating the modalities, fine-grained cross-modal interactions, real-world domains and tasks. Finally, we highlight potential future research opportunities to further advance time series analysis with multi-modal data. In summary, the major contributions of our survey are:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 51, + 306, + 295, + 393 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 51, + 306, + 295, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 306, + 295, + 327 + ], + "spans": [ + { + "bbox": [ + 51, + 306, + 295, + 327 + ], + "type": "text", + "content": "- We systematically catalog over 40 multi-modal time series methods with the corresponding open-source datasets." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 51, + 328, + 295, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 328, + 295, + 360 + ], + "spans": [ + { + "bbox": [ + 51, + 328, + 295, + 360 + ], + "type": "text", + "content": "- We uniquely categorize the existing methods into a unified cross-modal interaction framework, highlighting fusion, alignment, and transference at the input/intermediate/output levels." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 51, + 361, + 294, + 393 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 361, + 294, + 393 + ], + "spans": [ + { + "bbox": [ + 51, + 361, + 294, + 393 + ], + "type": "text", + "content": "- We discuss real-world applications of multi-modal time series and identify promising future directions, encouraging researchers and practitioners to explore and exploit multi-modal time series." + } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 50, + 403, + 205, + 416 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 403, + 205, + 416 + ], + "spans": [ + { + "bbox": [ + 50, + 403, + 205, + 416 + ], + "type": "text", + "content": "2 Background and Our Scope" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 418, + 234, + 431 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 418, + 234, + 431 + ], + "spans": [ + { + "bbox": [ + 50, + 418, + 234, + 431 + ], + "type": "text", + "content": "2.1 Multi-modal Machine Learning" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 434, + 295, + 631 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 434, + 295, + 631 + ], + "spans": [ + { + "bbox": [ + 50, + 434, + 295, + 631 + ], + "type": "text", + "content": "Recent advancements in multi-modal machine learning have significantly enhanced models' ability to process and integrate data from diverse modalities, such as language, acoustic, vision, and tabular data [25, 66, 91]. With the development of deep learning architectures and sophisticated interaction designs, models are able to learn, infer, and reason by integrating multiple communicative modalities. 
Current research in multi-modal machine learning spans multiple key areas, including (1) representing multi-modal data to encode joint and individual characteristics, (2) identifying interconnections between modality elements, (3) transferring knowledge across modalities, and (4) theoretically and empirically analyzing the underlying learning process in a quantitative manner. We refer the audiences to the recent surveys [2, 49] for a more detailed overview of general multi-modal machine learning research. Building upon these advancements, we investigate multi-modal time series analysis with a focus on modeling temporal dependencies and leveraging the data interactions across heterogeneous modalities for predictive and analytical tasks." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 50, + 640, + 247, + 653 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 640, + 247, + 653 + ], + "spans": [ + { + "bbox": [ + 50, + 640, + 247, + 653 + ], + "type": "text", + "content": "2.2 Multi-modal Time Series Analysis" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 50, + 654, + 294, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 654, + 294, + 709 + ], + "spans": [ + { + "bbox": [ + 50, + 654, + 294, + 709 + ], + "type": "text", + "content": "Multi-modal time series analysis aims to model time series data in combination with other complementary modalities. By leveraging cross-modal interactions, this approach yields deeper insights and more robust solutions for a wide range of predictive and analytical tasks across diverse real-world contexts." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 85, + 559, + 183 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 85, + 559, + 183 + ], + "spans": [ + { + "bbox": [ + 313, + 85, + 559, + 183 + ], + "type": "text", + "content": "This survey aims to provide a unique and systematic perspective on effectively leveraging cross-modal interactions from relevant real-world contexts to advance multi-modal time series analysis, addressing both foundational principles and practical solutions. Our assessment is threefold: (1) reviewing multi-modal time series data (Section 3), (2) analyzing cross-modal interactions between time series and other modalities (Section 4), and (3) revealing the impact of multi-modal time series analysis in applications across diverse domains (Section 5)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 184, + 559, + 337 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 184, + 559, + 337 + ], + "spans": [ + { + "bbox": [ + 313, + 184, + 559, + 337 + ], + "type": "text", + "content": "To resolve ambiguities, we define the scope of our survey by clarifying the types of time series considered and the criteria for multi-modal time series methods. First, we mainly consider standard time series and spatial time series. For the latter, spatial structures (often represented as graphs) are inherently paired with temporal data rather than treated as a separate modality. Second, we focus on methods that leverage multi-modal inputs from real-world contexts to provide complementary information, but for generation and retrieval tasks, the focus is more on transforming the input modality to another output modality. We acknowledge recent research on representing time series as a single modality (e.g., time series as images [15, 46, 62, 99, 100], time series as tabular data [27]) for downstream tasks. 
However, as these approaches are less relevant to our scope, we refer readers to their respective works." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 338, + 559, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 338, + 559, + 458 + ], + "spans": [ + { + "bbox": [ + 313, + 338, + 559, + 458 + ], + "type": "text", + "content": "Besides, we would like to highlight the difference between our survey and recent related survey and position papers. Ni et al. [57] focuses on imaging-based transformations of time series and subsequent visual modeling techniques, where the discussion on multi-modal models is limited to those involving vision modalities. Kong et al. [39] concentrates on the use of multi-modal large language models (LLMs) for enhancing reasoning capabilities (e.g., causal reasoning, QA, planning, etc.) with multi-modal context. In contrast, our survey provides a broader and structured framework by delivering a systematic and unified perspective of multi-modal time series analysis, not limited to a specific modality or task type." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 314, + 482, + 484, + 493 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 482, + 484, + 493 + ], + "spans": [ + { + "bbox": [ + 314, + 482, + 484, + 493 + ], + "type": "text", + "content": "3 Multi-modal Time Series Data" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 314, + 498, + 558, + 509 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 498, + 558, + 509 + ], + "spans": [ + { + "bbox": [ + 314, + 498, + 558, + 509 + ], + "type": "text", + "content": "3.1 Modalities in Multi-modal Time Series Data" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 512, + 559, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 512, + 559, + 710 + ], + "spans": [ + { + "bbox": [ + 313, + 512, + 559, + 710 + ], + "type": "text", + "content": "Multi-modal time series data often originate from diverse sources, each exhibiting unique characteristics that influence how they are processed and analyzed. Besides Time Series, i.e., continuous or discrete measurements recorded over time, such as sensor readings, financial metrics, or physiological signals, their modalities often include: 1) Tabular: Time-indexed records that are inherently organized in a tabular format, such as event logs, transaction records, or demographic information. 2) Text: Time-stamped or domain-specific textual information – like clinical notes, financial reports, news articles, or social media posts – that provides contextual or interpretative insights. 3) Image: Visual data acquired as images over time, such as photographs, medical images (e.g., X-rays, MRI), satellite imagery, or visual representations generated from time series data. 4) Graph: Relational data representing interactions or structural dependencies among entities that evolve. They are typically modeled as networks or graphs, where the connections may change dynamically. 
Although audio is widely studied as an independent modality in multi-modal research, we consider it a special" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 524, + 60, + 558, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 524, + 60, + 558, + 68 + ], + "spans": [ + { + "bbox": [ + 524, + 60, + 558, + 68 + ], + "type": "text", + "content": "Jiang, et al." + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 99, + 104, + 512, + 240 + ], + "blocks": [ + { + "bbox": [ + 121, + 83, + 488, + 94 + ], + "lines": [ + { + "bbox": [ + 121, + 83, + 488, + 94 + ], + "spans": [ + { + "bbox": [ + 121, + 83, + 488, + 94 + ], + "type": "text", + "content": "Table 1: Representative open-source multi-modal time series datasets and across domains." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 99, + 104, + 512, + 240 + ], + "lines": [ + { + "bbox": [ + 99, + 104, + 512, + 240 + ], + "spans": [ + { + "bbox": [ + 99, + 104, + 512, + 240 + ], + "type": "table", + "html": "
DomainDataset (Superscripts include the URLs to the datasets)Modalities
HealthcareMIMIC-III [35][1], MIMIC-IV [34][2]TS, Text, Tabular
ICBHI [65][3], Coswara [4][4], KAUH [21][5], PTB-XL [71][6], ZuCo [14, 26][7]TS, Text
Image-EEG [22][8]TS, Image
FinanceFNSPID [17][9], ACL18 [84][10], CIKM18 [79][11], DOW30 [11][12]TS, Text
Multi-domainTime-MMD [53][13], TimeCAP [42][14], NewsForecast [73][15], TTC [37][16], CiK [77][17], TSQA [38][18]TS, Text
RetailVISUELLE [70][19]TS, Image, Text
IoTLEMMA-RCA [40][20]TS, Text
SpeechLRS3 [1][21], VoxCeleb2 [13][22]TS (Audio), Image
TrafficNYC-taxi, NYC-bike [48][23]ST, Text
EnvironmentTerra [10][24]ST, Text
", + "image_path": "bf8f5117d91e3da11dba5893a8b9ac5948fa50f19246cf4dd41c242bc923a774.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 256, + 294, + 278 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 256, + 294, + 278 + ], + "spans": [ + { + "bbox": [ + 50, + 256, + 294, + 278 + ], + "type": "text", + "content": "form of time series in this survey and briefly discuss representative works within this scope." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 51, + 293, + 255, + 304 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 293, + 255, + 304 + ], + "spans": [ + { + "bbox": [ + 51, + 293, + 255, + 304 + ], + "type": "text", + "content": "3.2 Common Datasets and Benchmarks" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 308, + 300, + 351 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 308, + 300, + 351 + ], + "spans": [ + { + "bbox": [ + 50, + 308, + 300, + 351 + ], + "type": "text", + "content": "Multi-modal time series datasets vary a lot and are domain-dependent, each with unique data characteristics and modalities. In Table 1 we provide representative datasets categorized by domain, along with their respective modalities:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 351, + 295, + 625 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 351, + 295, + 625 + ], + "spans": [ + { + "bbox": [ + 50, + 351, + 295, + 625 + ], + "type": "text", + "content": "Healthcare: In this domain, physiological signals (e.g., ECG, EEG) are extensively analyzed alongside textual data such as clinical notes, patient demographics, and tabular data including vital signs and laboratory results. 
Common datasets include MIMIC-III [35], a comprehensive dataset containing electronic health records (EHRs) of ICU patients with physiological measurements, clinical notes, and diagnostic information, widely used for tasks like patient monitoring, mortality prediction, and clinical decision support. MIMIC-IV [34] is an extension of MIMIC-III, which provide detailed physiological signals, clinical narratives, medication records, and demographic data from a large population of critically ill patients, frequently utilized for predictive modeling, clinical outcome analysis, and health informatics research. Other notable healthcare datasets include ICBHI [65], which contains respiratory sound recordings paired with clinical annotations for respiratory disease classification; Coswara [4], which provides respiratory audio samples and rich metadata for COVID-19 detection tasks; KAUH [21], which comprises audio records and corresponding annotations for healthcare analytics; PTB-XL [71], a large-scale ECG dataset annotated with diagnostic labels for cardiac monitoring and diagnosis; ZuCo [14, 26], which consists of simultaneous EEG and textual data from reading comprehension tasks, being useful for cognitive neuroscience studies; and Image-EEG [22], which pairs EEG signals with images of objects on a natural background, aiding studies in visual neuroscience and computer vision." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 632, + 295, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 632, + 295, + 710 + ], + "spans": [ + { + "bbox": [ + 50, + 632, + 295, + 710 + ], + "type": "text", + "content": "Finance: Datasets that combine time series data with financial news and reports are instrumental in financial analysis and modeling. Notable examples include ACL18 [84], CIKM18 [79], and DOW30 [11]. These datasets focus on high-trade-volume stocks from the U.S. 
stock markets, providing historical stock price data, such as opening, high, low, and closing prices; alongside related textual information, including tweets or financial news. Another" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 314, + 256, + 559, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 256, + 559, + 289 + ], + "spans": [ + { + "bbox": [ + 314, + 256, + 559, + 289 + ], + "type": "text", + "content": "large-scale dataset, FNSPID [17], consists of stock prices and time-aligned financial news records, covering over 4,000 companies from 1999 to 2023." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 309, + 559, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 309, + 559, + 605 + ], + "spans": [ + { + "bbox": [ + 313, + 309, + 559, + 605 + ], + "type": "text", + "content": "Multi-domain: Datasets featuring general-purpose numerical time series combined with textual data are suitable for broad analytical applications. Examples include Time-MMD [53], which encompasses nine primary data domains: Agriculture, Climate, Economy, Energy, Environment, Health, Security, Social Good, and Traffic, while ensuring fine-grained alignment between time series and textual data; TimeCAP [42] compiles seven real-world time series datasets across three domains: weather, finance, and healthcare. To generate textual descriptions for each time series, a large language model (LLM) agent is employed, leveraging contextual information and domain-specific knowledge. NewsForecast [73] integrates task-specific time series data with verified public news reports across various domains, including finance, energy, traffic, and cryptocurrency; TTC [37] is a meticulously curated, time-aligned dataset designed for multimodal forecasting. 
It consists of paired time series and text data synchronized to timestamps, spanning two distinct domains: climate science and healthcare; CiK [77] is a dataset comprising 71 forecasting tasks across seven real-world domains. Each task necessitates the integration of both numerical data and textual information. The covered domains include Climatology, Economics, Energy, Mechanics, Public Safety, Transportation, and Retail. The TSQA [38] dataset consists of 200k question-answer pairs derived from time series data across 12 domains: healthcare, finance, energy, traffic, environment, IoT, nature, transport, human activities, machine sensors, AIOps, and the web. These QA pairs are designed to support five key tasks: forecasting, imputation, anomaly detection, classification, and open-ended reasoning." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 610, + 559, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 610, + 559, + 710 + ], + "spans": [ + { + "bbox": [ + 313, + 610, + 559, + 710 + ], + "type": "text", + "content": "Other domains: Beyond the previously discussed major sectors, multi-modal time series analysis extends to various other domains. In Retail, datasets such as VISUELLE [70] integrate numerical sales data with product images and textual descriptions, facilitating thorough analyses of consumer behavior and inventory management. 
The Internet of Things (IoT) domain benefits from datasets such as LEMMA-RCA [40], which combine time series sensor data with textual metadata, enabling enhanced monitoring and more robust and secure methodologies that ensure the high performance of modern" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 59, + 217, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 59, + 217, + 69 + ], + "spans": [ + { + "bbox": [ + 52, + 59, + 217, + 69 + ], + "type": "text", + "content": "Multi-modal Time Series Analysis: A Tutorial and Survey" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 50, + 84, + 295, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 84, + 295, + 194 + ], + "spans": [ + { + "bbox": [ + 50, + 84, + 295, + 194 + ], + "type": "text", + "content": "systems. In the Speech domain, datasets like LRS3 [1] and VoxCeleb2 [13] integrate audio recordings with corresponding visual data, supporting advancements in speech recognition and speaker identification technologies. In the Traffic domain, datasets like NYC-Taxi, NYC-Bike [48] contain spatial-temporal (ST) data alongside associated textual metadata. These integrations allow LLMs to effectively capture and utilize spatial-temporal contextual signals. In the Environment domain, Terra [10] collect 45 years of global geographic spatial-temporal data, supplemented with textual descriptions." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 50, + 207, + 282, + 218 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 207, + 282, + 218 + ], + "spans": [ + { + "bbox": [ + 50, + 207, + 282, + 218 + ], + "type": "text", + "content": "4 Cross-modal Interactions with Time Series" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 221, + 295, + 276 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 221, + 295, + 276 + ], + "spans": [ + { + "bbox": [ + 50, + 221, + 295, + 276 + ], + "type": "text", + "content": "In this section, we conduct a detailed review of existing research on multi-modal time series analysis by thoroughly analyzing cross-modal interactions. We also elaborate how existing multi-modal methods are tailored for domain-specific applications in Section 5. The detailed taxonomy is provided in Table 2." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 277, + 295, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 277, + 295, + 331 + ], + "spans": [ + { + "bbox": [ + 50, + 277, + 295, + 331 + ], + "type": "text", + "content": "We define three fundamental types of interactions between time series and other modalities, including fusion, alignment, and transference, which occur at different stages within a framework - input, intermediate (i.e., representations or intermediate outputs), and output. The representative examples are provided in Figure 2." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 51, + 344, + 113, + 355 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 344, + 113, + 355 + ], + "spans": [ + { + "bbox": [ + 51, + 344, + 113, + 355 + ], + "type": "text", + "content": "4.1 Fusion" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 358, + 295, + 490 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 358, + 295, + 490 + ], + "spans": [ + { + "bbox": [ + 50, + 358, + 295, + 490 + ], + "type": "text", + "content": "Fusion refers to the process of integrating heterogeneous modalities in a way that captures complementary information across diverse sources to improve time series modeling. To fuse multi-modal inputs, a common practice is to directly integrate time series, tabular data and texts into a unified textual prompt, then use it to query LLMs for downstream tasks. This is typically facilitated by instruction fine-tuning for task-oriented analysis [19, 24, 38, 48, 56, 73, 90]. Some works also leverage the zero-shot reasoning and inference capability of pretrained LLMs (e.g., GPT-4 and its variants) [72, 81, 89]. Recent research efforts like TaTS [47] attempt to integrate paired text embedding as an additional variable of time series for temporal modeling, yielding competitive task performance." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 491, + 295, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 491, + 295, + 654 + ], + "spans": [ + { + "bbox": [ + 50, + 491, + 295, + 654 + ], + "type": "text", + "content": "Most existing methods perform cross-modal fusion at the intermediate stage, such as adding and concatenating multi-modal representations, where each individual modal encoder first maps the raw data into a shared latent space. 
Addition combines time series and other modalities by summing up encoded representations, effectively blending shared information while preserving their interconnections in the latent space [6, 30, 51, 85, 95, 97]. On the other hand, concatenation stacks multi-modal representations along the same dimension, retaining modality-specific characteristics and allowing models to capture joint relationships between the modalities [16, 36, 36, 37]. To effectively leverage cross-modal information, existing methods often incorporate alignment designs after concatenating representations [5, 11, 12, 18, 31, 33, 41, 42, 44, 54, 55, 60, 69, 70, 74, 85, 96]. Alignment is also used in the aforementioned additions, which will be detailed in Section 4.2." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 654, + 295, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 654, + 295, + 710 + ], + "spans": [ + { + "bbox": [ + 50, + 654, + 295, + 710 + ], + "type": "text", + "content": "When fusion is performed at the output level, different modalities contribute separately to the final output, allowing each modality to retain its unique predictive signal [31, 41, 42, 53]. 
Time-MMD [53] provides a paradigm that fuses predictions from both state-of-the-art forecasters and a pretrained language model with a projection" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 318, + 83, + 558, + 220 + ], + "blocks": [ + { + "bbox": [ + 318, + 83, + 558, + 220 + ], + "lines": [ + { + "bbox": [ + 318, + 83, + 558, + 220 + ], + "spans": [ + { + "bbox": [ + 318, + 83, + 558, + 220 + ], + "type": "image", + "image_path": "dfc75126255eb54797d7990d9a20668b0a1b9ee7743a4acf0e3af423c98e1049.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 314, + 232, + 559, + 255 + ], + "lines": [ + { + "bbox": [ + 314, + 232, + 559, + 255 + ], + "spans": [ + { + "bbox": [ + 314, + 232, + 559, + 255 + ], + "type": "text", + "content": "Figure 2: Categorization of cross-modal interaction methods and representative examples." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 268, + 559, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 268, + 559, + 411 + ], + "spans": [ + { + "bbox": [ + 313, + 268, + 559, + 411 + ], + "type": "text", + "content": "layer, in an end-to-end manner. MOAT [41] introduces a two-stage framework for multi-modal time series forecasting. In the first stage, the model is optimized to generate forecasts from decomposed time series and text embeddings. In the second stage, an offline synthesis via MLP is applied to dynamically fuse different components, yielding the final forecast based on their relative contributions. Beyond fusing outputs from a single model, TimeCAP [42] enhances performance by combining predictions from both a multi-modal predictor and a pretrained LLM, which synergizes the gradient-based method and LLM agents reasoning on real-world contexts. 
Output fusion gains advantage of design flexibility and robustness, but it may not fully utilize the complementary relationship between modalities without additional countermeasures." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 314, + 411, + 562, + 466 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 411, + 562, + 466 + ], + "spans": [ + { + "bbox": [ + 314, + 411, + 562, + 466 + ], + "type": "text", + "content": "Cross-modal fusion relies on well-aligned multi-modal data for effective exploitation of the contextual information. However, ideally-aligned data may not be given in real-world scenarios. As such, existing methods also leverage alignment mechanisms to mitigate the challenge." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 315, + 475, + 397, + 487 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 475, + 397, + 487 + ], + "spans": [ + { + "bbox": [ + 315, + 475, + 397, + 487 + ], + "type": "text", + "content": "4.2 Alignment" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 490, + 559, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 490, + 559, + 632 + ], + "spans": [ + { + "bbox": [ + 313, + 490, + 559, + 632 + ], + "type": "text", + "content": "Alignment ensures that the relationships between different modalities are preserved and semantically coherent when integrated into a unified learning framework. At the input level, we primarily refer alignment to data preprocessing techniques that aim at mitigating temporal misalignment caused by missing values, irregular sampling intervals, and differing granularities across modalities. This process is crucial for ensuring that data from multiple sources are properly synchronized before fusion, where domain knowledge is usually needed to handle such inconsistencies [10, 53, 73]. In addition, none of the existing methods we reviewed explicitly perform output alignment. 
However, the aforementioned output fusion can be easily adapted to alignment through the incorporation of a gating or attention mechanism that we will introduce shortly." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 633, + 559, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 633, + 559, + 710 + ], + "spans": [ + { + "bbox": [ + 313, + 633, + 559, + 710 + ], + "type": "text", + "content": "Alignment at the intermediate stage plays a crucial role in multimodal interactions. We first introduce the alignment of multi-modal representations, spanning a range of techniques from model component design to learning objectives. The common component designs include self-attention [5, 12, 30, 33, 41, 42, 44, 54, 55, 69], cross-attention [6, 18, 51, 60, 70, 82, 85, 98] and gating mechanisms [82, 98]. Self-attention is often used to fuse multi-modal representations. It" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 524, + 60, + 558, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 524, + 60, + 558, + 68 + ], + "spans": [ + { + "bbox": [ + 524, + 60, + 558, + 68 + ], + "type": "text", + "content": "Jiang, et al." + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 52, + 114, + 567, + 696 + ], + "blocks": [ + { + "bbox": [ + 52, + 84, + 558, + 110 + ], + "lines": [ + { + "bbox": [ + 52, + 84, + 558, + 110 + ], + "spans": [ + { + "bbox": [ + 52, + 84, + 558, + 110 + ], + "type": "text", + "content": "Table 2: Taxonomy of representative multi-modal time series methods. Modality refers to the different data modalities involved in each method. 
TS represents standard time series, " + }, + { + "bbox": [ + 52, + 84, + 558, + 110 + ], + "type": "inline_equation", + "content": "ST" + }, + { + "bbox": [ + 52, + 84, + 558, + 110 + ], + "type": "text", + "content": " denotes spatial time series. The Method column lists the techniques used for each interaction, separated by semicolons, where each interaction may include one or more techniques, separated by commas. Superscripts in the Code column include the URLs to Github repositories." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 52, + 114, + 567, + 696 + ], + "lines": [ + { + "bbox": [ + 52, + 114, + 567, + 696 + ], + "spans": [ + { + "bbox": [ + 52, + 114, + 567, + 696 + ], + "type": "table", + "html": "
MethodModalityDomainTaskCross-Modal InteractionLarge ModelYearCode
StageFusionAlign.Trans.Method
Time-MMD [53]TS, TextGeneralForecastingOutputXXAdditionMultiple2024Yes[1]
Wang et al. [73]TS, TextGeneralForecastingInput IntermediateXXPrompt Prompt; LLM ReasoningLLaMa2 GPT-4 Turbo2024Yes[2]
GPT4MTS [30]TS, TextGeneralForecastingIntermediateXAddition; Self-attentionGPT-22024No
TimeCMA [51]TS, TextGeneralForecastingInput IntermediateXXMeta-description Addition; Cross-attentionGPT-22025Yes[3]
MOAT [41]TS, TextGeneralForecastingIntermediate OutputXConcat.; Self-attention Offline Synthesis (MLP)S-Bert2024No
TimeCAP [42]TS, TextGeneralClassificationInput Intermediate OutputXXLLM Generation Concat.; Self-attention, Retrieval AdditionBert, GPT-42024No
TimeXL [31]TS, TextGeneralClassification ForecastingIntermediate OutputXConcat., Prompt; LLM Reasoning AdditionBert, S-Bert GPT-4o2025No
Hybrid-MMF [37]TS, TextGeneralForecastingIntermediateXXConcat.GPT-4o2024Yes[4]
Time-LLM [33]TS, TextGeneralForecastingInput IntermediateXXMeta-description Concat.; Self-attentionLLaMA, GPT-22024Yes[5]
Time-VLM [98]TS, Text, ImageGeneralForecastingInput IntermediateXXFeat. Imaging, Meta-description Addition; Gating, Cross-attentionViLT, CLIP BLIP-22025No
Unitime [55]TS, TextGeneralForecastingInput IntermediateXXMeta-description Concat.; Self-attentionGPT-22024Yes[6]
TESSA [50]TS, TextGeneralAnnotationIntermediatePrompt; RL; LLM GenerationGPT-4o2024No
InstruTime [12]TS, TextGeneralClassificationIntermediateXConcat.; Self-attentionGPT-22025Yes[7]
MATMCD [68]TS, Text, GraphGeneralCausal DiscoveryIntermediatePrompt; LLM Reasoning; SupervisionMultiple2025No
STG-LLM [54]ST, TextGeneralForecastingIntermediateXConcat.; Self-attentionGPT-22024No
TableTime [72]TS, TextGeneralClassificationInputXPrompt; ReformulateMultiple2024Yes[8]
ContextFormer [6]TS, TabularGeneralForecastingIntermediateXAddition; Cross-attentionNo2025No
Time-MQA [38]TS, TextGeneralMultipleInputXXPromptMultiple2025Yes[9]
MAN-SF [67]TS, Text, GraphFinanceClassificationIntermediateXBilinear; Graph ConvolutionUSE2020No
Bamford et al. [3]TS, Text, TS, ImageFinanceRetrievalIntermediate OutputXXSupervisionS-Bert2024No
Chen et al. [11]TS, Text, GraphFinanceClassificationIntermediateXXLLM Generation Concat.; Graph ConvolutionChatGPT2023No
Xie et al. [81]TS, TextFinanceClassificationInputXXPromptChatGPT2023No
Yu et al. [89]TS, TextFinanceForecastingInputXXPromptGPT-4, Open LLaMA2023No
MedTsLLM [5]TS, Text, TabularHealthcareMultipleIntermediateXConcat.; Self-attentionLlama22024Yes[10]
RespLLM [95]TS (Audio), TextHealthcareClassificationIntermediateXAddition, Self-attentionOpenBioLLM-8B2024No
METS [45]TS, TextHealthcareClassificationOutputXXContrastiveClinicalBert2023No
Wang et al. [75]TS, TextHealthcareClassificationIntermediateXXSupervisionBart, Bert, Roberta2021No
EEG2TEXT [52]TS, TextHealthcareGenerationOutputXXSelf-supervision, SupervisionBart2024No
MEDHMP [74]TS, TextHealthcareClassificationIntermediateXConcat.; Self-attention, ContrastiveClinicalT52023Yes[11]
Deznabi et al. [16]TS, TextHealthcareClassificationIntermediateXXConcat.Bio+Clinical Bert2021Yes[12]
Niu et al. [60]TS, TextHealthcareClassificationIntermediateXConcat.; Cross-attentionBioBERT2023No
Yang et al. [85]TS, TextHealthcareClassificationIntermediateXConcat.; Addition; GatingClinicalBERT2021Yes[13]
Liu et al. [56]TS, TextHealthcareClassification RegressionInputXXPromptPaLM2023Yes[14]
xTP-LLM [24]ST, TextTraffic ForecastingInputXPrompt; Meta-descriptionLlama2-7B-chat2024Yes[15]
UrbanGPT [48]ST, TextTraffic ForecastingInputXPrompt; Meta-descriptionVicuna-7B2024Yes[16]
CityGPT [19]ST, TextMobilityInputXXPromptMultiple2025Yes[17]
MULAN [97]TS, Text, GraphIoT Causal DiscoveryIntermediateAddition; Contrastive; SupervisionNo2024No
MIA [82]TS, ImageIoT Anomaly DetectionIntermediateXAddition; Cross-attention, GatingNo2023No
Ekambaram et al. [18]TS, Image, TextRetail ForecastingIntermediateXConcat.; Self & Cross-attentionNo2020Yes[18]
Skenderi et al. [70]TS, Image, TextRetail ForecastingIntermediateXConcat.; Cross-attentionNo2024Yes[19]
VIMTS [96]ST, Image EnvironmentImputationIntermediateXConcat.; SupervisionNo2022No
LTE [44]ST, Text, Image EnvironmentForecastingIntermediateXConcat.; Self-attentionLLaMA-2-7B2024Yes[20]
AV-HubERT [69]TS (Audio), Image SpeechClassificationIntermediateXConcat.; Self-attentionHuBert2022Yes[21]
SpeechGPT [90]TS(Audio), TextGeneration intermediateXConcat.; Self-attentionLLaMA-13B2023Yes[22]
LA-GCN [83]ST, TextClassificationIntermediateXSupervisionBert2023Yes[23]
", + "image_path": "3c674ad0e2e84929c9f56d3334e69ed89be4011b598a814f2e99ea5de4015cb1.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 60, + 216, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 60, + 216, + 68 + ], + "spans": [ + { + "bbox": [ + 52, + 60, + 216, + 68 + ], + "type": "text", + "content": "Multi-modal Time Series Analysis: A Tutorial and Survey" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 50, + 84, + 294, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 84, + 294, + 140 + ], + "spans": [ + { + "bbox": [ + 50, + 84, + 294, + 140 + ], + "type": "text", + "content": "enables a joint and undirected alignment across all modalities by dynamically attending to important features. Given multi-modal embeddings " + }, + { + "bbox": [ + 50, + 84, + 294, + 140 + ], + "type": "inline_equation", + "content": "E_{\\mathrm{mm}} \\in \\mathbb{R}^{n \\times d}" + }, + { + "bbox": [ + 50, + 84, + 294, + 140 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 50, + 84, + 294, + 140 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 50, + 84, + 294, + 140 + ], + "type": "text", + "content": " is the total number of modality tokens and " + }, + { + "bbox": [ + 50, + 84, + 294, + 140 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 50, + 84, + 294, + 140 + ], + "type": "text", + "content": " is the embedding dimension, self-attention is computed as follows:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 100, + 144, + 246, + 174 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 100, + 144, + 246, + 174 + ], + "spans": [ + { + "bbox": [ + 100, + 144, + 246, + 174 + ], + "type": "interline_equation", + "content": 
"\\operatorname {A t t e n t i o n} \\left(E _ {\\mathrm {m m}}\\right) = \\operatorname {s o f t m a x} \\left(\\frac {Q K ^ {\\top}}{\\sqrt {d _ {k}}}\\right) V", + "image_path": "363c4647bba76cf7a9718c32981fbfa35c8c6001ff5bccdcad97a7bdb5afc4a9.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 177, + 294, + 212 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 177, + 294, + 212 + ], + "spans": [ + { + "bbox": [ + 50, + 177, + 294, + 212 + ], + "type": "text", + "content": "where the queries " + }, + { + "bbox": [ + 50, + 177, + 294, + 212 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 50, + 177, + 294, + 212 + ], + "type": "text", + "content": ", keys " + }, + { + "bbox": [ + 50, + 177, + 294, + 212 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 50, + 177, + 294, + 212 + ], + "type": "text", + "content": ", and values " + }, + { + "bbox": [ + 50, + 177, + 294, + 212 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 50, + 177, + 294, + 212 + ], + "type": "text", + "content": " are linear projections of " + }, + { + "bbox": [ + 50, + 177, + 294, + 212 + ], + "type": "inline_equation", + "content": "E_{\\mathrm{mm}}" + }, + { + "bbox": [ + 50, + 177, + 294, + 212 + ], + "type": "text", + "content": ": " + }, + { + "bbox": [ + 50, + 177, + 294, + 212 + ], + "type": "inline_equation", + "content": "Q = E_{\\mathrm{mm}}W_{Q}" + }, + { + "bbox": [ + 50, + 177, + 294, + 212 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 50, + 177, + 294, + 212 + ], + "type": "inline_equation", + "content": "K = E_{\\mathrm{mm}}W_{K}" + }, + { + "bbox": [ + 50, + 177, + 294, + 212 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 50, + 177, + 294, + 212 + ], + "type": "inline_equation", + "content": "V = E_{\\mathrm{mm}}W_{V}" + }, + { + "bbox": [ + 50, + 177, + 294, + 212 + ], + "type": "text", + "content": " with learnable weights of 
dimensionality " + }, + { + "bbox": [ + 50, + 177, + 294, + 212 + ], + "type": "inline_equation", + "content": "d_{k}" + }, + { + "bbox": [ + 50, + 177, + 294, + 212 + ], + "type": "text", + "content": ": " + }, + { + "bbox": [ + 50, + 177, + 294, + 212 + ], + "type": "inline_equation", + "content": "W_{Q}, W_{K}, W_{V} \\in \\mathbb{R}^{d \\times d_{k}}" + }, + { + "bbox": [ + 50, + 177, + 294, + 212 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 212, + 295, + 278 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 212, + 295, + 278 + ], + "spans": [ + { + "bbox": [ + 50, + 212, + 295, + 278 + ], + "type": "text", + "content": "In cross-attention, time series serves as the query modality to get contextualized by other modalities, providing a directed alignment that ensures auxiliary modalities contribute relevant contextual information while preserving the temporal structure of time series. Given a query embedding " + }, + { + "bbox": [ + 50, + 212, + 295, + 278 + ], + "type": "inline_equation", + "content": "E_{\\mathrm{ts}} \\in \\mathbb{R}^{n \\times d}" + }, + { + "bbox": [ + 50, + 212, + 295, + 278 + ], + "type": "text", + "content": " and that of auxiliary modalities " + }, + { + "bbox": [ + 50, + 212, + 295, + 278 + ], + "type": "inline_equation", + "content": "E_{c} \\in \\mathbb{R}^{n \\times d}" + }, + { + "bbox": [ + 50, + 212, + 295, + 278 + ], + "type": "text", + "content": " as keys and values:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 85, + 282, + 261, + 312 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 282, + 261, + 312 + ], + "spans": [ + { + "bbox": [ + 85, + 282, + 261, + 312 + ], + "type": "interline_equation", + "content": "\\text {C r o s s A t t e n t i o n} \\left(E _ {\\mathrm {t s}}, E _ {\\mathrm {c}}\\right) = \\operatorname {s o f t m a x} \\left(\\frac {Q _ {\\mathrm {t s}} K _ {\\mathrm {c}} ^ 
{\\top}}{\\sqrt {d _ {k}}}\\right) V _ {\\mathrm {c}}", + "image_path": "3d0c0748a3d73486d41680250f0cee30ae17188b7f3582d6184ca022935cf2a5.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 315, + 294, + 347 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 315, + 294, + 347 + ], + "spans": [ + { + "bbox": [ + 50, + 315, + 294, + 347 + ], + "type": "text", + "content": "where the query, key and value are denoted as " + }, + { + "bbox": [ + 50, + 315, + 294, + 347 + ], + "type": "inline_equation", + "content": "Q_{\\mathrm{ts}} = E_{\\mathrm{ts}}W_{Q}" + }, + { + "bbox": [ + 50, + 315, + 294, + 347 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 50, + 315, + 294, + 347 + ], + "type": "inline_equation", + "content": "K_{\\mathrm{c}} = E_{\\mathrm{c}}W_{K}" + }, + { + "bbox": [ + 50, + 315, + 294, + 347 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 50, + 315, + 294, + 347 + ], + "type": "inline_equation", + "content": "V_{\\mathrm{c}} = E_{\\mathrm{c}}W_{V}" + }, + { + "bbox": [ + 50, + 315, + 294, + 347 + ], + "type": "text", + "content": ". Note that existing methods adopt multi-head attentions, which is omitted here for simplicity." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 348, + 295, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 348, + 295, + 380 + ], + "spans": [ + { + "bbox": [ + 50, + 348, + 295, + 380 + ], + "type": "text", + "content": "Similarly, the gating mechanism is a parametric filtering operation that explicitly regulates the influence of time series and other modalities on the fused embeddings in " + }, + { + "bbox": [ + 50, + 348, + 295, + 380 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 50, + 348, + 295, + 380 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 72, + 385, + 272, + 398 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 385, + 272, + 398 + ], + "spans": [ + { + "bbox": [ + 72, + 385, + 272, + 398 + ], + "type": "interline_equation", + "content": "G = \\sigma \\left(W _ {g} \\left[ E _ {\\mathrm {t s}}; E _ {c} \\right] + b _ {g}\\right), \\quad E = G \\odot E _ {\\mathrm {t s}} + (1 - G) \\odot E _ {c}", + "image_path": "d787f6fd60aa56bc27145664a56d4930b471531c8fa3b48352b56a19ae232274.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 50, + 401, + 294, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 401, + 294, + 425 + ], + "spans": [ + { + "bbox": [ + 50, + 401, + 294, + 425 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 50, + 401, + 294, + 425 + ], + "type": "inline_equation", + "content": "\\sigma (\\cdot)" + }, + { + "bbox": [ + 50, + 401, + 294, + 425 + ], + "type": "text", + "content": " denotes the sigmoid function, the learnable weight and bias are denoted as " + }, + { + "bbox": [ + 50, + 401, + 294, + 425 + ], + "type": "inline_equation", + "content": "W_{g}\\in \\mathbb{R}^{2d\\times d}" + }, + { + "bbox": [ + 50, + 401, + 294, + 425 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 50, + 401, + 294, + 425 + ], + "type": 
"inline_equation", + "content": "b_{g}\\in \\mathbb{R}^{d}" + }, + { + "bbox": [ + 50, + 401, + 294, + 425 + ], + "type": "text", + "content": ", respectively." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 50, + 425, + 295, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 425, + 295, + 491 + ], + "spans": [ + { + "bbox": [ + 50, + 425, + 295, + 491 + ], + "type": "text", + "content": "When a graph modality is available from external contexts, the underlying topological insights can be leveraged for graph-based alignment [11, 67]. Unlike the above methods that rely solely on feature interactions, it explicitly aligns multi-modal representations with relational structures through graph convolution, enabling context-aware feature propagation across modalities." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 50, + 491, + 297, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 491, + 297, + 654 + ], + "spans": [ + { + "bbox": [ + 50, + 491, + 297, + 654 + ], + "type": "text", + "content": "Representation alignments can also be achieved by learning objectives [3, 45, 83, 96, 97]. For example, MULAN [97] extracts modality-invariant and modality-specific representations from multi-modal time series. It employs contrastive learning to enhance cross-modal alignment by maximizing the similarity between invariant representations across modalities while minimizing the similarity between invariant and specific representations of the same modality. Moreover, Bamford et al. [3] align cross-modal representations by using the mean of uni-modal cosine similarities as the target similarity and optimizing cross-modal similarity via cross-entropy loss, which effectively connects both modalities in a shared latent space for time series retrieval tasks. 
In general, this branch of methods is effective as it directly integrates the alignment objective into the optimization process, ensuring that meaningful representations are explicitly learned." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 50, + 654, + 294, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 654, + 294, + 710 + ], + "spans": [ + { + "bbox": [ + 50, + 654, + 294, + 710 + ], + "type": "text", + "content": "Lastly, we introduce the intermediate alignment of component outputs within a framework, extending beyond representation alignment within a model. The most recent studies explore the synergy between time series models and LLM agents, leveraging the strong reasoning capabilities of pretrained LLMs to provide" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 84, + 559, + 314 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 84, + 559, + 314 + ], + "spans": [ + { + "bbox": [ + 313, + 84, + 559, + 314 + ], + "type": "text", + "content": "contextual understanding and calibration in real-world scenarios [31, 42, 50, 68, 73]. We briefly discuss a few representative examples for demonstration. TimeCAP [42] utilizes the embedding space of a trained multi-modal encoder to retrieve in-context examples with the highest cosine similarity. These retrieved examples with ground truth labels are then fed, along with the query text, into an LLM to provide contextual guidance and improve outcome prediction. TimeXL [31] incorporates a multi-modal prototype-based encoder to generate explainable case-based rationales for both time series and texts, integrating three LLM agents, where prediction, reflection, and refinement LLMs collaborate to iteratively enhance prediction accuracy, identify textual inconsistencies or noise, and calibrate textual contexts, yielding more accurate predictions and explanations. 
NewsForecast [73] also employs reflection in language agents to iteratively select relevant news from a large database, enhancing alignment of textual information for text-based forecasting. Similarly, MATLAB [68] ensures alignment between statistical causal discovery on time series and LLM reasoning on textual context by leveraging iterative self-reflective tool-calling to structure textual context, which is then used to explain and refine causal constraints." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 314, + 315, + 559, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 315, + 559, + 370 + ], + "spans": [ + { + "bbox": [ + 314, + 315, + 559, + 370 + ], + "type": "text", + "content": "In a nutshell, alignment aims to calibrate real-world contexts and effectively capture relevant multi-modal elements for a semantically coherent time series modeling. It enhances task performance, robustness and explanation, ensuring that models leverage meaningful contextual information for improved decision-making." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 315, + 399, + 408, + 410 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 399, + 408, + 410 + ], + "spans": [ + { + "bbox": [ + 315, + 399, + 408, + 410 + ], + "type": "text", + "content": "4.3 Transference" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 413, + 559, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 413, + 559, + 710 + ], + "spans": [ + { + "bbox": [ + 313, + 413, + 559, + 710 + ], + "type": "text", + "content": "Transference refers to the process of mapping between different modalities. It allows one modality to be inferred, translated, or synthesized from another. This concept plays a crucial role across different stages of multi-modal time series analysis. The input-level transference typically serves for modality augmentation. 
It helps introduce contextual priors, enrich training samples, and provide alternative representations. This is particularly useful in scenarios of data scarcity and imbalance. In existing literature, a common practice is to use meta information to describe the narrative of real-world contexts (e.g., domain, data statistics and granularity, variable descriptions, other co-variates, etc.) [19, 24, 33, 48, 51, 55, 72, 98] or leverage pretrained LLMs to generate fine-grained textual contexts [42] or graphs [11] for real-world time series, serving as an augmented modality. In addition to texts, time series can also be transformed into high-dimensional images via feature imaging, such as stacking the original data with frequency and periodicity features [98]. Alternatively, time series can be represented in tabular form, transforming time series analysis into a table understanding task [72]. Note that the aforementioned uni-modal methods for transforming time series into other single modalities can also be integrated into multi-modal time series frameworks [15, 27, 46, 62, 99, 100]. The exploitation of input-level transference is two-fold. First, the embedding of generated modality can serve as semantic anchors that guides time series modeling via representation alignment, improving downstream supervised tasks [33, 51, 55, 98]. Second, it provides additional contextual guidance for pretrained LLMs through input fusion and prompting [19, 24, 48, 72]." + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 524, + 60, + 558, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 524, + 60, + 558, + 68 + ], + "spans": [ + { + "bbox": [ + 524, + 60, + 558, + 68 + ], + "type": "text", + "content": "Jiang, et al." 
+ } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 53, + 85, + 294, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 85, + 294, + 172 + ], + "spans": [ + { + "bbox": [ + 53, + 85, + 294, + 172 + ], + "type": "text", + "content": "At the intermediate [11, 50, 68, 75, 97] and output [3, 52] levels, transference are more task-oriented. The output-level transference typically refers to the end-to-end generation of new modalities, such as text-based and image-based time series retrieval, where users provide textual descriptions or sketched trends to query relevant time series data [3]. This also includes EEG-to-text conversion, enabling direct transformation from physiological signals to human-readable narratives [52]." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 173, + 294, + 314 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 173, + 294, + 314 + ], + "spans": [ + { + "bbox": [ + 53, + 173, + 294, + 314 + ], + "type": "text", + "content": "The output of intermediate transference typically serves as an initial solution to be refined for modality generation tasks [50, 68, 97] or a medium to be inferred for predictive tasks [75], facilitating downstream reasoning and further alignment within the multimodal framework. MATMCD [68] generates an initial causal graph from time series, achieving modality transference in the intermediate level. Subsequently, it incorporates textual modality to refine the causal graph, ensuring improved alignment and interpretability. Moreover, Wang et al. [75] adopt a two-stage mechanism for sentiment classification based on EEG data, where the model first converts EEG signals into reading texts and then employs a pretrained LLM based on texts for classification, achieving impressive zero-shot results." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 324, + 271, + 349 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 324, + 271, + 349 + ], + "spans": [ + { + "bbox": [ + 53, + 324, + 271, + 349 + ], + "type": "text", + "content": "5 Applications of Multi-modal Time Series Analysis" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 352, + 294, + 395 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 352, + 294, + 395 + ], + "spans": [ + { + "bbox": [ + 53, + 352, + 294, + 395 + ], + "type": "text", + "content": "In this section, we review the existing applications of multi-modal time series analysis for both standard and spatial time series, covering diverse domains such as healthcare, finance, transportation, environment, retail, and the Internet of Things (IoT)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 405, + 157, + 416 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 405, + 157, + 416 + ], + "spans": [ + { + "bbox": [ + 53, + 405, + 157, + 416 + ], + "type": "text", + "content": "Standard Time Series" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 421, + 132, + 431 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 421, + 132, + 431 + ], + "spans": [ + { + "bbox": [ + 53, + 421, + 132, + 431 + ], + "type": "text", + "content": "5.1 Healthcare" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 435, + 294, + 610 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 435, + 294, + 610 + ], + "spans": [ + { + "bbox": [ + 53, + 435, + 294, + 610 + ], + "type": "text", + "content": "Recent studies in healthcare highlight the multi-modal analysis of diverse medical data sources, such as EHRs (Electronic Health Records, containing lab values and clinical reports, etc.), audio, EEG (Electroencephalogram), ECG (Electrocardiogram), and other wearable and medical sensor recordings, for better disease 
diagnosis and patient monitoring. For multi-modal analysis on EHR data, a common modeling strategy involves the interaction between lab values and clinical reports, including the concatenation [16] and attention mechanisms [60, 85] on modality embeddings. Moreover, existing methods explore different modeling techniques to better exploit the clinical notes, via domain-specific text encoders (e.g., ClinicalBERT [28, 85] and BioBERT [43, 60]) and different processing strategies. For example, text embeddings can be modeled separately based on patient groups [86] or through a decaying mechanism based on time intervals [36] before interacting with time series embeddings, which leads to improved mortality prediction." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 611, + 294, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 611, + 294, + 709 + ], + "spans": [ + { + "bbox": [ + 53, + 611, + 294, + 709 + ], + "type": "text", + "content": "In addition to EHRs, multi-modal modeling methods have been tailored for audio [95], ECG [45], and EEG [75]. Zhang et al. [95] focus on a respiratory health classification task by integrating both audio and textual descriptions. Li et al. [45] propose a multi-modal contrastive learning framework, constructing positive and negative samples by pairing patients' report texts with corresponding ECG signals for self-supervised pretraining. The classification task is then performed by computing the cosine similarity between different text representations and the target ECG representation. Wang" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 317, + 85, + 558, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 85, + 558, + 128 + ], + "spans": [ + { + "bbox": [ + 317, + 85, + 558, + 128 + ], + "type": "text", + "content": "et al. [75] propose a two-stage method for zero-shot EEG-based sentiment classification. 
First, a pretrained BART model is used for EEG-to-text decoding, followed by a trained text sentiment classifier that converts the generated text into sentiment categories." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 317, + 129, + 558, + 215 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 129, + 558, + 215 + ], + "spans": [ + { + "bbox": [ + 317, + 129, + 558, + 215 + ], + "type": "text", + "content": "Similarly, Liu et al. [56] fuses physiological and behavioral time-series sensor data with real-world contextual information to effectively harness LLMs for wellness assessment. By fine-tuning the models with few-shot question-answer pairs that include contextual details, they improve performance on various healthcare tasks—such as cardiac signal analysis, physical activity recognition, and calorie-burn estimation, which outperform both supervised feedforward neural networks and zero-shot LLM baselines." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 317, + 229, + 381, + 239 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 229, + 381, + 239 + ], + "spans": [ + { + "bbox": [ + 317, + 229, + 381, + 239 + ], + "type": "text", + "content": "5.2 Finance" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 317, + 243, + 558, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 243, + 558, + 418 + ], + "spans": [ + { + "bbox": [ + 317, + 243, + 558, + 418 + ], + "type": "text", + "content": "Recently, multi-modal time series analysis has received increasing attention in financial applications. Yu et al. [89] and Xie et al. [81] focus on stock prediction by integrating stock price movements, company profiles, and news directly into structured LLM prompts, enabling models to perform reasoning over multiple modalities. Yu et al. 
[89] applies GPT-4 and Open LLaMA to forecast NASDAQ-100 stock returns through instruction-based prompting and fine-tuning, demonstrating that structured LLM-driven inference can outperform traditional econometric models. Meanwhile, Xie et al. [81] conducts a zero-shot analysis of ChatGPT's capabilities for multimodal stock movement prediction, incorporating CoT prompting to assess the impact of social media sentiment on stock trends. Chen et al.[11] and Sawhneyet al.[67] also incorporate graph structures for stock movement prediction. For instance, Chen et al.[11] uses ChatGPT to infer dynamic stock relationship graphs from news, which reflects market conditions and enhances prediction accuracy." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 317, + 419, + 558, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 419, + 558, + 495 + ], + "spans": [ + { + "bbox": [ + 317, + 419, + 558, + 495 + ], + "type": "text", + "content": "Beyond the predictive tasks, Bamford et al. [3] proposes a multimodal retrieval framework, where the model aligns both modalities in a shared latent space through contrastive learning. This framework allows users to search for financial time series through textual descriptions or sketched trends, offering greater flexibility. It also significantly improves retrieval speed and accuracy compared to traditional SQL-based search methods." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 317, + 509, + 375, + 519 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 509, + 375, + 519 + ], + "spans": [ + { + "bbox": [ + 317, + 509, + 375, + 519 + ], + "type": "text", + "content": "5.3 Others" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 317, + 523, + 558, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 523, + 558, + 709 + ], + "spans": [ + { + "bbox": [ + 317, + 523, + 558, + 709 + ], + "type": "text", + "content": "Multi-modal time series analysis also exists in other domains, such as retail, IoT, computer vision and audio. In the retail sector, Ekambaram et al. [18] utilizes product images and textual descriptions, including attributes like color, pattern, and sleeve style, while incorporating temporal and exogenous features for new product sales forecasting. More recently, Skenderi et al. [70] integrates additional modality data, including product images and text descriptions, along with Google Trends data for sales forecasting. In IoT applications, MIA [82] enhances power transformer fault diagnosis by integrating multi-modal data, including dissolved gas analysis (DGA) and infrared images, to improve accuracy and efficiency. MULAN [97] converts log sequences into time-series data using a log-tailored language model and employs contrastive learning to leverage multi-modal data, facilitating root-cause discovery for system failures. 
In computer vision, LA-GCN [83] utilizes textual embeddings of joint names and action labels to generate faithful structural priors, enhancing skeleton-based action modeling and" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 60, + 217, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 60, + 217, + 68 + ], + "spans": [ + { + "bbox": [ + 52, + 60, + 217, + 68 + ], + "type": "text", + "content": "Multi-modal Time Series Analysis: A Tutorial and Survey" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 85, + 294, + 139 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 85, + 294, + 139 + ], + "spans": [ + { + "bbox": [ + 52, + 85, + 294, + 139 + ], + "type": "text", + "content": "improving recognition tasks. In speech applications, AV-HuBERT [69] employs a self-supervised representation learning framework to leverage correlated audio and visual information [90], while SpeechGPT [69, 90] integrates audio and text to enhance generation performance." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 52, + 151, + 147, + 163 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 151, + 147, + 163 + ], + "spans": [ + { + "bbox": [ + 52, + 151, + 147, + 163 + ], + "type": "text", + "content": "Spatial Time Series" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 166, + 219, + 178 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 166, + 219, + 178 + ], + "spans": [ + { + "bbox": [ + 52, + 166, + 219, + 178 + ], + "type": "text", + "content": "5.4 Transportation and Mobility" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 181, + 294, + 345 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 181, + 294, + 345 + ], + "spans": [ + { + "bbox": [ + 53, + 181, + 294, + 345 + ], + "type": "text", + "content": "Several recent studies on traffic prediction highlight the importance of multi-modal contexts to enhance forecasting accuracy. Guo et al. [24] transforms California traffic data into structured LLM prompts. The method uses LLaMA models and instruction fine-tuning to improve spatial-temporal learning. Meanwhile, Li et al. [48] employs a spatial-temporal dependency encoder to align numerical New York City traffic data with LLMs, incorporating weather, geographic context, and historical flow patterns to refine predictions. Similarly, Feng et al. [19] proposes CityGPT, enhancing LLMs' spatial cognition for urban reasoning, mobility prediction, and navigation by integrating urban mobility data, road networks, and human behavior through instruction tuning. These studies demonstrate that LLM-based multi-modal fusion not only enhances traffic forecasting but also improves model interpretability and adaptability across diverse urban scenarios." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 357, + 144, + 368 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 357, + 144, + 368 + ], + "spans": [ + { + "bbox": [ + 52, + 357, + 144, + 368 + ], + "type": "text", + "content": "5.5 Environment" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 371, + 294, + 524 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 371, + 294, + 524 + ], + "spans": [ + { + "bbox": [ + 53, + 371, + 294, + 524 + ], + "type": "text", + "content": "Integrating multi-modal information benefits environmental studies, particularly by addressing the prevalent challenge of missing values. VIMTS [96] utilizes a structured variational approximation technique to impute missing high-dimensional modalities (stream image data) by transforming them into low-dimensional features derived from simpler, related modalities (meteorological time series records), ensuring cross-modal correlations and interpretability of the imputation process. Additionally, LITE [44] addresses the imputation of missing features through a sparse Mixture of Experts framework. It integrates and encodes various environmental variables through a unified encoder. Directed by domain-specific instructions, a language model is utilized to merge these multi-modal representations, thereby improving the accuracy of environmental spatial-temporal predictions." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 536, + 205, + 547 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 536, + 205, + 547 + ], + "spans": [ + { + "bbox": [ + 52, + 536, + 205, + 547 + ], + "type": "text", + "content": "6 Future Research Directions" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 52, + 550, + 294, + 572 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 550, + 294, + 572 + ], + "spans": [ + { + "bbox": [ + 52, + 550, + 294, + 572 + ], + "type": "text", + "content": "In this section, we outline several underexplored research directions that open up opportunities for future advancements." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 578, + 294, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 578, + 294, + 709 + ], + "spans": [ + { + "bbox": [ + 53, + 578, + 294, + 709 + ], + "type": "text", + "content": "Reasoning with Multi-modal Time Series. Enhancing reasoning with multi-modal time series is pivotal for the development of intelligent systems. Future research should focus on creating a unified framework that can seamlessly integrate temporal reasoning with contextual understanding, enabling models to handle multiple time series tasks with interpretability. One potential path is to incorporate external knowledge bases and real-world context, such as developing a retrieval-augmented generation (RAG) [59] system, to enhance the reasoning process and allow models to make informed inferences beyond the immediate data. It is also promising to synergize time series model and language agents to provide more faithful and reliable reasoning on real-world contexts [31, 68]." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 317, + 85, + 558, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 85, + 558, + 118 + ], + "spans": [ + { + "bbox": [ + 317, + 85, + 558, + 118 + ], + "type": "text", + "content": "The recent development of LLM reasoning models, such as chain of thoughts [76] and tree of thoughts [87], also offers potential solutions to improve reasoning quality." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 317, + 120, + 559, + 197 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 120, + 559, + 197 + ], + "spans": [ + { + "bbox": [ + 317, + 120, + 559, + 197 + ], + "type": "text", + "content": "Decision Making. Multi-modal time series analysis presents a promising future direction to enhance decision-making processes, which is crucial in high-stakes applications. By leveraging predictive signals and explanations from multi-modal contexts, future research can develop more adaptive, interpretable, and reliable decision-support systems, to facilitate the downstream optimization tasks such as resource allocation and risk management." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 317, + 200, + 559, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 200, + 559, + 342 + ], + "spans": [ + { + "bbox": [ + 317, + 200, + 559, + 342 + ], + "type": "text", + "content": "Domain Generalization. One key challenge in multi-modal time series analysis is domain generalization, which enables a model trained on one or more source domains to effectively generalize to unseen target domains, ensuring robustness against distribution shifts. In multi-modal time series, distribution shifts can be multifaceted, stemming not only from time series, but also from other modalities. 
Therefore, it is crucial to develop specialized domain generalization methods for effective multi-modal time series analysis, including strategies to identify and preserve domain-invariant components across modalities while capturing modality-specific variations for rapid adaptation. Additionally, disentangling the effects of each modality is essential to better understand their individual contributions and mitigate cross-modal interference." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 317, + 345, + 559, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 345, + 559, + 464 + ], + "spans": [ + { + "bbox": [ + 317, + 345, + 559, + 464 + ], + "type": "text", + "content": "Robustness to Missing and Noisy Modalities. Multi-modal time series analysis often frequently encounters messy real-world contexts with incomplete or noisy data. Existing methods employ an iterative context-refinement algorithm [31] that filters out less relevant information, thereby enhancing the predictive insights derived from multi-modal time series. Nonetheless, effectively dealing with missing and noisy modalities still demands further exploration. In particular, developing strategies for modality-specific imputation, noise reduction, and relevance quantification will be crucial to improving the real-world applicability of existing multi-modal time series methods." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 317, + 468, + 558, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 468, + 558, + 544 + ], + "spans": [ + { + "bbox": [ + 317, + 468, + 558, + 544 + ], + "type": "text", + "content": "Ethical Considerations and Bias Mitigation. In light of potential biases in multi-modal time series datasets, future research should integrate fairness-aware techniques, such as fairness constraints, counterfactual analysis, and adversarial debiasing. 
These methods should be combined with robust bias assessment frameworks to systematically detect and mitigate inequities, ensuring outcomes that are both equitable and socially responsible." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 317, + 558, + 390, + 569 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 558, + 390, + 569 + ], + "spans": [ + { + "bbox": [ + 317, + 558, + 390, + 569 + ], + "type": "text", + "content": "7 Conclusion" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 317, + 572, + 559, + 649 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 572, + 559, + 649 + ], + "spans": [ + { + "bbox": [ + 317, + 572, + 559, + 649 + ], + "type": "text", + "content": "In this survey, we provide a comprehensive overview of existing multi-modal time series methods. We first discuss the multi-modal time series used in existing methods. Then, we propose a taxonomy based on cross-modal interactions between time series and other modalities. The existing methods are categorized and summarized accordingly. We also discuss the real-world applications and highlight future research directions in this promising area." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 317, + 662, + 409, + 674 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 662, + 409, + 674 + ], + "spans": [ + { + "bbox": [ + 317, + 662, + 409, + 674 + ], + "type": "text", + "content": "Acknowledgments" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 317, + 677, + 559, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 677, + 559, + 709 + ], + "spans": [ + { + "bbox": [ + 317, + 677, + 559, + 709 + ], + "type": "text", + "content": "This research was supported in part by the National Science Foundation (NSF) CAREER IIS-2338878, as well as by generous research gifts from NEC Labs America Inc. and Morgan Stanley." 
+ } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 524, + 60, + 558, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 524, + 60, + 558, + 68 + ], + "spans": [ + { + "bbox": [ + 524, + 60, + 558, + 68 + ], + "type": "text", + "content": "Jiang, et al." + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 83, + 108, + 95 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 83, + 108, + 95 + ], + "spans": [ + { + "bbox": [ + 52, + 83, + 108, + 95 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 98, + 295, + 703 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 58, + 98, + 295, + 121 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 98, + 295, + 121 + ], + "spans": [ + { + "bbox": [ + 58, + 98, + 295, + 121 + ], + "type": "text", + "content": "[1] Triantafyllos Afouras, Joon Son Chung, and Andrew Zisserman. 2018. LRS3-TED: a large-scale dataset for visual speech recognition. arXiv:1809.00496 [cs.CV] https://arxiv.org/abs/1809.00496" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 58, + 122, + 295, + 145 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 122, + 295, + 145 + ], + "spans": [ + { + "bbox": [ + 58, + 122, + 295, + 145 + ], + "type": "text", + "content": "[2] Tadas Baltrusaitis, Chaitanya Ahuja, and Louis-Philippe Morency. 2018. Multimodal machine learning: A survey and taxonomy. IEEE transactions on pattern analysis and machine intelligence 41, 2 (2018), 423-443." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 58, + 146, + 294, + 192 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 146, + 294, + 192 + ], + "spans": [ + { + "bbox": [ + 58, + 146, + 294, + 192 + ], + "type": "text", + "content": "[3] Tom Bamford, Andrea Coletta, Elizabeth Fons, Sriram Gopalakrishnan, Svitlana Vyetrenko, Tucker Balch, and Manuela Veloso. 2023. Multi-Modal Financial Time-Series Retrieval Through Latent Space Projections. In Proceedings of the Fourth ACM International Conference on AI in Finance (Brooklyn, NY, USA) (ICAIF '23). Association for Computing Machinery, New York, NY, USA, 498-506. doi:10.1145/3604237.3626901" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 58, + 194, + 294, + 232 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 194, + 294, + 232 + ], + "spans": [ + { + "bbox": [ + 58, + 194, + 294, + 232 + ], + "type": "text", + "content": "[4] Dhananjay Bhattacharya, Nayan K Sharma, Debottam Dutta, Srikanth R Chetupalli, Prashant Mote, Sriram Ganapathy, Jyothi Bhat, Shreyas Ramoji, Pravin Ghosh, Aswin Subramanian, et al. 2023. Coswara: a respiratory sounds and symptoms dataset for remote screening of SARS-CoV-2 infection. Scientific Data 10, 1 (2023), 397." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 58, + 233, + 294, + 257 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 233, + 294, + 257 + ], + "spans": [ + { + "bbox": [ + 58, + 233, + 294, + 257 + ], + "type": "text", + "content": "[5] Nimeesha Chan, Felix Parker, William Bennett, Tianyi Wu, Mung Yao Jia, James Fackler, and Kimia Ghobadi. 2024. Medtsllm: Leveraging llms for multimodal medical time series analysis. arXiv preprint arXiv:2408.07773 (2024)." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 58, + 258, + 294, + 289 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 258, + 294, + 289 + ], + "spans": [ + { + "bbox": [ + 58, + 258, + 294, + 289 + ], + "type": "text", + "content": "[6] Sameep Chattopadhyay, Pulkit Paliwal, Sai Shankar Narasimhan, Shubhankar Agarwal, and Sandeep P. Chinchali. 2025. Context Matters: Leveraging Contextual Features for Time Series Forecasting. arXiv:2410.12672 [cs.LG] https://arxiv.org/abs/2410.12672" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 58, + 289, + 294, + 320 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 289, + 294, + 320 + ], + "spans": [ + { + "bbox": [ + 58, + 289, + 294, + 320 + ], + "type": "text", + "content": "[7] Shengyu Chen, Yiqun Xie, Xiang Li, Xu Liang, and Xiaowei Jia. 2023. Physics-guided meta-learning method in baseflow prediction over large regions. In Proceedings of the 2023 SIAM International Conference on Data Mining (SDM). SIAM, 217-225." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 58, + 321, + 294, + 352 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 321, + 294, + 352 + ], + "spans": [ + { + "bbox": [ + 58, + 321, + 294, + 352 + ], + "type": "text", + "content": "[8] Shengyu Chen, Jacob A Zwart, and Xiaowei Jia. 2022. Physics-guided graph meta learning for predicting water temperature and streamflow in stream networks. In Proceedings of the 28th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 2752-2761." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 58, + 353, + 294, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 353, + 294, + 376 + ], + "spans": [ + { + "bbox": [ + 58, + 353, + 294, + 376 + ], + "type": "text", + "content": "[9] Si-An Chen, Chun-Liang Li, Sercan O Arik, Nathanael Christian Yoder, and Tomas Pfister. 2023. TSMixer: An All-MLP Architecture for Time Series Forecasting. 
Transactions on Machine Learning Research (2023)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 377, + 294, + 433 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 377, + 294, + 433 + ], + "spans": [ + { + "bbox": [ + 56, + 377, + 294, + 433 + ], + "type": "text", + "content": "[10] Wei Chen, Xixuan Hao, Yuankai Wu, and Yuxuan Liang. 2024. Terra: A Multimodal Spatio-Temporal Dataset Spanning the Earth. In Advances in Neural Information Processing Systems, A. Globerson, L. Mackey, D. Belgrave, A. Fan, U. Paquet, J. Tomczak, and C. Zhang (Eds.), Vol. 37. Curran Associates, Inc., 66329-66356. https://proceedings.neurips.cc/paper_files/paper/2024/file/7a6a7fbd1ee0c9684b3f919f79d129ef-Paper-Datasets_and_Benchmarks_Track.pdf" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 434, + 294, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 434, + 294, + 456 + ], + "spans": [ + { + "bbox": [ + 56, + 434, + 294, + 456 + ], + "type": "text", + "content": "[11] Zihan Chen, Lei Nico Zheng, Cheng Lu, Jialu Yuan, and Di Zhu. 2023. ChatGPT Informed Graph Neural Network for Stock Movement Prediction. Available at SSRN 4464002 (2023)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 56, + 456, + 294, + 503 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 456, + 294, + 503 + ], + "spans": [ + { + "bbox": [ + 56, + 456, + 294, + 503 + ], + "type": "text", + "content": "[12] Mingyue Cheng, Yiheng Chen, Qi Liu, Zhiding Liu, Yuong Luo, and Enhong Chen. 2025. InstrucTime: Advancing Time Series Classification with Multimodal Language Modeling. In Proceedings of the Eighteenth ACM International Conference on Web Search and Data Mining (Hannover, Germany) (WSDM '25). Association for Computing Machinery, New York, NY, USA, 792-800. 
doi:10.1145/3701551.3703499" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 56, + 504, + 294, + 528 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 504, + 294, + 528 + ], + "spans": [ + { + "bbox": [ + 56, + 504, + 294, + 528 + ], + "type": "text", + "content": "[13] Joon Son Chung, Arsha Nagrani, and Andrew Zisserman. 2018. VoxCeleb2: Deep Speaker Recognition. In Interspeech 2018 (Interspeech Proceedings). ISCA. doi:10.21437/Interspeech.2018-1929" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 56, + 528, + 294, + 552 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 528, + 294, + 552 + ], + "spans": [ + { + "bbox": [ + 56, + 528, + 294, + 552 + ], + "type": "text", + "content": "[14] Helena Cousijn, Patricia Feeney, Daan Lowenberg, Elisa Presani, and Natasha Simons. 2019. A data citation roadmap for scholarly data repositories. *Scientific Data* 6, 1 (2019), 28." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 56, + 552, + 294, + 583 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 552, + 294, + 583 + ], + "spans": [ + { + "bbox": [ + 56, + 552, + 294, + 583 + ], + "type": "text", + "content": "[15] Mayank Daswani, Mathias MJ Bellaiche, Marc Wilson, Desislav Ivanov, Mikhail Papkov, Eva Schnider, Jing Tang, Kay Lamerigts, Gabriela Botea, Michael A Sanchez, et al. 2024. Plots Unlock Time-Series Understanding in Multimodal Models. arXiv preprint arXiv:2410.02637 (2024)." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 56, + 584, + 294, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 584, + 294, + 624 + ], + "spans": [ + { + "bbox": [ + 56, + 584, + 294, + 624 + ], + "type": "text", + "content": "[16] Iman Deznabi, Mohit Iyyer, and Madalina Fiterau. 2021. Predicting in-hospital mortality by combining clinical notes with time-series data. 
In Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021, Chengqing Zong, Fei Xia, Wenjie Li, and Roberto Navigli (Eds.). Association for Computational Linguistics, Online, 4026-4031. doi:10.18653/v1/2021-findings-acl.352" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 56, + 624, + 294, + 663 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 624, + 294, + 663 + ], + "spans": [ + { + "bbox": [ + 56, + 624, + 294, + 663 + ], + "type": "text", + "content": "[17] Zihan Dong, Xinyu Fan, and Zhiyuan Peng. 2024. FNSPID: A Comprehensive Financial News Dataset in Time Series. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining (Barcelona, Spain) (KDD '24). Association for Computing Machinery, New York, NY, USA, 4918-4927. doi:10.1145/3637528.3671629" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 56, + 664, + 294, + 703 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 664, + 294, + 703 + ], + "spans": [ + { + "bbox": [ + 56, + 664, + 294, + 703 + ], + "type": "text", + "content": "[18] Vijay Ekambaram, Kushagra Manglik, Sumanta Mukherjee, Surya Shravan Kumar Sajja, Satyam Dwivedi, and Vikas Raykar. 2020. Attention based multimodal new product sales time-series forecasting. In Proceedings of the 26th ACM SIGKDD international conference on knowledge discovery & data mining. 3110-3118." + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 320, + 87, + 559, + 708 + ], + "type": "list", + "angle": 0, + "index": 41, + "blocks": [ + { + "bbox": [ + 320, + 87, + 559, + 110 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 87, + 559, + 110 + ], + "spans": [ + { + "bbox": [ + 320, + 87, + 559, + 110 + ], + "type": "text", + "content": "[19] Jie Feng, Yuwei Du, Tianhui Liu, Siqi Guo, Yuming Lin, and Yong Li. 2024. Citygpt: Empowering urban spatial cognition of large language models. 
arXiv preprint arXiv:2406.13948 (2024)." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 320, + 111, + 559, + 142 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 111, + 559, + 142 + ], + "spans": [ + { + "bbox": [ + 320, + 111, + 559, + 142 + ], + "type": "text", + "content": "[20] Stefan Feuerriegel, Dennis Frauen, Valentyn Melnychuk, Jonas Schweisthal, Konstantin Hess, Alicia Curth, Stefan Bauer, Niki Kilbertus, Isaac S Kohane, and Mihaela van der Schaar. 2024. Causal machine learning for predicting treatment outcomes. Nature Medicine 30, 4 (2024), 958-968." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 320, + 143, + 559, + 167 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 143, + 559, + 167 + ], + "spans": [ + { + "bbox": [ + 320, + 143, + 559, + 167 + ], + "type": "text", + "content": "[21] Mohammad Fraiwan, Luay Fraiwan, Basheer Khassawneh, and Ali Ibnian. 2021. A dataset of lung sounds recorded from the chest wall using an electronic stethoscope. Data in Brief 35, 106913. doi:10.1016/j.dib.2021.106913" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 320, + 167, + 559, + 191 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 167, + 559, + 191 + ], + "spans": [ + { + "bbox": [ + 320, + 167, + 559, + 191 + ], + "type": "text", + "content": "[22] Alessandro T. Gifford, Kshitij Dwivedi, Gemma Roig, and Radoslaw M. Cichy. 2022. A large and rich EEG dataset for modeling human visual object recognition. NeuroImage 264 (2022), 119754. doi:10.1016/j.neuroimage.2022.119754" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 320, + 191, + 559, + 221 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 191, + 559, + 221 + ], + "spans": [ + { + "bbox": [ + 320, + 191, + 559, + 221 + ], + "type": "text", + "content": "[23] Shengnan Guo, Youfang Lin, Ning Feng, Chao Song, and Huaiyu Wan. 2019. 
Attention based spatial-temporal graph convolutional networks for traffic flow forecasting. In Proceedings of the AAAI conference on artificial intelligence, Vol. 33, 922-929." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 320, + 222, + 559, + 247 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 222, + 559, + 247 + ], + "spans": [ + { + "bbox": [ + 320, + 222, + 559, + 247 + ], + "type": "text", + "content": "[24] Xusen Guo, Qiming Zhang, Junyue Jiang, Mingxing Peng, Meixin Zhu, and Hao Frank Yang. 2024. Towards explainable traffic flow prediction with large language models. Communications in Transportation Research 4 (2024), 100150." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 320, + 247, + 559, + 277 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 247, + 559, + 277 + ], + "spans": [ + { + "bbox": [ + 320, + 247, + 559, + 277 + ], + "type": "text", + "content": "[25] Paul Hager, Martin J Menten, and Daniel Rueckert. 2023. Best of both worlds: Multimodal contrastive learning with tabular and imaging data. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 23924-23935." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 320, + 277, + 559, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 277, + 559, + 342 + ], + "spans": [ + { + "bbox": [ + 320, + 277, + 559, + 342 + ], + "type": "text", + "content": "[26] Nora Hollenstein, Marius Troendle, Ce Zhang, and Nicolas Langer. 2020. ZuCo 2.0: A Dataset of Physiological Recordings During Natural Reading and Annotation. In Proceedings of the Twelfth Language Resources and Evaluation Conference, Nicoletta Calzolari, Frédéric Béchet, Philippe Blache, Khalid Choukri, Christopher Cieri, Thierry Declerck, Sara Goggi, Hitoshi Isahara, Bente Maegaard, Joseph Mariani, Hélène Mazo, Asuncion Moreno, Jan Odijk, and Stelios Piperidis (Eds.). 
European Language Resources Association, Marseille, France, 138-146. https://aclanthology.org/2020.lrec-1.18/" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 320, + 342, + 559, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 342, + 559, + 365 + ], + "spans": [ + { + "bbox": [ + 320, + 342, + 559, + 365 + ], + "type": "text", + "content": "[27] Shi Bin Hoo, Samuel Müller, David Salinas, and Frank Hutter. 2025. The tabular foundation model TabPFN outperforms specialized time series forecasting models based on simple features. arXiv preprint arXiv:2501.02945 (2025)." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 320, + 366, + 559, + 390 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 366, + 559, + 390 + ], + "spans": [ + { + "bbox": [ + 320, + 366, + 559, + 390 + ], + "type": "text", + "content": "[28] Kexin Huang, Jaan Altsoaar, and Rajesh Ranganath. 2020. Clinical-BERT: Modeling Clinical Notes and Predicting Hospital Readmission. arXiv:1904.05342 [cs.CL] https://arxiv.org/abs/1904.05342" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 320, + 391, + 559, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 391, + 559, + 422 + ], + "spans": [ + { + "bbox": [ + 320, + 391, + 559, + 422 + ], + "type": "text", + "content": "[29] Jiahao Ji, Jingyuan Wang, Chao Huang, Junjie Wu, Boren Xu, Zhenhe Wu, Junbo Zhang, and Yu Zheng. 2023. Spatio-temporal self-supervised learning for traffic flow prediction. In Proceedings of the AAAI conference on artificial intelligence, Vol. 37. 4356-4364." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 320, + 422, + 559, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 422, + 559, + 453 + ], + "spans": [ + { + "bbox": [ + 320, + 422, + 559, + 453 + ], + "type": "text", + "content": "[30] Furong Jia, Kevin Wang, Yixiang Zheng, Defu Cao, and Yan Liu. 2024. 
GPT4MTS: Prompt-based Large Language Model for Multimodal Time-series Forecasting. Proceedings of the AAI Conference on Artificial Intelligence 38, 21 (Mar. 2024), 23343-23351. doi:10.1609/aaaai.v38i21.30383" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 320, + 454, + 559, + 485 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 454, + 559, + 485 + ], + "spans": [ + { + "bbox": [ + 320, + 454, + 559, + 485 + ], + "type": "text", + "content": "[31] Yushan Jiang, Wenchao Yu, Geon Lee, Dongjin Song, Kijung Shin, Wei Cheng, Yanchi Liu, and Haifeng Chen. 2025. Explainable Multi-modal Time Series Prediction with LLM-in-the-Loop. arXiv:2503.01013 [cs.LG] https://arxiv.org/abs/2503.01013" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 320, + 486, + 559, + 517 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 486, + 559, + 517 + ], + "spans": [ + { + "bbox": [ + 320, + 486, + 559, + 517 + ], + "type": "text", + "content": "[32] Bo Jin, Haoyu Yang, Leilei Sun, Chuanren Liu, Yue Qu, and Jianing Tong. 2018. A treatment engine by predicting next-period prescriptions. In Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining. 1608-1616." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 320, + 518, + 559, + 556 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 518, + 559, + 556 + ], + "spans": [ + { + "bbox": [ + 320, + 518, + 559, + 556 + ], + "type": "text", + "content": "[33] Ming Jin, Shiyu Wang, Lintao Ma, Zhixuan Chu, James Y. Zhang, Xiaoming Shi, Pin-Yu Chen, Yuxuan Liang, Yuan-Fang Li, Shirui Pan, and Qingsong Wen. 2024. Time-LLM: Time Series Forecasting by Reprogramming Large Language Models. In The Twelfth International Conference on Learning Representations. 
https://openreview.net/forum?id=Unb5CVPtae" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 320, + 557, + 559, + 572 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 557, + 559, + 572 + ], + "spans": [ + { + "bbox": [ + 320, + 557, + 559, + 572 + ], + "type": "text", + "content": "[34] Alistair Johnson, Lucas Bulgarelli, Tom Pollard, Steven Horng, Leo Anthony Celi, and Roger Mark. 2021. MIMIC-IV (version 1.0). doi:10.13026/s6n6-xd98" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 320, + 574, + 559, + 605 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 574, + 559, + 605 + ], + "spans": [ + { + "bbox": [ + 320, + 574, + 559, + 605 + ], + "type": "text", + "content": "[35] Alistair E. W. Johnson, Tom J. Pollard, Lu Shen, Li-wei H. Lehman, Mengling Feng, Mohammad Ghassemi, Benjamin Moody, Peter Szolovits, Leo Anthony Celi, and Roger G. Mark. 2016. MIMIC-III, a freely accessible critical care database. Scientific Data 3 (2016), 160035." + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 320, + 605, + 559, + 652 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 605, + 559, + 652 + ], + "spans": [ + { + "bbox": [ + 320, + 605, + 559, + 652 + ], + "type": "text", + "content": "[36] Swaraj Khadanga, Karan Aggarwal, Shafiq Joty, and Jaideep Srivastava. 2019. Using Clinical Notes with Time Series Data for ICU Management. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IFCNLP). Association for Computational Linguistics, Hong Kong, China, 6432-6437. 
doi:10.18653/v1/D19-1678" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 320, + 653, + 559, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 653, + 559, + 685 + ], + "spans": [ + { + "bbox": [ + 320, + 653, + 559, + 685 + ], + "type": "text", + "content": "[37] Kai Kim, Howard Tsai, Rajat Sen, Abhimanyu Das, Zihao Zhou, Abhishek Tanpure, Mathew Luo, and Rose Yu. 2024. Multi-Modal Forecaster: Jointly Predicting Time Series and Textual Data. arXiv:2411.06735 [cs.AI] https://arxiv.org/abs/2411.06735" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 320, + 685, + 559, + 708 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 685, + 559, + 708 + ], + "spans": [ + { + "bbox": [ + 320, + 685, + 559, + 708 + ], + "type": "text", + "content": "[38] Yaxuan Kong, Yiyuan Yang, Yoontae Hwang, Wenjie Du, Stefan Zohren, Zhangyang Wang, Ming Jin, and Qingsong Wen. 2025. Time-MQA: Time Series Multi-Task Question Answering with Context Enhancement." 
+ } + ] + } + ], + "index": 40 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 60, + 217, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 60, + 217, + 68 + ], + "spans": [ + { + "bbox": [ + 52, + 60, + 217, + 68 + ], + "type": "text", + "content": "Multi-modal Time Series Analysis: A Tutorial and Survey" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 87, + 294, + 709 + ], + "type": "list", + "angle": 0, + "index": 23, + "blocks": [ + { + "bbox": [ + 72, + 87, + 238, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 87, + 238, + 95 + ], + "spans": [ + { + "bbox": [ + 72, + 87, + 238, + 95 + ], + "type": "text", + "content": "arXiv:2503.01875 [cs.CL] https://arxiv.org/abs/2503.01875" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 95, + 294, + 126 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 95, + 294, + 126 + ], + "spans": [ + { + "bbox": [ + 56, + 95, + 294, + 126 + ], + "type": "text", + "content": "[39] Yaxuan Kong, Yiyuan Yang, Shiyu Wang, Chenghao Liu, Yuxuan Liang, Ming Jin, Stefan Zohren, Dan Pei, Yan Liu, and Qingsong Wen. 2025. Position: Empowering Time Series Reasoning with Multimodal LLMs. arXiv preprint arXiv:2502.01477 (2025)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 126, + 294, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 126, + 294, + 150 + ], + "spans": [ + { + "bbox": [ + 56, + 126, + 294, + 150 + ], + "type": "text", + "content": "[40] Dongjie Wang Chengyuan Deng Reon Matsuoka Lecheng Zheng, Zhengzhang Chen and Haifeng Chen. 2024. LEMMA-RCA: A Large Multi-modal Multi-domain Dataset for Root Cause Analysis. 
arXiv:2406.05375 [cs.AI]" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 150, + 294, + 175 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 150, + 294, + 175 + ], + "spans": [ + { + "bbox": [ + 56, + 150, + 294, + 175 + ], + "type": "text", + "content": "[41] Geon Lee, Wenchao Yu, Wei Cheng, and Haifeng Chen. 2024. MoAT: Multi-Modal Augmented Time Series Forecasting. https://openreview.net/forum?id=uRXxnoqDHH" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 175, + 294, + 198 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 175, + 294, + 198 + ], + "spans": [ + { + "bbox": [ + 56, + 175, + 294, + 198 + ], + "type": "text", + "content": "[42] Geon Lee, Wenchao Yu, Kijung Shin, Wei Cheng, and Haifeng Chen. 2025. TimeCAP: Learning to Contextualize, Augment, and Predict Time Series Events with Large Language Model Agents. In AAAI." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 198, + 294, + 230 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 198, + 294, + 230 + ], + "spans": [ + { + "bbox": [ + 56, + 198, + 294, + 230 + ], + "type": "text", + "content": "[43] Jinhyuk Lee, Wonjin Yoon, Sungdong Kim, Donghyeon Kim, Sunkyu Kim, Chan So, and Jaewoo Kang. 2019. BioBERT: a pre-trained biomedical language representation model for biomedical text mining. Bioinformatics (Oxford, England) 36 (09 2019). doi:10.1093/bioinformatics/btz682" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 230, + 294, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 230, + 294, + 255 + ], + "spans": [ + { + "bbox": [ + 56, + 230, + 294, + 255 + ], + "type": "text", + "content": "[44] Haoran Li, Junqi Liu, Zexian Wang, Shiuyuan Luo, Xiaowei Jia, and Huaxiu Yao. 2024. LITE: Modeling Environmental Ecosystems with Multimodal Large Language Models. arXiv preprint arXiv:2404.01165 (2024)." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 255, + 294, + 278 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 255, + 294, + 278 + ], + "spans": [ + { + "bbox": [ + 56, + 255, + 294, + 278 + ], + "type": "text", + "content": "[45] Jun Li, Che Liu, Sibo Cheng, Rossella Arcucci, and Shenda Hong. 2023. Frozen Language Model Helps ECG Zero-Shot Learning. In Medical Imaging with Deep Learning." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 278, + 294, + 302 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 278, + 294, + 302 + ], + "spans": [ + { + "bbox": [ + 56, + 278, + 294, + 302 + ], + "type": "text", + "content": "[46] Zekun Li, Shiyang Li, and Xifeng Yan. 2023. Time series as images: Vision transformer for irregularly sampled time series. Advances in Neural Information Processing Systems 36 (2023), 49187-49204." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 302, + 294, + 334 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 302, + 294, + 334 + ], + "spans": [ + { + "bbox": [ + 56, + 302, + 294, + 334 + ], + "type": "text", + "content": "[47] Zihao Li, Xiao Lin, Zhining Liu, Jiaru Zou, Ziwei Wu, Lecheng Zheng, Dongqi Fu, Yada Zhu, Hendrik Hamann, Hanghang Tong, and Jingrui He. 2025. Language in the Flow of Time: Time-Series-Paired Texts Weaved into a Unified Temporal Narrative. arXiv:2502.08942 [cs.LG] https://arxiv.org/abs/2502.08942" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 334, + 294, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 334, + 294, + 365 + ], + "spans": [ + { + "bbox": [ + 56, + 334, + 294, + 365 + ], + "type": "text", + "content": "[48] Zhonghang Li, Lianghao Xia, Jiabin Tang, Yong Xu, Lei Shi, Long Xia, Dawei Yin, and Chao Huang. 2024. UrbanGPT: Spatio-Temporal Large Language Models. ArXiv abs/2403.00813 (2024). 
https://api(semanticscholar.org/CorpusID:268230972" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 365, + 294, + 390 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 365, + 294, + 390 + ], + "spans": [ + { + "bbox": [ + 56, + 365, + 294, + 390 + ], + "type": "text", + "content": "[49] Paul Pu Liang, Amir Zadeh, and Louis-Philippe Morency. 2024. Foundations & trends in multimodal machine learning: Principles, challenges, and open questions. Comput. Surveys 56, 10 (2024), 1-42." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 56, + 390, + 294, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 390, + 294, + 422 + ], + "spans": [ + { + "bbox": [ + 56, + 390, + 294, + 422 + ], + "type": "text", + "content": "[50] Minhua Lin, Zhengzhang Chen, Yanchi Liu, Xujiang Zhao, Zongyu Wu, Junxiang Wang, Xiang Zhang, Suhang Wang, and Haifeng Chen. 2024. Decoding Time Series with LLMs: A Multi-Agent Framework for Cross-Domain Annotation. arXiv:2410.17462 [cs.AI] https://arxiv.org/abs/2410.17462" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 56, + 422, + 294, + 445 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 422, + 294, + 445 + ], + "spans": [ + { + "bbox": [ + 56, + 422, + 294, + 445 + ], + "type": "text", + "content": "[51] Chenxi Liu, Qianxiong Xu, Hao Miao, Sun Yang, Lingzheng Zhang, Cheng Long, Ziyue Li, and Rui Zhao. 2025. TimeCMA: Towards LLM-Empowered Multivariate Time Series Forecasting via Cross-Modality Alignment. In AAAI." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 56, + 445, + 294, + 485 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 445, + 294, + 485 + ], + "spans": [ + { + "bbox": [ + 56, + 445, + 294, + 485 + ], + "type": "text", + "content": "[52] Hanwen Liu, Daniel Hajialigol, Benny Antony, Aiguo Han, and Xuan Wang. 2024. EEG2Text: Open Vocabulary EEG-to-Text Translation with Multi-View Transformer. 
In 2024 IEEE International Conference on Big Data (BigData). IEEE Computer Society, Los Alamitos, CA, USA, 1824-1833. doi:10.1109/ BigData62323.2024.10825980" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 56, + 485, + 294, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 485, + 294, + 533 + ], + "spans": [ + { + "bbox": [ + 56, + 485, + 294, + 533 + ], + "type": "text", + "content": "[53] Haoxin Liu, Shangqing Xu, Zhiyuan Zhao, Lingkai Kong, Harshavardhan Kamarthi, Aditya B. Sasanur, Megha Sharma, Jiaming Cui, Qingsong Wen, Chao Zhang, and B. Aditya Prakash. 2024. Time-MMD: Multi-Domain Multimodal Dataset for Time Series Analysis. In The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track. https://openreview.net/forum?id=fuD0h4R1IL" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 56, + 533, + 294, + 556 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 533, + 294, + 556 + ], + "spans": [ + { + "bbox": [ + 56, + 533, + 294, + 556 + ], + "type": "text", + "content": "[54] Lei Liu, Shuo Yu, Runze Wang, Zhenxun Ma, and Yanming Shen. 2024. How can large language models understand spatial-temporal data? arXiv preprint arXiv:2401.14192 (2024)." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 56, + 556, + 294, + 597 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 556, + 294, + 597 + ], + "spans": [ + { + "bbox": [ + 56, + 556, + 294, + 597 + ], + "type": "text", + "content": "[55] Xu Liu, Junfeng Hu, Yuan Li, Shizhe Diao, Yuxuan Liang, Bryan Hooi, and Roger Zimmermann. 2024. UniTime: A Language-Empowered Unified Model for Cross-Domain Time Series Forecasting. In Proceedings of the ACM Web Conference 2024 (Singapore, Singapore) (WWW'24). Association for Computing Machinery, New York, NY, USA, 4095-4106. 
doi:10.1145/3589334.3645434" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 56, + 597, + 294, + 628 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 597, + 294, + 628 + ], + "spans": [ + { + "bbox": [ + 56, + 597, + 294, + 628 + ], + "type": "text", + "content": "[56] Xin Liu, Daniel McDuff, Geza Kovacs, Isaac Galatzer-Levy, Jacob Sunshine, Jiening Zhan, Ming-Zher Poh, Shun Liao, Paolo Di Achille, and Shwetak Patel. 2023. Large Language Models are Few-Shot Health Learners. arXiv preprint arXiv:2305.15525 (2023). https://arxiv.org/abs/2305.15525" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 56, + 628, + 294, + 653 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 628, + 294, + 653 + ], + "spans": [ + { + "bbox": [ + 56, + 628, + 294, + 653 + ], + "type": "text", + "content": "[57] Jingchao Ni, Ziming Zhao, ChengAo Shen, Hanghang Tong, Dongjin Song, Wei Cheng, Dongsheng Luo, and Haifeng Chen. 2025. Harnessing Vision Models for Time Series Analysis: A Survey. arXiv preprint arXiv:2502.08869 (2025)." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 56, + 653, + 294, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 653, + 294, + 677 + ], + "spans": [ + { + "bbox": [ + 56, + 653, + 294, + 677 + ], + "type": "text", + "content": "[58] Yuqi Nie, Nam H Nguyen, Phanwadee Sinthong, and Jayant Kalagnanam. 2023. A Time Series is Worth 64 Words: Long-term Forecasting with Transformers. In The Eleventh International Conference on Learning Representations." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 56, + 677, + 294, + 709 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 677, + 294, + 709 + ], + "spans": [ + { + "bbox": [ + 56, + 677, + 294, + 709 + ], + "type": "text", + "content": "[59] Kanghui Ning, Zijie Pan, Yu Liu, Yushan Jiang, James Y. Zhang, Kashif Rasul, Anderson Schneider, Lintao Ma, Yuriy Nevmvaka, and Dongjin Song. 
2025. TS-RAG: Retrieval-Augmented Generation based Time Series Foundation Models are Stronger Zero-Shot Forecaster. arXiv:2503.07649 [cs.LG] https://arxiv.org/" + } + ] + } + ], + "index": 22 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 320, + 87, + 558, + 693 + ], + "type": "list", + "angle": 0, + "index": 42, + "blocks": [ + { + "bbox": [ + 336, + 87, + 380, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 336, + 87, + 380, + 95 + ], + "spans": [ + { + "bbox": [ + 336, + 87, + 380, + 95 + ], + "type": "text", + "content": "abs/2503.07649" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 320, + 95, + 558, + 126 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 95, + 558, + 126 + ], + "spans": [ + { + "bbox": [ + 320, + 95, + 558, + 126 + ], + "type": "text", + "content": "[60] K. Niu, K. Zhang, X. Peng, Y. Pan, and N. Xiao. 2023. Deep Multi-Modal Intermediate Fusion of Clinical Record and Time Series Data in Mortality Prediction. Frontiers in Molecular Biosciences 10 (2023), 1136071. doi:10.3389/fmolb.2023.1136071" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 320, + 126, + 558, + 159 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 126, + 558, + 159 + ], + "spans": [ + { + "bbox": [ + 320, + 126, + 558, + 159 + ], + "type": "text", + "content": "[61] Zijie Pan, Yushan Jiang, Sahil Garg, Anderson Schneider, Yuriy Nevmyvaka, and Dongjin Song. 2024. " + }, + { + "bbox": [ + 320, + 126, + 558, + 159 + ], + "type": "inline_equation", + "content": "S^2" + }, + { + "bbox": [ + 320, + 126, + 558, + 159 + ], + "type": "text", + "content": "IP-LLM: Semantic Space Informed Prompt Learning with LLM for Time Series Forecasting. In Forty-first International Conference on Machine Learning." 
+ } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 320, + 159, + 558, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 159, + 558, + 183 + ], + "spans": [ + { + "bbox": [ + 320, + 159, + 558, + 183 + ], + "type": "text", + "content": "[62] Vinay Prithyani, Mohsin Mohammed, Richa Gadgil, Ricardo Buitrago, Vinija Jain, and Aman Chadha. 2024. On the Feasibility of Vision-Language Models for Time-Series Classification. arXiv preprint arXiv:2412.17304 (2024)." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 320, + 183, + 558, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 183, + 558, + 215 + ], + "spans": [ + { + "bbox": [ + 320, + 183, + 558, + 215 + ], + "type": "text", + "content": "[63] Yao Qin, Dongjin Song, Haifeng Cheng, Wei Cheng, Guofei Jiang, and Garrison W Cottrell. 2017. A dual-stage attention-based recurrent neural network for time series prediction. In Proceedings of the 26th International Joint Conference on Artificial Intelligence, 2627-2633." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 320, + 215, + 558, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 215, + 558, + 239 + ], + "spans": [ + { + "bbox": [ + 320, + 215, + 558, + 239 + ], + "type": "text", + "content": "[64] Hadi Rezaei, Hamidreza Faaljou, and Gholamreza Mansourfar. 2021. Stock price prediction using deep learning and frequency decomposition. Expert Systems with Applications 169 (2021), 114332." 
+ } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 320, + 239, + 558, + 295 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 239, + 558, + 295 + ], + "spans": [ + { + "bbox": [ + 320, + 239, + 558, + 295 + ], + "type": "text", + "content": "[65] Bruno M Rocha, Dimitris Filos, Luis Mendes, Gorkem Serbes, Sezer Ulukaya, Yasemin P Kahya, Niksa Jakovljevic, Tatjana L Turukalo, Ioannis M Vogiatzis, Eleni Perantoni, Evangelos Kaimakamis, Pantelis Natsivas, Ana Oliveira, Cristina Jacome, Alda Marques, Nicos Maglaveras, Rui Pedro Paiva, Ioanna Chouvarda, and Paulo de Carvalho. 2019. An open access database for the evaluation of respiratory sound classification algorithms. Physiological Measurement 40, 3 (2019), 035001." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 320, + 295, + 558, + 334 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 295, + 558, + 334 + ], + "spans": [ + { + "bbox": [ + 320, + 295, + 558, + 334 + ], + "type": "text", + "content": "[66] Ludan Ruan, Yiyang Ma, Huan Yang, Huiguo He, Bei Liu, Jianlong Fu, Nicholas Jing Yuan, Qin Jin, and Baining Guo. 2023. Mm-diffusion: Learning multi-modal diffusion models for joint audio and video generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 10219-10228." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 320, + 334, + 558, + 382 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 334, + 558, + 382 + ], + "spans": [ + { + "bbox": [ + 320, + 334, + 558, + 382 + ], + "type": "text", + "content": "[67] Ramit Sawhney, Shivam Agarwal, Arnav Wadhwa, and Rajiv Ratn Shah. 2020. Deep Attentive Learning for Stock Movement Prediction From Social Media Text and Company Correlations. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), Bonnie Webber, Trevor Cohn, Yulan He, and Yang Liu (Eds.). 
Association for Computational Linguistics, Online, 8415-8426. doi:10.18653/v1/2020.emnlp-main.676" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 320, + 382, + 558, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 382, + 558, + 415 + ], + "spans": [ + { + "bbox": [ + 320, + 382, + 558, + 415 + ], + "type": "text", + "content": "[68] ChengAo Shen, Zhengzhang Chen, Dongsheng Luo, Dongkuan Xu, Haifeng Chen, and Jingchao Ni. 2024. Exploring Multi-Modal Integration with Tool-Augmented LLM Agents for Precise Causal Discovery. arXiv:2412.13667 [cs.LG] https://arxiv.org/abs/2412.13667" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 320, + 415, + 558, + 446 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 415, + 558, + 446 + ], + "spans": [ + { + "bbox": [ + 320, + 415, + 558, + 446 + ], + "type": "text", + "content": "[69] Bowen Shi, Wei-Ning Hsu, Kushal Lakhotia, and Abdelrahman Mohamed. 2022. Learning Audio-Visual Speech Representation by Masked Multimodal Cluster Prediction. In International Conference on Learning Representations. https://openreview.net/forum?id=Z1Qlm11uOM" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 320, + 446, + 558, + 471 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 446, + 558, + 471 + ], + "spans": [ + { + "bbox": [ + 320, + 446, + 558, + 471 + ], + "type": "text", + "content": "[70] Geri Skenderi, Christian Joppi, Matteo Denitto, and Marco Cristani. 2024. Well googled is half done: Multimodal forecasting of new fashion product sales with image-based google trends. Journal of Forecasting 43, 6 (2024), 1982-1997." 
+ } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 320, + 471, + 558, + 494 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 471, + 558, + 494 + ], + "spans": [ + { + "bbox": [ + 320, + 471, + 558, + 494 + ], + "type": "text", + "content": "[71] Patrick Wagner, Nils Strothhoff, Ralf-Dieter Bousseljot, Dieter Kreiseler, Fatima I Lunze, Wojciech Samek, and Tobias Schaeffer. 2020. PTB-XL, a large publicly available electrocardiography dataset. Scientific Data 7, 1 (2020), 154." + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 320, + 494, + 558, + 526 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 494, + 558, + 526 + ], + "spans": [ + { + "bbox": [ + 320, + 494, + 558, + 526 + ], + "type": "text", + "content": "[72] Jiahao Wang, Mingyue Cheng, Qingyang Mao, Yitong Zhou, Feiyang Xu, and Xin Li. 2025. TableTime: Reformulating Time Series Classification as Training-Free Table Understanding with Large Language Models. arXiv:2411.15737 [cs.AI] https://arxiv.org/abs/2411.15737" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 320, + 525, + 558, + 582 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 525, + 558, + 582 + ], + "spans": [ + { + "bbox": [ + 320, + 525, + 558, + 582 + ], + "type": "text", + "content": "[73] Xinlei Wang, Maike Feng, Jing Qiu, JINJIN GU, and Junhua Zhao. 2024. From News to Forecast: Integrating Event Analysis in LLM-Based Time Series Forecasting with Reflection. In Advances in Neural Information Processing Systems, A. Globerson, L. Mackey, D. Belgrave, A. Fan, U. Paquet, J. Tomczak, and C. Zhang (Eds.), Vol. 37. Curran Associates, Inc., 58118-58153. 
https://proceedings.neurips.cc/paper_files/paper/2024/file/6aef8bffb372096ee73d98da30119f89-Paper-Conference.pdf" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 320, + 582, + 558, + 629 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 582, + 558, + 629 + ], + "spans": [ + { + "bbox": [ + 320, + 582, + 558, + 629 + ], + "type": "text", + "content": "[74] Xiaochen Wang, Junyu Luo, Jiaqi Wang, Ziyi Yin, Suhan Cui, Yuan Zhong, Yaqing Wang, and Fenglong Ma. 2023. Hierarchical Pretraining on Multimodal Electronic Health Records. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, Houda Bouamor, Juan Pino, and Kalika Bali (Eds.). Association for Computational Linguistics, Singapore, 2839-2852. doi:10.18653/v1/2023.emnlp-main.171" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 320, + 629, + 558, + 662 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 629, + 558, + 662 + ], + "spans": [ + { + "bbox": [ + 320, + 629, + 558, + 662 + ], + "type": "text", + "content": "[75] Zhenhailong Wang and Heng Ji. 2021. Open Vocabulary Electroencephalography-To-Text Decoding and Zero-shot Sentiment Classification. In AAAI Conference on Artificial Intelligence. https://apisemantic scholar.org/CorpusID:244909027" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 320, + 662, + 558, + 693 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 662, + 558, + 693 + ], + "spans": [ + { + "bbox": [ + 320, + 662, + 558, + 693 + ], + "type": "text", + "content": "[76] Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Brian Ichter, Fei Xia, Ed H. Chi, Quoc V. Le, and Denny Zhou. 2022. Chain-of-thought prompting elicits reasoning in large language models (NIPS '22). Curran Associates Inc., Red Hook, NY, USA, Article 1800, 14 pages." 
+ } + ] + } + ], + "index": 41 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 524, + 60, + 558, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 524, + 60, + 558, + 68 + ], + "spans": [ + { + "bbox": [ + 524, + 60, + 558, + 68 + ], + "type": "text", + "content": "Jiang, et al." + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 86, + 296, + 700 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 56, + 86, + 296, + 126 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 86, + 296, + 126 + ], + "spans": [ + { + "bbox": [ + 56, + 86, + 296, + 126 + ], + "type": "text", + "content": "[77] Andrew Robert Williams, Arjun Ashok, Étienne Marcotte, Valentina Zantedeschi, Jithendarraa Subramanian, Roland Riachi, James Requeima, Alexandre Lacoste, Irina Rish, Nicolas Chapados, et al. 2024. Context is key: A benchmark for forecasting with essential textual information. arXiv preprint arXiv:2410.18959 (2024)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 126, + 294, + 151 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 126, + 294, + 151 + ], + "spans": [ + { + "bbox": [ + 55, + 126, + 294, + 151 + ], + "type": "text", + "content": "[78] Haixu Wu, Tengge Hu, Yong Liu, Hang Zhou, Jianmin Wang, and Mingsheng Long. 2023. TimesNet: Temporal 2D-Variation Modeling for General Time Series Analysis. In The Eleventh International Conference on Learning Representations." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 151, + 294, + 190 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 151, + 294, + 190 + ], + "spans": [ + { + "bbox": [ + 56, + 151, + 294, + 190 + ], + "type": "text", + "content": "[79] Huizhe Wu, Wei Zhang, Weiwei Shen, and Jun Wang. 2018. 
Hybrid Deep Sequential Modeling for Social Text-Driven Stock Prediction. In Proceedings of the 27th ACM International Conference on Information and Knowledge Management (Torino, Italy) (CIKM '18). Association for Computing Machinery, New York, NY, USA, 1627–1630. doi:10.1145/3269206.3269290" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 190, + 294, + 222 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 190, + 294, + 222 + ], + "spans": [ + { + "bbox": [ + 56, + 190, + 294, + 222 + ], + "type": "text", + "content": "[80] Zonghan Wu, Shirui Pan, Guodong Long, Jing Jiang, Xiaojun Chang, and Chengqi Zhang. 2020. Connecting the dots: Multivariate time series forecasting with graph neural networks. In Proceedings of the 26th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining, 753-763." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 222, + 294, + 247 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 222, + 294, + 247 + ], + "spans": [ + { + "bbox": [ + 56, + 222, + 294, + 247 + ], + "type": "text", + "content": "[81] Qianqian Xie, Weiguang Han, Yanzhao Lai, Min Peng, and Jimin Huang. 2023. The wall street neophyte: A zero-shot analysis of chatgpt over multimodal stock movement prediction challenges. arXiv preprint arXiv:2304.05351 (2023)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 247, + 294, + 278 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 247, + 294, + 278 + ], + "spans": [ + { + "bbox": [ + 56, + 247, + 294, + 278 + ], + "type": "text", + "content": "[82] Zhikai Xing and Yigang He. 2023. Multi-modal information analysis for fault diagnosis with time-series data from power transformer. International Journal of Electrical Power & Energy Systems 144 (2023), 108567. 
doi:10.1016/j.ijepes.2022.108567" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 278, + 294, + 303 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 278, + 294, + 303 + ], + "spans": [ + { + "bbox": [ + 56, + 278, + 294, + 303 + ], + "type": "text", + "content": "[83] Haojun Xu, Yan Gao, Zheng Hui, Jie Li, and Xinbo Gao. 2023. Language Knowledge-Assisted Representation Learning for Skeleton-Based Action Recognition. arXiv:2305.12398 [cs.CV] https://arxiv.org/abs/2305.12398" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 303, + 294, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 303, + 294, + 342 + ], + "spans": [ + { + "bbox": [ + 56, + 303, + 294, + 342 + ], + "type": "text", + "content": "[84] Yumo Xu and Shay B. Cohen. 2018. Stock Movement Prediction from Tweets and Historical Prices. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), Iryna Gurevych and Yusuke Miyao (Eds.). Association for Computational Linguistics, Melbourne, Australia, 1970-1979. doi:10.18653/v1/P18-1183" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 342, + 294, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 342, + 294, + 373 + ], + "spans": [ + { + "bbox": [ + 56, + 342, + 294, + 373 + ], + "type": "text", + "content": "[85] Bo Yang and Lijun Wu. 2021. How to Leverage the Multimodal EHR Data for Better Medical Prediction?. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing (EMNLP). Association for Computational Linguistics, 4029-4038. 
doi:10.18653/v1/2021.emnlp-main.329" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 373, + 294, + 397 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 373, + 294, + 397 + ], + "spans": [ + { + "bbox": [ + 56, + 373, + 294, + 397 + ], + "type": "text", + "content": "[86] Haiyang Yang, Li Kuang, and FengQiang Xia. 2021. Multimodal Temporal-Clinical Note Network for Mortality Prediction. Journal of Biomedical Semantics 12, 1 (2021), 1-14. doi:10.1186/s13326-021-00235-3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 398, + 294, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 398, + 294, + 430 + ], + "spans": [ + { + "bbox": [ + 56, + 398, + 294, + 430 + ], + "type": "text", + "content": "[87] Shunyu Yao, Dian Yu, Jeffrey Zhao, Izhak Shafran, Thomas L. Griffiths, Yuan Cao, and Karthik R Narasimhan. 2023. Tree of Thoughts: Deliberate Problem Solving with Large Language Models. In Thirty-seventh Conference on Neural Information Processing Systems. https://openreview.net/forum?id=5Xc1ecxO1h" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 430, + 294, + 461 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 430, + 294, + 461 + ], + "spans": [ + { + "bbox": [ + 56, + 430, + 294, + 461 + ], + "type": "text", + "content": "[88] Kun Yi, Qi Zhang, Wei Fan, Shoujin Wang, Pengyang Wang, Hui He, Ning An, Defu Lian, Longbing Cao, and Zhendong Niu. 2024. Frequency-domain MLPs are more effective learners in time series forecasting. Advances in Neural Information Processing Systems 36 (2024)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 56, + 461, + 294, + 501 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 461, + 294, + 501 + ], + "spans": [ + { + "bbox": [ + 56, + 461, + 294, + 501 + ], + "type": "text", + "content": "[89] Xinli Yu, Zheng Chen, and Yanbin Lu. 2023. 
Harnessing LLMs for Temporal Data - A Study on Explainable Financial Time Series Forecasting. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing: Industry Track, Mingxuan Wang and Imed Zitouni (Eds.). Association for Computational Linguistics, Singapore, 739-753. doi:10.18653/v1/2023.emnlp-industry.69" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 56, + 501, + 294, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 501, + 294, + 533 + ], + "spans": [ + { + "bbox": [ + 56, + 501, + 294, + 533 + ], + "type": "text", + "content": "[90] Dong Zhang, Shimin Li, Xin Zhang, Jun Zhan, Pengyu Wang, Yaqian Zhou, and Xipeng Qiu. 2023. SpeechGPT: Empowering Large Language Models with Intrinsic Cross-Modal Conversational Abilities. arXiv:2305.11000 [cs.CL] https://arxiv.org/abs/2305.11000" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 56, + 533, + 294, + 556 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 533, + 294, + 556 + ], + "spans": [ + { + "bbox": [ + 56, + 533, + 294, + 556 + ], + "type": "text", + "content": "[91] Jingyi Zhang, Jiaxing Huang, Sheng Jin, and Shijian Lu. 2024. Vision-language models for vision tasks: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence (2024)." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 56, + 556, + 294, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 556, + 294, + 588 + ], + "spans": [ + { + "bbox": [ + 56, + 556, + 294, + 588 + ], + "type": "text", + "content": "[92] Liheng Zhang, Charu Aggarwal, and Guo-Jun Qi. 2017. Stock price prediction via discovering multi-frequency trading patterns. In Proceedings of the 23rd ACM SIGKDD international conference on knowledge discovery and data mining. 2141-2149." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 56, + 589, + 294, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 589, + 294, + 620 + ], + "spans": [ + { + "bbox": [ + 56, + 589, + 294, + 620 + ], + "type": "text", + "content": "[93] Xiyue Zhang, Chao Huang, Yong Xu, Lianghao Xia, Peng Dai, Liefeng Bo, Junbo Zhang, and Yu Zheng. 2021. Traffic flow forecasting with spatial-temporal graph diffusion network. In Proceedings of the AAAI conference on artificial intelligence, Vol. 35. 15008-15015." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 56, + 620, + 294, + 652 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 620, + 294, + 652 + ], + "spans": [ + { + "bbox": [ + 56, + 620, + 294, + 652 + ], + "type": "text", + "content": "[94] Xiang Zhang, Lina Yao, Manqing Dong, Zhe Liu, Yu Zhang, and Yong Li. 2020. Adversarial representation learning for robust patient-independent epileptic seizure detection. IEEE journal of biomedical and health informatics 24, 10 (2020), 2852-2859." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 56, + 652, + 294, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 652, + 294, + 677 + ], + "spans": [ + { + "bbox": [ + 56, + 652, + 294, + 677 + ], + "type": "text", + "content": "[95] Yuwei Zhang, Tong Xia, Aaqib Saeed, and Cecilia Mascolo. 2024. RespLLM: Unifying Audio and Text with Multimodal LLMs for Generalized Respiratory Health Prediction. arXiv:2410.05361 [cs.LG] https://arxiv.org/abs/2410.05361" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 56, + 677, + 294, + 700 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 677, + 294, + 700 + ], + "spans": [ + { + "bbox": [ + 56, + 677, + 294, + 700 + ], + "type": "text", + "content": "[96] Xiaohu Zhao, Kebin Jia, Benjamin Letcher, Jennifer Fair, Yiqun Xie, and Xiaowei Jia. 2022. VIMTS: Variational-based Imputation for Multi-modal Time Series. 
In 2022 IEEE International Conference on Big Data (Big Data). IEEE, 349-358." + } + ] + } + ], + "index": 20 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 318, + 86, + 559, + 183 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 320, + 86, + 559, + 110 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 86, + 559, + 110 + ], + "spans": [ + { + "bbox": [ + 320, + 86, + 559, + 110 + ], + "type": "text", + "content": "[97] Lecheng Zheng, Zhengzhang Chen, Jingrui He, and Haifeng Chen. 2024. MU-LAN: multi-modal causal structure learning and root cause analysis for microservice systems. In Proceedings of the ACM Web Conference 2024. 4107-4116." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 320, + 111, + 559, + 142 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 111, + 559, + 142 + ], + "spans": [ + { + "bbox": [ + 320, + 111, + 559, + 142 + ], + "type": "text", + "content": "[98] Siru Zhong, Weilin Ruan, Ming Jin, Huan Li, Qingsong Wen, and Yuxuan Liang. 2025. Time-VLM: Exploring Multimodal Vision-Language Models for Augmented Time Series Forecasting. arXiv:2502.04395 [cs.CV] https://arxiv.org/abs/2502.04395" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 320, + 142, + 559, + 159 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 142, + 559, + 159 + ], + "spans": [ + { + "bbox": [ + 320, + 142, + 559, + 159 + ], + "type": "text", + "content": "[99] Zihao Zhou and Rose Yu. 2024. Can LLMs Understand Time Series Anomalies? arXiv preprint arXiv:2410.05440 (2024)." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 318, + 159, + 559, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 159, + 559, + 183 + ], + "spans": [ + { + "bbox": [ + 318, + 159, + 559, + 183 + ], + "type": "text", + "content": "[100] Jiaxin Zhuang, Leon Yan, Zhenwei Zhang, Ruiqi Wang, Jiawei Zhang, and Yuantao Gu. 2024. 
See it, Think it, Sorted: Large Multimodal Models are Few-shot Time Series Anomaly Analyzers. arXiv preprint arXiv:2411.02465 (2024)." + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 59, + 217, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 59, + 217, + 68 + ], + "spans": [ + { + "bbox": [ + 52, + 59, + 217, + 68 + ], + "type": "text", + "content": "Multi-modal Time Series Analysis: A Tutorial and Survey" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2503_13xxx/2503.13751/2a994aa1-2c31-48af-8c61-ad007e40c304_content_list.json b/data/2025/2503_13xxx/2503.13751/2a994aa1-2c31-48af-8c61-ad007e40c304_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..98be1265f097af7d7d3d3fcb16e88d1cf2c7403f --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/2a994aa1-2c31-48af-8c61-ad007e40c304_content_list.json @@ -0,0 +1,4160 @@ +[ + { + "type": "text", + "text": "Optimizing ML Training with Metagradient Descent", + "text_level": 1, + "bbox": [ + 169, + 135, + 826, + 164 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Logan Engstrom\\*1, Andrew Ilyas\\*2†, Benjamin Chen\\*1, Axel Feldmann\\*1, William Moses\\*3, Aleksander Madry\\*1", + "bbox": [ + 256, + 185, + 738, + 224 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "*Equal contribution ${}^{1}$ MIT, ${}^{2}$ Stanford, ${}^{3}$ UIUC", + "bbox": [ + 320, + 231, + 676, + 250 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 467, + 279, + 529, + 292 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "A major challenge in training large-scale machine learning models is configuring the training process to maximize model performance, i.e., finding the best 
training setup from a vast design space. In this work, we unlock a gradient-based approach to this problem. We first introduce an algorithm for efficiently calculating metagradient gradients through model training at scale. We then introduce a \"smooth model training\" framework that enables effective optimization using metagradient. With metagradient descent (MGD), we greatly improve on existing dataset selection methods, outperform accuracy-degrading data poisoning attacks by an order of magnitude, and automatically find competitive learning rate schedules.", + "bbox": [ + 151, + 297, + 844, + 398 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 112, + 420, + 290, + 439 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "How should I clean my data? What architecture should I use? Training large-scale (i.e., deep) machine learning models entails making many design decisions. When making such decisions, typical practice is to exhaustively search over a small set of standard options. For example, we might try a few well-known data cleaning heuristics, construct a grid over a hyperparameters, and choose the options that yield the best models. However, given that this process explores only a small part of the overall design space (e.g., one can construct $2^{n}$ possible training datasets from a pool of $n$ candidate datapoints), it is unlikely that this approach really yields the optimal training configuration.", + "bbox": [ + 109, + 452, + 883, + 558 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "How can we find optimal (or at least, better) training configurations? To do so, we take the optimization perspective on designing model training. From this well-studied perspective, deciding on a training configuration—or as we will call it, a set of metaparameters—is just a high-dimensional optimization problem. 
The input space of this problem comprises all possible metaparameter choices, including which datapoints to train on, what model architecture to use, and how to initialize model weights. The objective function takes in a set of metaparameters, trains a machine learning model according to those metaparameters, and then returns a target metric evaluated on that model (e.g., test accuracy). From this perspective, any procedure for selecting metaparameters—including the typical practice of grid-searching over standard options—is just an optimization algorithm, whose goal is to maximize the objective function with respect to the (high-dimensional) input.", + "bbox": [ + 109, + 559, + 883, + 710 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Given that selecting metaparameters is \"just\" a high-dimensional optimization problem, a natural tool to consider is the gradient. After all, in many contexts, gradients offer a more effective approach to maximizing high-dimensional functions than grid search. Indeed, for a sufficiently \"well-behaved\" function $f(x)$ with gradient $\\nabla f(x)$ , we can optimize $f$ by iteratively updating $x$ in the direction of $\\nabla f(x)$ . This insight suggests a generic recipe for selecting metaparameters: first, make the objective differentiable with respect to the metaparameters; second, update via gradient steps.", + "bbox": [ + 109, + 709, + 880, + 800 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Now, the idea of using gradients to search for metaparameters is not new. Indeed, there is a substantial line of work that aims to optimize metaparameters (e.g., architectures, regularizers, or data augmentation schemes) with gradient-based methods [MDA15; LSY18; LVD20]. However, such methods have not managed to scale beyond relatively small settings. 
This state of affairs prompts our main question:", + "bbox": [ + 109, + 800, + 880, + 861 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Can we scalably configure model training using gradient-based methods?", + "bbox": [ + 251, + 866, + 740, + 883 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2503.13751v1 [stat.ML] 17 Mar 2025", + "bbox": [ + 22, + 255, + 57, + 717 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "$^{\\dagger}$ Work done at MIT EECS. Correspondence to {engstrom,ailyas,benchen}@mit.edu.", + "bbox": [ + 130, + 897, + 632, + 911 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/4f160909052d60c475611d729d8627a48f2ec03ab554e430d02e6ecf55122fc3.jpg", + "image_caption": [ + "Figure 1: Our proto-algorithm, metagradient descent (MGD), uses gradients to achieve state-of-the-art performance across a variety of applications, including data selection and data poisoning." + ], + "image_footnote": [], + "bbox": [ + 133, + 92, + 367, + 281 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/ae72b147ec83b8978675affb3f8e2678e0176541836b311165c6969778223f46.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 372, + 93, + 619, + 281 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/25d0520397f487be3b99371e5ad5a60cf7c24eb09b3e0e6bcccb837ff7007b8c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 625, + 90, + 867, + 280 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "1.1 Contributions", + "text_level": 1, + "bbox": [ + 112, + 352, + 287, + 368 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this work, we answer this question in the affirmative, adding \"gradient descent on metaparameters\" to the large-scale machine learning toolkit. 
Along the way, we will face—and address—two main challenges.", + "bbox": [ + 111, + 377, + 883, + 407 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "First, existing methods for computing metagradients do not scale. In response, we devise an algorithm, REPLAY, that can take metagradients in large-scale settings. By combining reverse-mode autodifferentiation (AD) with an efficient data structure, REPLAY can calculate exact metagradients for models with billions of parameters and thousands of training steps.", + "bbox": [ + 111, + 407, + 883, + 468 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Second, we find that metagradients of standard training routines are not necessarily helpful for optimization, which we connect to non-smoothness of the metaparameter optimization landscape. Borrowing tools from convex optimization, we devise a framework for designing \"metasmooth\" training routines that do admit helpful metagradients.", + "bbox": [ + 111, + 468, + 883, + 529 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Addressing the challenges above unlocks a simple recipe for solving a broad range of machine learning tasks: (a) frame the task as a continuous optimization problem over metaparameters; (b) design a metasmooth training routine; (c) perform metagradient descent (MGD). Applying this recipe:", + "bbox": [ + 111, + 529, + 882, + 575 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- In the DataComp-small11 competition [GIF+24], we achieve state-of-the-art pre-training data selection for CLIP (2x larger performance improvement than the previous DataComp-small1 leader [Eco24]);", + "- In the context of data selection for instruction tuning (as introduced by Xia et al. 
[XMG+24]), we substantially improve on data selection for Gemma-2B (outperforming existing selection methods as well as full-data training);", + "- In the accuracy-degrading data poisoning setting (defined by Huber [Hub64] and pioneered by Lu et al. [LKY22] for deep neural networks), we improve attacks on DNNs by an order of magnitude, dropping CIFAR-10 accuracy from $92\\% \\rightarrow 78\\%$ (the best previous attack [LKY23] only reduces accuracy to $91\\%$ );", + "- For the task of hyperparameter optimization, we efficiently find a competitive CIFAR-10 learning rate schedule (matching the performance of a schedule found by grid search)." + ], + "bbox": [ + 135, + 584, + 880, + 766 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 Scalably computing metagradients", + "text_level": 1, + "bbox": [ + 111, + 800, + 524, + 821 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this section we present REPLAY, an algorithm for computing metagradients of large-scale iterative ML algorithms. We first detail the setting, then discuss existing approaches to computing metagradients, and conclude by describing REPLAY.", + "bbox": [ + 111, + 832, + 883, + 878 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/1dc990e467236d18f25ade5d515382e45fc4338efb7565274a7fd059602d5385.jpg", + "image_caption": [ + "Training setup", + "Trained model", + "Observed behavior", + "Figure 2: An illustration of the metagradient. We embed a given aspect of the training setup (e.g., the training dataset, or optimizer hyperparameters) into a continuous metaparameter vector $z \\in \\mathbb{R}^d$ . This metaparameter defines a model $\\mathcal{A}(z)$ by way of the learning algorithm $\\mathcal{A}$ , which in turn defines an output $\\phi(z)$ . 
The metagradient $\\nabla_z \\phi(\\mathcal{A}(z)) \\in \\mathbb{R}^d$ is the gradient of this model output with respect to the metaparameter." + ], + "image_footnote": [], + "bbox": [ + 344, + 90, + 653, + 174 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.1 What is a metagradient?", + "text_level": 1, + "bbox": [ + 111, + 292, + 375, + 311 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Training a machine learning model is a two-step process. First, we decide on a training setup—we must pick, for example, a neural network architecture, a training dataset, and an optimizer for training. Second, we apply the algorithm defined by this training setup to train a model.", + "bbox": [ + 109, + 319, + 883, + 364 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Our overall goal in this paper is to optimize model behavior as a function of the training setup (or, as we call it, the metaparameters) using gradient-based methods. To this end, we define the following notation:", + "bbox": [ + 111, + 364, + 883, + 396 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Let $\\mathbf{z} \\in \\mathbb{R}^n$ be a vector of continuous metaparameters representing the aspects of the training setup we aim to optimize. For example, if we only want to adjust the learning rate and weight decay of SGD then $n = 2$ . We handle discrete metaparameters (e.g., choice of training data) by finding a continuous relaxation (e.g., importance weights).", + "- Let $\\mathcal{A}$ be an algorithm mapping $\\mathbf{z}$ to a trained machine learning model; we assume all other aspects of the training setup outside $\\mathbf{z}$ are fixed and thus part of the algorithm $\\mathcal{A}$ .", + "- Finally, let $\\phi$ be an output function mapping a model $\\theta$ to a vector $\\phi(\\theta) \\in \\mathbb{R}$ . For example, $\\phi(\\theta)$ might represent the validation loss of the model $\\theta$ . 
We require that $\\phi$ be differentiable with respect to $\\theta$ , but otherwise make no assumptions on $\\phi$ ." + ], + "bbox": [ + 135, + 404, + 880, + 559 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "With this notation in place, we define the training function $f \\coloneqq \\phi \\circ \\mathcal{A}$ mapping the training setup $\\mathbf{z}$ directly to the output function $\\phi$ evaluated on the corresponding model.", + "bbox": [ + 111, + 570, + 883, + 599 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Finally, the metagradient is the gradient of the training function with respect to the metaparameters, $\\nabla_{\\mathbf{z}}f(\\mathbf{z})$ . Intuitively, the metagradient defines the \"direction of steepest ascent\" in metaparameter space.", + "bbox": [ + 112, + 601, + 880, + 632 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Our focus: iterative algorithms. To efficiently compute the metagradient, we restrict our focus to cases where the algorithm $\\mathcal{A}$ is iterative, i.e., when it can be written in the form", + "bbox": [ + 109, + 650, + 883, + 680 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\underbrace {\\mathcal {A} (z) := \\mathbf {s} _ {T}} _ {\\text {m o d e l s t a t e a f t e r T s t e p s}}, \\quad \\text {w h e r e} \\quad \\underbrace {\\mathbf {s} _ {t + 1} : = h _ {t} (\\mathbf {s} _ {t} , \\mathbf {z})} _ {\\text {o p t i m i z e r s t e p t}}. \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 325, + 691, + 883, + 729 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Here, $\\mathbf{s}_t$ is the optimizer state at step $t$ (with $\\mathbf{s}_0$ being the initial state) and $h_t$ is the update mapping from state $t$ to state $t + 1$ . The form of (1) captures most large-scale training algorithms. 
For example, if the setup $\\mathbf{z} \\in \\mathbb{R}^T$ is a per-step learning rate, and the algorithm $\\mathcal{A}$ is full batch gradient descent, then each update $h_t$ is", + "bbox": [ + 109, + 739, + 883, + 787 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nh _ {t} (\\mathbf {s} _ {t}, \\mathbf {z}) := \\mathbf {s} _ {t} - z _ {t} \\nabla \\ell (\\mathbf {s} _ {t}),\n$$\n", + "text_format": "latex", + "bbox": [ + 403, + 797, + 589, + 814 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $z_{t}$ is the learning rate at step $t$ , $\\ell$ is the training loss, and the state $\\mathbf{s}_t$ comprises the parameters at step $t$ . For more complex algorithms like Adam [KB15], the state $\\mathbf{s}_t$ includes terms like gradient moments.", + "bbox": [ + 109, + 825, + 883, + 858 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.2 Warmup: Metagradients via autodifferentiation", + "text_level": 1, + "bbox": [ + 112, + 89, + 584, + 107 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "A key primitive we leverage to calculate metagradients is automatic differentiation (AD)—a standard tool for taking gradients through computer-defined functions. AD takes gradients by decomposing functions into elementary operations with known derivatives, then combining these derivatives using the chain rule. Concretely, AD operates in two passes: a \"forward pass,\" which executes the function of interest and stores intermediate products for each elementary operation; and a \"backward pass,\" which calculates the gradient by propagating chains of partial derivatives using these stored products. 
For the purposes of this paper, we will view AD as a black box that calculates the gradient of a many-to-one function (i.e., any $f: \\mathbb{R}^d \\to \\mathbb{R}$ ) at a given point using only a small constant factor more time than calculating the function itself (along with the space cost of storing the necessary forward-pass products).", + "bbox": [ + 109, + 114, + 883, + 251 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "What does this have to do with metagradients? Well, seeing as how training itself is a computer-defined function, AD is a natural tool for calculating the metagradient. The main challenge, as we discuss in the sequel, is that AD-based approaches to calculating the metagradient tend to be too resource-intensive for the large-scale machine learning algorithms we consider. In the remainder of this section we build up background before finally describing REPLAY, our algorithm for scalably computing (exact) metagradients.", + "bbox": [ + 109, + 251, + 883, + 327 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Approach #1: Direct AD. The direct approach to calculating metagradients exploits the fact that nearly any learning algorithm is itself a sequence of differentiable computer-defined operations—meaning the training function $f$ is also differentiable.", + "bbox": [ + 109, + 345, + 883, + 390 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "However, operationalizing this observation to compute metagradients turns out to be challenging. The reason is that AD stores intermediate products for each operation. The amount of data stored thus scales with the number of operations in the function of interest. In the case of our training function $f$ , this number encompasses all the operations used to train a machine learning model. 
As a result, even in a toy scenario like MNIST training, computing metagradients with naive AD would require storing terabytes of data.", + "bbox": [ + 109, + 391, + 883, + 467 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Approach #2: Exploiting structure with step-wise AD. A more efficient method for calculating the metagradient, step-wise AD, leverages the structure of iterative learning algorithms [Wer90; MDA15; FDF+17]. Recall from (1) that such algorithms take the form", + "bbox": [ + 109, + 484, + 883, + 531 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {A} (\\mathbf {z}) := \\mathbf {s} _ {T}, \\quad \\text {w h e r e} \\quad \\mathbf {s} _ {t + 1} := h _ {t} (\\mathbf {s} _ {t}, \\mathbf {z}).\n$$\n", + "text_format": "latex", + "bbox": [ + 354, + 541, + 638, + 558 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Algebraic manipulation (in particular, using the chain rule, the law of the total derivative, and the identity $\\mathbf{s}_t = h_{t-1}(\\mathbf{s}_{t-1}, \\mathbf{z})$ ) allows us to write the metagradient over an iterative algorithm as", + "bbox": [ + 109, + 566, + 883, + 599 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {\\partial f (\\mathbf {z})}{\\partial \\mathbf {z}} = \\frac {\\partial \\phi (\\mathcal {A} (\\mathbf {z}))}{\\partial \\mathbf {z}} = \\sum_ {t = 1} ^ {T} \\underbrace {\\overbrace {\\partial \\phi (\\mathbf {s} _ {T})} ^ {A _ {t}} . \\overbrace {\\partial \\mathbf {s} _ {t}} ^ {\\partial \\phi (\\mathbf {s} _ {T})}} _ {B _ {t}}, \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 316, + 609, + 883, + 678 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where we have introduced the notation $A_{t}$ and $B_{t}$ for notational convenience. Step-wise AD computes the metagradient by calculating each term in the sum of (2) one at a time. 
For each term, the main challenge lies in computing $A_{t}$ , since given $A_{t}$ we can straightforwardly compute $B_{t}$ (the entire term) by differentiating through a single model update, i.e.,", + "bbox": [ + 109, + 686, + 883, + 750 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nB _ {t} := A _ {t} \\cdot \\frac {\\partial h _ {t - 1} (\\mathbf {s} _ {t - 1} , \\mathbf {z})}{\\partial \\mathbf {z}} = \\frac {\\partial (A _ {t} \\cdot h _ {t - 1} (\\mathbf {s} _ {t - 1} , \\mathbf {z}))}{\\partial \\mathbf {z}},\n$$\n", + "text_format": "latex", + "bbox": [ + 321, + 758, + 671, + 790 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "which is just a single call to our assumed \"AD oracle\" on the function $\\mathbf{z} \\mapsto A_t \\cdot h_{t-1}(\\mathbf{s}_{t-1}, \\mathbf{z})$ . Computing the $A_t$ terms is less straightforward as we need to relate $s_t$ and $s_T$ ; to do so, we exploit the recurrence", + "bbox": [ + 109, + 797, + 883, + 830 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nA _ {t} := \\frac {\\partial \\phi (\\mathbf {s} _ {T})}{\\partial \\mathbf {s} _ {t}} = \\frac {\\partial \\phi (\\mathbf {s} _ {T})}{\\partial \\mathbf {s} _ {t + 1}} \\cdot \\frac {\\partial h _ {t} (\\mathbf {s} _ {t} , \\mathbf {z})}{\\partial \\mathbf {s} _ {t}} = \\frac {\\partial \\left(A _ {t + 1} \\cdot h _ {t} (\\mathbf {s} _ {t} , \\mathbf {z})\\right)}{\\partial \\mathbf {s} _ {t}}, \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 290, + 838, + 883, + 872 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "making $A_{t}$ straightforward to compute (again, a single \"AD oracle\" call) given $A_{t+1}$ . 
Step-wise AD exploits this fact to successively calculate the gradient with respect to each state, from state $T$ down to state 0.", + "bbox": [ + 109, + 881, + 883, + 912 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/e61aefc6d8918a5c6e6ab0e0697fcbfc36486ad10f9f7a6216b3e5fefb7518f9.jpg", + "image_caption": [ + "Figure 3: The lazy $k$ -ary tree structure for traversing optimizer states in reverse order, with $k = 2$ . Recall that $n$ is the number of states (parameterized such that $n = T + 1$ ). Each node represents the correspondingly numbered state. We give an example of the traversal using the blue arrows in the figure, which denote the traversal path up to state $s_{\\frac{3n}{4} + 1}$ . The gray cylinders indicate the states that are stored when the traversal is at state $s_{\\frac{3n}{4} + 1}$ ; the other states are not stored at this point in the traversal. Traversing this structure requires storing $\\mathcal{O}(\\log(n))$ state and computing $\\mathcal{O}(n \\log(n))$ optimizer steps—compared to $n$ for simply training." + ], + "image_footnote": [], + "bbox": [ + 147, + 109, + 851, + 375 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Bringing these ingredients together, the algorithm executes as follows. As a preprocessing step, it trains the model and stores all intermediate states $\\mathbf{s}_0,\\dots ,\\mathbf{s}_T$ . Then, the algorithm calculates and sums the terms in (2). It first computes $A_{T}\\coloneqq \\partial \\phi (\\mathbf{s}_{T}) / \\mathbf{s}_{T}$ , the gradient of the output function $\\phi$ with respect to the final state. 
Then, the algorithm steps through $\\mathbf{s}_{T - 1},\\ldots ,\\mathbf{s}_0$ in reverse order, calculating (a) the gradient with respect to each state $A_{t}$ (via (3)) and (b) the gradient with respect to $\\mathbf{z}$ at that step $B_{t}$ (via (2), using the previously calculated gradient with respect to that state). AD calculates both quantities--each requires differentiating over only one train step. Finally, the algorithm returns the final metagradient as the sum of the terms.", + "bbox": [ + 109, + 505, + 883, + 611 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Despite improving storage overhead compared to \"direct AD\", step-wise AD is still too space-intensive at scale. After all, this algorithm saves every optimizer state.", + "bbox": [ + 109, + 611, + 883, + 643 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "2.3 REPLAY", + "text_level": 1, + "bbox": [ + 112, + 662, + 232, + 678 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "REPLAY is our algorithm for efficiently and exactly computing metagradients. It uses $\\mathcal{O}(k\\log_k(T))$ space and requires running the learning algorithm $\\mathcal{A}$ a total of $1 + \\log_{k}(T)$ times, with $k$ a user-chosen constant. The main idea is to make the space-intensive subroutine of step-wise AD—a reverse-order traversal of the optimizer states at each step—much more efficient. After all, step-wise AD stores all the states to reverse traverse them. REPLAY modifies step-wise AD to traverse states in less space by exploiting a simple observation: when training is deterministic, one can reinstantiate an optimizer state $\\mathbf{s}_t$ by \"replaying\" training from a fixed point $t' < t$ at the compute cost of $t - t'$ training steps. 
For example, one simple scheme saves every other state, then \"replays\" the remaining states when (reverse) traversing; this routine stores $T/2$ states but computes an extra $T/2$ model updates compared to storing all the states.", + "bbox": [ + 109, + 686, + 883, + 823 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "REPLAY performs a reverse-order traversal of the optimizer states while balancing the compute cost of \"replaying\" training with the storage cost of saving states. We use a combination of deterministic training (fixing data ordering, data augmentation, and any other randomness in the training process) and an efficient data structure (similar to a segment tree; see Figure 3) to reverse-order traverse the optimizer states with $\\mathcal{O}(k\\log_k(T))$ space and an additional $T\\log_k(T)$ model steps.", + "bbox": [ + 109, + 823, + 883, + 902 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Specifically, REPLAY recursively saves and replays training states. The algorithm splits the training trajectory into $k$ segments, performs the full training routine while saving only the start of each segment, then recurses into each segment (in reverse) to retrieve the states in reverse-order. The recursion depth bottoms out at $\\log_k(T)$ , at which point the algorithm has $k$ consecutive optimizer states in memory; the algorithm then backpropagates along this segment, before deleting all these states from memory and then reinstantiating the next $k$ -length segment of optimizer states. We provide additional details on the algorithm in Appendix A.2. REPLAY unlocks computing large-scale metagradients by requiring only logarithmic storage and additional compute time.", + "bbox": [ + 109, + 90, + 887, + 213 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Remark 1 (Connection to rematerialization). 
In a broad sense, both REPLAY and step-wise AD above can be viewed as special cases of a classical approach in AD (and computing broadly) known as rematerialization [CAC+81; BCT92; ZP00; GW08; CXZ+16]. To our knowledge, however, REPLAY is the first application of this particular rematerialization technique to the problem of computing metagradients through model training.", + "bbox": [ + 109, + 220, + 883, + 284 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Remark 2 (Reversible learning). An alternative approach to calculating metagradients that does not save any state is reversible learning [MDA15], for which one can \"invert\" previous training states from future ones. We focus here on general (non-reversible) learning algorithms for two reasons: first, even simple algorithms such as SGD without momentum are non-reversible; second, reversibility in practice introduces numerical precision issues.", + "bbox": [ + 109, + 292, + 883, + 354 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3 Designing metasmooth training routines", + "text_level": 1, + "bbox": [ + 111, + 377, + 589, + 398 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Given a training function $f$ , REPLAY enables us to compute metagradients $\\nabla f(\\mathbf{z})$ for any setup $\\mathbf{z}$ . Can we immediately use these metagradients to optimize model training setups? The answer is (generally) no: we find that applying REPLAY to a function $f$ representing a standard model training and evaluation routine yields metagradients that are often $\\pm \\infty$ -valued and generally unhelpful for optimization. Indeed, previous work has observed similar issues optimizing over even (very) small-scale training [BSF94; Pea96; MDA15].", + "bbox": [ + 109, + 409, + 883, + 484 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In this section, we show that an underlying source of the issue is the landscape of the metaparameter optimization problem. 
We then present a framework for modifying standard learning algorithms to admit useful metagradients, i.e., to be metasmooth. To use a familiar analogy: just as residual connections and improved initialization schemes can improve optimization in standard deep learning algorithms, our framework introduces an analogous set of modifications to enable optimization with metagradients.", + "bbox": [ + 109, + 484, + 883, + 561 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.1 The metaparameter optimization landscape", + "text_level": 1, + "bbox": [ + 111, + 580, + 549, + 599 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We first review the notion of smoothness from optimization theory, and then adapt it to the setting of metagradients. The resulting metasmoothness metric allows us to quantify (and later, improve) the amenability of the metaparameter optimization problem to gradient-based methods.", + "bbox": [ + 109, + 606, + 885, + 654 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Smoothness. In optimization theory, the basic property of a function that controls how effectively it can be optimized with first-order methods is smoothness. Specifically, a function $f(\\mathbf{z})$ is $\\beta$ -smooth at a point $\\mathbf{z}$ if its gradient $\\nabla f$ satisfies the property that", + "bbox": [ + 109, + 670, + 883, + 717 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| \\nabla f (\\mathbf {z}) - \\nabla f \\left(\\mathbf {z} ^ {\\prime}\\right) \\right\\| \\leq \\beta \\cdot \\left\\| \\mathbf {z} - \\mathbf {z} ^ {\\prime} \\right\\| \\quad \\text {for all } \\mathbf {z} ^ {\\prime}, \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 326, + 726, + 883, + 744 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "or in other words, if its gradient does not change too quickly around $\\mathbf{z}$ . 
To motivate this definition: if a function $f$ is $\\beta$ -smooth at $\\mathbf{z}$ , then a step of gradient descent with step size $1 / \\beta$ will successfully decrease the value of the function:", + "bbox": [ + 109, + 756, + 882, + 801 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nf \\left(\\mathbf {z} - \\frac {1}{\\beta} \\nabla f (\\mathbf {z})\\right) \\leq f (\\mathbf {z}) - \\frac {1}{2 \\beta} \\| \\nabla f (\\mathbf {z}) \\| ^ {2}.\n$$\n", + "text_format": "latex", + "bbox": [ + 346, + 809, + 648, + 844 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "This guarantee makes $\\beta$ -smoothness a good measure of gradient utility.", + "bbox": [ + 112, + 853, + 632, + 871 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 936, + 504, + 946 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Metasmoothness. There are two main challenges in adapting the smoothness property to the metagradient setting. First, evaluating (4) requires a search over all possible $\\mathbf{z}'$ , which is infeasible. Second, even if we could exactly evaluate the left-hand side of (4), it would be difficult to disentangle non-smoothness of the training function $f$ from potential error in metagradient computation (e.g., a numerically unstable operation in REPLAY).", + "bbox": [ + 109, + 90, + 880, + 165 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "To sidestep these issues, we propose a metric called metasmoothness, given in Definition 1. Metasmoothness is cheap to compute—requiring only three evaluations of the training function—and does not rely on metagradient computation. 
For the remainder of this section, we fix a small constant $h > 0$ , and define the corresponding finite-differences estimator of the directional derivative $\\Delta_f$ as", + "bbox": [ + 109, + 167, + 880, + 228 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\Delta_ {f} (\\mathbf {z}; \\mathbf {v}) := \\frac {f (\\mathbf {z} + h \\mathbf {v}) - f (\\mathbf {z})}{h}.\n$$\n", + "text_format": "latex", + "bbox": [ + 388, + 238, + 607, + 268 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Definition 1 (Metasmoothness of $f$ at $\\mathbf{z}$ towards $\\mathbf{v}$ ). Consider a training function $f$ mapping metaparameters $\\mathbf{z} \\in \\mathbb{R}^n$ to model output $f(\\mathbf{z}) \\in \\mathbb{R}$ . Given a metaparameter $\\mathbf{z}$ and a vector $\\mathbf{v} \\in \\mathbb{R}^n$ , the metasmoothness of $f$ at $\\mathbf{z}$ towards $\\mathbf{v}$ is given by", + "bbox": [ + 109, + 277, + 880, + 323 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\nS _ {h, \\mathbf {v}} (f; \\mathbf {z}) := \\left| \\frac {\\Delta_ {f} (\\mathbf {z} + h \\mathbf {v}; \\mathbf {v}) - \\Delta_ {f} (\\mathbf {z}; \\mathbf {v})}{h} \\right|. 
\\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 367, + 332, + 880, + 367 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Definition 1 measures the rate of change of the derivative of $f(\\mathbf{z})$ in the direction of a given vector $\\mathbf{v}$ , and is therefore related to $\\beta$ -smoothness in that:", + "bbox": [ + 109, + 376, + 880, + 407 + ], + "page_idx": 6 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(a) If $f$ is $\\beta$ -smooth at $\\mathbf{z}$ , then $S_{h,\\mathbf{v}}(f;\\mathbf{z}) \\leq \\beta$ for any $(h,\\mathbf{v})$ (so Definition 1 is necessary for smoothness).", + "(b) If $\\lim_{h\\to 0}S_{h,\\mathbf{v}}(f;\\mathbf{z})\\leq \\beta$ for all $\\mathbf{z}\\in \\mathbb{R}^n$ and $\\mathbf{v}\\in \\mathbb{S}^{n - 1}$ , then $f$ is $\\beta$ -smooth everywhere (so a global version of Definition 1 is sufficient for smoothness)." + ], + "bbox": [ + 124, + 414, + 879, + 470 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Empirical metasmoothness. Definition 1 lets us measure the meta-smoothness of a training function $f$ at a particular metaparameter $\\mathbf{z}$ (towards a direction $\\mathbf{v}$ ). This definition, however, has two shortcomings. First, recall that the training function $f$ is a composition of a learning algorithm $\\mathcal{A}$ and an output function $\\phi$ , so the smoothness of $f$ depends on that of both $\\mathcal{A}$ and $\\phi$ (in particular, $\\frac{\\partial f}{\\partial \\mathbf{z}} = \\frac{\\partial \\phi}{\\partial \\mathcal{A}} \\cdot \\frac{\\partial \\mathcal{A}}{\\partial \\mathbf{z}}$ ). Since the output function $\\phi$ might be unknown ahead of time, we are most interested in measuring the overall metasmoothness of a learning algorithm $\\mathcal{A}$ . Second, while the result of (5) does have a concrete basis in optimization theory, it may not be easy to interpret in practice (e.g., what does $S = 200$ mean?). 
We address both issues simultaneously by (a) proposing an interpretable \"binarized\" version of Definition 1, and (b) studying metasmoothness in the space of model parameters $\\theta$ , instead of the output space.", + "bbox": [ + 109, + 489, + 880, + 627 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Definition 2 (Empirical metasmoothness of $\\mathcal{A}$ ). Let $\\mathcal{A}$ be a learning algorithm which maps metaparameters $\\mathbf{z} \\in \\mathbb{R}^n$ to model parameters $\\theta \\in \\mathbb{R}^d$ , let $\\mathbf{z}$ be a metaparameter vector, and let $\\mathbf{v}$ be a given direction. Let $\\mathbf{d} \\in \\mathbb{R}^d$ be the per-coordinate variation in $\\theta$ , i.e.,", + "bbox": [ + 109, + 633, + 880, + 679 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {d} = \\left| \\mathcal {A} (\\mathbf {z} + 2 h \\mathbf {v}) - \\mathcal {A} (\\mathbf {z}) \\right|\n$$\n", + "text_format": "latex", + "bbox": [ + 403, + 689, + 588, + 705 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The empirical $(h,\\mathbf{v})$ -metasmoothness of $\\mathcal{A}$ at $\\mathbf{z}$ is given by", + "bbox": [ + 112, + 715, + 501, + 732 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\widehat {S} _ {h, \\mathbf {v}} (\\mathcal {A}; \\mathbf {z}) = \\operatorname {s i g n} \\left(\\Delta_ {\\mathcal {A}} (\\mathbf {z}; \\mathbf {v})\\right) ^ {\\top} \\cdot \\operatorname {d i a g} \\left(\\frac {\\mathbf {d}}{\\| \\mathbf {d} \\| _ {1}}\\right) \\cdot \\operatorname {s i g n} \\left(\\Delta_ {\\mathcal {A}} (\\mathbf {z} + h \\mathbf {v}; \\mathbf {v})\\right), \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 254, + 741, + 883, + 773 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "weights each parameter by its range.", + "bbox": [ + 112, + 782, + 354, + 799 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Intuitively, (6) measures the agreement in sign between the 
(finite-difference approximation of the) metagradient in the direction of $\\mathbf{v}$ at $\\mathbf{z}$ and at $\\mathbf{z} + h\\mathbf{v}$ , averaged across parameter coordinates and weighted by the variation in each coordinate. Taking a weighted average of sign agreements ensures that $\\widehat{S} \\in [-1,1]$ (making it easier to interpret than Definition 1). The $\\mathrm{diag}(\\mathsf{d} / \\| \\mathsf{d}\\|_1)$ term weights each agreement proportionally to the scale of the corresponding parameter change (downweighting, e.g., coordinates $i$ that are essentially constant). Finally, observe that Definition 2 is efficient to compute in practice: it requires only three calls to the learning algorithm $\\mathcal{A}$ .", + "bbox": [ + 109, + 806, + 880, + 912 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/8903e4b2221d100768bfe268b5a001dd442b4e5c27be2741e58072ab69f0a150.jpg", + "image_caption": [ + "(a)" + ], + "image_footnote": [], + "bbox": [ + 124, + 85, + 387, + 313 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/1a484312f1ed10309ea34d6652f551a0d6047e126a3f781ebb74cc68734bab99.jpg", + "image_caption": [ + "Figure 4: (a) For a variety of training configurations of a ResNet-9 model, we plot metasmoothness (Def. 2) against test accuracy. Strategies such as increasing width, placing batch normalization before activations, and scaling down network outputs consistently improve metasmoothness, at a minor cost to accuracy. (b) Smoother training configurations can be optimized via metagradients more effectively. Here, as in Section 4.3, we use metagradients to gradient ascend on validation loss." 
+ ], + "image_footnote": [], + "bbox": [ + 398, + 87, + 588, + 281 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/a19a4da2f67b18c44fb26d6e2a9cceb669d036ffc05618b89b9c68b99f1485ad.jpg", + "image_caption": [ + "(b)" + ], + "image_footnote": [], + "bbox": [ + 601, + 85, + 875, + 311 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Remark 3. Ideally, recalling the smoothness definition (4), we would evaluate metasmoothness in all possible directions $\\mathbf{v}$ and all points $\\mathbf{z}$ . Empirically, we find in the sequel (Section 3.2) that this single-direction approximation at a single point $\\mathbf{z}$ still yields a useful estimate of metasmoothness (e.g., one that correlates with metagradients utility).", + "bbox": [ + 109, + 446, + 883, + 494 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "3.2 Estimating and improving metasmoothness", + "text_level": 1, + "bbox": [ + 111, + 517, + 549, + 535 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Having established a method for quantifying metasmoothness, we turn to the practical question: how can we design learning algorithms that are amenable to metagradient optimization? To answer this question, we introduce a straightforward framework: given a learning algorithm, explore a fixed menu of possible modifications to the training setup, and choose the combination that maximizes empirical metasmoothness. In practice, we find that this framework allows us to slightly modify learning algorithms in a way that makes them amenable to first-order methods.", + "bbox": [ + 109, + 542, + 883, + 632 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "As a case study, we study the task of training ResNet-9 on the CIFAR-10 dataset [Kri09]. We let the metaparameters $\\mathbf{z}$ be a perturbation to the pixels of 1000 random training images (so $\\mathbf{z} \\in \\mathbb{R}^{1000 \\times 32 \\times 32 \\times 3}$ ). 
We estimate the empirical metasmoothness of different learning algorithms $\\mathcal{A}$ at $\\mathbf{z} = \\mathbf{0}$ using Definition 2. Concretely, we proceed as follows for each learning algorithm $\\mathcal{A}$ :", + "bbox": [ + 109, + 633, + 883, + 694 + ], + "page_idx": 7 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Let $\\mathbf{z}_0 = \\mathbf{0}$ be the metaparameter corresponding to the original dataset.", + "2. Sample a random perturbation vector $\\mathbf{v} \\sim \\mathcal{N}(0,1)$ .", + "3. Compute the empirical metasmoothness (6), i.e.," + ], + "bbox": [ + 130, + 698, + 666, + 761 + ], + "page_idx": 7 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(a) Let $\\theta_0\\coloneqq \\mathcal{A}(\\mathbf{z}_0)$ , $\\theta_h\\coloneqq \\mathcal{A}(\\mathbf{z}_0 + h\\cdot \\mathbf{v})$ , and $\\theta_{2h}\\coloneqq \\mathcal{A}(\\mathbf{z}_0 + 2h\\cdot \\mathbf{v})$ be the model parameters that result from training with training dataset perturbations $\\mathbf{z}_0,\\mathbf{z}_0 + h\\mathbf{v}$ , and $\\mathbf{z}_0 + 2h\\mathbf{v}$ , respectively.", + "(b) Compute the approximate derivatives" + ], + "bbox": [ + 160, + 767, + 883, + 816 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\Delta_ {\\mathcal {A}} (\\mathbf {z} _ {0}; \\mathbf {v}) = \\left(\\theta_ {h} - \\theta_ {0}\\right) / h, \\quad \\Delta_ {\\mathcal {A}} (\\mathbf {z} _ {0} + h \\mathbf {v}; \\mathbf {v}) = \\left(\\theta_ {2 h} - \\theta_ {h}\\right) / h.\n$$\n", + "text_format": "latex", + "bbox": [ + 320, + 821, + 750, + 840 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "(c) Compute the weighting vector $\\mathbf{d} = |\\theta_{2h} - \\theta_0|$ , and compute the average metasmoothness (6), i.e.,", + "bbox": [ + 161, + 845, + 883, + 863 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\widehat {S} _ {h, \\mathbf {v}} (\\mathcal {A}; z _ {0}) = \\operatorname {s i g n} \\left(\\Delta_ 
{\\mathcal {A}} \\left(\\mathbf {z} _ {0} + h \\mathbf {v}; \\mathbf {v}\\right)\\right) ^ {\\top} \\cdot \\operatorname {d i a g} \\left(\\frac {\\mathbf {d}}{\\| \\mathbf {d} \\| _ {1}}\\right) \\cdot \\operatorname {s i g n} \\left(\\Delta_ {\\mathcal {A}} \\left(\\mathbf {z} _ {0}; \\mathbf {v}\\right)\\right).\n$$\n", + "text_format": "latex", + "bbox": [ + 282, + 867, + 785, + 901 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/95cf8c6f67f7a1ff5f5908ae786adfca0bdc4e43fa7de5fa95c29ea9182013b3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 151, + 89, + 488, + 282 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/8ef162573b94d345eed4da85496a669bb5b67c2e40af2ad7caf94d5e5efc2d81.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 90, + 836, + 281 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/65efa3e7039ec2bf0c17df31db0fea551abb1e8ebec13cd70da4376df689dec2.jpg", + "image_caption": [ + "Figure 5: The effect of metasmoothness on the optimization landscape. Each plot above visualizes the loss landscape of a (deterministic) learning algorithm $\\mathcal{A}$ , with the $x$ - and $y$ -axes representing additive perturbations to 1000 examples in the training set and the $z$ -axis representing the resulting model's loss on the test example given in the title. In each row, the left plot is a non-smooth algorithm, and the right plot is a smooth algorithm (as per Definition 2) evaluated on the same example. Overall, empirical metasmoothness seems to strongly correlate with qualitative landscape smoothness. See Figure 12 for more examples." 
+ ], + "image_footnote": [], + "bbox": [ + 151, + 290, + 488, + 479 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/b822311a6fc4b5b0c4d7fcafd79dd0e3e2f03c4bdeeb8e5787a92fec3738137b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 500, + 290, + 838, + 479 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Metasmooth learning algorithms. We apply the procedure above to estimate the metasmoothness of learning algorithms induced by different design choices (batch size, network width, BatchNorm placement, gradient scaling), and report the results in Figure 4 (left). On one hand, \"standard\" learning algorithms (i.e., those designed without metasmoothness in mind) are not metasmooth. On the other hand, our investigation reveals central factors driving metasmoothness. In addition to \"standard\" hyperparameters such as batch size and network width playing a role, we find that placing Batch Normalization layers prior to nonlinearities (instead of after) and scaling the final layer output are both crucial to metasmoothness. Note that the modifications we consider above are not exhaustive—see Appendix E for the full training setup.", + "bbox": [ + 109, + 611, + 883, + 731 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Finally, in Figure 5, we plot the optimization landscape of both metasmooth (right) and non-metasmooth (left) models. We find that the landscapes of metasmooth models are much smoother and—qualitatively—more straightforward to optimize.", + "bbox": [ + 111, + 732, + 883, + 777 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Metasmoothness/performance tradeoffs? Figure 4 (left) relates metasmoothness to model accuracy for the considered learning algorithms. While there is no clear trend, the top-performing learning algorithms are not always metasmooth. However, the trade-off is not too severe: the most metasmooth algorithms still achieve near-optimal accuracy. 
Furthermore, it is possible that with additional searching we could identify even more accurate metasmooth models. Taken together with our previous experiment, our results suggest that jointly searching over metasmoothness and model accuracy is a general recipe for designing learning algorithms that are both performant and metasmooth. Finally, as we discuss in Section 5, a fruitful avenue", + "bbox": [ + 109, + 796, + 883, + 902 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "for future work may be to design metasmooth learning algorithms directly, i.e., without relying on stability heuristics or grid search.", + "bbox": [ + 109, + 90, + 885, + 122 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Does metasmoothness aid downstream optimization? Recall that our motivation for studying metasmoothness is to develop learning algorithms that we can optimize the metaparameters of via metagradients (using first-order methods). We started with the notion of $\\beta$ -smoothness from optimization theory, and we adapted it to the setting of metagradients by making a series of approximations and modifications. The final question we address is: does our final notion of metasmoothness actually predict the utility of metagradients for optimization? Figure 4 (right) demonstrates that metasmoothness strongly predicts our ability to optimize the metaparameters of a given learning algorithm. 
We use metagradients (computed by REPLAY) to gradient ascend on validation loss with respect to the metaparameters $\\mathbf{z}$ , and measure the change in model loss.", + "bbox": [ + 109, + 140, + 883, + 277 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "4 Applications", + "text_level": 1, + "bbox": [ + 112, + 300, + 292, + 321 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "In this section, we apply metagradients to three problems in machine learning: selecting training data, poisoning training data, and searching for hyperparameters. In each setting we follow the same recipe: we frame the task as an optimization problem, modify the learning algorithm of interest to be smooth, then solve by first-order optimizing with meta-gradients—which we refer to, in a catch-all manner across algorithms, as metagradient descent (MGD). In particular: we substantially improve on existing dataset selection methods (Section 4.1, Section 4.2), perform the first effective accuracy-degrading data poisoning attack (Section 4.3), and discover one-cycle learning rate schedules with MGD (Section 4.4).", + "bbox": [ + 109, + 332, + 883, + 441 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "4.1 Selecting multimodal training data", + "text_level": 1, + "bbox": [ + 109, + 458, + 475, + 477 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Curating a training dataset from a mass of unfiltered data is a necessary and influential step in any large-scale machine learning pipeline. Deciding how to curate such a dataset is a challenging problem that has attracted substantial recent interest [FiW+22; ATS+23; EFM24; GIF+24]. In this section, we frame pre-training data selection as an optimization problem, and then solve this problem by first-order optimizing with metagradients. 
Applying our method to the DataComp-small benchmark [GIF+24], we greatly improve on the state-of-the-art (our improvement over state-of-the-art is roughly the same as the improvement of state-of-the-art over training on random data).", + "bbox": [ + 109, + 484, + 883, + 592 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "4.1.1 Setup", + "text_level": 1, + "bbox": [ + 112, + 609, + 210, + 626 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "The goal of dataset selection is to choose a training data subset (out of a broad pool of data) that maximizes trained machine learning model performance. Given this goal, dataset selection has a natural interpretation as a combinatorial metaparameter optimization problem. In particular, in the language of Section 2.1, for a training set of size $n$ , let", + "bbox": [ + 109, + 632, + 883, + 696 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(a) the metaparameters $\\mathbf{c} \\in \\mathcal{C} \\coloneqq \\mathbb{Z}_{\\geq 0}^{n}$ be non-negative data counts representing the number of times each training sample repeats in the training data;", + "(b) the algorithm $\\mathcal{A}$ be a standard large-scale learning procedure, which runs on a training set comprising $c_{i}$ copies of each sample $i$ for $i\\in [n]$ ;", + "(c) the output function $\\phi$ be the loss of the trained model on a target distribution $D$ ." + ], + "bbox": [ + 124, + 702, + 880, + 799 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Then, defining $f(\\mathbf{c}) \\coloneqq \\phi(\\mathcal{A}(\\mathbf{c}))$ (as in Section 2.1), our goal is to find the data counts $\\mathbf{c}^*$ that solve", + "bbox": [ + 112, + 808, + 816, + 825 + ], + "page_idx": 9 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {c} ^ {*} := \\underset {\\mathbf {c} \\in \\mathcal {C}} {\\arg \\min } f (\\mathbf {c}). 
\\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 426, + 835, + 883, + 864 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 488, + 934, + 509, + 946 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "4.1.2 Gradient descent on training data", + "text_level": 1, + "bbox": [ + 112, + 90, + 416, + 107 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Metagradients let us directly minimize the target task loss (7) with respect to the choice of training data. At a high level, our algorithm operates as follows: we start with a randomly chosen set of training data, then iteratively update the dataset selection using metagradients with respect to importance weights placed on each training datapoint. The specifics of our method are in Algorithm 1; we describe its core ideas below.", + "bbox": [ + 111, + 114, + 883, + 176 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Idea 1: A surrogate algorithm. We cannot use metagradients to optimize (7) directly, because the metaparameters of interest $\\mathbf{c}$ are discrete counts (and so the algorithm $\\mathcal{A}$ is non-differentiable with respect to $\\mathbf{c}$ ). To circumvent this problem, we relax $\\mathcal{A}$ : we define a surrogate algorithm $\\mathcal{A}_{\\mathbf{c}}^{\\prime}$ that takes in a continuous metaparameter $\\mathbf{z} \\in \\mathbb{R}^{n}$ , whose metagradient we can compute, then optimize using the metagradient on $\\mathcal{A}_{\\mathbf{c}}^{\\prime}$ .", + "bbox": [ + 111, + 194, + 883, + 255 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "This surrogate learning algorithm $\\mathcal{A}_{\\mathrm{c}}^{\\prime}$ maps a metaparameter $\\mathbf{z} \\in \\mathbb{R}^{n}$ (representing a perturbation to training data weights) to a machine learning model. 
The surrogate is defined by a set of counts $\\mathbf{c} \\in \\mathbb{Z}_{+}^{n}$ , and a hyperparameter $k$ denoting a specific training iteration, both of which we bake into the surrogate algorithm itself. Given a metaparameter $\\mathbf{z} \\in \\mathbb{R}^{n}$ , the algorithm $\\mathcal{A}_{\\mathrm{c}}^{\\prime}$ trains a model \"as usual\" using the fixed counts $\\mathbf{c}$ . That is, it makes $c_{i}$ copies of each training sample $i$ , shuffles and partitions the data into batches, and then at each iteration minimizes the batch loss with a step—just as the original learning algorithm $\\mathcal{A}$ . At iteration $k$ , however, in addition to the original loss on the $k$ -th batch, the algorithm upweights each training sample $i$ according to the metaparameter $z_{i}$ . In other words, the objective at iteration $t$ of the surrogate algorithm $\\mathcal{A}_{\\mathrm{c}}^{\\prime}$ is", + "bbox": [ + 111, + 255, + 883, + 391 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\n\\ell_ {t} ^ {\\prime} (\\theta) := \\left\\{ \\begin{array}{l l} \\sum_ {x \\in t ^ {\\text {t h}} \\text {b a t c h}} \\ell (x; \\theta) & \\text {i f} t \\neq k \\\\ \\sum_ {x \\in t ^ {\\text {t h}} \\text {b a t c h}} \\ell (x; \\theta) + \\sum_ {i = 1} ^ {n} z _ {i} \\ell (x _ {i}; \\theta) & \\text {i f} t = k \\end{array} \\right.\n$$\n", + "text_format": "latex", + "bbox": [ + 303, + 391, + 692, + 431 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "where $\\ell (x;\\theta)$ is the training loss on example $x$", + "bbox": [ + 112, + 436, + 452, + 452 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Observe that when $\\mathbf{z} = \\mathbf{0}_n$ , the algorithm $\\mathcal{A}_{\\mathbf{c}}'$ is identical to the standard learning algorithm $\\mathcal{A}$ . 
And while $\\mathcal{A}$ was a function of (nondifferentiable) discrete data counts $\\mathbf{c}$ , $\\mathcal{A}_{\\mathbf{c}}'$ is differentiable with respect to its input $\\mathbf{z}$ , and so we can compute the metagradient", + "bbox": [ + 111, + 452, + 883, + 498 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {g} := \\nabla_ {\\mathbf {z}} \\phi \\big (\\mathcal {A} _ {\\mathbf {c}} ^ {\\prime} (\\mathbf {z}) \\big) \\big | _ {\\mathbf {z} = \\mathbf {0} _ {n}}.\n$$\n", + "text_format": "latex", + "bbox": [ + 410, + 508, + 584, + 530 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Intuitively, the entries of the metagradient $\\mathbf{g}$ capture the effect of adding an infinitesimal amount of each training sample $i$ to the training data at iteration $k$ . A positive entry $g_{i}$ indicates that adding an infinitesimal amount of sample $i$ to the training data would increase the loss, and a negative entry indicates that adding an infinitesimal amount of sample $i$ to the training data would decrease the loss; the slot at $i$ represents the (estimated) effect of adding a copy of sample $i$ to the training data at every batch containing the sample.", + "bbox": [ + 111, + 539, + 883, + 617 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Idea 2: Block coordinate descent. We then use the metagradient $\\mathbf{g}$ to iteratively update our selected dataset. We update data counts as", + "bbox": [ + 111, + 633, + 883, + 665 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {c} \\leftarrow \\mathbf {c} - \\operatorname {s i g n} (\\mathbf {g}) \\odot \\mathbf {m}, \\quad \\mathbf {m} \\sim \\text {B e r n o u l l i} (p), \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 341, + 676, + 883, + 694 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "where $p$ is a hyperparameter controlling the fraction of sample counts to update. 
This algorithm resembles a block coordinate descent algorithm [OR00], with the main difference being that we take signed gradient steps with step size 1 (projected onto non-negative integers) to ensure that the counts remain well-defined. As a result, $p$ implicitly controls the algorithm's step size.", + "bbox": [ + 111, + 704, + 883, + 765 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Applying (8) concludes a single optimization step. By repeating this process of estimating the metagradient, updating our counts vector, then constructing a new training dataset, we iteratively improve the selected data. Pseudocode for our algorithm can be found in Algorithm 1.", + "bbox": [ + 111, + 765, + 883, + 811 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "4.1.3 Results", + "text_level": 1, + "bbox": [ + 112, + 829, + 222, + 843 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "We evaluate our data selection algorithm using DataComp [GIF+24], a standardized framework for evaluating data selection methods for multimodal models. Algorithm 1 greatly improves on the state-of-the-art for the benchmark. Below, we describe the setting, outline our method, and conclude with our results.", + "bbox": [ + 111, + 853, + 883, + 898 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 10 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [ + "Algorithm 1: Dataset selection using using metagradient descent (MGD)." 
+ ], + "code_body": "Input: initial data counts $\\mathbf{c}\\in \\mathbb{Z}_{\\geq 0}^{n}$ , learning algorithm $\\mathcal{A}$ output function Hyperparameters: step size $p$ # opt steps $T$ iteration number $k$ for $t\\gets 1$ to $T$ do \n2 $\\mathbf{z}\\gets \\mathbf{0}_n / /$ Build input to surrogate \n3 $\\mathbf{g}\\leftarrow \\frac{\\partial\\phi(\\mathcal{A}_c'(\\mathbf{z}))}{\\partial\\mathbf{z}} / /$ Calculate metagradient using REPLAY \n4 m<- sample from Bernoulli(p) // Sample indices to step on \n5 c<- c-sign(g) $\\odot$ m// Take optimization step \n6 Return c// Return final data counts", + "bbox": [ + 119, + 112, + 661, + 250 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Setting. DataComp [GIF+24] is a multimodal model training competition and benchmark for evaluating dataset selection methods. DataComp provides a fixed learning algorithm chosen in advance by the organizers and a large fixed candidate pool of internet data. The goal is to choose a subset of the candidate pool—possibly with repeated datapoints—that yields the best-performing model after training with the given learning algorithm, as measured by a predetermined set of 38 benchmarks. Given a submission subset, the mean score on the evaluation datasets for a model trained with that subset is taken as the final \"score.\" DataComp offers four separate \"scales\" requiring different amounts of compute; we focus on the small scale in this paper due to compute limitations.", + "bbox": [ + 109, + 281, + 883, + 402 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Method. 
We select data with MGD (Algorithm 1) to minimize loss on data from a \"target set\" that is distributionally similar to the DataComp benchmark tasks, and select hyperparameters with a held-out \"validation set.\"
+ ], + "image_footnote": [], + "bbox": [ + 127, + 619, + 475, + 773 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/f7cdf24f42ff7038688bd05478168802f6a6fa09213f5ffad00db62ef65980fa.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodScoreΔ
- - - Baseline: No filtering0.13-
- - - Best baseline from [GIF+24]0.17+0.04
- - - Previous SOTA [Eco24]0.18+0.05
- - - MGD-DS (ours)0.22+0.09
", + "bbox": [ + 495, + 621, + 856, + 714 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 488, + 934, + 509, + 946 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/c5d7306e1433a0b6c6b42b22995ae8e6a7ebb4b28513e507342f46c61081428d.jpg", + "image_caption": [ + "Figure 7: MGD dataset selection outperforms baselines. Comparing to training on all the data: it achieves over double the margin of improvement of LESS on MMLU, and improves by $+1.5\\%$ on BBH (where LESS does not improve at all). The $\\Delta$ column denotes improvement over not filtering." + ], + "image_footnote": [], + "bbox": [ + 140, + 90, + 480, + 233 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/93a1935980d353a47f17bc2136d959a36b3a20b7b844279a9d48017c1ff7548b.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
BBH [SSS+22]MMLU [HBB+20]
Acc.ΔAcc.Δ
All Data35.2%-41.2%-
■ LESS35.2%-0.0%41.8%+0.5%
■ MGD-DS36.7%+1.5%42.5%+1.3%
", + "bbox": [ + 493, + 90, + 880, + 195 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "4.2 Selecting instruction-tuning data", + "text_level": 1, + "bbox": [ + 111, + 315, + 457, + 333 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "In our second application, we select training data for instruction fine-tuning (IFT) using the same MGD-based method detailed in Algorithm 1 of Section 4.1. As with multimodal data, training on the \"right\" post-training data (such as the \"right\" IFT data) can greatly impact deployment-time model performance [LFX+24; DJP+24; TGZ+23]. MGD improves over baselines at choosing IFT data for MMLU [HBK+21], a general knowledge task, and BBH [SSS+22], a reasoning/chain-of-thought task.", + "bbox": [ + 109, + 340, + 890, + 416 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "To overview this section: we start by detailing the setting, then describe the specifics of our MGD instantiation before concluding with results.", + "bbox": [ + 111, + 416, + 885, + 446 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Setting. We adopt the setting of LESS [XMG+24]. Here, the goal is to select a training data subset from four combined IFT datasets (Flan V2 [LHV+23], CoT [WWS+22], DOLLY [CHM+23], and Open Assistant 1 [KKR+24]) to maximize accuracy on a given target task. We consider two target tasks from LESS: MMLU (which comprises multiple choice questions spanning a variety of disciplines) and BBH (a 23 task subset of BIG-Bench [SRR+22]). In this setup, the data selector can access samples from each task built from the in-context learning prompts. Following Xia et al. [XMG+24], we fine-tune a 128-width LoRA [HY20] (in our work, on Gemma-2B [TMH+24]). See Appendix D for full details on the tasks and learning algorithm.", + "bbox": [ + 109, + 465, + 883, + 571 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Method. 
We split up the available task samples into two sets—a \"target\" set and a \"validation\" set—then select data with MGD (via Algorithm 1) by minimizing causal language modeling loss on the \"target\" set of samples. We select hyperparameters like step size and number of SGD iterations with the validation set; see Appendix D for more details.", + "bbox": [ + 109, + 590, + 883, + 652 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Results. Comparing with two baselines—training on all the data and training with data selected with LESS [XMG+24]—MGD yields strictly better training dataset selections for each target task (cf. Figure 7). MGD improves most on BBH, a reasoning task, compared to the best baseline $(+1.5\\%)$ accuracy). On MMLU, a knowledge-based task, we outperform baselines by slightly less compared to the best baseline $(+0.8\\%)$ ; one explanation is that selecting IFT data lends more control over reasoning than over intrinsic knowledge available in the LM.", + "bbox": [ + 109, + 669, + 883, + 758 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Beyond raw accuracy, we inspect losses across each step of the optimization process. Overall, our method improves validation loss over MGD steps (cf. Appendix Figures 13), but also exhibits signs of overfitting. Given intuition from overparameterized learning, we might expect this behavior: we optimize a total of 270,679 \"weights\"—each corresponding to a count for a datapoint—to minimize loss on only a handful of test samples (cf. Table 3).", + "bbox": [ + 109, + 761, + 883, + 835 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "4.3 Accuracy-degrading (Huber) data poisoning", + "text_level": 1, + "bbox": [ + 111, + 856, + 555, + 875 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "The goal of an accuracy-degrading data poisoning attack is to degrade the performance of a machine learning model by corrupting a small fraction of its training data. 
The threat model considered here is as follows.
In this way, this setting resembles the Huber contamination model in statistics [Hub64], and is strictly more challenging than the usual data poisoning settings in deep learning (e.g., backdoor attacks [GDG17] or attacks that target specific test examples [KL17]).", + "bbox": [ + 109, + 217, + 883, + 294 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "For large-scale machine learning models, finding strong adversaries has proven challenging—standard loss-minimizing learning algorithms seem quite robust to maliciously-inserted data [LKY23]. In fact, the first non-trivial accuracy degradation data poisoning attacks on deep models were pioneered by Lu et al. [LKY22] and later improved upon by the same set of authors [LKY23]. Broadly speaking, even constructing attacks that degrade the overall performance of a learning algorithm by more than the adversarial budget $\\varepsilon$ has proven challenging.", + "bbox": [ + 111, + 294, + 883, + 385 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "4.3.1 Setup", + "text_level": 1, + "bbox": [ + 112, + 402, + 210, + 419 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "We observe that (9) is a continuous optimization problem to which we can directly apply our metagradient framework, approximating the expectation over $P$ by a finite-sample average over a validation set $\\mathbf{X}_{val}$ . 
In particular, given a (randomly shuffled) training set $\\mathbf{X}$ and validation set $\\mathbf{X}_{val}$ , we set up the following metaparameter optimization problem (see Section 2.1):", + "bbox": [ + 111, + 426, + 883, + 489 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(a) the metaparameter $\\mathbf{z} \\in \\mathcal{X}^{n_p}$ is a tensor of $n_p = \\lfloor \\varepsilon n \\rfloor$ poisoned samples;", + "(b) the algorithm $\\mathcal{A}$ maps metaparameters $\\mathbf{z}$ to a trained model $\\mathcal{A}(\\mathbf{z})$ by replacing the first $n_p$ samples in $\\mathbf{X}$ with the samples in $\\mathbf{z}$ and then training on the resulting dataset;", + "(c) the output function $\\phi$ evaluates average loss on the validation set $\\mathbf{X}_{val}$ ." + ], + "bbox": [ + 124, + 497, + 882, + 580 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "4.3.2 Algorithm", + "text_level": 1, + "bbox": [ + 112, + 599, + 243, + 614 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "To apply our first-order methods to this problem, we start by initializing the poisoned data to be exactly the first $n_p$ samples in $\\mathbf{X}$ , $\\mathbf{z}^{(0)} \\coloneqq \\{\\widetilde{x}_i^{(0)} = x_i : i \\in [n_p]\\}$ . 
Then, for $t = 1, \\dots, T$ , we sample a minibatch $\\mathbf{X}_{val}^{(t)}$ from $\\mathbf{X}_{val}$ and use REPLAY to compute the metagradient", + "bbox": [ + 111, + 623, + 883, + 674 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf{g}_{t} = \\frac{d}{d\\mathbf{z}}\\left(\\sum_{x\\in \\mathbf{X}_{val}^{(t)}}\\ell (x;\\mathcal{A}(\\mathbf{z}^{(t - 1)}))\\right),\n$$\n", + "text_format": "latex", + "bbox": [ + 372, + 684, + 620, + 739 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "and update the poisoned data using (projected) gradient ascent:", + "bbox": [ + 111, + 750, + 578, + 767 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {z} ^ {(t)} = \\Pi_ {\\mathcal {X}} \\left(\\mathbf {z} ^ {(t - 1)} + \\eta \\cdot \\operatorname {s i g n} (\\mathbf {g} _ {t})\\right),\n$$\n", + "text_format": "latex", + "bbox": [ + 372, + 777, + 620, + 803 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "where $\\Pi_{\\mathcal{X}}$ is the projection operator onto the sample space $\\mathcal{X}$ . (For example, when $\\mathcal{X}$ is the space of image-label pairs, $\\Pi_{\\mathcal{X}}$ clips images' pixel values to [0, 1] and ensures labels are valid probability distributions.)", + "bbox": [ + 111, + 813, + 883, + 845 + ], + "page_idx": 13 + }, + { + "type": "page_footnote", + "text": "In principle, the adversary can also decide which samples to poison, but for simplicity we consider this \"fixed\" case.", + "bbox": [ + 129, + 853, + 803, + 868 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/bee2bdf66cc8b88534900202c77d8a154c55594c42b87de51729a3c9fc1551b5.jpg", + "image_caption": [ + "Figure 8: Examples of poisoned images from Section 4.3." 
+ ], + "image_footnote": [], + "bbox": [ + 117, + 85, + 883, + 164 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/599688cbc30a73e86945ceef1aaa6104947a8bf4fb89679b2b4e04e12dbef15b.jpg", + "image_caption": [ + "Figure 9: For each iteration of MGD ( $x$ -axis), we train a new model from random initialization on a randomly shuffled training set with the current iterate of poisoned data injected. We evaluate the test accuracy ( $y$ -axis), and use REPLAY to compute the metagradient. MGD outperforms the best known attack [LKY23] by an order of magnitude and (for reference) results in a model that has the same accuracy as a single-layer neural network trained on random image features [CNL11]." + ], + "image_footnote": [], + "bbox": [ + 129, + 204, + 483, + 361 + ], + "page_idx": 14 + }, + { + "type": "table", + "img_path": "images/5f56febcc07b1551f91d149adb7bbceab2088e31bcd8e08cd237fa6bf1e3b1d6.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelAcc.Δ
- - Original model92.0%-
- - GradCancel [LKY23]91.2%-0.80%
- MGD-DP (ours)78.1%-13.9%
1-layer NN (for reference) [CNL11]83.3%-8.7%
", + "bbox": [ + 488, + 203, + 887, + 303 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "4.3.3 Evaluation", + "text_level": 1, + "bbox": [ + 112, + 477, + 246, + 491 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "We use the CIFAR-10 dataset which consists of 60,000 total images each labeled as one of 10 classes. We partition the data into 40,000 training examples, 10,000 validation examples, and 10,000 test examples. We consider a simple 12-epoch CIFAR-10 training procedure, which reaches $92.4\\%$ accuracy on the CIFAR-10 test set when applied to the 40,000 training examples. See Appendix E for training hyperparameters.", + "bbox": [ + 109, + 500, + 883, + 560 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "As described above, we allow the adversary to modify (in-place) a fixed, $\\varepsilon$ -fraction of the training data (in our case, $2.5\\%$ ) subject to the constraint that the poisoned images still lay in the valid (normalized) image range of [0, 1]. We compare our approach—direct optimization of the data poisoning objective using metagratings—to the state-of-the-art \"Gradient Cancelling\" (GradCancel) method of Lu et al. [LKY23]. In short, GradCancel is a two-step method which first finds a poorly performing model, then finds poisoned data that induces this model as a minimizer of the training loss. We present the full method in Appendix E.", + "bbox": [ + 109, + 561, + 883, + 654 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Results. We find that metagradients enable state-of-the-art data poisoning attacks, degrading accuracy by $14\\%$ . In particular, when allowed to corrupt 1000 of the 40,000 training samples $(2.5\\%)$ , our method reduces test set accuracy to $78\\%$ — for reference, the accuracy of a single-layer neural networked trained on the unmodified CIFAR-10 training set is $83\\%$ . The strongest existing data poisoning attack, GradCancel, only reduces test set accuracy by less than $1\\%$ . 
In Figure 8, we visualize the poisoned images and labels found by our method. In Figure 9, we visualize the minibatch loss at each step of the optimization process.", + "bbox": [ + 109, + 671, + 883, + 763 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Remark 4 (Poisoning non-smooth learning algorithms). Recall that to apply metagradient descent, we alter the learning algorithm $\\mathcal{A}$ to be metasmooth (see Section 3.1). This involves making modifications such as switching out max pooling layers for average pooling layers, moving batch normalization layers before activations, and scaling down the last layer's output by a factor of 10. It is natural to ask: how much does the efficacy of our method depend on this smoothness? After all, in practice the adversary cannot control the learning algorithm. To answer this question, we take the poison samples generated by MGD and insert them into the training set of a corresponding standard (i.e., non-metasmooth) learning algorithm. We find that our method still significantly degrades the performance of the model, from $92.8\\%$ to $82.6\\%$ (a drop of $10.2\\%$ ).", + "bbox": [ + 109, + 771, + 883, + 892 + ], + "page_idx": 14 + }, + { + "type": "page_footnote", + "text": "2Lu et al. [LKY23] report a larger drop; the discrepancy is due to our constraint that poisoned data are valid bounded RGB images.", + "bbox": [ + 129, + 897, + 882, + 912 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/ff507e583af2cf8d875c5175cfaf529dfdc7148e3aef2836098edd006a592450.jpg", + "image_caption": [ + "Figure 10: Target and test accuracies of MGD's learning rate schedule over time closely match or exceed those found by a grid search over hundreds of combinations of hyperparameters. $95\\%$ confidence intervals are plotted for MGD's results." 
+ ], + "image_footnote": [], + "bbox": [ + 153, + 85, + 612, + 257 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/983c55704fec1c3570e69e9f79f79845d462ac252b5f882ff32f4fe1718609cd.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 645, + 88, + 844, + 170 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "4.4 Finding a learning rate schedule", + "text_level": 1, + "bbox": [ + 112, + 340, + 450, + 359 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "As a final application, we optimize the learning rate schedule of stochastic gradient descent (SGD) for training a CIFAR-10 classifier. By following the metagradients with respect to the learning rate at each step of training, our procedure matches grid searching over standard learning rate schedules—despite starting with naive hyperparameters (a flat learning rate).", + "bbox": [ + 109, + 366, + 883, + 428 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Unlike the other applications discussed here, metagradients do not unlock state-of-the-art performance. Instead, we discuss this application to illustrate the flexibility of REPLAY, and in particular its ability to optimize metaparameters that do not directly affect the loss landscape (i.e., that only affect the model via the optimization trajectory). 
As we discuss in Section 6, approximate metagradient estimators cannot apply to these metaparameters.", + "bbox": [ + 109, + 428, + 883, + 503 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "4.4.1 Setting", + "text_level": 1, + "bbox": [ + 112, + 521, + 220, + 537 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "To put learning rate schedule optimization into the metagradient framework, we parameterize a schedule as a vector $\\eta \\in \\mathbb{R}^k$ comprising $k$ evenly-spaced keypoints, so that the learning rate at iteration $t$ is given by", + "bbox": [ + 109, + 545, + 883, + 578 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\eta (t) = \\eta_ {\\lfloor k t / T \\rfloor} + \\frac {k t / T - \\lfloor k t / T \\rfloor}{\\lceil k t / T \\rceil - \\lfloor k t / T \\rfloor} \\left(\\eta_ {\\lceil k t / T \\rceil} - \\eta_ {\\lfloor k t / T \\rfloor}\\right), \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 303, + 587, + 883, + 625 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "i.e., a linear interpolation between the keypoints.", + "bbox": [ + 109, + 631, + 470, + 648 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(a) the metaparameter $\\eta \\in \\mathbb{R}^k$ is a vector of $k$ keypoints;", + "(b) the algorithm $\\mathcal{A}$ maps metaparameters $\\eta$ to a trained model $\\mathcal{A}(\\eta)$ by training a model for $T$ iterations with the learning rate schedule defined by (10);", + "(c) the output function $\\phi$ evaluates average loss on the validation set $\\mathbf{X}_{val}$ ." + ], + "bbox": [ + 124, + 655, + 883, + 738 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "4.4.2 Algorithm", + "text_level": 1, + "bbox": [ + 112, + 756, + 243, + 772 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Following the theme of the rest of this section, we optimize the metaparameter $\\eta$ directly using MGD. 
In particular, we initialize the keypoints to be a flat learning rate schedule, and then update the keypoints using the metagradient with respect to the validation loss,", + "bbox": [ + 109, + 779, + 883, + 825 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {\\eta} ^ {(t + 1)} = \\boldsymbol {\\eta} ^ {(t)} - \\alpha \\cdot \\operatorname {s i g n} \\left(\\nabla_ {\\boldsymbol {\\eta}} \\phi (\\mathcal {A} (\\boldsymbol {\\eta} ^ {(t)}))\\right).\n$$\n", + "text_format": "latex", + "bbox": [ + 349, + 837, + 643, + 863 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 488, + 935, + 509, + 946 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "4.4.3 Evaluation", + "text_level": 1, + "bbox": [ + 112, + 90, + 246, + 104 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "We aim to select the learning rate schedule that minimizes the expected test set loss. To do so, we reserve $90\\%$ of the CIFAR-10 test set as a \"validation set\" on which we select hyperparameters. We then use the remaining $10\\%$ as a test set. We compare the following two approaches:", + "bbox": [ + 109, + 114, + 883, + 161 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Grid search: We construct a grid over different one cycle learning rate schedules, varying the peak learning rate, starting learning rate, ending learning rate, and peak learning rate time. In total, we consider over 1,000 different learning rate schedules. We use the reserved $90\\%$ of the test set to select the best learning rate schedule from the grid.", + "- Metagradient descent (MGD): We run 50 steps of MGD starting from a highly suboptimal flat learning rate schedule, aiming to minimize loss on the reserved $90\\%$ of the test set. We use the last iteration of MGD as our learned learning rate schedule." 
+ ], + "bbox": [ + 135, + 170, + 879, + 286 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "We evaluate the performance of each final learning rate schedule on the held-out $10\\%$ test set and average the results over the same set of 5 unseen random seeds.", + "bbox": [ + 109, + 295, + 883, + 325 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Results. Comparing our learned hyperparameter schedule to grid search, as shown in Figure 10, our learned schedule using only 50 steps of MGD matches the performance of the state-of-the-art onecycle schedule found via grid search over more than 1000 configurations. An important caveat, however, is that these numbers are not directly comparable: grid search can be run in parallel across many machines, while steps of MGD must be run sequentially.", + "bbox": [ + 109, + 345, + 883, + 421 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "In practice, we do not advise using MGD for optimizing low-dimensional hyperparameters, especially ones that have been thoroughly optimized by grid search (such as CIFAR-10 learning rate schedules [SN17; Pag18; LA19; Jor24]). Still, an interesting avenue for future work is to study the utility of MGD for optimizing high-dimensional hyperparameters that are less well-studied, such as per-parameter/layer learning rates/weight decays for language models, attention hyperparameters, or gradient preconditioners.", + "bbox": [ + 109, + 421, + 883, + 497 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "5 Discussion", + "text_level": 1, + "bbox": [ + 112, + 521, + 272, + 539 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "In this section, we first present the main limitations of our method and outline future directions.", + "bbox": [ + 109, + 553, + 805, + 569 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Limitations. 
Although REPLAY is more efficient than existing methods at computing metagradients, it is still non-trivially more expensive than simply training a model once. The main reason is that metagradients require making a backwards pass over a backwards pass. This operation necessarily requires 2-3 times the operations of a backwards pass; furthermore, our current implementation requires float32/tensorfloat32 operations. Finally, standard training operations are often made more efficient by specialized software (e.g., via FlashAttention [DFE+22]); no such software (yet) exists for backwards-over-backwards operations. Beyond computational issues, successfully applying metagradients requires smooth model training.", + "bbox": [ + 109, + 587, + 883, + 695 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Metasmoothness: connections and future directions. While Section 3 describes a general procedure for finding metasmooth learning algorithms, an important future direction is to further explore and understand metasmoothness. This includes, for example: (a) characterizing the relationship between metasmoothness and numerical stability (and potentially using techniques from the latter to improve the former); (b) devising improved optimizers and/or architectures that lead directly to metasmooth learning algorithms (akin to skip connections or stable initialization in architecture design); (c) formalizing connections between metasmoothness and other optimization-related phenomena in deep learning [LM20; CKL+22]. 
A related but separate direction is to explore the possibility of using techniques from non-smooth optimization [Cla90] to perform metagradient descent on non-metasmooth learning algorithms.", + "bbox": [ + 109, + 712, + 883, + 849 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 488, + 934, + 509, + 946 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "**Applying metagradients.** Our methods apply to any ML task that requires optimizing with respect to a metaparameter. These include: poisoning data (generated or simply hosted on the internet) so that it cannot be trained on without permission (i.e., by maximizing training loss with respect to the text); selecting better training data at various stages of the model training lifecycle; and designing better model training routines and architectures with first-order methods. Another direction of future work lies in mitigating the computational limitations of our algorithm. Both (a) small-scale proxy-models [HBM+22; EFM24] and (b) low-hanging engineering improvements can likely make calculating metagradients much more efficient.", + "bbox": [ + 109, + 90, + 887, + 198 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "6 Related work", + "text_level": 1, + "bbox": [ + 112, + 220, + 299, + 239 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "We overview previous work on calculating and applying meta-gradients.", + "bbox": [ + 111, + 253, + 643, + 271 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "6.1 Calculating metagradients", + "text_level": 1, + "bbox": [ + 111, + 287, + 397, + 306 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Previous work estimates the metagradient for large-scale models via one of two broad families of methods: implicit differentiation and automatic (explicit) differentiation. 
Note that in previous literature, synonyms for metagradient include \"hyper-gradient\" and \"outer gradient.\"", + "bbox": [ + 109, + 313, + 885, + 359 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Implicit differentiation. One family of methods aims to approximate the metagradient. To illustrate the idea behind such approaches, suppose that the learning algorithm $\\mathcal{A}$ returns a model state $\\theta$ that minimizes a strongly convex loss function $\\mathcal{L}(z,\\theta)$ . Here, the implicit function theorem tells us that", + "bbox": [ + 109, + 377, + 883, + 424 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\nabla_ {z} f (z) = \\overbrace {\\left(\\frac {d \\phi}{d \\theta} \\right| _ {\\theta = \\mathcal {A} (z)} ^ {\\text {w r t . f i n a l p a r a m s}} \\underbrace {\\left. \\left(\\frac {\\partial^ {2} \\mathcal {L} (z , \\theta)}{\\partial \\theta^ {2}} \\right| _ {\\theta = \\mathcal {A} (z)}\\right) ^ {- 1}} _ {p \\times p \\text {i n v e r s e H e s s i a n o f l o s s w r t . f i n a l p a r a m s}} ^ {1 \\times p \\text {g r a d i e n t o f o u t p u t w r t . f i n a l p a r a m s}} \\overbrace {\\left. \\left(\\frac {\\partial^ {2} \\mathcal {L} (z , \\theta)}{\\partial \\theta \\partial z} \\right| _ {\\theta = \\mathcal {A} (z)}\\right) ^ {- 1}} ^ {p \\times n \\text {J a c o b i a n o f l o s s g r a d i e n t w r t . m e t a p a r a m e t e r s}}. \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 228, + 433, + 885, + 536 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "The form of (11) yields efficient and accurate estimators for metagradients of models learned by minimizing a strongly convex loss [BKB+20; BKM+22; KDJ20; BBC+22; SGB+22]. Such approaches can extend to estimate metagradients of large-scale, non-convex learning algorithms [Ben00; KL17; RFK+19; FAL17; LVD20; CH20; BNL+22], but lose any correctness guarantees. 
Indeed, applying this class of methods in large-scale settings is challenging as doing so requires (a) assuming conditions on the learning algorithm (e.g., Hessian invertibility, continuous differentiability) and (b) efficiently approximating the inverse Hessian (in practice, typically at the cost of estimate accuracy). Finally, implicit function-based approaches are fundamentally limited in that they can only differentiate with respect to metaparameters expressed in the loss function (e.g., these methods can differentiate with respect to the weight decay, but not learning rate).", + "bbox": [ + 109, + 545, + 887, + 684 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Automatic (explicit) differentiation. Beyond implicit differentiation approaches, there is a long line of work on directly calculating metagradients with AD (see Section 2). Previous work has used AD to estimate metagradients of learning algorithms ranging from those with convex objectives to small neural networks [HNM19; MDA15; FDF+17; MS21; ZSP+21; CXR+22; SGB+22]. As detailed in Section 2, the primary challenge with (reverse-mode) AD-based approaches to meta-differentiation is storing the intermediate products required for the backward pass. To circumvent this challenge, previous work either (a) only considers settings that are small enough that is possible to differentiate while requiring space that is linear in the number of iterations (i.e., 2 layer networks on MNIST), (b) uses forward-mode AD [FDF+17; MS21; CXR+22] (which requires no extra storage at the cost of additional compute that scales linearly with metaparameter dimension), (c) only approximates the metagradient by calculating over only a few training steps [LSY18; CH20; FAL17], or uses (d) a reversible learning algorithm [MDA15]. 
The fourth category is a promising direction for reducing space requirements when computing large-scale metagradients, but current approaches require (a) representing model parameters in a fixed-precision format (which current large-scale learning algorithms do not support) in addition to restricting the algorithm to be reversible (e.g.,", + "bbox": [ + 109, + 700, + 885, + 912 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "SGD and standard GD do not qualify). A common thread is that algorithms computing metagradient with AD often suffer from numerical instability and overflow issues [MS21; SGB+22]. In relation to previous work on AD, REPLAY (Section 2) can be seen as a strategy for choosing gradient checkpointing [CAC+81; BCT92; ZP00; GW08; CXZ+16] locations in the compute graph (an NP-complete task in general [Nau08]).", + "bbox": [ + 109, + 90, + 885, + 156 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "6.2 Applying metagradients", + "text_level": 1, + "bbox": [ + 109, + 171, + 382, + 191 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Previous work applies metagradients to optimize training setup, including distillation [MDA15; LVD20], training data selection [HNM19; EFM24], meta-learning [FAL17; RFK+19; HAM+21], learning rate/weight decay selection [MS21; CXR+22], tuning data augmentation [LVD20], and architecture search [MDA15; LSY18; ZSP+21]. Beyond optimizing metagradients, methods in data attribution apply metagradients to (Taylor) estimate the effect of dropping training data on model predictions [KL17; GBA+23; PGI+23]. 
To the Previous works either (a) calculate metagradients directly with AD (made feasible by working in a very small-scale learning setting) or (b) estimate the metagradient with an implicit function-based approach.", + "bbox": [ + 109, + 196, + 887, + 305 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "7 Conclusion", + "text_level": 1, + "bbox": [ + 109, + 327, + 277, + 345 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "In this work we add metagradients to the large-scale machine learning toolkit. To do so, we overcome two challenges: (a) calculating metagradients at scale and (b) modifying learning algorithms to be metasmooth—i.e., to admit metagradients that locally predict model behavior. We then successfully calculate and apply metagradients for large-scale models (up to 2B parameters) to select data for CLIP pretraining and instruction fine-tuning, to (Huber) poison training data to decrease overall model accuracy, and search for high-dimensional hyperparameters (per-iteration learning rates). Given the successful applications of metagradients in these settings, we are excited to see what unlocking metagradients enables in other areas of machine learning.", + "bbox": [ + 109, + 359, + 888, + 482 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "8 Acknowledgements", + "text_level": 1, + "bbox": [ + 109, + 505, + 369, + 526 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Work supported in part by the NSF grant DMS-2134108 and Open Philanthropy, and in part by NSF Grant No. 2346519. This work is also supported in part by the Alan Turing Institute, and the U.S. Department of Energy. 
The authors would like to thank Alex Damian, Harshay Shah, Jesse Michel, Joel Flynn, Manolis Zampetakis, Noah Moroze, Piotr Indyk, Sam Hopkins, Sung Min (Sam) Park, and Sarah Cen for helpful references as well as discussions and feedback on early versions of this work.", + "bbox": [ + 109, + 537, + 885, + 613 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 488, + 934, + 509, + 948 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 114, + 87, + 236, + 106 + ], + "page_idx": 19 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[ATS+23] Amro Abbas, Kushal Tirumala, Dániel Simig, Surya Ganguli, and Ari S Morcos. \"SemDeDup: Data-efficient learning at web-scale through semantic dedduplication\". In: arXiv preprint arXiv:2303.09540 (2023).", + "[BAC+21] Sara Beery, Arushi Agarwal, Elijah Cole, and Vighnesh Birodkar. \"The iWildCam 2021 competition dataset\". In: arXiv preprint arXiv:2105.03494. 2021.", + "[BBC+22] Mathieu Blondel, Quentin Berthet, Marco Cuturi, Roy Frostig, Stephan Hoyer, Felipe Llinares-López, Fabian Pedregosa, and Jean-Philippe Vert. \"Efficient and modular implicit differentiation\". In: Advances in neural information processing systems 35 (2022), pp. 5230-5242.", + "[BBY+22] Yonatan Bitton, Nitzan Bitton Guetta, Ron Yosef, Yuval Elovici, Mohit Bansal, Gabriel Stanovsky, and Roy Schwartz. \"WinoGAViL: Gamified association benchmark to challenge vision-and-language models\". In: Advances in Neural Information Processing Systems. 2022.", + "[BCT92] Preston Briggs, Keith D Cooper, and Linda Torczon. \"Rematerialization\". In: Proceedings of the ACM SIGPLAN 1992 conference on Programming language design and implementation. 1992, pp. 311-321.", + "[Ben00] Yoshua Bengio. \"Gradient-based optimization of hyperparameters\". In: Neural computation 12.8 (2000), pp. 
1889-1900.", + "[BGM+18] Peter Bandi, Oscar Geessink, Quirine Manson, Marcory Van Dijk, Maschenka Balkenhol, Meyke Hermsen, Babak Ehteshami Bejnordi, Byungjae Lee, Kyunghyun Paeng, Aoxiao Zhong, et al. \"From detection of individual metastases to classification of lymph node status at the patient level: the CAMELYON17 challenge\". In: IEEE Transactions on Medical Imaging (2018).", + "[BGV14] Lukas Bossard, Matthieu Guillaumin, and Luc Van Gool. \"Food-101-mining discriminative components with random forests\". In: European conference on computer vision. 2014.", + "[BKB+20] Quentin Bertrand, Quentin Klopfenstein, Mathieu Blondel, Samuel Vaiter, Alexandre Gramfort, and Joseph Salmon. \"Implicit differentiation of lasso-type models for hyperparameter optimization\". In: International Conference on Machine Learning. PMLR. 2020, pp. 810-821.", + "[BKM+22] Quentin Bertrand, Quentin Klopfenstein, Mathurin Massias, Mathieu Blondel, Samuel Vaiter, Alexandre Gramfort, and Joseph Salmon. \"Implicit differentiation for fast hyperparameter selection in non-smooth convex learning\". In: Journal of Machine Learning Research 23.149 (2022), pp. 1-43.", + "[BMA+19] Andrei Barbu, David Mayo, Julian Alverio, William Luo, Christopher Wang, Dan Gutfreund, Josh Tenenbaum, and Boris Katz. \"ObjectNet: A large-scale bias-controlled dataset for pushing the limits of object recognition models\". In: Neural Information Processing Systems (NeurIPS). 2019.", + "[BNL+22] Juhan Bae, Nathan Ng, Alston Lo, Marzyeh Ghassemi, and Roger Grosse. \"If Influence Functions are the Answer, Then What is the Question?\" In: ArXiv preprint arXiv:2209.05364. 2022.", + "[BSF94] Yoshua Bengio, Patrice Simard, and Paolo Frasconi. \"Learning long-term dependencies with gradient descent is difficult\". In: IEEE Transactions on Neural Networks. 1994.", + "[CAC+81] Gregory J Chaitin, Marc A Auslander, Ashok K Chandra, John Cocke, Martin E Hopkins, and Peter W Markstein. \"Register allocation via coloring\". 
In: Computer languages 6.1 (1981), pp. 47-57.", + "[CFW+18] Gordon Christie, Neil Fendley, James Wilson, and Ryan Mukherjee. \"Functional Map of the World\". In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR). June 2018.", + "[CH20] Xiangning Chen and Cho-Jui Hsieh. \"Stabilizing differentiable architecture search via perturbation-based regularization\". In: International conference on machine learning. PMLR. 2020, pp. 1554-1565." + ], + "bbox": [ + 114, + 119, + 916, + 888 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 935, + 509, + 946 + ], + "page_idx": 19 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[CHL17] Gong Cheng, Junwei Han, and Xiaoqiang Lu. \"Remote sensing image scene classification: Benchmark and state of the art\". In: Proceedings of the IEEE. 2017.", + "[CHM+23] Mike Conover, Matt Hayes, Ankit Mathur, Jianwei Xie, Jun Wan, Sam Shah, Ali Ghodsi, Patrick Wendell, Matei Zaharia, and Reynold Xin. Free Dolly: Introducing the World's First Truly Open Instruction-Tuned LLM. 2023. URL: https://www.databricks.com/blog/2023/04/12/dolly-first-open-commercially-viable-instruction-tuned-llm (visited on 06/30/2023).", + "[CKL+22] Jeremy M. Cohen, Simran Kaur, Yuanzhi Li, J. Zico Kolter, and Ameet Talwalkar. Gradient Descent on Neural Networks Typically Occurs at the Edge of Stability. 2022. arXiv: 2103.00065 [cs.LG]. URL: https://arxiv.org/abs/2103.00065.", + "[Cla90] Frank H Clarke. Optimization and nonsmooth analysis. SIAM, 1990.", + "[CMK+14] Mircea Cimpoi, Subhransu Maji, Iasonas Kokkinos, Sammy Mohamed, and Andrea Vedaldi. \"Describing textures in the wild\". In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. 2014.", + "[CNL11] Adam Coates, Andrew Ng, and Honglak Lee. \"An analysis of single-layer networks in unsupervised feature learning\". 
In: Proceedings of the fourteenth international conference on artificial intelligence and statistics. 2011.", + "[CXR+22] Kartik Chandra, Audrey Xie, Jonathan Ragan-Kelley, and Erik Meijer. \"Gradient descent: The ultimate optimizer\". In: Advances in Neural Information Processing Systems 35 (2022), pp. 8214-8225.", + "[ CXZ+16] Tianqi Chen, Bing Xu, Chiyuan Zhang, and Carlos Guestrin. \"Training Deep Nets with Sublinear Memory Cost\". In: CoRR abs/1604.06174 (2016). arXiv: 1604.06174. URL: http://arxiv.org/abs/1604.06174.", + "[DDS+09] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. \"Imagenet: A large-scale hierarchical image database\". In: Computer Vision and Pattern Recognition (CVPR). 2009.", + "[DFE+22] Tri Dao, Daniel Y. Fu, Stefano Ermon, Atri Rudra, and Christopher Ré. FlashAttention: Fast and Memory-Efficient Exact Attention with IO-Awareness. 2022. arXiv: 2205.14135 [cs.LG]. URL: https://arxiv.org/abs/2205.14135.", + "[DJP+24] Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. \"The llama 3 herd of models\". In: arXiv preprint arXiv:2407.21783 (2024).", + "[ Eco24] Team EcoDatum. EcoDatum DataComp-small submission. https://www.datacomp.ai/dcclip/leaderboard.html. 2024.", + "[EFM24] Logan Engstrom, Axel Feldmann, and Aleksander Madry. \"DsDm: Model-Aware Dataset Selection with Datamodels\". In: 2024.", + "[EVW+10] M. Everingham, L. Van Gool, C. K. I. Williams, J. Winn, and A. Zisserman. \"The Pascal Visual Object Classes (VOC) Challenge\". In: International Journal of Computer Vision. 2010.", + "[FAL17] Chelsea Finn, Pieter Abbeel, and Sergey Levine. \"Model-agnostic meta-learning for fast adaptation of deep networks\". In: International conference on machine learning. PMLR. 2017, pp. 1126-1135.", + "[FDF+17] Luca Franceschi, Michele Donini, Paolo Frasconi, and Massimiliano Pontil. 
\"Forward and reverse gradient-based hyperparameter optimization\". In: International Conference on Machine Learning (ICML). 2017.", + "[FFP04] Li Fei-Fei, Rob Fergus, and Pietro Perona. \"Learning generative visual models from few training examples: An incremental bayesian approach tested on 101 object categories\". In: 2004 conference on computer vision and pattern recognition workshop. IEEE. 2004, pp. 178-178.", + "[FIW+22] Alex Fang, Gabriel Ilharco, Mitchell Wortsman, Yuhao Wan, Vaishaal Shankar, Achal Dave, and Ludwig Schmidt. \"Data Determines Distributional Robustness in Contrastive Language Image Pre-training (CLIP)\". In: ICML. 2022." + ], + "bbox": [ + 112, + 89, + 888, + 902 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 20 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[GBA+23] Roger Grosse, Juhan Bae, Cem Anil, Nelson Elhage, Alex Tamkin, Amirhossein Tajdini, Benoit Steiner, Dustin Li, Esin Durmus, Ethan Perez, et al. \"Studying large language model generalization with influence functions\". In: arXiv preprint arXiv:2308.03296 (2023).", + "[GDG17] Tianyu Gu, Brendan Dolan-Gavitt, and Siddharth Garg. \"Badnets: Identifying Vulnerabilities in the Machine Learning Model Supply Chain\". In: arXiv preprint arXiv:1708.06733 (2017).", + "[GIF+24] Samir Yitzhak Gadre, Gabriel Ilharco, Alex Fang, Jonathan Hayase, Georgios Smyrnis, Thao Nguyen, Ryan Marten, Mitchell Wortsman, Dhruba Ghosh, Jieyu Zhang, et al. \"DataComp: In search of the next generation of multimodal datasets\". In: Advances in Neural Information Processing Systems. 2024.", + "[GLU12] Andreas Geiger, Philip Lenz, and Raquel Urtasun. \"Are we ready for autonomous driving? The KITTI vision benchmark suite\". In: 2012 IEEE conference on computer vision and pattern recognition. 2012.", + "[GW08] Andreas Griewank and Andrea Walther. 
Evaluating derivatives: principles and techniques of algorithmic differentiation. SIAM, 2008.", + "[HAM+21] Timothy Hospedales, Antreas Antoniou, Paul Micaelli, and Amos Storkey. \"Meta-learning in neural networks: A survey\". In: IEEE transactions on pattern analysis and machine intelligence 44.9 (2021), pp. 5149-5169.", + "[HBB+20] Dan Hendrycks, Collin Burns, Steven Basart, Andy Zou, Mantas Mazeika, Dawn Song, and Jacob Steinhardt. \"Measuring massive multitask language understanding\". In: arXiv preprint arXiv:2009.03300 (2020).", + "[HBD+19] Patrick Helber, Benjamin Bischke, Andreas Dengel, and Damian Borth. \"EuroSAT: A novel dataset and deep learning benchmark for land use and land cover classification\". In: IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing. 2019.", + "[HBK+21] Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. \"Measuring Mathematical Problem Solving With the MATH Dataset\". In: NeurIPS (2021).", + "[HBM+20] Dan Hendrycks, Steven Basart, Norman Mu, Saurav Kadavath, Frank Wang, Evan Dorundo, Rahul Desai, Tyler Zhu, Samyak Parajuli, Mike Guo, Dawn Song, Jacob Steinhardt, and Justin Gilmer. The Many Faces of Robustness: A Critical Analysis of Out-of-Distribution Generalization. 2020. arXiv: 2006.16241 [cs.CV].", + "[HBM+22] Jordan Hoffmann, Sebastian Borgeaud, Arthur Mensch, Elena Buchatskaya, Trevor Cai, Eliza Rutherford, Diego de Las Casas, Lisa Anne Hendricks, Johannes Welbl, Aidan Clark, et al. \"Training compute-optimal large language models\". In: arXiv preprint arXiv:2203.15556. 2022.", + "[HNM19] Satoshi Hara, Atsushi Nitanda, and Takanori Maehara. \"Data cleansing for models trained with SGD\". In: Advances in Neural Information Processing Systems 32 (2019).", + "[Hub64] Peter J. Huber. \"Robust estimation of a location parameter\". In: The Annals of Mathematical Statistics. 1964.", + "[HY20] Jiaoyang Huang and Horng-Tzer Yau. 
\"Dynamics of Deep Neural Networks and Neural Tangent Hierarchy\". In: Proceedings of the 37th International Conference on Machine Learning. 2020.", + "[HZB+19] Dan Hendrycks, Kevin Zhao, Steven Basart, Jacob Steinhardt, and Dawn Song. \"Natural adversarial examples\". In: arXiv preprint arXiv:1907.07174 (2019).", + "[JHV+17] Justin Johnson, Bharath Hariharan, Laurens Van Der Maaten, Li Fei-Fei, C Lawrence Zitnick, and Ross Girshick. \"CLEVR: A diagnostic dataset for compositional language and elementary visual reasoning\". In: Proceedings of the IEEE conference on computer vision and pattern recognition. 2017.", + "[Kor24] Keller Jordan. \"94 percent on CIFAR-10 in 3.29 Seconds on a Single GPU\". In: (2024).", + "[JS08] Yaochu Jin and Bernhard Sendhoff. \"Pareto-based multiobjective machine learning: An overview and case studies\". In: IEEE Transactions on Systems, Man, and Cybernetics, Part C (Applications and Reviews) 38.3 (2008), pp. 397-415." + ], + "bbox": [ + 112, + 89, + 903, + 912 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 488, + 935, + 509, + 946 + ], + "page_idx": 21 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[KB15] Diederik P. Kingma and Jimmy Ba. \"Adam: A Method for Stochastic Optimization\". In: International Conference on Learning Representations (ICLR). 2015.", + "[KDJ20] MJ Zico Kolter, David Duvenaud, and Matt Johnson. \"Deep implicit layers-neural odes, deep equilibrium models, and beyond, 2020\". In: NeurIPS Tutorial (2020).", + "[KKR+24] Andreas Köpf, Yannic Kilcher, Dimitri von Rütte, Sotiris Anagnostidis, Zhi Rui Tam, Keith Stevens, Abdullah Barhoum, Duc Nguyen, Oliver Stanley, Richard Nagyfi, et al. \"Open-assistant conversations-democratizing large language model alignment\". In: Advances in Neural Information Processing Systems 36 (2024).", + "[Pang Wei Koh and Percy Liang. \"Understanding Black-box Predictions via Influence Functions\". 
In: International Conference on Machine Learning. 2017.]", + "[Kri09] Alex Krizhevsky. \"Learning Multiple Layers of Features from Tiny Images\". In: Technical report. 2009.", + "[KSD+13] Jonathan Krause, Michael Stark, Jia Deng, and Li Fei-Fei. \"3d object representations for fine-grained categorization\". In: Proceedings of the IEEE international conference on computer vision workshops. 2013.", + "[KSM+20] Pang Wei Koh, Shiori Sagawa, Henrik Marklund, Sang Michael Xie, Marvin Zhang, Akshay Balsubramani, Weihua Hu, Michihiro Yasunaga, Richard Lanas Phillips, Sara Beery, et al. \"WILDS: A Benchmark of in-the-Wild Distribution Shifts\". In: arXiv preprint arXiv:2012.07421 (2020).", + "[LA19] Zhiyuan Li and Sanjeev Arora. An Exponential Learning Rate Schedule for Deep Learning. 2019.", + "[LeC98] Yann LeCun. \"The MNIST database of handwritten digits\". In: Technical report. 1998.", + "[LFX+24] Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, et al. \"Deepseek-v3 technical report\". In: arXiv preprint arXiv:2412.19437. 2024.", + "[LHV+23] Shayne Longpre, Le Hou, Tu Vu, Albert Webson, Hyung Won Chung, Yi Tay, Denny Zhou, Quoc V Le, Barret Zoph, Jason Wei, et al. \"The flan collection: Designing data and methods for effective instruction tuning\". In: International Conference on Machine Learning. PMLR. 2023, pp. 22631-22648.", + "[LIE+22] Guillaume Leclerc, Andrew Ilyas, Logan Engstrom, Sung Min Park, Hadi Salman, and Aleksander Madry. ffcv. https://github.com/libffcv/ffcv/. 2022.", + "[LKY22] Yiwei Lu, Gautam Kamath, and Yaoliang Yu. \"Indiscriminate Data Poisoning Attacks on Neural Networks\". In: arXiv preprint arXiv:2204.09092 (2022).", + "[LY23] Yiwei Lu, Gautam Kamath, and Yaoliang Yu. \"Exploring the limits of model-targeted indiscriminate data poisoning attacks\". In: International Conference on Machine Learning. PMLR. 2023, pp. 
22856-22879.", + "[LM20] Guillaume Leclerc and Aleksander Madry. \"The two regimes of deep network training\". In: arXiv preprint arXiv:2002.10376. 2020.", + "[LMB+14] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dollár, and C Lawrence Zitnick. \"Microsoft coco: Common objects in context\". In: European conference on computer vision (ECCV). 2014.", + "[LSY18] Hanxiao Liu, Karen Simonyan, and Yiming Yang. \"Darts: Differentiable architecture search\". In: arXiv preprint arXiv:1806.09055 (2018).", + "[LVD20] Jonathan Lorraine, Paul Vicol, and David Duvenaud. \"Optimizing millions of hyperparameters by implicit differentiation\". In: International conference on artificial intelligence and statistics. PMLR. 2020, pp. 1540-1552.", + "[MDA15] Dougal Maclaurin, David Duvenaud, and Ryan Adams. \"Gradient-based hyperparameter optimization through reversible learning\". In: International conference on machine learning (ICML). 2015." + ], + "bbox": [ + 112, + 89, + 883, + 905 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 488, + 935, + 509, + 946 + ], + "page_idx": 22 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[MRK+13] Subhransu Maji, Esa Rahtu, Juho Kannala, Matthew Blaschko, and Andrea Vedaldi. \"Fine-grained visual classification of aircraft\". In: arXiv preprint arXiv:1306.5151 (2013).", + "[MS21] Paul Micaelli and Amos J Storkey. \"Gradient-based hyperparameter optimization over long horizons\". In: Advances in Neural Information Processing Systems 34 (2021), pp. 10798-10809.", + "[Nau08] Uwe Naumann. \"Optimal Jacobian accumulation is NP-complete\". In: Math. Program. 112.2 (Apr. 2008), pp. 427-441. ISSN: 0025-5610.", + "[NWC+11] Yuval Netzer, Tao Wang, Adam Coates, Alessandro Bissacco, Baolin Wu, Andrew Y Ng, et al. \"Reading digits in natural images with unsupervised feature learning\". 
In: NIPS workshop on deep learning and unsupervised feature learning. 2011.", + "[NZ08] Maria-Elena Nilsback and Andrew Zisserman. \"Automated flower classification over a large number of classes\". In: 2008 Sixth Indian Conference on Computer Vision, Graphics & Image Processing. 2008.", + "[OR00] James M Ortega and Werner C Rheinboldt. Iterative solution of nonlinear equations in several variables. SIAM, 2000.", + "[Pag18] David Page. CIFAR-10 Fast. GitHub Repository. Oct. 2018. URL: https://github.com/davidcpage/cifar10-fast.", + "[Pea96] Barak A Pearlmutter. \"An investigation of the gradient descent process in neural networks\". In: PhD thesis, Carnegie Mellon University. 1996.", + "[PGI+23] Sung Min Park, Kristian Georgiev, Andrew Ilyas, Guillaume Leclerc, and Aleksander Madry. \"TRAK: Attributing Model Behavior at Scale\". In: *Arxiv preprint arXiv:2303.14186*. 2023.", + "[PVZ+12] Omkar M Parkhi, Andrea Vedaldi, Andrew Zisserman, and CV Jawahar. \"Cats and dogs\". In: 2012 IEEE conference on computer vision and pattern recognition. IEEE. 2012, pp. 3498-3505.", + "[RDK+22] William A Gaviria Rojas, Sudnya Diamos, Keertan Ranjan Kini, David Kanter, Vijay Janapa Reddi, and Cody Coleman. \"The dollar street dataset: Images representing the geographic and socioeconomic diversity of the world\". In: Thirty-sixth Conference on Neural Information Processing Systems Datasets and Benchmarks Track. 2022.", + "[RFK+19] Aravind Rajeswaran, Chelsea Finn, Sham M Kakade, and Sergey Levine. \"Meta-learning with implicit gradients\". In: Advances in neural information processing systems 32 (2019).", + "[RKH+21] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. \"Learning transferable visual models from natural language supervision\". In: arXiv preprint arXiv:2103.00020. 
2021.", + "[RLZ+24] Vikram V Ramaswamy, Sing Yu Lin, Dora Zhao, Aaron Adcock, Laurens van der Maaten, Deepti Ghadiyaram, and Olga Russakovsky. \"GeoDE: a geographically diverse evaluation dataset for object recognition\". In: Advances in Neural Information Processing Systems. 2024.", + "[RRS+19] Benjamin Recht, Rebecca Roelofs, Ludwig Schmidt, and Vaishaal Shankar. \"Do ImageNet Classifiers Generalize to ImageNet?\" In: International Conference on Machine Learning (ICML). 2019.", + "[SGB+22] Damien Scieur, Gauthier Gidel, Quentin Bertrand, and Fabian Pedregosa. \"The curse of un-rolling: Rate of differentiating through optimization\". In: Advances in Neural Information Processing Systems 35 (2022), pp. 17133–17145.", + "[SN17] Leslie N. Smith Smith and Topin Nicholay. \"Super-Convergence: Very Fast Training of Neural Networks Using Large Learning Rates\". In: ArXiv preprint arXiv:1708.07120. 2017.", + "[SRR+22] Aarohi Srivastava, Abhinav Rastogi, Abhishek Rao, Abu Awal Md Shoeb, Abubakar Abid, Adam Fisch, Adam R Brown, Adam Santoro, Aditya Gupta, Adrià Garriga-Alonso, et al. “Beyond the imitation game: Quantifying and extrapolating the capabilities of language models”. In: arXiv preprint arXiv:2206.04615 (2022).", + "[SSS+11] Johannes Stallkamp, Marc Schlipsing, Jan Salmen, and Christian Igel. \"The German traffic sign recognition benchmark: a multi-class classification competition\". In: The 2011 international joint conference on neural networks. 2011." + ], + "bbox": [ + 112, + 89, + 913, + 912 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 488, + 935, + 509, + 946 + ], + "page_idx": 23 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[SSS+22] Mirac Suzgun, Nathan Scales, Nathanael Scharli, Sebastian Gehrmann, Yi Tay, Hyung Won Chung, Aakanksha Chowdhery, Quoc V Le, Ed H Chi, Denny Zhou, et al. \"Challenging big-bench tasks and whether chain-of-thought can solve them\". 
In: arXiv preprint arXiv:2210.09261 (2022).", + "[TGZ+23] Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li, Carlos Guestrin, Percy Liang, and Tatsunori B. Hashimoto. Stanford Alpaca: An Instruction-following LLaMA model. https://github.com/tatsu-lab/stanford_alpaca.2023.", + "[TMH+24] Gemma Team, Thomas Mesnard, Cassidy Hardin, Robert Dadashi, Surya Bhupatiraju, Shreya Pathak, Laurent Sifre, Morgane Riviere, Mihir Sanjay Kale, Juliette Love, et al. \"Gemma: Open models based on gemini research and technology\". In: arXiv preprint arXiv:2403.08295 (2024).", + "[TSF+16] Bart Thomee, David A. Shamma, Gerald Friedland, Benjamin Elizalde, Karl Ni, Douglas Poland, Damian Borth, and Li-Jia Li. \"YFCC100M: The New Data in Multimedia Research\". In: Communications of the ACM (2016).", + "[VLW+18] Bastiaan S Veeling, Jasper Linmans, Jim Winkens, Taco Cohen, and Max Welling. \"Rotation equivariant CNNs for digital pathology\". In: Medical Image Computing and Computer Assisted Intervention-MICCAI 2018: 21st International Conference, Granada, Spain, September 16-20, 2018, Proceedings, Part II 11. 2018.", + "[Web24] Team Webdataset. webdataset. 2024. URL: https://www.github.com/webdataset/webdataset.", + "[Wer90] Paul J Werbos. \"Backpropagation through time: what it does and how to do it\". In: Proceedings of the IEEE 78.10 (1990), pp. 1550-1560.", + "[WGX+19] Haohan Wang, Songwei Ge, Eric P Xing, and Zachary C Lipton. \"Learning robust global representations by penalizing local predictive power\". In: Neural Information Processing Systems (NeurIPS) (2019).", + "[WWS+22] Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. \"Chain-of-thought prompting elicits reasoning in large language models\". In: Advances in neural information processing systems 35 (2022), pp. 24824-24837.", + "[XHE+10] Jianxiong Xiao, James Hays, Krista A Ehinger, Aude Oliva, and Antonio Torralba. 
\"Sun database: Large-scale scene recognition from abbey to zoo\". In: Computer Vision and Pattern Recognition (CVPR). 2010.", + "[XMG+24] Mengzhou Xia, Sadhika Malladi, Suchin Gururangan, Sanjeev Arora, and Danqi Chen. \"Less: Selecting influential data for targeted instruction tuning\". In: arXiv preprint arXiv:2402.04333 (2024).", + "[YLH+14] Peter Young, Alice Lai, Micah Hodosh, and Julia Hockenmaier. \"From image descriptions to visual denotations: New similarity metrics for semantic inference over event descriptions\". In: Transactions of the Association for Computational Linguistics. 2014.", + "[ZP00] Geoffrey Zweig and Mukund Padmanabhan. \"Exact alpha-beta computation in logarithmic space with application to MAP word graph construction\". In: Sixth International Conference on Spoken Language Processing, ICSLP 2000 / INTERSPEECH 2000, Beijing, China, October 16-20, 2000. ISCA, 2000, pp. 855-858. DOI: 10.21437/ICSLP.2000-404. URL: https://doi.org/10.21437/ICSLP.2000-404.", + "[ZPK+19] Xiaohua Zhai, Joan Puigcerver, Alexander Kolesnikov, Pierre Ruyssen, Carlos Riquelme, Mario Lucic, Josip Djolonga, Andre Susano Pinto, Maxim Neumann, Alexey Dosovitskiy, et al. \"A large-scale study of representation learning with the visual task adaptation benchmark\". In: arXiv preprint arXiv:1910.04867. 2019.", + "[ZSP+21] Miao Zhang, Steven W Su, Shirui Pan, Xiaojun Chang, Ehsan M Abbasnejad, and Reza Haffari. \"idarts: Differentiable architecture search with stochastic implicit gradients\". In: International Conference on Machine Learning. PMLR. 2021, pp. 12557-12566." 
+ ], + "bbox": [ + 114, + 89, + 898, + 871 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 488, + 934, + 508, + 946 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "A Calculating metagradients with REPLAY", + "text_level": 1, + "bbox": [ + 112, + 88, + 584, + 109 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "This appendix contains supplementary material for Section 2. We describe two algorithms in detail: stepwise AD, and our own algorithm REPLAY. Refer to Section 2 for the notation used in this appendix.", + "bbox": [ + 111, + 119, + 883, + 151 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "A.1 Warmup: Step-wise AD", + "text_level": 1, + "bbox": [ + 112, + 170, + 379, + 188 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "We fully describe step-wise AD in Algorithm 2. The algorithm requires storing all $T$ optimizer states, but requires constant memory overhead for each AD call (as each AD call is over a single step), making it feasible to compute for small setups.", + "bbox": [ + 111, + 195, + 883, + 242 + ], + "page_idx": 25 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [ + "Algorithm 2: metagradients in $\\mathcal{O}(T)$ space." + ], + "code_body": "1 // Store each optimizer state on disk \n2 $\\{s_i\\}_{i=0}^T \\leftarrow$ Train model via $A(z)$ \n3 \n4 // Variables; shorthand for $\\frac{\\partial f(z)}{\\partial z}$ and $\\frac{\\partial f(z)}{\\partial s_T}$ \n5 $\\bar{z} \\gets 0$ \n6 $\\bar{s}_T \\leftarrow \\frac{\\partial g(s_T)}{\\partial s_T} \\quad //$ One reverse-mode AD call \n7 \n8 // Reverse-mode differentiate step-by-step \n9 for $s_i \\gets s_{T-1}$ to $s_0$ do \n10 // One reverse-mode AD call. Left: $\\nabla_{s_i}f$ . Right: contribution to $\\nabla_{z}f$ at $i$ . 
\n11 $\\bar{s}_i \\gets \\bar{s}_{i+1} \\cdot \\frac{\\partial h_i(s_i, z)}{\\partial s_i}, \\quad \\bar{z}_i \\gets \\bar{s}_{i+1} \\cdot \\frac{\\partial h_i(s_i, z)}{\\partial z}$ \n12 \n13 $\\bar{z} \\gets \\bar{z} + \\bar{z}_i \\quad //$ Accumulate metagradient \n14 \n15 Return $\\bar{z}$", + "guess_lang": "txt", + "bbox": [ + 116, + 280, + 830, + 523 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "A.2 REPLAY", + "text_level": 1, + "bbox": [ + 112, + 566, + 236, + 580 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "We now describe REPLAY, our method for calculating metagradients. For a free parameter $k \\in \\mathbb{N}$ , REPLAY requires storing $\\mathcal{O}(k\\log_k(T))$ optimizer states and an additional $\\mathcal{O}(\\log_k(T))$ factor of computation. The free parameter $k$ controls the trade-off between storage and required compute. We fully describe REPLAY in Algorithm 3. REPLAY modifies Algorithm 2 by retrieving the optimizer states in reverse order using a $k$ -ary tree structure in lieu of a list of all the stored states.", + "bbox": [ + 111, + 592, + 883, + 667 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "A.2.1 Lazy $k$ -ary tree", + "text_level": 1, + "bbox": [ + 112, + 686, + 279, + 702 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "We now describe the $k$ -ary tree structure that underlies REPLAY; for a visual reference of this tree with $k = 2$ , see Figure 3. For ease of analysis we parameterize the total number of states as $n = T + 1$ (and therefore take $n - 1$ total training steps) when describing this data structure, and assume WLOG that $n$ is an integer power of $k$ . At a high level, traversing this tree recursively replays retraining to recover all the optimizer states in reverse order, while deleting states that are no longer needed. 
We call this tree \"lazy\" because it retrains only when required to obtain states that are not yet retrieved.", + "bbox": [ + 111, + 710, + 883, + 801 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "The tree is a complete $k$ -ary tree with $n$ leaves (and therefore $\log_k(n)$ depth) structured as follows. We start at the root, then recursively define the rest of the tree. Every node in the tree represents a single optimizer state. The root represents state $s_0$ . To recursively define the remaining nodes: each non-leaf node $s_i$ at depth $d$ has $k$ equally spaced (in terms of state number) children starting—from left to right—at state $s_i$ and ending at $s_{i+n/k^{d+1}}$ . This means that the leaves correspond—from left to right—to the states $s_0, s_1, \ldots, s_{n-1}$ .", + "bbox": [ + 111, + 801, + 883, + 893 + ], + "page_idx": 25 + }, + { + "type": "page_number", + "text": "26", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "We reduce the problem of iterating over the states in reverse to the problem of reverse in-order traversing this tree and yielding just the leaves—this is exactly the states in reverse order. A reverse in-order traversal for this $k$ -ary tree requires repeatedly: recursively traversing child nodes from largest to smallest, then visiting the parent node. We design the specifics of this traversal to maximize space and compute efficiency. To access the children of a parent node at traversal time, we replay model training from the smallest child state (which is stored in the parent state) to the largest child state and store all the children. We perform this operation recursively each time we traverse a node. 
After traversing the node's left side (i.e., after ascending from this node), we delete all its child states.", + "bbox": [ + 109, + 90, + 887, + 210 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Reverse in-order traversing this tree requires storing at most $k \log_k(n)$ optimizer states at a time, and in aggregate requires retraining the model $\log_k(n)$ times. The argument for each is straightforward. Storage: the traversal requires storing at most $k$ states for each level that it descends (we store $k$ states whenever we first traverse to a parent node) and we remove $k$ states for each level that the traversal ascends (we remove $k$ states after we are done with the left traversal of a parent). Compute: we replay training to reinstantiate the children of every parent node a single time. The $k^d$ parent nodes at level $d$ each require replaying $\mathcal{O}(n / k^d)$ states to reinstantiate children. Therefore, in a traversal, each level requires $\mathcal{O}(k^d \cdot n / k^d) = \mathcal{O}(n)$ optimizer steps. There are $\log_k(n)$ levels with parent nodes, which means a total of $\mathcal{O}(n \log_k(n))$ optimizer steps, or a multiplicative factor of $\mathcal{O}(\log_k(n))$ steps compared to model training.", + "bbox": [ + 109, + 210, + 883, + 349 + ], + "page_idx": 26 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [ + "Algorithm 3: REPLAY. metagradients in $\mathcal{O}(k\log_k(T))$ space." 
+ ], + "code_body": "1 $T\\gets$ Lazy $k$ -ary tree for $\\mathcal{A}(z)$ // Make lazy $k$ -ary tree of Appendix A.2 \n2 \n3 // Variables; shorthand for $\\frac{\\partial f(z)}{\\partial z}$ and $\\frac{\\partial f(z)}{\\partial s_T}$ \n4 $\\bar{z}\\gets 0$ \n5 $\\bar{s}_T\\gets \\frac{\\partial g(s_T)}{\\partial s_T}$ // One reverse-mode AD call \n6 \n7 // Reverse-mode differentiate step-by-step; traverse $T$ instead of stored states \n8 for $s_i\\gets s_{T - 1}$ to $s_0\\in$ reverse_inorder_traversal(T) do \n9 // One reverse-mode AD call. Left: $\\nabla_{s_i}f$ . Right: contribution to $\\nabla_{z}f$ at i. \n10 $\\bar{s}_i\\gets \\bar{s}_{i + 1}\\cdot \\frac{\\partial h_i(s_i,z)}{\\partial s_i},\\quad \\bar{z}_i\\gets \\bar{s}_{i + 1}\\cdot \\frac{\\partial h_i(s_i,z)}{\\partial z}$ \n11 \n12 $\\bar{z}\\gets \\bar{z} +\\bar{z}_i$ // Accumulate metagradient \n13 \n14 Return $\\bar{z}$", + "guess_lang": "txt", + "bbox": [ + 117, + 388, + 830, + 616 + ], + "page_idx": 26 + }, + { + "type": "page_number", + "text": "27", + "bbox": [ + 488, + 934, + 508, + 946 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "B Smooth Model Training", + "text_level": 1, + "bbox": [ + 112, + 87, + 413, + 109 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "B.1 Omitted Figures", + "text_level": 1, + "bbox": [ + 112, + 119, + 312, + 140 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/af3a8f87efacd4acaa3c56185ccfd5ffdb24bf0c973fc76594255668269e6e61.jpg", + "image_caption": [ + "Figure 11: The factors affecting metasmoothness of training a ResNet-9 on the CIFAR-10 dataset. See §3 for details." 
+ ], + "image_footnote": [], + "bbox": [ + 122, + 157, + 867, + 301 + ], + "page_idx": 27 + }, + { + "type": "page_number", + "text": "28", + "bbox": [ + 488, + 934, + 508, + 946 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/ed91df62b477bd91c24737b738d49397bea204557c82454c3f8740aacb9e13e6.jpg", + "image_caption": [ + "Non-smooth (Example #1118)" + ], + "image_footnote": [], + "bbox": [ + 158, + 109, + 485, + 281 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/b1658df434e665ee2b41168eec4c27970ddad44acffd12d60b48a59d7a89737a.jpg", + "image_caption": [ + "Smooth (Example #1118)" + ], + "image_footnote": [], + "bbox": [ + 506, + 109, + 831, + 280 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/49bd3d8ee76b74e6943123a4c519082b91d14235276199ab531f34a03a34cc78.jpg", + "image_caption": [ + "Non-smooth (Example #3349)" + ], + "image_footnote": [], + "bbox": [ + 158, + 313, + 483, + 481 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/719d47ec6f96ce91103c9fde603e154cba7baa02ad50f8e48186c6c41b3a97ff.jpg", + "image_caption": [ + "Smooth (Example #3349)" + ], + "image_footnote": [], + "bbox": [ + 508, + 313, + 831, + 481 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/72ebdc565733f1e246ef1530655df2fd0a0556ed6198f739627b7b58781f7021.jpg", + "image_caption": [ + "Non-smooth (Example #10600)" + ], + "image_footnote": [], + "bbox": [ + 158, + 515, + 483, + 684 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/7fbbbef73dee36330195f6d0fc527ad0a7c1c618fee58e5669d44bef1d5bb793.jpg", + "image_caption": [ + "Smooth (Example #10600)" + ], + "image_footnote": [], + "bbox": [ + 508, + 515, + 831, + 684 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/a5641668a6941e25c895bd575ef0e74375dbeaa1da5fcb13884916b4ecfa5360.jpg", + "image_caption": [ + "Non-smooth (Example #15578)", + "Figure 12: Additional loss landscape 
visualizations." + ], + "image_footnote": [], + "bbox": [ + 158, + 718, + 483, + 886 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/87b96baf0a2e4bfad4e1c9d8f8d653181ea5e5f71efa5c492a82e9d1324d2dd6.jpg", + "image_caption": [ + "Smooth (Example #15578)" + ], + "image_footnote": [], + "bbox": [ + 508, + 718, + 831, + 886 + ], + "page_idx": 28 + }, + { + "type": "page_number", + "text": "29", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "C Metagradients for DataComp", + "text_level": 1, + "bbox": [ + 112, + 87, + 473, + 109 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "This appendix contains pseudocode for the main algorithm used to do dataset selection for DataComp. It also contains additional implementation details on how metagradients were applied to CLIP, and how they were specifically applied to the DataComp setting.", + "bbox": [ + 111, + 119, + 885, + 167 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "C.1 Dataset Selection Using MGD", + "text_level": 1, + "bbox": [ + 112, + 185, + 437, + 204 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "When implementing Algorithm 1, there are several differences from the pseudocode below: firstly, rather than selecting $\\mathbf{m}$ fully randomly every step, we randomly select a shard comprising fraction $p$ of the data and take steps on all datapoints in the shard (see Section C.2). To mitigate overfitting, we also bake a \"minibatch fraction\" $q$ into our model output function $\\phi$ . For example, if $\\phi$ calculates model loss on the ImageNet train set, each time $\\phi$ is called, we randomly sample fraction $q$ of the ImageNet train set to evaluate on.", + "bbox": [ + 111, + 210, + 885, + 303 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Adapting the CLIP loss function to our surrogate learning algorithm. 
Here, we explain how dataweights are incorporated into the CLIP loss function—the formulation given in Section 4.1 is actually slightly simplified and incorrect, as it does not account for cross terms in the CLIP contrastive loss. As a refresher, we first state the \"vanilla\" CLIP loss function, $\ell$ , as it is defined in [RKH+21]. Let $b$ be the batch size and $d$ be the embedding dimension, and $\mathbf{x}$ be the training batch at timestep $k$ . Recall that the CLIP model internally has two \"submodules\": an image embedder, and a text embedder. We then use these to obtain image embeddings $E_{I} \in \mathbb{R}^{b \times d}$ and text embeddings $E_{T} \in \mathbb{R}^{b \times d}$ from $\mathbf{x}$ . We then compute the image-wise scores, or logits, for this batch as $S = E_{I}E_{T}^{\top}$ . Then, we can define the CLIP loss (as a function of the logits) as", + "bbox": [ + 111, + 320, + 883, + 453 + ], + "page_idx": 29 + }, + { + "type": "equation", + "text": "\n$$\nL (S) = \frac {1}{2} (L _ {I} (S) + L _ {T} (S)),\n$$\n", + "text_format": "latex", + "bbox": [ + 400, + 453, + 596, + 482 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "where $L_{I}$ and $L_{T}$ are row-wise and column-wise cross-entropy losses, respectively:", + "bbox": [ + 111, + 487, + 712, + 503 + ], + "page_idx": 29 + }, + { + "type": "equation", + "text": "\n$$\nL _ {I} (S) = \sum_ {i = 1} ^ {b} \log \left(\frac {\exp (S _ {i , i})}{\sum_ {j = 1} ^ {b} \exp (S _ {i , j})}\right), \quad L _ {T} (S) = \sum_ {i = 1} ^ {b} \log \left(\frac {\exp (S _ {i , i})}{\sum_ {j = 1} ^ {b} \exp (S _ {j , i})}\right).\n$$\n", + "text_format": "latex", + "bbox": [ + 236, + 513, + 756, + 556 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "We now wish to relax $L$ into a new function $L'$ that supports an additional input $\mathbf{z} \in \mathbb{R}^n$ , where $\frac{\partial L'}{\partial \mathbf{z}}$ resembles the metagradients with respect 
to dataweights. In order to do this, we imagine passing the entire dataset $D$ into our embedder to obtain $E_I'$ and $E_T'$ and take our new logits $S' = E_I'E_T'^{\top} \in \mathbb{R}^{n \times n}$.", + "bbox": [ + 111, + 568, + 882, + 614 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "There are some additional key conditions our relaxation $L'$ should satisfy. Particularly: when $\mathbf{z} = \mathbf{0}_n$ , we should recover the normal CLIP loss $L$ , and when $\mathbf{z}$ is all 0's except for a single entry $i$ , $L'$ should act as if $i$ had been appended to the original batch $\mathbf{x}$ . In addition, $L'$ should always have meaningful partials with respect to $\mathbf{z}$ , even when some values in $\mathbf{z}$ are 0.", + "bbox": [ + 111, + 614, + 883, + 674 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Letting $\mathbf{1}_{i = j}$ and $\mathbf{1}_{i\neq j}$ be indicator variables and letting $\mathbf{1}_k\in \{0,1\} ^n$ be the indicator vector for the $k$ -th batch, we find that the definition", + "bbox": [ + 111, + 675, + 883, + 705 + ], + "page_idx": 29 + }, + { + "type": "equation", + "text": "\n$$\nL ^ {\prime} \left(S ^ {\prime}, \mathbf {z}\right) = L _ {I} ^ {\prime} \left(S ^ {\prime}, \mathbf {z}\right) + L _ {T} ^ {\prime} \left(S ^ {\prime}, \mathbf {z}\right),\n$$\n", + "text_format": "latex", + "bbox": [ + 380, + 705, + 612, + 722 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "where", + "bbox": [ + 112, + 729, + 161, + 742 + ], + "page_idx": 29 + }, + { + "type": "equation", + "text": "\n$$\nL _ {I} ^ {\prime} \left(S ^ {\prime}, \mathbf {z}\right) = \sum_ {i = 1} ^ {n} \left(z _ {i} + \left(\mathbf {1} _ {k}\right) _ {i}\right) \log \left(\frac {\exp \left(S _ {i , i} ^ {\prime}\right)}{\sum_ {j = 1} ^ {n} \exp \left(S _ {i , j} ^ {\prime}\right) \left(\mathbf {1} _ {i = j} + \mathbf {1} _ {i \neq j} \left(z _ {j} + 
\\left(\\mathbf {1} _ {k}\\right) _ {j}\\right)\\right)}\\right)\n$$\n", + "text_format": "latex", + "bbox": [ + 238, + 739, + 751, + 781 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "and", + "bbox": [ + 112, + 787, + 145, + 800 + ], + "page_idx": 29 + }, + { + "type": "equation", + "text": "\n$$\nL _ {T} ^ {\\prime} (S ^ {\\prime}, \\mathbf {z}) = \\sum_ {i = 1} ^ {b} \\left(z _ {i} + \\left(\\mathbf {1} _ {k}\\right) _ {i}\\right) \\log \\left(\\frac {\\exp \\left(S _ {i , i} ^ {\\prime}\\right)}{\\sum_ {j = 1} ^ {n} \\exp \\left(S _ {j , i} ^ {\\prime}\\right) \\left(\\mathbf {1} _ {i = j} + \\mathbf {1} _ {i \\neq j} \\left(z _ {j} + \\left(\\mathbf {1} _ {k}\\right) _ {j}\\right)\\right)}\\right)\n$$\n", + "text_format": "latex", + "bbox": [ + 236, + 797, + 753, + 839 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "satisfy these conditions.", + "bbox": [ + 112, + 844, + 290, + 859 + ], + "page_idx": 29 + }, + { + "type": "page_footnote", + "text": "3The CLIP model scales these logits by a temperature parameter $\\tau$ before applying the softmax. While we omit $\\tau$ in our definitions, it can be easily incorporated. All our experiments use temperature scaling.", + "bbox": [ + 111, + 869, + 883, + 896 + ], + "page_idx": 29 + }, + { + "type": "page_number", + "text": "30", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Finally, we let define the loss for the entire batch $\\ell'$ as a function of $\\mathbf{z}$ and model parameters $\\theta$ which outputs the loss calculated according to $L'$ above. 
To summarize, letting $\mathbf{x}^{(t)}$ denote the $t$ -th training batch, the loss function $\ell_t$ at step $t$ of our surrogate learning algorithm $\mathcal{A}'$ for CLIP training is:", + "bbox": [ + 109, + 90, + 885, + 138 + ], + "page_idx": 30 + }, + { + "type": "equation", + "text": "\n$$\n\ell_ {t} ^ {\prime} (\theta) := \left\{ \begin{array}{l l} \ell (\mathbf {x} ^ {(t)}; \theta) & \text {if } t \neq k \\ \ell^ {\prime} (\mathbf {z}; \theta) & \text {if } t = k. \end{array} \right.\n$$\n", + "text_format": "latex", + "bbox": [ + 390, + 147, + 606, + 188 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "We find that this empirically works well for obtaining meaningful metagradients with respect to dataweights in the CLIP setting, and yields strong dataset selection results.", + "bbox": [ + 109, + 194, + 888, + 226 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "C.2 Scaling MGD for CLIP and DataComp", + "text_level": 1, + "bbox": [ + 109, + 243, + 511, + 263 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "MGD is highly scalable, allowing it to be applied to large-scale settings like training CLIP models. In particular, computing metagradients is only up to a constant factor more expensive than training a model normally. Here, we outline challenges we faced in scaling MGD in this setting, and how they were resolved. Specifically, we will explain how we efficiently calculated metagradients for CLIP models and efficiently tracked/shuffled our dataset selection from step-to-step despite its large storage footprint.", + "bbox": [ + 109, + 268, + 883, + 347 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Computing metagradient. Due to the large batch size used in the CLIP contrastive loss, we implement manual gradient checkpointing to make the operations computationally feasible on our hardware. 
The most memory-intensive operations are model forward passes (and their gradients): obtaining the image and label embeddings given raw pixel data and tokens. So, we manually make gradient checkpoints before this operation, allowing us to run the embedder in minibatches to avoid memory issues. This setup also naturally lends itself to parallelization across multiple GPUs, which we make use of to further speed up our computations.", + "bbox": [ + 109, + 363, + 883, + 470 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Loading, writing, and storing data. Due to the data-intensive nature of training large models like CLIP and our need to frequently produce new datasets at each optimization step, we found that using the web-dataset [Web24] format given by DataComp was restrictively slow. To circumvent this, we rewrote all data following the format of FFCV [LIE+22], allowing us to load and write data much faster. Specifically, we divided the entire candidate pool into 8 base shards. Once we trained a model, we choose one of the 8 shards, compute metagradients corresponding to all datapoints in the shard, take a gradient step on them, and rewrite the shard. This roughly corresponds to $p = \frac{1}{8}$ in Algorithm 1, which we empirically found worked well for optimizing. In following steps, we always choose one of the 8 original shards to calculate metagradients for—this ensures that points removed from the dataset in some optimization step can return if they have a negative metagradient.", + "bbox": [ + 109, + 487, + 883, + 638 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "We also observed that always stepping on the sign causes the sizes of the shards to grow over time: stepping based on the sign of the metagradient does not decrease the weight on a positive-weight datapoint if its dataweight is already 0, so our steps are biased towards increasing the size of the shards. 
To combat this blowup, after some number of optimization steps, we choose a fixed shard size and enforce that subsequent steps must not change the size of the shards—the step size thereafter is controlled by hyperparameter $q$ representing the fraction of datapoints in a shard which are incremented. We experimented both with randomly sampling which points are added or removed, and stepping on the datapoints with the top $q$ and bottom $q$ metagradient; the latter seems to give empirically better performance.", + "bbox": [ + 109, + 640, + 883, + 761 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "To maintain randomness during shuffling, we implement an 8-way dataloader which would shuffle all 8 shards individually. Then, to sample a batch of $b$ datapoints, we would sample $b / 8$ datapoints from each shard and concatenate them to fill our batch. This works better than simply sampling our entire batch from a single shard, as (especially in later optimization steps) shards may contain a high number of duplicate datapoints, which causes CLIP's contrastive loss function to misbehave if they appear in the same batch.", + "bbox": [ + 109, + 761, + 880, + 835 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "To minimize disk space used, old shards can be deleted once they become \"stale\". Specifically, if shard s is rewritten into shard $s'$ , all future optimization steps will never read s again, and s can safely be deleted. 
Thus, when running MGD for a large number of steps and potentially rewriting each shard multiple times, the total disk space used by our algorithm is constant in the number of steps we take: it stores the 8 most recently written shards on disk at any given time, and any other shards are deleted to save space.", + "bbox": [ + 109, + 835, + 883, + 912 + ], + "page_idx": 30 + }, + { + "type": "page_number", + "text": "31", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "C.3 Details Pertaining to the DataComp Benchmark", + "text_level": 1, + "bbox": [ + 112, + 89, + 591, + 108 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Setting. We provide a brief summary of the DataComp competition here, and we refer readers to the original paper [GIF+24]. DataComp is a framework to compare different training dataset selection techniques. Participants submit a training dataset (which, for our purposes, is a subset of a larger dataset), upon which a CLIP model is trained from scratch with a fixed learning algorithm, model architecture, and number of training steps. We focus on DataComp-small, which has a candidate pool of 12.8 million samples. The number of training steps in this case is also fixed at 12.8 million samples.", + "bbox": [ + 109, + 114, + 883, + 205 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "We try to match the optimization hyperparameters enforced by DataComp as closely as possible. 
As a refresher, our ADAM [KB15] update step can be written as", + "bbox": [ + 111, + 205, + 883, + 236 + ], + "page_idx": 31 + }, + { + "type": "equation", + "text": "\n$$\n\theta_ {t + 1} = \theta_ {t} - \alpha_ {t} \cdot \left(m _ {t} / \left(\sqrt {v _ {t} + \varepsilon_ {\mathrm {root}}} + \varepsilon\right) + \lambda \theta_ {t}\right) \tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 341, + 247, + 883, + 267 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "where $m_{t}$ and $v_{t}$ are running estimates of the first and second moments of the gradients, respectively, $\lambda$ represents weight decay, $\alpha$ represents the learning rate, and $\varepsilon$ and $\varepsilon_{\mathrm{root}}$ are hyperparameters to avoid blowup. Our training hyperparameters can be found in Table 1 and are identical to those mandated by DataComp-small, aside from a positive $\varepsilon_{\mathrm{root}}$ added for numerical stability. The values of $\varepsilon_{\mathrm{root}}$ and $k$ (the step at which metagradients are calculated) were chosen to empirically maximize metasmoothness.", + "bbox": [ + 109, + 276, + 883, + 351 + ], + "page_idx": 31 + }, + { + "type": "table", + "img_path": "images/1d53986854c1c4e6619d16a37b54e29fc0b88bdf3520d589c05111d30ec1c831.jpg", + "table_caption": [ + "Table 1: Hyperparameters for the CLIP DataComp experiments." + ], + "table_footnote": [], + "table_body": "<table><tr><td>
HyperparameterValue
DataComp Scalesmall
ModelViT-B/32
Train compute (MACs)9.5 × 1016
Pool size12.8M
# samples seen12.8M
Batch size4096
Training batches3125
k2800
Learning rate5 × 10-4
AdamW β10.9
AdamW β20.98
AdamW εroot1 × 10-17
Warmup500
", + "bbox": [ + 354, + 391, + 635, + 623 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Our experiments are also run on an incomplete subset of the entire DataComp candidate pool. DataComp did not store the raw image and text files when assembling their dataset; they only stored a list of URL's to download data from. Due to the nature of the internet, for various reasons, some of these URL's no longer point to the same data (or no longer point to any data at all). Thus, after ignoring these broken links, our candidate pool is only around $80\\%$ of the size of the original DataComp candidate pool when it was collected in 2023. All our results are obtained by running our methods on this subset of the DataComp pool.", + "bbox": [ + 109, + 637, + 883, + 743 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Evaluation tasks. In order to ensure that our method is truly improving trained models' performances on the entire target distribution and not overfitting to the target set, for each of the 38 evaluation tasks used by DataComp, we attempted to separately create a disjoint target and validation set (DataComp only creates test sets for each task). Thus, metagradients were computed on the target sets and model performance was evaluated on the validation set, before submitting with the official DataComp script and evaluating on the test sets. 
This ensures that our method's generalization ability is being evaluated, and we are not overfitting to our target set.", + "bbox": [ + 109, + 762, + 883, + 867 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "For various reasons, creating target splits was not possible for all 38 tasks; we summarize our setup in Table 2.", + "bbox": [ + 111, + 868, + 882, + 897 + ], + "page_idx": 31 + }, + { + "type": "page_number", + "text": "32", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 31 + }, + { + "type": "table", + "img_path": "images/bbd757943fe4c1860ea2472611269a98d62befd2854367d7ed52cbad920bad35.jpg", + "table_caption": [ + "Table 2: All DataComp evaluation tasks. The \"Target set\" column refers to whether metagradients were taken on the target set corresponding to this dataset." + ], + "table_footnote": [], + "table_body": "
DatasetTaskTest sizeTrain sizeVal sizeMain metricTarget set
Caltech-101 [FFP04]Object recognition60852754306mean per class
CIFAR-10 [Kri09]Visual recognition10000450005000accuracy
CIFAR-100 [Kri09]Visual recognition10000450005000accuracy
CLEVR Counts [JHV+17; ZPK+19]Counting15000650005000accuracy
CLEVR Distance [JHV+17; ZPK+19]Distance prediction15000650005000accuracy
Country211 [RKH+21; TSF+16]Geolocation21100379804220accuracy
DTD [CMK+14]Texture classification18803384376accuracy
EuroSAT [HBD+19; ZPK+19]Satellite imagery recognition5400194402160accuracy
FGVC Aircraft [MRK+13]Aircraft recognition33336001666mean per class
Food-101 [BGV14]Food recognition25250707505000accuracy
GTSRB [SSS+11]Traffic sign recognition12630352893920accuracy
ImageNet 1k [DDS+09]Visual recognition5000012761675000accuracy
ImageNet Sketch [WGX+19]Visual recognition50889N/AN/Aaccuracy*
ImageNet V2 [RKS+19]Visual recognition10000N/AN/Aaccuracy*
ImageNet-A [HZB+19]Visual recognition7500N/AN/Aaccuracy*
ImageNet-O [HZB+19]Visual recognition2000N/AN/Aaccuracy*
ImageNet-R [HBM+20]Visual recognition30000N/AN/Aaccuracy*
KITTI distance [GLU12; ZPK+19]Distance prediction711N/AN/Aaccuracy
MNIST [LeC98]Digit recognition10000550005000accuracy
ObjectNet [BMA+19]Visual recognition18574N/AN/Aaccuracy*
Oxford Flowers-102 [NZ08]Flower recognition61491836204mean per class
Oxford-IIIT Pet [PVZ+12; ZPK+19]Pet classification36693312368mean per class
Pascal VOC 2007 [EVW+10]Object recognition14976140961566accuracy
PatchCamelyon [VLW+18; ZPK+19]Metastatic tissue cls.327682899125000accuracy
Rendered SST2 [ZPK+19]Sentiment classification18217013779accuracy
RESISC45 [CHL17; ZPK+19]Satellite imagery recognition6300226802520accuracy
Stanford Cars [KSD+13]Vehicle recognition80417329814accuracy
STL-10 [CNL11]Visual recognition80004500500accuracy
SUN-397 [XHE+10]Scene recognition108753N/AN/Aaccuracy
SVHN [NWC+11; ZPK+19]Digit recognition26032682575000accuracy
iWildCam [BAC+21; KSM+20]Animal recognition427911470845000macro F1 score
Camelyon17 [BGM+18; KSM+20]Metastatic tissue cls.850543659005000accuracy
FMoW [CFW+18; KSM+20]Satellite imagery recognition221081032615000worst-region acc.
Dollar Street [RDK+22]Object recognition3503138421537worst-income top-5 acc.
GeoDE [RLZ+24]Object recognition12438444884943worst-region acc.
Flickr30k [YLH+14]Image and text retrieval31014N/AN/AR@1§
MSCOCO [LMB+14]Image and text retrieval5000N/AN/AR@1§
WinoGAViL [BBY+22]Commonsense association3563N/AN/AJaccard score§
", + "bbox": [ + 114, + 132, + 883, + 542 + ], + "page_idx": 32 + }, + { + "type": "page_footnote", + "text": "*No train or val set exists for this dataset, so we were unable to create disjoint target and val sets.", + "bbox": [ + 130, + 861, + 691, + 875 + ], + "page_idx": 32 + }, + { + "type": "page_footnote", + "text": "We were unable to use this dataset due to technical difficulties.", + "bbox": [ + 132, + 875, + 501, + 883 + ], + "page_idx": 32 + }, + { + "type": "page_footnote", + "text": "Both the train and val sets were used by DataComp to make their test set, so we were unable to create disjoint target and val sets.", + "bbox": [ + 132, + 886, + 880, + 898 + ], + "page_idx": 32 + }, + { + "type": "page_footnote", + "text": "${}^{S}$ Retrieval tasks were not used for metagradients.", + "bbox": [ + 132, + 898, + 419, + 911 + ], + "page_idx": 32 + }, + { + "type": "page_number", + "text": "33", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "D Selecting IFT data", + "text_level": 1, + "bbox": [ + 112, + 87, + 356, + 109 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "In this section, we describe the details of the IFT setting of Xia et al. [XMG+24], as well as the details of our method.", + "bbox": [ + 109, + 119, + 883, + 151 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Setting. The setting contains a fixed data pool: instruction fine-tuning data from a data pool consisting of four combined IFT datasets (cf. Table 4 and Xia et al. [XMG+24] for more information). The goal is to select the data that yields the best possible task performance for a LoRA fine-tuning run. We adapt a LoRA to a Gemma-2B model (the pretraining-only Gemma-2B model) using the LoRA configuration from Xia et al. [XMG+24].", + "bbox": [ + 109, + 167, + 883, + 247 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Data splits. 
See Table 3 for a description of the available data for each task, along with the task setup details. Xia et al. [XMG+24] constructed these extra samples by drawing from the ICL samples given in the tasks originally. Note that we drop TydiQA from the original work of Xia et al. [XMG+24] as there are not enough samples to select with (there is only one from each category, for a total of 7).", + "bbox": [ + 109, + 263, + 883, + 327 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Method. We execute Algorithm 1 with $k$ as 150 steps from the end of training and the Bernoulli parameter $p$ controlling the step size as 0.2. At each step, we choose a \"minibatch\" with a size equal to half the target set and a quarter of the target set for BBH and MMLU, respectively (that is, we only select to optimize performance on a fraction of the target set at a time). We model select over iterates and hyperparameters by (a) choosing the top three steps in terms of validation loss for each run (b) selecting the best one in terms of full train set accuracy (including the part that we trained on). 
We perform this procedure—akin to Pareto optimization [JS08]—because the validation set is so small (as the overall set of samples is very small) that it is difficult to select models without overfitting otherwise.", + "bbox": [ + 109, + 343, + 883, + 464 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "We compare with two baselines: training on the full dataset (i.e., training on the entirety of all the data for a single epoch), and LESS (we use the data selected according to \"LESS-T\" [XMG+24], following the recommendation of 4 epochs).", + "bbox": [ + 109, + 464, + 883, + 508 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "For model training, we train with ADAM ( $\\beta_{1} = 0.95$ , $\\beta_{2} = 0.975$ , decoupled weight decay as $10^{-5}$ ) and a one-cycle linear schedule starting at $10^{-6}$ of the maximum learning rate, reaching the peak over $25\\%$ of training, then ending at 0.1 of the maximum learning rate. We insert a positive $\\varepsilon_{\\mathrm{root}}$ into the inverse square root term in the ADAM update to prevent metagradient (and to a lesser extent update) blowup (see Eq. 12). The model training is the same across selected data, except that we use $\\varepsilon_{\\mathrm{root}} = 10^{-7}$ for MGD-selected data and $\\varepsilon_{\\mathrm{root}} = 10^{-9}$ for the other runs (we select the optimal parameter for each class of method). We additionally hyperparameter select for the best learning rate across each baseline by minimizing validation set loss; LESS performs best with a smaller learning rate (0.00024 for BBH and 0.00012 for MMLU) than training on the full dataset or with MGD (0.0006 for both). 
We normalize the loss of each training sample by taking the mean across predicted tokens during training, and do not divide by the batch size (important for scaling the $\\varepsilon_{\\mathrm{root}}$ term, but otherwise ADAM is invariant to the scale).", + "bbox": [ + 109, + 508, + 883, + 676 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Selecting smooth model training for MGD. For MGD runs, we jointly select learning rate and $\\varepsilon_{\\mathrm{root}}$ using the smoothness metric of Section 3. We find that the choice of $\\varepsilon_{\\mathrm{root}}$ term is important (just as the choice of $\\varepsilon$ is important in standard ADAM training); choosing a much larger term results in non-smooth training. We also find that metagradients are sensitive to learning rate schedule; choosing a much larger or smaller maximum learning rate results in non-smooth training.", + "bbox": [ + 109, + 694, + 883, + 772 + ], + "page_idx": 33 + }, + { + "type": "table", + "img_path": "images/0ee01eb4fa01b3734087808afd61cf8ee91b915d35fec2c214b7e2bfd450099d.jpg", + "table_caption": [ + "Table 3: Overview of datasets used in IFT dataset selection (from Xia et al. [XMG+24])." + ], + "table_footnote": [], + "table_body": "
Dataset# Shot# Tasksn_targetn_valn_testAnswer TypeType of Task
MMLU5575722818,721Letter optionsKnowledge/Recall
BBH3232346920COT and answerReasoning
", + "bbox": [ + 153, + 811, + 844, + 877 + ], + "page_idx": 33 + }, + { + "type": "page_number", + "text": "34", + "bbox": [ + 488, + 935, + 509, + 946 + ], + "page_idx": 33 + }, + { + "type": "table", + "img_path": "images/ff814a44df13818d805fb28d14ad88facd4bb1f5a2e442555905552236576ed4.jpg", + "table_caption": [ + "Table 4: Details of IFT training datasets." + ], + "table_footnote": [], + "table_body": "
Dataset# InstanceSourced fromPrompt Len.Completion Len.
FLAN V2100,000NLP datasets and human-written instructions355.731.2
CoT100,000NLP datasets and human-written CoTs26653.2
Dolly15,011Human-written from scratch118.191.3
Open Assistant 155,668Human-written from scratch34.8212.5
", + "bbox": [ + 114, + 114, + 950, + 210 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "IFT results", + "text_level": 1, + "bbox": [ + 112, + 234, + 199, + 248 + ], + "page_idx": 34 + }, + { + "type": "image", + "img_path": "images/ab6a03663afe6a2c4a405104df1bd0065c08dd9c6f6cb7f26c3a6a34d3c3783e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 138, + 332, + 491, + 520 + ], + "page_idx": 34 + }, + { + "type": "image", + "img_path": "images/36df9944129f621ff0e1ecdf764695eb6b36f0e8d3eaba57ccb6745917a7a037.jpg", + "image_caption": [ + "Figure 13: MGD dataset selection improves the validation loss over metagradient steps, demonstrating our method's efficacy. However, the gap between loss on samples MGD directly optimizes on and the validation samples widens over the number of iterates, and there is overfitting depending on the number of steps taken." + ], + "image_footnote": [], + "bbox": [ + 501, + 329, + 854, + 518 + ], + "page_idx": 34 + }, + { + "type": "page_number", + "text": "35", + "bbox": [ + 488, + 934, + 509, + 946 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "E Accuracy-degrading data poisoning", + "text_level": 1, + "bbox": [ + 112, + 88, + 537, + 109 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "E.1 Background on Gradient Cancelling attack", + "text_level": 1, + "bbox": [ + 112, + 119, + 545, + 140 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "We briefly review the Gradient Cancelling attack [LKY23] used as a baseline in our experiments. We refer the reader to the original paper for details. Here we highlight the key ideas.", + "bbox": [ + 111, + 146, + 883, + 176 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "At a high level: Gradient Cancelling (GC) explicitly aims at making a specific malicious parameter configuration reachable through retraining on the poisoned dataset. 
The attack operates in two phases:", + "bbox": [ + 111, + 176, + 883, + 209 + ], + "page_idx": 35 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Parameter Generation: The attacker generates a target malicious model parameter independently, often using a direct parameter corruption method like Gradient-based Parameter Corruption (GradPC) [LKY23]. The end result of this phase is a target model parameter $\\theta_{p}$ that achieves low accuracy on the test set, but is close to the original parameter $\\theta_0$ derived from training on the clean dataset.", + "2. Poison Data Crafting: In the second phase, GC finds values of the poison data that induce a near-zero gradient at the target parameter $\\theta_{p}$ . This is achieved by solving a gradient cancellation optimization problem: specifically, GC minimizes the total gradient of the loss function (with respect to the model parameters) evaluated over the combined (clean and poisoned) dataset, aiming to ensure that the gradient at the malicious parameter $\\theta_{p}$ approaches zero." + ], + "bbox": [ + 130, + 219, + 883, + 367 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "E.2 Metasmooth hyperparameters", + "text_level": 1, + "bbox": [ + 112, + 385, + 433, + 405 + ], + "page_idx": 35 + }, + { + "type": "table", + "img_path": "images/b60c76d8a268a432841f42e53cfdbdc3bf20196ce9a9c75b40fd38fb39467449.jpg", + "table_caption": [ + "Table 5: Hyperparameters used in the ResNet-9 [Jor24] CIFAR-10 poisoning experiments. The augmentations used are normalization, random horizontal flip, and random translate (2 pixels)" + ], + "table_footnote": [], + "table_body": "
HyperparameterValue
Learning rate0.5
β10.85
Weight decay10-5
Exclude BatchNormTrue
OptimizerSGD
Batch size250
Epochs18
Starting learning rate fraction0.5
Relative min. learning rate10000
Scheduler max. iterations50000
Nesterov momentumTrue
BatchNorm ε10-5
BatchNorm momentum0.5
Final biasTrue
Width multiplier2.0
Final scale0.125
Initial scale2.0
Batchnorm locationBefore activation
Activation functionGELU
Pooling typeAverage
Test-time augmentationTrue
", + "bbox": [ + 308, + 468, + 687, + 820 + ], + "page_idx": 35 + }, + { + "type": "page_number", + "text": "36", + "bbox": [ + 488, + 935, + 509, + 948 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "F LR optimization", + "text_level": 1, + "bbox": [ + 112, + 87, + 330, + 109 + ], + "page_idx": 36 + }, + { + "type": "table", + "img_path": "images/526431798295effe5a381666eea095e355d33e12f099452c4e2cca80fa48e073.jpg", + "table_caption": [ + "Table 6: The grid search was run over all 528 combinations of the hyperparameter values below." + ], + "table_footnote": [], + "table_body": "
ParameterValues
Peak learning rate[7.0, 7.5, 8.0, 8.5, 9.0, 9.5, 10.0, 10.5, 11.0, 11.5, 12.0]
Initial LR multiplier[0.05, 0.15, 0.25, 0.35, 0.45, 0.55]
Final LR multiplier[0.05, 0.15, 0.25, 0.35, 0.45, 0.55]
LR peak time[0.25, 0.5, 0.75]
", + "bbox": [ + 225, + 161, + 769, + 256 + ], + "page_idx": 36 + }, + { + "type": "image", + "img_path": "images/fec6b010bfcae93afb53b0c60c137838b889e707f0941f8e9d65d3df201207b2.jpg", + "image_caption": [ + "MGD step", + "Figure 14: Graphs of our learned LR schedules." + ], + "image_footnote": [], + "bbox": [ + 240, + 309, + 759, + 542 + ], + "page_idx": 36 + }, + { + "type": "page_number", + "text": "37", + "bbox": [ + 488, + 935, + 509, + 946 + ], + "page_idx": 36 + } +] \ No newline at end of file diff --git a/data/2025/2503_13xxx/2503.13751/2a994aa1-2c31-48af-8c61-ad007e40c304_model.json b/data/2025/2503_13xxx/2503.13751/2a994aa1-2c31-48af-8c61-ad007e40c304_model.json new file mode 100644 index 0000000000000000000000000000000000000000..98d1f78ab5a75a016e0bbb2c1400fc8c1c0cb56a --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/2a994aa1-2c31-48af-8c61-ad007e40c304_model.json @@ -0,0 +1,5697 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.256, + 0.058, + 0.718 + ], + "angle": 270, + "content": "arXiv:2503.13751v1 [stat.ML] 17 Mar 2025" + }, + { + "type": "title", + "bbox": [ + 0.17, + 0.136, + 0.828, + 0.165 + ], + "angle": 0, + "content": "Optimizing ML Training with Metagradient Descent" + }, + { + "type": "text", + "bbox": [ + 0.257, + 0.186, + 0.74, + 0.226 + ], + "angle": 0, + "content": "Logan Engstrom\\*1, Andrew Ilyas\\*2†, Benjamin Chen\\*1, Axel Feldmann\\*1, William Moses\\*3, Aleksander Madry\\*1" + }, + { + "type": "text", + "bbox": [ + 0.321, + 0.232, + 0.678, + 0.251 + ], + "angle": 0, + "content": "*Equal contribution \\( {}^{1} \\) MIT, \\( {}^{2} \\) Stanford, \\( {}^{3} \\) UIUC" + }, + { + "type": "title", + "bbox": [ + 0.468, + 0.28, + 0.531, + 0.294 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.152, + 0.299, + 0.846, + 0.399 + ], + "angle": 0, + "content": "A major challenge in training large-scale machine learning models is configuring the training process to maximize model 
performance, i.e., finding the best training setup from a vast design space. In this work, we unlock a gradient-based approach to this problem. We first introduce an algorithm for efficiently calculating metagradient gradients through model training at scale. We then introduce a \"smooth model training\" framework that enables effective optimization using metagradient. With metagradient descent (MGD), we greatly improve on existing dataset selection methods, outperform accuracy-degrading data poisoning attacks by an order of magnitude, and automatically find competitive learning rate schedules." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.421, + 0.291, + 0.44 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.453, + 0.885, + 0.559 + ], + "angle": 0, + "content": "How should I clean my data? What architecture should I use? Training large-scale (i.e., deep) machine learning models entails making many design decisions. When making such decisions, typical practice is to exhaustively search over a small set of standard options. For example, we might try a few well-known data cleaning heuristics, construct a grid over a hyperparameters, and choose the options that yield the best models. However, given that this process explores only a small part of the overall design space (e.g., one can construct \\(2^{n}\\) possible training datasets from a pool of \\(n\\) candidate datapoints), it is unlikely that this approach really yields the optimal training configuration." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.56, + 0.884, + 0.711 + ], + "angle": 0, + "content": "How can we find optimal (or at least, better) training configurations? To do so, we take the optimization perspective on designing model training. From this well-studied perspective, deciding on a training configuration—or as we will call it, a set of metaparameters—is just a high-dimensional optimization problem. 
The input space of this problem comprises all possible metaparameter choices, including which datapoints to train on, what model architecture to use, and how to initialize model weights. The objective function takes in a set of metaparameters, trains a machine learning model according to those metaparameters, and then returns a target metric evaluated on that model (e.g., test accuracy). From this perspective, any procedure for selecting metaparameters—including the typical practice of grid-searching over standard options—is just an optimization algorithm, whose goal is to maximize the objective function with respect to the (high-dimensional) input." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.71, + 0.882, + 0.801 + ], + "angle": 0, + "content": "Given that selecting metaparameters is \"just\" a high-dimensional optimization problem, a natural tool to consider is the gradient. After all, in many contexts, gradients offer a more effective approach to maximizing high-dimensional functions than grid search. Indeed, for a sufficiently \"well-behaved\" function \\( f(x) \\) with gradient \\( \\nabla f(x) \\), we can optimize \\( f \\) by iteratively updating \\( x \\) in the direction of \\( \\nabla f(x) \\). This insight suggests a generic recipe for selecting metaparameters: first, make the objective differentiable with respect to the metaparameters; second, update via gradient steps." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.801, + 0.882, + 0.862 + ], + "angle": 0, + "content": "Now, the idea of using gradients to search for metaparameters is not new. Indeed, there is a substantial line of work that aims to optimize metaparameters (e.g., architectures, regularizers, or data augmentation schemes) with gradient-based methods [MDA15; LSY18; LVD20]. However, such methods have not managed to scale beyond relatively small settings. 
This state of affairs prompts our main question:" + }, + { + "type": "text", + "bbox": [ + 0.252, + 0.867, + 0.741, + 0.884 + ], + "angle": 0, + "content": "Can we scalably configure model training using gradient-based methods?" + }, + { + "type": "page_footnote", + "bbox": [ + 0.132, + 0.898, + 0.633, + 0.912 + ], + "angle": 0, + "content": "\\(^{\\dagger}\\)Work done at MIT EECS. Correspondence to {engstrom,ailyas,benchen}@mit.edu." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.134, + 0.093, + 0.368, + 0.282 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.373, + 0.094, + 0.62, + 0.282 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.626, + 0.092, + 0.868, + 0.281 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.112, + 0.297, + 0.884, + 0.329 + ], + "angle": 0, + "content": "Figure 1: Our proto-algorithm, metagradient descent (MGD), uses gradients to achieve state-of-the-art performance across a variety of applications, including data selection and data poisoning." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.353, + 0.289, + 0.369 + ], + "angle": 0, + "content": "1.1 Contributions" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.378, + 0.884, + 0.409 + ], + "angle": 0, + "content": "In this work, we answer this question in the affirmative, adding \"gradient descent on metaparameters\" to the large-scale machine learning toolkit. Along the way, we will face—and address—two main challenges." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.409, + 0.884, + 0.469 + ], + "angle": 0, + "content": "First, existing methods for computing metagradients do not scale. In response, we devise an algorithm, REPLAY, that can take metagradients in large-scale settings. 
By combining reverse-mode autodifferentiation (AD) with an efficient data structure, REPLAY can calculate exact metagradients for models with billions of parameters and thousands of training steps." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.469, + 0.884, + 0.53 + ], + "angle": 0, + "content": "Second, we find that metagradients of standard training routines are not necessarily helpful for optimization, which we connect to non-smoothness of the metaparameter optimization landscape. Borrowing tools from convex optimization, we devise a framework for designing \"metasmooth\" training routines that do admit helpful metagradients." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.53, + 0.883, + 0.577 + ], + "angle": 0, + "content": "Addressing the challenges above unlocks a simple recipe for solving a broad range of machine learning tasks: (a) frame the task as a continuous optimization problem over metaparameters; (b) design a metasmooth training routine; (c) perform metagradient descent (MGD). Applying this recipe:" + }, + { + "type": "text", + "bbox": [ + 0.137, + 0.585, + 0.88, + 0.616 + ], + "angle": 0, + "content": "- In the DataComp-small11 competition [GIF+24], we achieve state-of-the-art pre-training data selection for CLIP (2x larger performance improvement than the previous DataComp-small1 leader [Eco24]);" + }, + { + "type": "text", + "bbox": [ + 0.137, + 0.625, + 0.881, + 0.671 + ], + "angle": 0, + "content": "- In the context of data selection for instruction tuning (as introduced by Xia et al. [XMG+24]), we substantially improve on data selection for Gemma-2B (outperforming existing selection methods as well as full-data training);" + }, + { + "type": "text", + "bbox": [ + 0.137, + 0.68, + 0.881, + 0.728 + ], + "angle": 0, + "content": "- In the accuracy-degrading data poisoning setting (defined by Huber [Hub64] and pioneered by Lu et al. 
[LKY22] for deep neural networks), we improve attacks on DNNs by an order of magnitude, dropping CIFAR-10 accuracy from \\(92\\% \\rightarrow 78\\%\\) (the best previous attack [LKY23] only reduces accuracy to \\(91\\%\\));" + }, + { + "type": "text", + "bbox": [ + 0.137, + 0.736, + 0.881, + 0.767 + ], + "angle": 0, + "content": "- For the task of hyperparameter optimization, we efficiently find a competitive CIFAR-10 learning rate schedule (matching the performance of a schedule found by grid search)." + }, + { + "type": "list", + "bbox": [ + 0.137, + 0.585, + 0.881, + 0.767 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.112, + 0.801, + 0.525, + 0.823 + ], + "angle": 0, + "content": "2 Scalably computing metagradients" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.833, + 0.884, + 0.88 + ], + "angle": 0, + "content": "In this section we present REPLAY, an algorithm for computing metagradients of large-scale iterative ML algorithms. We first detail the setting, then discuss existing approaches to computing metagradients, and conclude by describing REPLAY." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.345, + 0.091, + 0.654, + 0.175 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.297, + 0.178, + 0.405, + 0.194 + ], + "angle": 0, + "content": "Training setup" + }, + { + "type": "image_caption", + "bbox": [ + 0.437, + 0.181, + 0.545, + 0.194 + ], + "angle": 0, + "content": "Trained model" + }, + { + "type": "image_caption", + "bbox": [ + 0.56, + 0.181, + 0.7, + 0.195 + ], + "angle": 0, + "content": "Observed behavior" + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.208, + 0.885, + 0.271 + ], + "angle": 0, + "content": "Figure 2: An illustration of the metagradient. 
We embed a given aspect of the training setup (e.g., the training dataset, or optimizer hyperparameters) into a continuous metaparameter vector \\( z \\in \\mathbb{R}^d \\). This metaparameter defines a model \\( \\mathcal{A}(z) \\) by way of the learning algorithm \\( \\mathcal{A} \\), which in turn defines an output \\( \\phi(z) \\). The metagradient \\( \\nabla_z \\phi(\\mathcal{A}(z)) \\in \\mathbb{R}^d \\) is the gradient of this model output with respect to the metaparameter." + }, + { + "type": "title", + "bbox": [ + 0.112, + 0.294, + 0.376, + 0.312 + ], + "angle": 0, + "content": "2.1 What is a metagradient?" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.32, + 0.884, + 0.365 + ], + "angle": 0, + "content": "Training a machine learning model is a two-step process. First, we decide on a training setup—we must pick, for example, a neural network architecture, a training dataset, and an optimizer for training. Second, we apply the algorithm defined by this training setup to train a model." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.365, + 0.884, + 0.397 + ], + "angle": 0, + "content": "Our overall goal in this paper is to optimize model behavior as a function of the training setup (or, as we call it, the metaparameters) using gradient-based methods. To this end, we define the following notation:" + }, + { + "type": "text", + "bbox": [ + 0.137, + 0.405, + 0.88, + 0.466 + ], + "angle": 0, + "content": "- Let \\(\\mathbf{z} \\in \\mathbb{R}^n\\) be a vector of continuous metaparameters representing the aspects of the training setup we aim to optimize. For example, if we only want to adjust the learning rate and weight decay of SGD then \\(n = 2\\). We handle discrete metaparameters (e.g., choice of training data) by finding a continuous relaxation (e.g., importance weights)." 
+ }, + { + "type": "text", + "bbox": [ + 0.137, + 0.475, + 0.881, + 0.506 + ], + "angle": 0, + "content": "- Let \\(\\mathcal{A}\\) be an algorithm mapping \\(\\mathbf{z}\\) to a trained machine learning model; we assume all other aspects of the training setup outside \\(\\mathbf{z}\\) are fixed and thus part of the algorithm \\(\\mathcal{A}\\)." + }, + { + "type": "text", + "bbox": [ + 0.137, + 0.515, + 0.881, + 0.56 + ], + "angle": 0, + "content": "- Finally, let \\(\\phi\\) be an output function mapping a model \\(\\theta\\) to a vector \\(\\phi(\\theta) \\in \\mathbb{R}\\). For example, \\(\\phi(\\theta)\\) might represent the validation loss of the model \\(\\theta\\). We require that \\(\\phi\\) be differentiable with respect to \\(\\theta\\), but otherwise make no assumptions on \\(\\phi\\)." + }, + { + "type": "list", + "bbox": [ + 0.137, + 0.405, + 0.881, + 0.56 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.571, + 0.884, + 0.601 + ], + "angle": 0, + "content": "With this notation in place, we define the training function \\( f \\coloneqq \\phi \\circ \\mathcal{A} \\) mapping the training setup \\( \\mathbf{z} \\) directly to the output function \\( \\phi \\) evaluated on the corresponding model." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.602, + 0.881, + 0.633 + ], + "angle": 0, + "content": "Finally, the metagradient is the gradient of the training function with respect to the metaparameters, \\(\\nabla_{\\mathbf{z}}f(\\mathbf{z})\\). Intuitively, the metagradient defines the \"direction of steepest ascent\" in metaparameter space." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.651, + 0.884, + 0.681 + ], + "angle": 0, + "content": "Our focus: iterative algorithms. 
To efficiently compute the metagradient, we restrict our focus to cases where the algorithm \\(\\mathcal{A}\\) is iterative, i.e., when it can be written in the form" + }, + { + "type": "equation", + "bbox": [ + 0.326, + 0.693, + 0.884, + 0.731 + ], + "angle": 0, + "content": "\\[\n\\underbrace {\\mathcal {A} (z) := \\mathbf {s} _ {T}} _ {\\text {m o d e l s t a t e a f t e r T s t e p s}}, \\quad \\text {w h e r e} \\quad \\underbrace {\\mathbf {s} _ {t + 1} : = h _ {t} (\\mathbf {s} _ {t} , \\mathbf {z})} _ {\\text {o p t i m i z e r s t e p t}}. \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.741, + 0.884, + 0.788 + ], + "angle": 0, + "content": "Here, \\(\\mathbf{s}_t\\) is the optimizer state at step \\(t\\) (with \\(\\mathbf{s}_0\\) being the initial state) and \\(h_t\\) is the update mapping from state \\(t\\) to state \\(t + 1\\). The form of (1) captures most large-scale training algorithms. For example, if the setup \\(\\mathbf{z} \\in \\mathbb{R}^T\\) is a per-step learning rate, and the algorithm \\(\\mathcal{A}\\) is full batch gradient descent, then each update \\(h_t\\) is" + }, + { + "type": "equation", + "bbox": [ + 0.404, + 0.799, + 0.591, + 0.815 + ], + "angle": 0, + "content": "\\[\nh _ {t} (\\mathbf {s} _ {t}, \\mathbf {z}) := \\mathbf {s} _ {t} - z _ {t} \\nabla \\ell (\\mathbf {s} _ {t}),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.826, + 0.884, + 0.859 + ], + "angle": 0, + "content": "where \\( z_{t} \\) is the learning rate at step \\( t \\), \\( \\ell \\) is the training loss, and the state \\( \\mathbf{s}_t \\) comprises the parameters at step \\( t \\). For more complex algorithms like Adam [KB15], the state \\( \\mathbf{s}_t \\) includes terms like gradient moments." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.113, + 0.09, + 0.586, + 0.108 + ], + "angle": 0, + "content": "2.2 Warmup: Metagradients via autodifferentiation" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.115, + 0.884, + 0.252 + ], + "angle": 0, + "content": "A key primitive we leverage to calculate metagradients is automatic differentiation (AD)—a standard tool for taking gradients through computer-defined functions. AD takes gradients by decomposing functions into elementary operations with known derivatives, then combining these derivatives using the chain rule. Concretely, AD operates in two passes: a \"forward pass,\" which executes the function of interest and stores intermediate products for each elementary operation; and a \"backward pass,\" which calculates the gradient by propagating chains of partial derivatives using these stored products. For the purposes of this paper, we will view AD as a black box that calculates the gradient of a many-to-one function (i.e., any \\( f: \\mathbb{R}^d \\to \\mathbb{R} \\)) at a given point using only a small constant factor more time than calculating the function itself (along with the space cost of storing the necessary forward-pass products)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.252, + 0.884, + 0.328 + ], + "angle": 0, + "content": "What does this have to do with metagradients? Well, seeing as how training itself is a computer-defined function, AD is a natural tool for calculating the metagradient. The main challenge, as we discuss in the sequel, is that AD-based approaches to calculating the metagradient tend to be too resource-intensive for the large-scale machine learning algorithms we consider. In the remainder of this section we build up background before finally describing REPLAY, our algorithm for scalably computing (exact) metagradients." 
+ }, + { + "type": "text", + "bbox": [ + 0.111, + 0.346, + 0.884, + 0.391 + ], + "angle": 0, + "content": "Approach #1: Direct AD. The direct approach to calculating metagradients exploits the fact that nearly any learning algorithm is itself a sequence of differentiable computer-defined operations—meaning the training function \\( f \\) is also differentiable." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.392, + 0.884, + 0.468 + ], + "angle": 0, + "content": "However, operationalizing this observation to compute metagradients turns out to be challenging. The reason is that AD stores intermediate products for each operation. The amount of data stored thus scales with the number of operations in the function of interest. In the case of our training function \\( f \\), this number encompasses all the operations used to train a machine learning model. As a result, even in a toy scenario like MNIST training, computing metagradients with naive AD would require storing terabytes of data." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.485, + 0.884, + 0.532 + ], + "angle": 0, + "content": "Approach #2: Exploiting structure with step-wise AD. A more efficient method for calculating the metagradient, step-wise AD, leverages the structure of iterative learning algorithms [Wer90; MDA15; FDF+17]. 
Recall from (1) that such algorithms take the form" + }, + { + "type": "equation", + "bbox": [ + 0.356, + 0.542, + 0.64, + 0.559 + ], + "angle": 0, + "content": "\\[\n\\mathcal {A} (\\mathbf {z}) := \\mathbf {s} _ {T}, \\quad \\text {w h e r e} \\quad \\mathbf {s} _ {t + 1} := h _ {t} (\\mathbf {s} _ {t}, \\mathbf {z}).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.568, + 0.884, + 0.6 + ], + "angle": 0, + "content": "Algebraic manipulation (in particular, using the chain rule, the law of the total derivative, and the identity \\(\\mathbf{s}_t = h_{t-1}(\\mathbf{s}_{t-1}, \\mathbf{z})\\)) allows us to write the metagradient over an iterative algorithm as" + }, + { + "type": "equation", + "bbox": [ + 0.318, + 0.61, + 0.884, + 0.679 + ], + "angle": 0, + "content": "\\[\n\\frac {\\partial f (\\mathbf {z})}{\\partial \\mathbf {z}} = \\frac {\\partial \\phi (\\mathcal {A} (\\mathbf {z}))}{\\partial \\mathbf {z}} = \\sum_ {t = 1} ^ {T} \\underbrace {\\overbrace {\\partial \\phi (\\mathbf {s} _ {T})} ^ {A _ {t}} . \\overbrace {\\partial \\mathbf {s} _ {t}} ^ {\\partial \\phi (\\mathbf {s} _ {T})}} _ {B _ {t}}, \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.688, + 0.884, + 0.75 + ], + "angle": 0, + "content": "where we have introduced the notation \\( A_{t} \\) and \\( B_{t} \\) for notational convenience. Step-wise AD computes the metagradient by calculating each term in the sum of (2) one at a time. 
For each term, the main challenge lies in computing \\( A_{t} \\), since given \\( A_{t} \\) we can straightforwardly compute \\( B_{t} \\) (the entire term) by differentiating through a single model update, i.e.," + }, + { + "type": "equation", + "bbox": [ + 0.323, + 0.759, + 0.673, + 0.791 + ], + "angle": 0, + "content": "\\[\nB _ {t} := A _ {t} \\cdot \\frac {\\partial h _ {t - 1} (\\mathbf {s} _ {t - 1} , \\mathbf {z})}{\\partial \\mathbf {z}} = \\frac {\\partial (A _ {t} \\cdot h _ {t - 1} (\\mathbf {s} _ {t - 1} , \\mathbf {z}))}{\\partial \\mathbf {z}},\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.799, + 0.884, + 0.831 + ], + "angle": 0, + "content": "which is just a single call to our assumed \"AD oracle\" on the function \\(\\mathbf{z} \\mapsto A_t \\cdot h_{t-1}(\\mathbf{s}_{t-1}, \\mathbf{z})\\). Computing the \\(A_t\\) terms is less straightforward as we need to relate \\(s_t\\) and \\(s_T\\); to do so, we exploit the recurrence" + }, + { + "type": "equation", + "bbox": [ + 0.292, + 0.839, + 0.884, + 0.873 + ], + "angle": 0, + "content": "\\[\nA _ {t} := \\frac {\\partial \\phi (\\mathbf {s} _ {T})}{\\partial \\mathbf {s} _ {t}} = \\frac {\\partial \\phi (\\mathbf {s} _ {T})}{\\partial \\mathbf {s} _ {t + 1}} \\cdot \\frac {\\partial h _ {t} (\\mathbf {s} _ {t} , \\mathbf {z})}{\\partial \\mathbf {s} _ {t}} = \\frac {\\partial \\left(A _ {t + 1} \\cdot h _ {t} (\\mathbf {s} _ {t} , \\mathbf {z})\\right)}{\\partial \\mathbf {s} _ {t}}, \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.882, + 0.884, + 0.914 + ], + "angle": 0, + "content": "making \\( A_{t} \\) straightforward to compute (again, a single \"AD oracle\" call) given \\( A_{t+1} \\). Step-wise AD exploits this fact to successively calculate the gradient with respect to each state, from state \\( T \\) down to state 0." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.148, + 0.111, + 0.852, + 0.375 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.388, + 0.884, + 0.482 + ], + "angle": 0, + "content": "Figure 3: The lazy \\( k \\)-ary tree structure for traversing optimizer states in reverse order, with \\( k = 2 \\). Recall that \\( n \\) is the number of states (parameterized such that \\( n = T + 1 \\)). Each node represents the correspondingly numbered state. We give an example of the traversal using the blue arrows in the figure, which denote the traversal path up to state \\( s_{\\frac{3n}{4} + 1} \\). The gray cylinders indicate the states that are stored when the traversal is at state \\( s_{\\frac{3n}{4} + 1} \\); the other states are not stored at this point in the traversal. Traversing this structure requires storing \\( \\mathcal{O}(\\log(n)) \\) state and computing \\( \\mathcal{O}(n \\log(n)) \\) optimizer steps—compared to \\( n \\) for simply training." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.506, + 0.884, + 0.612 + ], + "angle": 0, + "content": "Bringing these ingredients together, the algorithm executes as follows. As a preprocessing step, it trains the model and stores all intermediate states \\(\\mathbf{s}_0,\\dots ,\\mathbf{s}_T\\). Then, the algorithm calculates and sums the terms in (2). It first computes \\(A_{T}\\coloneqq \\partial \\phi (\\mathbf{s}_{T}) / \\mathbf{s}_{T}\\), the gradient of the output function \\(\\phi\\) with respect to the final state. 
Then, the algorithm steps through \\(\\mathbf{s}_{T - 1},\\ldots ,\\mathbf{s}_0\\) in reverse order, calculating (a) the gradient with respect to each state \\(A_{t}\\) (via (3)) and (b) the gradient with respect to \\(\\mathbf{z}\\) at that step \\(B_{t}\\) (via (2), using the previously calculated gradient with respect to that state). AD calculates both quantities--each requires differentiating over only one train step. Finally, the algorithm returns the final metagradient as the sum of the terms." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.612, + 0.884, + 0.645 + ], + "angle": 0, + "content": "Despite improving storage overhead compared to \"direct AD\", step-wise AD is still too space-intensive at scale. After all, this algorithm saves every optimizer state." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.663, + 0.233, + 0.679 + ], + "angle": 0, + "content": "2.3 REPLAY" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.687, + 0.885, + 0.824 + ], + "angle": 0, + "content": "REPLAY is our algorithm for efficiently and exactly computing metagradients. It uses \\(\\mathcal{O}(k\\log_k(T))\\) space and requires running the learning algorithm \\(\\mathcal{A}\\) a total of \\(1 + \\log_{k}(T)\\) times, with \\(k\\) a user-chosen constant. The main idea is to make the space-intensive subroutine of step-wise AD—a reverse-order traversal of the optimizer states at each step—much more efficient. After all, step-wise AD stores all the states to reverse traverse them. REPLAY modifies step-wise AD to traverse states in less space by exploiting a simple observation: when training is deterministic, one can reinstantiate an optimizer state \\(\\mathbf{s}_t\\) by \"replaying\" training from a fixed point \\(t' < t\\) at the compute cost of \\(t - t'\\) training steps. 
For example, one simple scheme saves every other state, then \"replays\" the remaining states when (reverse) traversing; this routine stores \\(T/2\\) states but computes an extra \\(T/2\\) model updates compared to storing all the states." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.824, + 0.884, + 0.903 + ], + "angle": 0, + "content": "REPLAY performs a reverse-order traversal the optimizer states while balancing the compute cost of \"replaying\" training with the storage cost of saving states. We use a combination of deterministic training (fixing data ordering, data augmentation, and any other randomness in the training process) and an efficient data structure (similar to a segment tree; see Figure 3) to reverse-order traverse the optimizer states with \\(\\mathcal{O}(k\\log_k(T))\\) space and an additional \\(T\\log_k(T)\\) model steps." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.092, + 0.888, + 0.214 + ], + "angle": 0, + "content": "Specifically, REPLAY recursively saves and replays training states. The algorithm splits the training trajectory into \\( k \\) segments, performs the full training routine while saving only the start of each segment, then recurses into each segment (in reverse) to retrieve the states in reverse-order. The recursion depth bottoms out at \\( \\log_k(T) \\), at which point the algorithm has \\( k \\) consecutive optimizer states in memory; the algorithm then backpropagates along this segment, before deleting all these states from memory and then reinstantating the next \\( k \\)-length segment of optimizer states. We provide additional details on the algorithm in Appendix A.2. REPLAY unlocks computing large-scale metagradients by requiring only logarithmic storage and additional compute time." 
+ }, + { + "type": "text", + "bbox": [ + 0.111, + 0.222, + 0.885, + 0.285 + ], + "angle": 0, + "content": "Remark 1 (Connection to rematerialization). In a broad sense, both REPLAY and step-wise AD above can be viewed as special cases of a classical approach in AD (and computing broadly) known as rematerialization [CAC+81; BCT92; ZP00; GW08; CXZ+16]. To our knowledge, however, REPLAY is the first application of this particular rematerialization technique to the problem of computing metagradients through model training." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.293, + 0.884, + 0.355 + ], + "angle": 0, + "content": "Remark 2 (Reversible learning). An alternative approach to calculating metagradients that does not save any state is reversible learning [MDA15], for which one can \"invert\" previous training states from future ones. We focus here on general (non-reversible) learning algorithms for two reasons: first, even simple algorithms such as SGD without momentum are non-reversible; second, reversibility in practice introduces numerical precision issues." + }, + { + "type": "title", + "bbox": [ + 0.112, + 0.378, + 0.591, + 0.399 + ], + "angle": 0, + "content": "3 Designing metasmooth training routines" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.41, + 0.884, + 0.486 + ], + "angle": 0, + "content": "Given a training function \\( f \\), REPLAY enables us to compute metagradients \\( \\nabla f(\\mathbf{z}) \\) for any setup \\( \\mathbf{z} \\). Can we immediately use these metagradients to optimize model training setups? The answer is (generally) no: we find that applying REPLAY to a function \\( f \\) representing a standard model training and evaluation routine yields metagradients that are often \\( \\pm \\infty \\)-valued and generally unhelpful for optimization. Indeed, previous work has observed similar issues optimizing over even (very) small-scale training [BSF94; Pea96; MDA15]." 
+ }, + { + "type": "text", + "bbox": [ + 0.111, + 0.486, + 0.884, + 0.562 + ], + "angle": 0, + "content": "In this section, we show that an underlying source of the issue is the landscape of the metaparameter optimization problem. We then present a framework for modifying standard learning algorithms to admit useful metagradient, i.e., to be metasmooth. To use a familiar analogy: just as residual connections and improved initialization schemes can improve optimization in standard deep learning algorithms, our framework introduces an analogous set of modifications to enable optimization with metagradient." + }, + { + "type": "title", + "bbox": [ + 0.112, + 0.581, + 0.55, + 0.6 + ], + "angle": 0, + "content": "3.1 The metaparameter optimization landscape" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.607, + 0.886, + 0.655 + ], + "angle": 0, + "content": "We first review the notion of smoothness from optimization theory, and then adapt it to the setting of metagradients. The resulting metasmoothness metric allows us to quantify (and later, improve) the amenability of the metaparameter optimization problem to gradient-based methods." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.671, + 0.884, + 0.718 + ], + "angle": 0, + "content": "Smoothness. In optimization theory, the basic property of a function that controls how effectively it can be optimized with first-order methods is smoothness. 
Specifically, a function \\( f(\\mathbf{z}) \\) is \\( \\beta \\)-smooth at a point \\( \\mathbf{z} \\) if its gradient \\( \\nabla f \\) satisfies the property that" + }, + { + "type": "equation", + "bbox": [ + 0.327, + 0.727, + 0.884, + 0.746 + ], + "angle": 0, + "content": "\\[\n\\left\\| \\nabla f (\\mathbf {z}) - \\nabla f \\left(\\mathbf {z} ^ {\\prime}\\right) \\right\\| \\leq \\beta \\cdot \\left\\| \\mathbf {z} - \\mathbf {z} ^ {\\prime} \\right\\| \\quad \\text {f o r a l l} \\mathbf {z} ^ {\\prime}, \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.757, + 0.883, + 0.802 + ], + "angle": 0, + "content": "or in other words, if its gradient does not change too quickly around \\(\\mathbf{z}\\). To motivate this definition: if a function \\(f\\) is \\(\\beta\\)-smooth at \\(\\mathbf{z}\\), then a step of gradient descent with step size \\(1 / \\beta\\) will successfully decrease the value of the function:" + }, + { + "type": "equation", + "bbox": [ + 0.347, + 0.81, + 0.65, + 0.845 + ], + "angle": 0, + "content": "\\[\nf \\left(\\mathbf {z} - \\frac {1}{\\beta} \\nabla f (\\mathbf {z})\\right) \\leq f (\\mathbf {z}) - \\frac {1}{2 \\beta} \\| \\nabla f (\\mathbf {z}) \\| ^ {2}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.854, + 0.633, + 0.872 + ], + "angle": 0, + "content": "This guarantee makes \\(\\beta\\)-smoothness a good measure of gradient utility." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.937, + 0.505, + 0.948 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.092, + 0.881, + 0.166 + ], + "angle": 0, + "content": "Metasmoothness. There are two main challenges in adapting the smoothness property to the metagradient setting. First, evaluating (4) requires a search over all possible \\(\\mathbf{z}'\\), which is infeasible. 
Second, even if we could exactly evaluate the left-hand side of (4), it would be difficult to disentangle non-smoothness of the training function \\(f\\) from potential error in metagradient computation (e.g., a numerically unstable operation in REPLAY)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.168, + 0.881, + 0.229 + ], + "angle": 0, + "content": "To sidestep these issues, we propose a metric called metasmoothness, given in Definition 1. Metasmoothness is cheap to compute—requiring only three evaluations of the training function—and does not rely on metagradient computation. For the remainder of this section, we fix a small constant \\( h > 0 \\), and define the corresponding finite-differences estimator of the directional derivative \\( \\Delta_f \\) as" + }, + { + "type": "equation", + "bbox": [ + 0.389, + 0.239, + 0.609, + 0.27 + ], + "angle": 0, + "content": "\\[\n\\Delta_ {f} (\\mathbf {z}; \\mathbf {v}) := \\frac {f (\\mathbf {z} + h \\mathbf {v}) - f (\\mathbf {z})}{h}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.278, + 0.881, + 0.324 + ], + "angle": 0, + "content": "Definition 1 (Metasmoothness of \\( f \\) at \\( \\mathbf{z} \\) towards \\( \\mathbf{v} \\)). Consider a training function \\( f \\) mapping metaparameters \\( \\mathbf{z} \\in \\mathbb{R}^n \\) to model output \\( f(\\mathbf{z}) \\in \\mathbb{R} \\). Given a metaparameter \\( \\mathbf{z} \\) and a vector \\( \\mathbf{v} \\in \\mathbb{R}^n \\), the metasmoothness of \\( f \\) at \\( \\mathbf{z} \\) towards \\( \\mathbf{v} \\) is given by" + }, + { + "type": "equation", + "bbox": [ + 0.369, + 0.333, + 0.882, + 0.368 + ], + "angle": 0, + "content": "\\[\nS _ {h, \\mathbf {v}} (f; \\mathbf {z}) := \\left| \\frac {\\Delta_ {f} (\\mathbf {z} + h \\mathbf {v}) - \\Delta_ {f} (\\mathbf {z})}{h} \\right|. 
\\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.377, + 0.881, + 0.409 + ], + "angle": 0, + "content": "Definition 1 measures the rate of change of the derivative of \\( f(\\mathbf{z}) \\) in the direction of a given vector \\( \\mathbf{v} \\), and is therefore related to \\( \\beta \\)-smoothness in that:" + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.415, + 0.86, + 0.432 + ], + "angle": 0, + "content": "(a) If \\( f \\) is \\( \\beta \\)-smooth at \\( \\mathbf{z} \\), then \\( S_{h,\\mathbf{v}}(f;\\mathbf{z}) \\leq \\beta \\) for any \\( (h,\\mathbf{v}) \\) (so Definition 1 is necessary for smoothness)." + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.441, + 0.88, + 0.472 + ], + "angle": 0, + "content": "(b) If \\(\\lim_{h\\to 0}S_{h,\\mathbf{v}}(f;\\mathbf{z})\\leq \\beta\\) for all \\(\\mathbf{z}\\in \\mathbb{R}^n\\) and \\(\\mathbf{v}\\in \\mathbb{S}^{n - 1}\\), then \\(f\\) is \\(\\beta\\)-smooth everywhere (so a global version of Definition 1 is sufficient for smoothness)." + }, + { + "type": "list", + "bbox": [ + 0.125, + 0.415, + 0.88, + 0.472 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.491, + 0.881, + 0.628 + ], + "angle": 0, + "content": "Empirical metasmoothness. Definition 1 lets us measure the meta-smoothness of a training function \\( f \\) at a particular metaparameter \\( \\mathbf{z} \\) (towards a direction \\( \\mathbf{v} \\)). This definition, however, has two shortcomings. First, recall that the training function \\( f \\) is a composition of a learning algorithm \\( \\mathcal{A} \\) and an output function \\( \\phi \\), so the smoothness of \\( f \\) depends on that of both \\( \\mathcal{A} \\) and \\( \\phi \\) (in particular, \\( \\frac{\\partial f}{\\partial \\mathbf{z}} = \\frac{\\partial \\phi}{\\partial \\mathcal{A}} \\cdot \\frac{\\partial \\mathcal{A}}{\\partial \\mathbf{z}} \\)). 
Since the output function \\( \\phi \\) might be unknown ahead of time, we are most interested in measuring the overall metasmoothness of a learning algorithm \\( \\mathcal{A} \\). Second, while the result of (5) does have a concrete basis in optimization theory, it may not be easy to interpret in practice (e.g., what does \\( S = 200 \\) mean?). We address both issues simultaneously by (a) proposing an interpretable \"binarized\" version of Definition 1, and (b) studying metasmoothness in the space of model parameters \\( \\theta \\), instead of the output space." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.635, + 0.881, + 0.68 + ], + "angle": 0, + "content": "Definition 2 (Empirical metasmoothness of \\(\\mathcal{A}\\)). Let \\(\\mathcal{A}\\) be a learning algorithm which maps metaparameters \\(\\mathbf{z} \\in \\mathbb{R}^n\\) to model parameters \\(\\theta \\in \\mathbb{R}^d\\), let \\(\\mathbf{z}\\) be a metaparameter vector, and let \\(\\mathbf{v}\\) be a given direction. 
Let \\(\\mathbf{d} \\in \\mathbb{R}^d\\) be the per-coordinate variation in \\(\\theta\\), i.e.," + }, + { + "type": "equation", + "bbox": [ + 0.404, + 0.69, + 0.589, + 0.707 + ], + "angle": 0, + "content": "\\[\n\\mathbf {d} = \\left| \\mathcal {A} (\\mathbf {z} + 2 h \\mathbf {v}) - \\mathcal {A} (\\mathbf {z}) \\right|\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.716, + 0.502, + 0.733 + ], + "angle": 0, + "content": "The empirical \\((h,\\mathbf{v})\\)-metasmoothness of \\(\\mathcal{A}\\) at \\(\\mathbf{z}\\) is given by" + }, + { + "type": "equation", + "bbox": [ + 0.256, + 0.742, + 0.884, + 0.775 + ], + "angle": 0, + "content": "\\[\n\\widehat {S} _ {h, \\mathbf {v}} (\\mathcal {A}; \\mathbf {z}) = \\operatorname {s i g n} \\left(\\Delta_ {\\mathcal {A}} (\\mathbf {z}; \\mathbf {v})\\right) ^ {\\top} \\cdot \\operatorname {d i a g} \\left(\\frac {\\mathbf {d}}{\\| \\mathbf {d} \\| _ {1}}\\right) \\cdot \\operatorname {s i g n} \\left(\\Delta_ {\\mathcal {A}} (\\mathbf {z} + h \\mathbf {v}; \\mathbf {v})\\right), \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.783, + 0.355, + 0.8 + ], + "angle": 0, + "content": "weights each parameter by its range." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.807, + 0.881, + 0.913 + ], + "angle": 0, + "content": "Intuitively, (6) measures the agreement in sign between the (finite-difference approximation of the) metagradient in the direction of \\(\\mathbf{v}\\) at \\(\\mathbf{z}\\) and at \\(\\mathbf{z} + h\\mathbf{v}\\), averaged across parameter coordinates and weighted by the variation in each coordinate. Taking a weighted average of sign agreements ensures that \\(\\widehat{S} \\in [-1,1]\\) (making it easier to interpret than Definition 1). The \\(\\mathrm{diag}(\\mathsf{d} / \\| \\mathsf{d}\\|_1)\\) term weights each agreement proportionally to the scale of the corresponding parameter change (downweighting, e.g., coordinates \\(i\\) that are essentially constant). 
Finally, observe that Definition 2 is efficient to compute in practice: it requires only three calls to the learning algorithm \\(\\mathcal{A}\\)." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.947 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.125, + 0.087, + 0.388, + 0.314 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.258, + 0.324, + 0.278, + 0.337 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.399, + 0.088, + 0.589, + 0.282 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.602, + 0.087, + 0.876, + 0.313 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.702, + 0.324, + 0.724, + 0.337 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.349, + 0.884, + 0.427 + ], + "angle": 0, + "content": "Figure 4: (a) For a variety of training configurations of a ResNet-9 model, we plot metasmoothness (Def. 2) against test accuracy. Strategies such as increasing width, placing batch normalization before activations, and scaling down network outputs consistently improve metasmoothness, at a minor cost to accuracy. (b) Smoother training configurations can be optimized via metagradients more effectively. Here, as in Section 4.3, we use metagradients to gradient ascend on validation loss." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.447, + 0.884, + 0.495 + ], + "angle": 0, + "content": "Remark 3. Ideally, recalling the smoothness definition (4), we would evaluate metasmoothness in all possible directions \\(\\mathbf{v}\\) and all points \\(\\mathbf{z}\\). Empirically, we find in the sequel (Section 3.2) that this single-direction approximation at a single point \\(\\mathbf{z}\\) still yields a useful estimate of metasmoothness (e.g., one that correlates with metagradients utility)." 
+ }, + { + "type": "title", + "bbox": [ + 0.112, + 0.518, + 0.55, + 0.536 + ], + "angle": 0, + "content": "3.2 Estimating and improving metasmoothness" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.543, + 0.884, + 0.633 + ], + "angle": 0, + "content": "Having established a method for quantifying metasmoothness, we turn to the practical question: how can we design learning algorithms that are amenable to metagradient optimization? To answer this question, we introduce a straightforward framework: given a learning algorithm, explore a fixed menu of possible modifications to the training setup, and choose the combination that maximizes empirical metasmoothness. In practice, we find that this framework allows us to slightly modify learning algorithms in a way that makes them amenable to first-order methods." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.634, + 0.884, + 0.695 + ], + "angle": 0, + "content": "As a case study, we study the task of training ResNet-9 on the CIFAR-10 dataset [Kri09]. We let the metaparameters \\(\\mathbf{z}\\) be a perturbation to the pixels of 1000 random training images (so \\(\\mathbf{z} \\in \\mathbb{R}^{1000 \\times 32 \\times 32 \\times 3}\\)). We estimate the empirical metasmoothness of different learning algorithms \\(\\mathcal{A}\\) at \\(\\mathbf{z} = \\mathbf{0}\\) using Definition 2. Concretely, we proceed as follows for each learning algorithm \\(\\mathcal{A}\\):" + }, + { + "type": "text", + "bbox": [ + 0.132, + 0.699, + 0.667, + 0.716 + ], + "angle": 0, + "content": "1. Let \\( \\mathbf{z}_0 = \\mathbf{0} \\) be the metaparameter corresponding to the original dataset." + }, + { + "type": "text", + "bbox": [ + 0.132, + 0.722, + 0.524, + 0.739 + ], + "angle": 0, + "content": "2. Sample a random perturbation vector \\(\\mathbf{v} \\sim \\mathcal{N}(0,1)\\)." + }, + { + "type": "text", + "bbox": [ + 0.132, + 0.745, + 0.507, + 0.762 + ], + "angle": 0, + "content": "3. 
Compute the empirical metasmoothness (6), i.e.," + }, + { + "type": "list", + "bbox": [ + 0.132, + 0.699, + 0.667, + 0.762 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.161, + 0.768, + 0.884, + 0.8 + ], + "angle": 0, + "content": "(a) Let \\(\\theta_0\\coloneqq \\mathcal{A}(\\mathbf{z}_0)\\), \\(\\theta_h\\coloneqq \\mathcal{A}(\\mathbf{z}_0 + h\\cdot \\mathbf{v})\\), and \\(\\theta_{2h}\\coloneqq \\mathcal{A}(\\mathbf{z}_0 + 2h\\cdot \\mathbf{v})\\) be the model parameters that result from training with training dataset perturbations \\(\\mathbf{z}_0,\\mathbf{z}_0 + h\\mathbf{v}\\), and \\(\\mathbf{z}_0 + 2h\\mathbf{v}\\), respectively." + }, + { + "type": "text", + "bbox": [ + 0.161, + 0.801, + 0.471, + 0.817 + ], + "angle": 0, + "content": "(b) Compute the approximate derivatives" + }, + { + "type": "list", + "bbox": [ + 0.161, + 0.768, + 0.884, + 0.817 + ], + "angle": 0, + "content": null + }, + { + "type": "equation", + "bbox": [ + 0.321, + 0.823, + 0.75, + 0.841 + ], + "angle": 0, + "content": "\\[\n\\Delta_ {\\mathcal {A}} (\\mathbf {z} _ {0}; \\mathbf {v}) = \\left(\\theta_ {h} - \\theta_ {0}\\right) / h, \\quad \\Delta_ {\\mathcal {A}} (\\mathbf {z} _ {0} + h \\mathbf {v}; \\mathbf {v}) = \\left(\\theta_ {2 h} - \\theta_ {h}\\right) / h.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.162, + 0.847, + 0.884, + 0.864 + ], + "angle": 0, + "content": "(c) Compute the weighting vector \\(\\mathbf{d} = |\\theta_{2h} - \\theta_0|\\), and compute the average metasmoothness (6), i.e.," + }, + { + "type": "equation", + "bbox": [ + 0.284, + 0.868, + 0.787, + 0.902 + ], + "angle": 0, + "content": "\\[\n\\widehat {S} _ {h, \\mathbf {v}} (\\mathcal {A}; z _ {0}) = \\operatorname {s i g n} \\left(\\Delta_ {\\mathcal {A}} \\left(\\mathbf {z} _ {0} + h \\mathbf {v}; \\mathbf {v}\\right)\\right) ^ {\\top} \\cdot \\operatorname {d i a g} \\left(\\frac {\\mathbf {d}}{\\| \\mathbf {d} \\| _ {1}}\\right) \\cdot \\operatorname {s i g n} 
\\left(\\Delta_ {\\mathcal {A}} \\left(\\mathbf {z} _ {0}; \\mathbf {v}\\right)\\right).\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.152, + 0.09, + 0.49, + 0.284 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.502, + 0.091, + 0.837, + 0.282 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.152, + 0.291, + 0.49, + 0.481 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.291, + 0.839, + 0.481 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.494, + 0.884, + 0.586 + ], + "angle": 0, + "content": "Figure 5: The effect of metasmoothness on the optimization landscape. Each plot above visualizes the loss landscape of a (deterministic) learning algorithm \\(\\mathcal{A}\\), with the \\(x\\)- and \\(y\\)-axes representing additive perturbations to 1000 examples in the training set and the \\(z\\)-axis representing the resulting model's loss on the test example given in the title. In each row, the left plot is a non-smooth algorithm, and the right plot is a smooth algorithm (as per Definition 2) evaluated on the same example. Overall, empirical metasmoothness seems to strongly correlate with qualitative landscape smoothness. See Figure 12 for more examples." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.612, + 0.884, + 0.732 + ], + "angle": 0, + "content": "Metasmooth learning algorithms. We apply the procedure above to estimate the metasmoothness of learning algorithms induced by different design choices (batch size, network width, BatchNorm placement, gradient scaling), and report the results in Figure 4 (left). On one hand, \"standard\" learning algorithms (i.e., those designed without metasmoothness in mind) are not metasmooth. 
On the other hand, our investigation reveals central factors driving metasmoothness. In addition to \"standard\" hyperparameters such as batch size and network width playing a role, we find that placing Batch Normalization layers prior to nonlinearities (instead of after) and scaling the final layer output are both crucial to metasmoothness. Note that the modifications we consider above are not exhaustive—see Appendix E for the full training setup." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.733, + 0.884, + 0.779 + ], + "angle": 0, + "content": "Finally, in Figure 5, we plot the optimization landscape of both metasmooth (right) and non-metasmooth (left) models. We find that the landscapes of metasmooth models are much smoother and—qualitatively—more straightforward to optimize." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.797, + 0.885, + 0.904 + ], + "angle": 0, + "content": "Metasmoothness/performance tradeoffs? Figure 4 (left) relates metasmoothness to model accuracy for the considered learning algorithms. While there is no clear trend, the top-performing learning algorithms are not always metasmooth. However, the trade-off is not too severe: the most metasmooth algorithms still achieve near-optimal accuracy. Furthermore, it is possible that with additional searching we could identify even more accurate metasmooth models. Taken together with our previous experiment, our results suggest that jointly searching over metasmoothness and model accuracy is a general recipe for designing learning algorithms that are both performant and metasmooth. 
Finally, as we discuss in Section 5, a fruitful avenue" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.091, + 0.887, + 0.123 + ], + "angle": 0, + "content": "for future work may be to design metasmooth learning algorithms directly, i.e., without relying on stability heuristics or grid search." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.141, + 0.885, + 0.278 + ], + "angle": 0, + "content": "Does metasmoothness aid downstream optimization? Recall that our motivation for studying metasmoothness is to develop learning algorithms that we can optimize the metaparameters of via metagradients (using first-order methods). We started with the notion of \\(\\beta\\)-smoothness from optimization theory, and we adapted it to the setting of metagradients by making a series of approximations and modifications. The final question we address is: does our final notion of metasmoothness actually predict the utility of metagradients for optimization? Figure 4 (right) demonstrates that metasmoothness strongly predicts our ability to optimize the metaparameters of a given learning algorithm. We use metagradients (computed by REPLAY) to gradient ascend on validation loss with respect to the metaparameters \\(\\mathbf{z}\\), and measure the change in model loss." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.301, + 0.293, + 0.323 + ], + "angle": 0, + "content": "4 Applications" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.333, + 0.885, + 0.442 + ], + "angle": 0, + "content": "In this section, apply metagradients to three problems in machine learning: selecting training data, poisoning training data, and searching for hyperparameters. 
In each setting we follow the same recipe: we frame the task as an optimization problem, modify the learning algorithm of interest to be smooth, then solve by first-order optimizing with meta-gradients—which we refer to, in a catch-all manner across algorithms, as metagradient descent (MGD). In particular: we substantially improve on existing dataset selection methods (Section 4.1, Section 4.2), perform the first effective accuracy-degrading data poisoning attack (Section 4.3), and discover one-cycle learning rate schedules with MGD (Section 4.4)." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.459, + 0.476, + 0.478 + ], + "angle": 0, + "content": "4.1 Selecting multimodal training data" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.485, + 0.885, + 0.593 + ], + "angle": 0, + "content": "Curating a training dataset from a mass of unfiltered data is a necessary and influential step in any large-scale machine learning pipeline. Deciding how to curate such a dataset is a challenging problem that has attracted substantial recent interest [FiW+22; ATS+23; EFM24; GIF+24]. In this section, we frame pre-training data selection as an optimization problem, and then solve this problem by first-order optimizing with metagradient. Applying our method to the DataComp-small benchmark [GIF+24], we greatly improve on the state-of-the-art (our improvement over state-of-the-art is roughly the same as the improvement of state-ofthe-art over training on random data)." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.61, + 0.212, + 0.627 + ], + "angle": 0, + "content": "4.1.1 Setup" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.633, + 0.885, + 0.697 + ], + "angle": 0, + "content": "The goal of dataset selection is to choose a training data subset (out of a broad pool of data) that maximizes trained machine learning model performance. Given this goal, dataset selection has a natural interpretation as a combinatorial metaparameter optimization problem. 
In particular, in the language of Section 2.1, for a training set of size \\( n \\), let" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.703, + 0.881, + 0.735 + ], + "angle": 0, + "content": "(a) the metaparameters \\(\\mathbf{c} \\in \\mathcal{C} \\coloneqq \\mathbb{Z}_{\\geq 0}^{n}\\) be non-negative data counts representing the number of times each training sample repeats in the training data;" + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.744, + 0.881, + 0.775 + ], + "angle": 0, + "content": "(b) the algorithm \\(\\mathcal{A}\\) be a standard large-scale learning procedure, which runs on a training set comprising \\(c_{i}\\) copies of each sample \\(i\\) for \\(i\\in [n]\\);" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.785, + 0.735, + 0.8 + ], + "angle": 0, + "content": "(c) the output function \\(\\phi\\) be the loss of the trained model on a target distribution \\(D\\)." + }, + { + "type": "list", + "bbox": [ + 0.125, + 0.703, + 0.881, + 0.8 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.809, + 0.818, + 0.827 + ], + "angle": 0, + "content": "Then, defining \\( f(\\mathbf{c}) \\coloneqq \\phi(\\mathcal{A}(\\mathbf{c})) \\) (as in Section 2.1), our goal is to find the data counts \\( \\mathbf{c}^* \\) that solve" + }, + { + "type": "equation", + "bbox": [ + 0.427, + 0.837, + 0.884, + 0.865 + ], + "angle": 0, + "content": "\\[\n\\mathbf {c} ^ {*} := \\underset {\\mathbf {c} \\in \\mathcal {C}} {\\arg \\min } f (\\mathbf {c}). 
\\tag {7}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.935, + 0.51, + 0.948 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.113, + 0.092, + 0.417, + 0.108 + ], + "angle": 0, + "content": "4.1.2 Gradient descent on training data" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.115, + 0.885, + 0.178 + ], + "angle": 0, + "content": "Metagradients let us directly minimize the target task loss (7) with respect to the choice of training data. At a high level, our algorithm operates as follows: we start with a randomly chosen set of training data, then iteratively update the dataset selection using metagradients with respect to importance weights placed on each training datapoint. The specifics of our method are in Algorithm 1; we describe its core ideas below." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.195, + 0.884, + 0.256 + ], + "angle": 0, + "content": "Idea 1: A surrogate algorithm. We cannot use metagradients to optimize (7) directly, because the metaparameters of interest \\( \\mathbf{c} \\) are discrete counts (and so the algorithm \\( \\mathcal{A} \\) is non-differentiable with respect to \\( \\mathbf{c} \\)). To circumvent this problem, we relax \\( \\mathcal{A} \\): we define a surrogate algorithm \\( \\mathcal{A}_{\\mathbf{c}}^{\\prime} \\) that takes in a continuous metaparameter \\( \\mathbf{z} \\in \\mathbb{R}^{n} \\), whose metagradient we can compute, then optimize using the metagradient on \\( \\mathcal{A}_{\\mathbf{c}}^{\\prime} \\)." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.256, + 0.885, + 0.392 + ], + "angle": 0, + "content": "This surrogate learning algorithm \\(\\mathcal{A}_{\\mathrm{c}}^{\\prime}\\) maps a metaparameter \\(\\mathbf{z} \\in \\mathbb{R}^{n}\\) (representing a perturbation to training data weights) to a machine learning model. 
The surrogate is defined by a set of counts \\(\\mathbf{c} \\in \\mathbb{Z}_{+}^{n}\\), and a hyperparameter \\(k\\) denoting a specific training iteration, both of which we bake into the surrogate algorithm itself. Given a metaparameter \\(\\mathbf{z} \\in \\mathbb{R}^{n}\\), the algorithm \\(\\mathcal{A}_{\\mathrm{c}}^{\\prime}\\) trains a model \"as usual\" using the fixed counts \\(\\mathbf{c}\\). That is, it makes \\(c_{i}\\) copies of each training sample \\(i\\), shuffles and partitions the data into batches, and then at each iteration minimizes the batch loss with a step—just as the original learning algorithm \\(\\mathcal{A}\\). At iteration \\(k\\), however, in addition to the original loss on the \\(k\\)-th batch, the algorithm upweights each training sample \\(i\\) according to the metaparameter \\(z_{i}\\). In other words, the objective at iteration \\(t\\) of the surrogate algorithm \\(\\mathcal{A}_{\\mathrm{c}}^{\\prime}\\) is" + }, + { + "type": "equation", + "bbox": [ + 0.304, + 0.392, + 0.693, + 0.432 + ], + "angle": 0, + "content": "\\[\n\\ell_ {t} ^ {\\prime} (\\theta) := \\left\\{ \\begin{array}{l l} \\sum_ {x \\in t ^ {\\text {t h}} \\text {b a t c h}} \\ell (x; \\theta) & \\text {i f} t \\neq k \\\\ \\sum_ {x \\in t ^ {\\text {t h}} \\text {b a t c h}} \\ell (x; \\theta) + \\sum_ {i = 1} ^ {n} z _ {i} \\ell (x _ {i}; \\theta) & \\text {i f} t = k \\end{array} \\right.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.437, + 0.453, + 0.453 + ], + "angle": 0, + "content": "where \\(\\ell (x;\\theta)\\) is the training loss on example \\(x\\)" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.453, + 0.884, + 0.499 + ], + "angle": 0, + "content": "Observe that when \\( \\mathbf{z} = \\mathbf{0}_n \\), the algorithm \\( \\mathcal{A}_{\\mathbf{c}}' \\) is identical to the standard learning algorithm \\( \\mathcal{A} \\). 
And while \\( \\mathcal{A} \\) was a function of (nondifferentiable) discrete data counts \\( \\mathbf{c} \\), \\( \\mathcal{A}_{\\mathbf{c}}' \\) is differentiable with respect to its input \\( \\mathbf{z} \\), and so we can compute the metagradient" + }, + { + "type": "equation", + "bbox": [ + 0.411, + 0.509, + 0.585, + 0.531 + ], + "angle": 0, + "content": "\\[\n\\mathbf {g} := \\nabla_ {\\mathbf {z}} \\phi \\big (\\mathcal {A} _ {\\mathbf {c}} ^ {\\prime} (\\mathbf {z}) \\big) \\big | _ {\\mathbf {z} = \\mathbf {0} _ {n}}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.54, + 0.884, + 0.618 + ], + "angle": 0, + "content": "Intuitively, the entries of the metagradient \\(\\mathbf{g}\\) capture the effect of adding an infinitesimal amount of each training sample \\(i\\) to the training data at iteration \\(k\\). A positive entry \\(g_{i}\\) indicates that adding an infinitesimal amount of sample \\(i\\) to the training data would increase the loss, and a negative entry indicates that adding an infinitesimal amount of sample \\(i\\) to the training data would decrease the loss; the slot at \\(i\\) represents the (estimated) effect of adding a copy of sample \\(i\\) to the training data at every batch containing the sample." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.634, + 0.884, + 0.666 + ], + "angle": 0, + "content": "Idea 2: Block coordinate descent. We then use the metagradient \\(\\mathbf{g}\\) to iteratively update our selected dataset. We update data counts as" + }, + { + "type": "equation", + "bbox": [ + 0.343, + 0.678, + 0.884, + 0.695 + ], + "angle": 0, + "content": "\\[\n\\mathbf {c} \\leftarrow \\mathbf {c} - \\operatorname {s i g n} (\\mathbf {g}) \\odot \\mathbf {m}, \\quad \\mathbf {m} \\sim \\text {B e r n o u l l i} (p), \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.705, + 0.884, + 0.766 + ], + "angle": 0, + "content": "where \\( p \\) is a hyperparameter controlling the fraction of sample counts to update. 
This algorithm resembles a block coordinate descent algorithm [OR00], with the main difference being that we take signed gradient steps with step size 1 (projected onto non-negative integers) to ensure that the counts remain well-defined. As a result, \\( p \\) implicitly controls the algorithm's step size." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.766, + 0.884, + 0.813 + ], + "angle": 0, + "content": "Applying (8) concludes a single optimization step. By repeating this process of estimating the metagradient, updating our counts vector, then constructing a new training dataset, we iteratively improve the selected data. Pseudocode for our algorithm can be found in Algorithm 1." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.83, + 0.223, + 0.844 + ], + "angle": 0, + "content": "4.1.3 Results" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.854, + 0.884, + 0.9 + ], + "angle": 0, + "content": "We evaluate our data selection algorithm using DataComp [GIF+24], a standardized framework for evaluating data selection methods for multimodal models. Algorithm 1 greatly improves on the state-of-the-art for the benchmark. Below, we describe the setting, outline our method, and conclude with our results." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "code_caption", + "bbox": [ + 0.127, + 0.095, + 0.656, + 0.111 + ], + "angle": 0, + "content": "Algorithm 1: Dataset selection using using metagradient descent (MGD)." 
+ }, + { + "type": "algorithm", + "bbox": [ + 0.12, + 0.113, + 0.663, + 0.251 + ], + "angle": 0, + "content": "Input: initial data counts \\(\\mathbf{c}\\in \\mathbb{Z}_{\\geq 0}^{n}\\) , learning algorithm \\(\\mathcal{A}\\) output function Hyperparameters: step size \\(p\\) # opt steps \\(T\\) iteration number \\(k\\) for \\(t\\gets 1\\) to \\(T\\) do \n2 \\(\\mathbf{z}\\gets \\mathbf{0}_n / /\\) Build input to surrogate \n3 \\(\\mathbf{g}\\leftarrow \\frac{\\partial\\phi(\\mathcal{A}_c'(\\mathbf{z}))}{\\partial\\mathbf{z}} / /\\) Calculate metagradient using REPLAY \n4 m<- sample from Bernoulli(p) // Sample indices to step on \n5 c<- c-sign(g) \\(\\odot\\) m// Take optimization step \n6 Return c// Return final data counts" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.282, + 0.885, + 0.404 + ], + "angle": 0, + "content": "Setting. DataComp [GIF+24] is a multimodal model training competition and benchmark for evaluating dataset selection methods. DataComp provides a fixed learning algorithm chosen in advance by the organizers and a large fixed candidate pool of internet data. The goal is to choose a subset of the candidate pool—possibly with repeated datapoints—that yields the best-performing model after training with the given learning algorithm, as measured by a predetermined set of 38 benchmarks. Given a submission subset, the mean score on the evaluation datasets for a model trained with that subset is taken as the final \"score.\" DataComp offers four separate \"scales\" requiring different amounts of compute; we focus on the small scale in this paper due to compute limitations." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.422, + 0.884, + 0.53 + ], + "angle": 0, + "content": "Method. 
We select data with MGD (Algorithm 1) to minimize loss on data on a \"target set\" that is distributionally similar to the DataComp benchmark tasks, and select hyperparameters with a held-out \"validation set.\" In particular, we construct target and validation sets by taking samples from the DataComp evaluation tasks with extra samples available beyond those used in the DataComp test set (e.g., ImageNet, one of the tasks in DataComp, has a training set in addition to the test set evaluated in DataComp). See Appendix C for the exact details of the target and validation sets, the precise hyperparameters used with Algorithm 1, and a discussion on scalability (including further engineering details on executing our algorithm efficiently)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.547, + 0.884, + 0.609 + ], + "angle": 0, + "content": "Results. MGD greatly outperforms the current state-of-the-art: the difference in accuracy between MGD and the current best method is roughly as large as the difference between the previous state-of-the-art (EcoDatum [Eco24]) and training on randomly chosen data (cf. Figure 6). Inspecting scores over the course of the optimization in Figure 6, we find that only a few steps are necessary to outperform previous methods." + }, + { + "type": "image", + "bbox": [ + 0.129, + 0.62, + 0.477, + 0.774 + ], + "angle": 0, + "content": null + }, + { + "type": "table", + "bbox": [ + 0.496, + 0.622, + 0.857, + 0.715 + ], + "angle": 0, + "content": "
MethodScoreΔ
- - - Baseline: No filtering0.13-
- - - Best baseline from [GIF+24]0.17+0.04
- - - Previous SOTA [Eco24]0.18+0.05
- - - MGD-DS (ours)0.22+0.09
" + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.789, + 0.885, + 0.852 + ], + "angle": 0, + "content": "Figure 6: MGD dataset selection greatly outperforms existing methods (improving over the previous SOTA by as much as the previous SOTA improves over no filtering at all). We compare DataComp scores for MGD (over optimization steps), training on the entire candidate pool, the best baseline originally proposed by DataComp, and the previous SOTA [Eco24]." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.935, + 0.51, + 0.948 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.142, + 0.092, + 0.481, + 0.234 + ], + "angle": 0, + "content": null + }, + { + "type": "table", + "bbox": [ + 0.495, + 0.092, + 0.882, + 0.196 + ], + "angle": 0, + "content": "
BBH [SSS+22]MMLU [HBB+20]
Acc.ΔAcc.Δ
All Data35.2%-41.2%-
■ LESS35.2%-0.0%41.8%+0.5%
■ MGD-DS36.7%+1.5%42.5%+1.3%
" + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.245, + 0.885, + 0.293 + ], + "angle": 0, + "content": "Figure 7: MGD dataset selection outperforms baselines. Comparing to training on all the data: it achieves over double the margin of improvement of LESS on MMLU, and improves by \\(+1.5\\%\\) on BBH (where LESS does not improve at all). The \\(\\Delta\\) column denotes improvement over not filtering." + }, + { + "type": "title", + "bbox": [ + 0.112, + 0.316, + 0.458, + 0.334 + ], + "angle": 0, + "content": "4.2 Selecting instruction-tuning data" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.341, + 0.892, + 0.417 + ], + "angle": 0, + "content": "In our second application, we select training data for instruction fine-tuning (IFT) using the same MGD-based method detailed in Algorithm 1 of Section 4.1. As with multimodal data, training on the \"right\" post-training data (such as the \"right\" IFT data) can greatly impact deployment-time model performance [LFX+24; DJP+24; TGZ+23]. MGD improves over baselines at choosing IFT data for MMLU [HBK+21], a general knowledge task, and BBH [SSS+22], a reasoning/chain-of-thought task." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.417, + 0.886, + 0.448 + ], + "angle": 0, + "content": "To overview this section: we start by detailing the setting, then describe the specifics of our MGD instantiation before concluding with results." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.466, + 0.884, + 0.573 + ], + "angle": 0, + "content": "Setting. We adopt the setting of LESS [XMG+24]. Here, the goal is to select a training data subset from four combined IFT datasets (Flan V2 [LHV+23], CoT [WWS+22], DOLLY [CHM+23], and Open Assistant 1 [KKR+24]) to maximize accuracy on a given target task. We consider two target tasks from LESS: MMLU (which comprises multiple choice questions spanning a variety of disciplines) and BBH (a 23 task subset of BIG-Bench [SRR+22]). 
In this setup, the data selector can access samples from each task built from the in-context learning prompts. Following Xia et al. [XMG+24], we fine-tune a 128-width LoRA [HY20] (in our work, on Gemma-2B [TMH+24]). See Appendix D for full details on the tasks and learning algorithm." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.591, + 0.884, + 0.653 + ], + "angle": 0, + "content": "Method. We split up the available task samples into two sets—a \"target\" set and a \"validation\" set—then select data with MGD (via Algorithm 1) by minimizing causal language modeling loss on the \"target\" set of samples. We select hyperparameters like step size and number of SGD iterations with the validation set; see Appendix D for more details." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.67, + 0.884, + 0.76 + ], + "angle": 0, + "content": "Results. Comparing with two baselines—training on all the data and training with data selected with LESS [XMG+24]—MGD yields strictly better training dataset selections for each target task (cf. Figure 7). MGD improves most on BBH, a reasoning task, compared to the best baseline \\((+1.5\\%)\\) accuracy). On MMLU, a knowledge-based task, we outperform baselines by slightly less compared to the best baseline \\((+0.8\\%)\\); one explanation is that selecting IFT data lends more control over reasoning than over intrinsic knowledge available in the LM." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.762, + 0.884, + 0.837 + ], + "angle": 0, + "content": "Beyond raw accuracy, we inspect losses across each step of the optimization process. Overall, our method improves validation loss over MGD steps (cf. Appendix Figures 13), but also exhibits signs of overfitting. Given intuition from overparameterized learning, we might expect this behavior: we optimize a total of 270,679 \"weights\"—each corresponding to a count for a datapoint—to minimize loss on only a handful of test samples (cf. Table 3)." 
+ }, + { + "type": "title", + "bbox": [ + 0.112, + 0.857, + 0.557, + 0.876 + ], + "angle": 0, + "content": "4.3 Accuracy-degrading (Huber) data poisoning" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.882, + 0.884, + 0.914 + ], + "angle": 0, + "content": "The goal of an accuracy-degrading data poisoning attack is to degrade the performance of a machine learning model by corrupting a small fraction of its training data. Here, the considered threat model is as follows." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.51, + 0.948 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.112, + 0.091, + 0.881, + 0.151 + ], + "angle": 0, + "content": "The attacker is given a training set \\(\\mathbf{X} = \\{x_{1},\\dots,x_{n}\\}\\) drawn from a distribution \\(P\\), and a function \\(\\theta (\\cdot)\\) mapping training data to model parameters (representing the learning algorithm used by the victim). The attacker's goal is to return a new training set \\(\\mathbf{X}'\\) that differs from \\(\\mathbf{X}\\) in at most \\(\\varepsilon \\cdot n\\) datapoints while inducing model parameters \\(\\theta (\\mathbf{X}')\\) that perform as poorly as possible on a freshly drawn test set \\(T\\) from \\(P\\)." + }, + { + "type": "text", + "bbox": [ + 0.136, + 0.152, + 0.678, + 0.168 + ], + "angle": 0, + "content": "Formally, the adversary aims to solve the following optimization problem:" + }, + { + "type": "equation", + "bbox": [ + 0.392, + 0.178, + 0.884, + 0.207 + ], + "angle": 0, + "content": "\\[\n\\arg \\max _ {\\tilde {x} _ {1}, \\dots , \\tilde {x} _ {n _ {p}}} \\mathbb {E} _ {x \\sim P} [ \\ell (x; \\theta (\\mathbf {X} ^ {\\prime})) ], \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.218, + 0.884, + 0.295 + ], + "angle": 0, + "content": "where \\(\\mathbf{X}^{\\prime} = \\{\\widetilde{x}_1,\\dots ,\\widetilde{x}_{n_p},x_{n_p + 1},\\dots ,x_n\\}\\) and \\(n_p = \\lfloor \\varepsilon n\\rfloor\\) . 
Note that our goal is to degrade the overall model performance on a test set \\(\\mathbf{X}_{test}\\) drawn from \\(P\\) (in particular, the test set \\(\\mathbf{X}_{test}\\) is unknown to the adversary). In this way, this setting resembles the Huber contamination model in statistics [Hub64], and is strictly more challenging than the usual data poisoning settings in deep learning (e.g., backdoor attacks [GDG17] or attacks that target specific test examples [KL17])." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.295, + 0.884, + 0.386 + ], + "angle": 0, + "content": "For large-scale machine learning models, finding strong adversaries has proven challenging—standard loss-minimizing learning algorithms seem quite robust to maliciously-inserted data [LKY23]. In fact, the first non-trivial accuracy degradation data poisoning attacks on deep models were pioneered by Lu et al. [LKY22] and later improved upon by the same set of authors [LKY23]. Broadly speaking, even constructing attacks that degrade the overall performance of a learning algorithm by more than the adversarial budget \\(\\varepsilon\\) has proven challenging." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.404, + 0.212, + 0.42 + ], + "angle": 0, + "content": "4.3.1 Setup" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.428, + 0.884, + 0.49 + ], + "angle": 0, + "content": "We observe that (9) is a continuous optimization problem to which we can directly apply our metagradient framework, approximating the expectation over \\(P\\) by a finite-sample average over a validation set \\(\\mathbf{X}_{val}\\). 
In particular, given a (randomly shuffled) training set \\(\\mathbf{X}\\) and validation set \\(\\mathbf{X}_{val}\\), we set up the following metaparameter optimization problem (see Section 2.1):" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.498, + 0.668, + 0.515 + ], + "angle": 0, + "content": "(a) the metaparameter \\(\\mathbf{z} \\in \\mathcal{X}^{n_p}\\) is a tensor of \\(n_p = \\lfloor \\varepsilon n \\rfloor\\) poisoned samples;" + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.524, + 0.883, + 0.557 + ], + "angle": 0, + "content": "(b) the algorithm \\(\\mathcal{A}\\) maps metaparameters \\(\\mathbf{z}\\) to a trained model \\(\\mathcal{A}(\\mathbf{z})\\) by replacing the first \\(n_p\\) samples in \\(\\mathbf{X}\\) with the samples in \\(\\mathbf{z}\\) and then training on the resulting dataset;" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.566, + 0.663, + 0.581 + ], + "angle": 0, + "content": "(c) the output function \\(\\phi\\) evaluates average loss on the validation set \\(\\mathbf{X}_{val}\\)." + }, + { + "type": "list", + "bbox": [ + 0.125, + 0.498, + 0.883, + 0.581 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.6, + 0.245, + 0.615 + ], + "angle": 0, + "content": "4.3.2 Algorithm" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.624, + 0.884, + 0.675 + ], + "angle": 0, + "content": "To apply our first-order methods to this problem, we start by initializing the poisoned data to be exactly the first \\( n_p \\) samples in \\( \\mathbf{X} \\), \\( \\mathbf{z}^{(0)} \\coloneqq \\{\\widetilde{x}_i^{(0)} = x_i : i \\in [n_p]\\} \\). 
Then, for \\( t = 1, \\dots, T \\), we sample a minibatch \\( \\mathbf{X}_{val}^{(t)} \\) from \\( \\mathbf{X}_{val} \\) and use REPLAY to compute the metagradient" + }, + { + "type": "equation", + "bbox": [ + 0.373, + 0.685, + 0.622, + 0.741 + ], + "angle": 0, + "content": "\\[\n\\mathbf{g}_{t} = \\frac{d}{d\\mathbf{z}}\\left(\\sum_{x\\in \\mathbf{X}_{val}^{(t)}}\\ell (x;\\mathcal{A}(\\mathbf{z}^{(t - 1)}))\\right),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.751, + 0.579, + 0.768 + ], + "angle": 0, + "content": "and update the poisoned data using (projected) gradient ascent:" + }, + { + "type": "equation", + "bbox": [ + 0.373, + 0.778, + 0.622, + 0.804 + ], + "angle": 0, + "content": "\\[\n\\mathbf {z} ^ {(t)} = \\Pi_ {\\mathcal {X}} \\left(\\mathbf {z} ^ {(t - 1)} + \\eta \\cdot \\operatorname {s i g n} (\\mathbf {g} _ {t})\\right),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.814, + 0.884, + 0.846 + ], + "angle": 0, + "content": "where \\(\\Pi_{\\mathcal{X}}\\) is the projection operator onto the sample space \\(\\mathcal{X}\\). (For example, when \\(\\mathcal{X}\\) is the space of image-label pairs, \\(\\Pi_{\\mathcal{X}}\\) clips images' pixel values to [0, 1] and ensures labels are valid probability distributions.)" + }, + { + "type": "page_footnote", + "bbox": [ + 0.13, + 0.854, + 0.805, + 0.869 + ], + "angle": 0, + "content": "In principle, the adversary can also decide which samples to poison, but for simplicity we consider this \"fixed\" case." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.118, + 0.087, + 0.885, + 0.165 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.293, + 0.175, + 0.704, + 0.192 + ], + "angle": 0, + "content": "Figure 8: Examples of poisoned images from Section 4.3." 
+ }, + { + "type": "image", + "bbox": [ + 0.13, + 0.205, + 0.485, + 0.362 + ], + "angle": 0, + "content": null + }, + { + "type": "table", + "bbox": [ + 0.489, + 0.204, + 0.888, + 0.304 + ], + "angle": 0, + "content": "
ModelAcc.Δ
- - Original model92.0%-
- - GradCancel [LKY23]91.2%-0.80%
- MGD-DP (ours)78.1%-13.9%
1-layer NN (for reference) [CNL11]83.3%-8.7%
" + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.375, + 0.884, + 0.455 + ], + "angle": 0, + "content": "Figure 9: For each iteration of MGD (\\(x\\)-axis), we train a new model from random initialization on a randomly shuffled training set with the current iterate of poisoned data injected. We evaluate the test accuracy (\\(y\\)-axis), and use REPLAY to compute the metagradient. MGD outperforms the best known attack [LKY23] by an order of magnitude and (for reference) results in a model that has the same accuracy as a single-layer neural network trained on random image features [CNL11]." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.478, + 0.248, + 0.492 + ], + "angle": 0, + "content": "4.3.3 Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.501, + 0.884, + 0.561 + ], + "angle": 0, + "content": "We use the CIFAR-10 dataset which consists of 60,000 total images each labeled as one of 10 classes. We partition the data into 40,000 training examples, 10,000 validation examples, and 10,000 test examples. We consider a simple 12-epoch CIFAR-10 training procedure, which reaches \\(92.4\\%\\) accuracy on the CIFAR-10 test set when applied to the 40,000 training examples. See Appendix E for training hyperparameters." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.563, + 0.884, + 0.655 + ], + "angle": 0, + "content": "As described above, we allow the adversary to modify (in-place) a fixed, \\(\\varepsilon\\)-fraction of the training data (in our case, \\(2.5\\%\\)) subject to the constraint that the poisoned images still lay in the valid (normalized) image range of [0, 1]. We compare our approach—direct optimization of the data poisoning objective using metagratings—to the state-of-the-art \"Gradient Cancelling\" (GradCancel) method of Lu et al. [LKY23]. In short, GradCancel is a two-step method which first finds a poorly performing model, then finds poisoned data that induces this model as a minimizer of the training loss. 
We present the full method in Appendix E." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.672, + 0.884, + 0.765 + ], + "angle": 0, + "content": "Results. We find that metagradients enable state-of-the-art data poisoning attacks, degrading accuracy by \\(14\\%\\). In particular, when allowed to corrupt 1000 of the 40,000 training samples \\((2.5\\%)\\), our method reduces test set accuracy to \\(78\\%\\) — for reference, the accuracy of a single-layer neural networked trained on the unmodified CIFAR-10 training set is \\(83\\%\\). The strongest existing data poisoning attack, GradCancel, only reduces test set accuracy by less than \\(1\\%\\). In Figure 8, we visualize the poisoned images and labels found by our method. In Figure 9, we visualize the minibatch loss at each step of the optimization process." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.772, + 0.885, + 0.893 + ], + "angle": 0, + "content": "Remark 4 (Poisoning non-smooth learning algorithms). Recall that to apply metagradient descent, we alter the learning algorithm \\(\\mathcal{A}\\) to be metasmooth (see Section 3.1). This involves making modifications such as switching out max pooling layers for average pooling layers, moving batch normalization layers before activations, and scaling down the last layer's output by a factor of 10. It is natural to ask: how much does the efficacy of our method depend on this smoothness? After all, in practice the adversary cannot control the learning algorithm. To answer this question, we take the poison samples generated by MGD and insert them into the training set of a corresponding standard (i.e., non-metasmooth) learning algorithm. We find that our method still significantly degrades the performance of the model, from \\(92.8\\%\\) to \\(82.6\\%\\) (a drop of \\(10.2\\%\\))." + }, + { + "type": "page_footnote", + "bbox": [ + 0.13, + 0.898, + 0.883, + 0.913 + ], + "angle": 0, + "content": "2Lu et al. 
[LKY23] report a larger drop; the discrepancy is due to our constraint that poisoned data are valid bounded RGB images." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.154, + 0.087, + 0.614, + 0.258 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.647, + 0.089, + 0.846, + 0.171 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.27, + 0.885, + 0.318 + ], + "angle": 0, + "content": "Figure 10: Target and test accuracies of MGD's learning rate schedule over time closely match or exceed those found by a grid search over hundreds of combinations of hyperparameters. \\(95\\%\\) confidence intervals are plotted for MGD's results." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.341, + 0.452, + 0.361 + ], + "angle": 0, + "content": "4.4 Finding a learning rate schedule" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.367, + 0.884, + 0.429 + ], + "angle": 0, + "content": "As a final application, we optimize the learning rate schedule of stochastic gradient descent (SGD) for training a CIFAR-10 classifier. By following the metagradients with respect to the learning rate at each step of training, our procedure matches grid searching over standard learning rate schedules—despite starting with naive hyperparameters (a flat learning rate)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.429, + 0.884, + 0.504 + ], + "angle": 0, + "content": "Unlike the other applications discussed here, metagradients do not unlock state-of-the-art performance. Instead, we discuss this application to illustrate the flexibility of REPLAY, and in particular its ability to optimize metaparameters that do not directly affect the loss landscape (i.e., that only affect the model via the optimization trajectory). 
As we discuss in Section 6, approximate metagradient estimators cannot apply to these metaparameters." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.522, + 0.222, + 0.538 + ], + "angle": 0, + "content": "4.4.1 Setting" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.546, + 0.884, + 0.579 + ], + "angle": 0, + "content": "To put learning rate schedule optimization into the metagradient framework, we parameterize a schedule as a vector \\(\\eta \\in \\mathbb{R}^k\\) comprising \\(k\\) evenly-spaced keypoints, so that the learning rate at iteration \\(t\\) is given by" + }, + { + "type": "equation", + "bbox": [ + 0.304, + 0.588, + 0.884, + 0.625 + ], + "angle": 0, + "content": "\\[\n\\eta (t) = \\eta_ {\\lfloor k t / T \\rfloor} + \\frac {k t / T - \\lfloor k t / T \\rfloor}{\\lceil k t / T \\rceil - \\lfloor k t / T \\rfloor} \\left(\\eta_ {\\lceil k t / T \\rceil} - \\eta_ {\\lfloor k t / T \\rfloor}\\right), \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.632, + 0.471, + 0.649 + ], + "angle": 0, + "content": "i.e., a linear interpolation between the keypoints." + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.656, + 0.537, + 0.674 + ], + "angle": 0, + "content": "(a) the metaparameter \\(\\eta \\in \\mathbb{R}^k\\) is a vector of \\(k\\) keypoints;" + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.682, + 0.884, + 0.713 + ], + "angle": 0, + "content": "(b) the algorithm \\(\\mathcal{A}\\) maps metaparameters \\(\\eta\\) to a trained model \\(\\mathcal{A}(\\eta)\\) by training a model for \\(T\\) iterations with the learning rate schedule defined by (10);" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.722, + 0.663, + 0.739 + ], + "angle": 0, + "content": "(c) the output function \\(\\phi\\) evaluates average loss on the validation set \\(\\mathbf{X}_{val}\\)." 
+ }, + { + "type": "list", + "bbox": [ + 0.125, + 0.656, + 0.884, + 0.739 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.757, + 0.245, + 0.773 + ], + "angle": 0, + "content": "4.4.2 Algorithm" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.78, + 0.884, + 0.827 + ], + "angle": 0, + "content": "Following the theme of the rest of this section, we optimize the metaparameter \\(\\eta\\) directly using MGD. In particular, we initialize the keypoints to be a flat learning rate schedule, and then update the keypoints using the metagradient with respect to the validation loss," + }, + { + "type": "equation", + "bbox": [ + 0.35, + 0.838, + 0.645, + 0.864 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {\\eta} ^ {(t + 1)} = \\boldsymbol {\\eta} ^ {(t)} - \\alpha \\cdot \\operatorname {s i g n} \\left(\\nabla_ {\\boldsymbol {\\eta}} \\phi (\\mathcal {A} (\\boldsymbol {\\eta} ^ {(t)}))\\right).\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.51, + 0.948 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.113, + 0.092, + 0.248, + 0.106 + ], + "angle": 0, + "content": "4.4.3 Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.115, + 0.884, + 0.162 + ], + "angle": 0, + "content": "We aim to select the learning rate schedule that minimizes the expected test set loss. To do so, we reserve \\(90\\%\\) of the CIFAR-10 test set as a \"validation set\" on which we select hyperparameters. We then use the remaining \\(10\\%\\) as a test set. We compare the following two approaches:" + }, + { + "type": "text", + "bbox": [ + 0.137, + 0.171, + 0.88, + 0.232 + ], + "angle": 0, + "content": "- Grid search: We construct a grid over different one cycle learning rate schedules, varying the peak learning rate, starting learning rate, ending learning rate, and peak learning rate time. In total, we consider over 1,000 different learning rate schedules. 
We use the reserved \\(90\\%\\) of the test set to select the best learning rate schedule from the grid." + }, + { + "type": "text", + "bbox": [ + 0.137, + 0.241, + 0.88, + 0.287 + ], + "angle": 0, + "content": "- Metagradient descent (MGD): We run 50 steps of MGD starting from a highly suboptimal flat learning rate schedule, aiming to minimize loss on the reserved \\(90\\%\\) of the test set. We use the last iteration of MGD as our learned learning rate schedule." + }, + { + "type": "list", + "bbox": [ + 0.137, + 0.171, + 0.88, + 0.287 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.296, + 0.884, + 0.327 + ], + "angle": 0, + "content": "We evaluate the performance of each final learning rate schedule on the held-out \\(10\\%\\) test set and average the results over the same set of 5 unseen random seeds." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.346, + 0.884, + 0.422 + ], + "angle": 0, + "content": "Results. Comparing our learned hyperparameter schedule to grid search, as shown in Figure 10, our learned schedule using only 50 steps of MGD matches the performance of the state-of-the-art onecycle schedule found via grid search over more than 1000 configurations. An important caveat, however, is that these numbers are not directly comparable: grid search can be run in parallel across many machines, while steps of MGD must be run sequentially." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.422, + 0.884, + 0.498 + ], + "angle": 0, + "content": "In practice, we do not advise using MGD for optimizing low-dimensional hyperparameters, especially ones that have been thoroughly optimized by grid search (such as CIFAR-10 learning rate schedules [SN17; Pag18; LA19; Jor24]). 
Still, an interesting avenue for future work is to study the utility of MGD for optimizing high-dimensional hyperparameters that are less well-studied, such as per-parameter/layer learning rates/weight decays for language models, attention hyperparameters, or gradient preconditioners." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.522, + 0.273, + 0.54 + ], + "angle": 0, + "content": "5 Discussion" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.554, + 0.807, + 0.57 + ], + "angle": 0, + "content": "In this section, we first present the main limitations of our method and outline future directions." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.588, + 0.884, + 0.696 + ], + "angle": 0, + "content": "Limitations. Although REPLAY is more efficient than existing methods at computing metagradients, it is still non-trivially more expensive than simply training a model once. The main reason is that metagradients require making a backwards pass over a backwards pass. This operation necessarily requires 2-3 times the operations of a backwards pass; furthermore, our current implementation requires float32/tensorfloat32 operations. Finally, standard training operations are often made more efficient by specialized software (e.g., via FlashAttention [DFE+22]); no such software (yet) exists for backwards-over-backwards operations. Beyond computational issues, successfully applying metagradients requires smooth model training." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.713, + 0.885, + 0.851 + ], + "angle": 0, + "content": "Metasmoothness: connections and future directions. While Section 3 describes a general procedure for finding metasmooth learning algorithms, an important future direction is to further explore and understand metasmoothness. 
This includes, for example: (a) characterizing the relationship between metasmoothness and numerical stability (and potentially using techniques from the latter to improve the former); (b) devising improved optimizers and/or architectures that lead directly to metasmooth learning algorithms (akin to skip connections or stable initialization in architecture design); (c) formalizing connections between metasmoothness and other optimization-related phenomena in deep learning [LM20; CKL+22]. A related but separate direction is to explore the possibility of using techniques from non-smooth optimization [Cla90] to perform metagradient descent on non-metasmooth learning algorithms." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.935, + 0.51, + 0.948 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.092, + 0.888, + 0.199 + ], + "angle": 0, + "content": "**Applying metagradients.** Our methods apply to any ML task that requires optimizing with respect to a metaparameter. These include: poisoning data (generated or simply hosted on the internet) so that it cannot be trained on without permission (i.e., by maximizing training loss with respect to the text); selecting better training data at various stages of the model training lifecycle; and designing better model training routines and architectures with first-order methods. Another direction of future work lies in mitigating the computational limitations of our algorithm. Both (a) small-scale proxy-models [HBM+22; EFM24] and (b) low-hanging engineering improvements can likely make calculating metagradients much more efficient." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.222, + 0.3, + 0.24 + ], + "angle": 0, + "content": "6 Related work" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.254, + 0.644, + 0.272 + ], + "angle": 0, + "content": "We overview previous work on calculating and applying meta-gradients." 
+ }, + { + "type": "title", + "bbox": [ + 0.112, + 0.289, + 0.398, + 0.308 + ], + "angle": 0, + "content": "6.1 Calculating metagradients" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.314, + 0.886, + 0.361 + ], + "angle": 0, + "content": "Previous work estimates the metagradient for large-scale models via one of two broad families of methods: implicit differentiation and automatic (explicit) differentiation. Note that in previous literature, synonyms for metagradient include \"hyper-gradient\" and \"outer gradient.\"" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.378, + 0.884, + 0.425 + ], + "angle": 0, + "content": "Implicit differentiation. One family of methods aims to approximate the metagradient. To illustrate the idea behind such approaches, suppose that the learning algorithm \\(\\mathcal{A}\\) returns a model state \\(\\theta\\) that minimizes a strongly convex loss function \\(\\mathcal{L}(z,\\theta)\\). Here, the implicit function theorem tells us that" + }, + { + "type": "equation", + "bbox": [ + 0.23, + 0.434, + 0.887, + 0.537 + ], + "angle": 0, + "content": "\\[\n\\nabla_ {z} f (z) = \\overbrace {\\left(\\frac {d \\phi}{d \\theta} \\right| _ {\\theta = \\mathcal {A} (z)} ^ {\\text {w r t . f i n a l p a r a m s}} \\underbrace {\\left. \\left(\\frac {\\partial^ {2} \\mathcal {L} (z , \\theta)}{\\partial \\theta^ {2}} \\right| _ {\\theta = \\mathcal {A} (z)}\\right) ^ {- 1}} _ {p \\times p \\text {i n v e r s e H e s s i a n o f l o s s w r t . f i n a l p a r a m s}} ^ {1 \\times p \\text {g r a d i e n t o f o u t p u t w r t . f i n a l p a r a m s}} \\overbrace {\\left. \\left(\\frac {\\partial^ {2} \\mathcal {L} (z , \\theta)}{\\partial \\theta \\partial z} \\right| _ {\\theta = \\mathcal {A} (z)}\\right) ^ {- 1}} ^ {p \\times n \\text {J a c o b i a n o f l o s s g r a d i e n t w r t . m e t a p a r a m e t e r s}}. 
\\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.546, + 0.888, + 0.685 + ], + "angle": 0, + "content": "The form of (11) yields efficient and accurate estimators for metagradients of models learned by minimizing a strongly convex loss [BKB+20; BKM+22; KDJ20; BBC+22; SGB+22]. Such approaches can extend to estimate metagradients of large-scale, non-convex learning algorithms [Ben00; KL17; RFK+19; FAL17; LVD20; CH20; BNL+22], but lose any correctness guarantees. Indeed, applying this class of methods in large-scale settings is challenging as doing so requires (a) assuming conditions on the learning algorithm (e.g., Hessian invertibility, continuous differentiability) and (b) efficiently approximating the inverse Hessian (in practice, typically at the cost of estimate accuracy). Finally, implicit function-based approaches are fundamentally limited in that they can only differentiate with respect to metaparameters expressed in the loss function (e.g., these methods can differentiate with respect to the weight decay, but not learning rate)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.701, + 0.886, + 0.913 + ], + "angle": 0, + "content": "Automatic (explicit) differentiation. Beyond implicit differentiation approaches, there is a long line of work on directly calculating metagradients with AD (see Section 2). Previous work has used AD to estimate metagradients of learning algorithms ranging from those with convex objectives to small neural networks [HNM19; MDA15; FDF+17; MS21; ZSP+21; CXR+22; SGB+22]. As detailed in Section 2, the primary challenge with (reverse-mode) AD-based approaches to meta-differentiation is storing the intermediate products required for the backward pass. 
To circumvent this challenge, previous work either (a) only considers settings that are small enough that is possible to differentiate while requiring space that is linear in the number of iterations (i.e., 2 layer networks on MNIST), (b) uses forward-mode AD [FDF+17; MS21; CXR+22] (which requires no extra storage at the cost of additional compute that scales linearly with metaparameter dimension), (c) only approximates the metagradient by calculating over only a few training steps [LSY18; CH20; FAL17], or uses (d) a reversible learning algorithm [MDA15]. The fourth category is a promising direction for reducing space requirements when computing large-scale metagradients, but current approaches require (a) representing model parameters in a fixed-precision format (which current large-scale learning algorithms do not support) in addition to restricting the algorithm to be reversible (e.g.," + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.092, + 0.887, + 0.157 + ], + "angle": 0, + "content": "SGD and standard GD do not qualify). A common thread is that algorithms computing metagradient with AD often suffer from numerical instability and overflow issues [MS21; SGB+22]. In relation to previous work on AD, REPLAY (Section 2) can be seen as a strategy for choosing gradient checkpointing [CAC+81; BCT92; ZP00; GW08; CXZ+16] locations in the compute graph (an NP-complete task in general [Nau08])." 
+ }, + { + "type": "title", + "bbox": [ + 0.111, + 0.172, + 0.383, + 0.192 + ], + "angle": 0, + "content": "6.2 Applying metagradients" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.198, + 0.888, + 0.306 + ], + "angle": 0, + "content": "Previous work applies metagradients to optimize training setup, including distillation [MDA15; LVD20], training data selection [HNM19; EFM24], meta-learning [FAL17; RFK+19; HAM+21], learning rate/weight decay selection [MS21; CXR+22], tuning data augmentation [LVD20], and architecture search [MDA15; LSY18; ZSP+21]. Beyond optimizing metagradients, methods in data attribution apply metagradients to (Taylor) estimate the effect of dropping training data on model predictions [KL17; GBA+23; PGI+23]. To the Previous works either (a) calculate metagradients directly with AD (made feasible by working in a very small-scale learning setting) or (b) estimate the metagradient with an implicit function-based approach." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.328, + 0.279, + 0.347 + ], + "angle": 0, + "content": "7 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.36, + 0.889, + 0.483 + ], + "angle": 0, + "content": "In this work we add metagradients to the large-scale machine learning toolkit. To do so, we overcome two challenges: (a) calculating metagradients at scale and (b) modifying learning algorithms to be metasmooth—i.e., to admit metagradients that locally predict model behavior. We then successfully calculate and apply metagradients for large-scale models (up to 2B parameters) to select data for CLIP pretraining and instruction fine-tuning, to (Huber) poison training data to decrease overall model accuracy, and search for high-dimensional hyperparameters (per-iteration learning rates). Given the successful applications of metagradients in these settings, we are excited to see what unlocking metagradients enables in other areas of machine learning." 
+ }, + { + "type": "title", + "bbox": [ + 0.111, + 0.506, + 0.37, + 0.527 + ], + "angle": 0, + "content": "8 Acknowledgements" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.538, + 0.886, + 0.614 + ], + "angle": 0, + "content": "Work supported in part by the NSF grant DMS-2134108 and Open Philanthropy, and in part by NSF Grant No. 2346519. This work is also supported in part by the Alan Turing Institute, and the U.S. Department of Energy. The authors would like to thank Alex Damian, Harshay Shah, Jesse Michel, Joel Flynn, Manolis Zampetakis, Noah Moroze, Piotr Indyk, Sam Hopkins, Sung Min (Sam) Park, and Sarah Cen for helpful references as well as discussions and feedback on early versions of this work." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.935, + 0.511, + 0.949 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.115, + 0.088, + 0.238, + 0.107 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.12, + 0.953, + 0.167 + ], + "angle": 0, + "content": "[ATS+23] Amro Abbas, Kushal Tirumala, Dániel Simig, Surya Ganguli, and Ari S Morcos. \"SemDeDup: Data-efficient learning at web-scale through semantic dedduplication\". In: arXiv preprint arXiv:2303.09540 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.171, + 0.885, + 0.202 + ], + "angle": 0, + "content": "[BAC+21] Sara Beery, Arushi Agarwal, Elijah Cole, and Vighnesh Birodkar. \"The iWildCam 2021 competition dataset\". In: arXiv preprint arXiv:2105.03494. 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.205, + 0.885, + 0.254 + ], + "angle": 0, + "content": "[BBC+22] Mathieu Blondel, Quentin Berthet, Marco Cuturi, Roy Frostig, Stephan Hoyer, Felipe Llinares-López, Fabian Pedregosa, and Jean-Philippe Vert. \"Efficient and modular implicit differentiation\". In: Advances in neural information processing systems 35 (2022), pp. 5230-5242." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.257, + 0.897, + 0.303 + ], + "angle": 0, + "content": "[BBY+22] Yonatan Bitton, Nitzan Bitton Guetta, Ron Yosef, Yuval Elovici, Mohit Bansal, Gabriel Stanovsky, and Roy Schwartz. \"WinoGAViL: Gamified association benchmark to challenge vision-and-language models\". In: Advances in Neural Information Processing Systems. 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.307, + 0.885, + 0.354 + ], + "angle": 0, + "content": "[BCT92] Preston Briggs, Keith D Cooper, and Linda Torczon. \"Rematerialization\". In: Proceedings of the ACM SIGPLAN 1992 conference on Programming language design and implementation. 1992, pp. 311-321." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.357, + 0.885, + 0.388 + ], + "angle": 0, + "content": "[Ben00] Yoshua Bengio. \"Gradient-based optimization of hyperparameters\". In: Neural computation 12.8 (2000), pp. 1889-1900." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.392, + 0.89, + 0.455 + ], + "angle": 0, + "content": "[BGM+18] Peter Bandi, Oscar Geessink, Quirine Manson, Marcory Van Dijk, Maschenka Balkenhol, Meyke Hermsen, Babak Ehteshami Bejnordi, Byungjae Lee, Kyunghyun Paeng, Aoxiao Zhong, et al. \"From detection of individual metastases to classification of lymph node status at the patient level: the CAMELYON17 challenge\". In: IEEE Transactions on Medical Imaging (2018)." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.458, + 0.885, + 0.489 + ], + "angle": 0, + "content": "[BGV14] Lukas Bossard, Matthieu Guillaumin, and Luc Van Gool. \"Food-101-mining discriminative components with random forests\". In: European conference on computer vision. 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.493, + 0.885, + 0.54 + ], + "angle": 0, + "content": "[BKB+20] Quentin Bertrand, Quentin Klopfenstein, Mathieu Blondel, Samuel Vaiter, Alexandre Gramfort, and Joseph Salmon. 
\"Implicit differentiation of lasso-type models for hyperparameter optimization\". In: International Conference on Machine Learning. PMLR. 2020, pp. 810-821." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.543, + 0.885, + 0.605 + ], + "angle": 0, + "content": "[BKM+22] Quentin Bertrand, Quentin Klopfenstein, Mathurin Massias, Mathieu Blondel, Samuel Vaiter, Alexandre Gramfort, and Joseph Salmon. \"Implicit differentiation for fast hyperparameter selection in non-smooth convex learning\". In: Journal of Machine Learning Research 23.149 (2022), pp. 1-43." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.609, + 0.885, + 0.668 + ], + "angle": 0, + "content": "[BMA+19] Andrei Barbu, David Mayo, Julian Alverio, William Luo, Christopher Wang, Dan Gutfreund, Josh Tenenbaum, and Boris Katz. \"ObjectNet: A large-scale bias-controlled dataset for pushing the limits of object recognition models\". In: Neural Information Processing Systems (NeurIPS). 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.674, + 0.885, + 0.705 + ], + "angle": 0, + "content": "[BNL+22] Juhan Bae, Nathan Ng, Alston Lo, Marzyeh Ghassemi, and Roger Grosse. \"If Influence Functions are the Answer, Then What is the Question?\" In: ArXiv preprint arXiv:2209.05364. 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.709, + 0.885, + 0.74 + ], + "angle": 0, + "content": "[BSF94] Yoshua Bengio, Patrice Simard, and Paolo Frasconi. \"Learning long-term dependencies with gradient descent is difficult\". In: IEEE Transactions on Neural Networks. 1994." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.744, + 0.885, + 0.789 + ], + "angle": 0, + "content": "[CAC+81] Gregory J Chaitin, Marc A Auslander, Ashok K Chandra, John Cocke, Martin E Hopkins, and Peter W Markstein. \"Register allocation via coloring\". In: Computer languages 6.1 (1981), pp. 47-57." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.794, + 0.885, + 0.841 + ], + "angle": 0, + "content": "[CFW+18] Gordon Christie, Neil Fendley, James Wilson, and Ryan Mukherjee. \"Functional Map of the World\". In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR). June 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.845, + 0.918, + 0.89 + ], + "angle": 0, + "content": "[CH20] Xiangning Chen and Cho-Jui Hsieh. \"Stabilizing differentiable architecture search via perturbation-based regularization\". In: International conference on machine learning. PMLR. 2020, pp. 1554-1565." + }, + { + "type": "list", + "bbox": [ + 0.116, + 0.12, + 0.918, + 0.89 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.51, + 0.948 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.09, + 0.885, + 0.123 + ], + "angle": 0, + "content": "[CHL17] Gong Cheng, Junwei Han, and Xiaoqiang Lu. \"Remote sensing image scene classification: Benchmark and state of the art\". In: Proceedings of the IEEE. 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.126, + 0.89, + 0.188 + ], + "angle": 0, + "content": "[CHM+23] Mike Conover, Matt Hayes, Ankit Mathur, Jianwei Xie, Jun Wan, Sam Shah, Ali Ghodsi, Patrick Wendell, Matei Zaharia, and Reynold Xin. Free Dolly: Introducing the World's First Truly Open Instruction-Tuned LLM. 2023. URL: https://www.databricks.com/blog/2023/04/12/dolly-first-open-commercially-viable-instruction-tuned-llm (visited on 06/30/2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.192, + 0.888, + 0.238 + ], + "angle": 0, + "content": "[CKL+22] Jeremy M. Cohen, Simran Kaur, Yuanzhi Li, J. Zico Kolter, and Ameet Talwalkar. Gradient Descent on Neural Networks Typically Occurs at the Edge of Stability. 2022. arXiv: 2103.00065 [cs.LG]. URL: https://arxiv.org/abs/2103.00065." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.242, + 0.682, + 0.259 + ], + "angle": 0, + "content": "[Cla90] Frank H Clarke. Optimization and nonsmooth analysis. SIAM, 1990." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.263, + 0.884, + 0.309 + ], + "angle": 0, + "content": "[CMK+14] Mircea Cimpoi, Subhransu Maji, Iasonas Kokkinos, Sammy Mohamed, and Andrea Vedaldi. \"Describing textures in the wild\". In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.313, + 0.884, + 0.358 + ], + "angle": 0, + "content": "[CNL11] Adam Coates, Andrew Ng, and Honglak Lee. \"An analysis of single-layer networks in unsupervised feature learning\". In: Proceedings of the fourteenth international conference on artificial intelligence and statistics. 2011." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.363, + 0.884, + 0.407 + ], + "angle": 0, + "content": "[CXR+22] Kartik Chandra, Audrey Xie, Jonathan Ragan-Kelley, and Erik Meijer. \"Gradient descent: The ultimate optimizer\". In: Advances in Neural Information Processing Systems 35 (2022), pp. 8214-8225." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.414, + 0.884, + 0.46 + ], + "angle": 0, + "content": "[ CXZ+16] Tianqi Chen, Bing Xu, Chiyuan Zhang, and Carlos Guestrin. \"Training Deep Nets with Sublinear Memory Cost\". In: CoRR abs/1604.06174 (2016). arXiv: 1604.06174. URL: http://arxiv.org/abs/1604.06174." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.464, + 0.884, + 0.496 + ], + "angle": 0, + "content": "[DDS+09] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. \"Imagenet: A large-scale hierarchical image database\". In: Computer Vision and Pattern Recognition (CVPR). 2009." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.499, + 0.884, + 0.546 + ], + "angle": 0, + "content": "[DFE+22] Tri Dao, Daniel Y. Fu, Stefano Ermon, Atri Rudra, and Christopher Ré. 
FlashAttention: Fast and Memory-Efficient Exact Attention with IO-Awareness. 2022. arXiv: 2205.14135 [cs.LG]. URL: https://arxiv.org/abs/2205.14135." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.55, + 0.884, + 0.595 + ], + "angle": 0, + "content": "[DJP+24] Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. \"The llama 3 herd of models\". In: arXiv preprint arXiv:2407.21783 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.6, + 0.884, + 0.63 + ], + "angle": 0, + "content": "[ Eco24] Team EcoDatum. EcoDatum DataComp-small submission. https://www.datacomp.ai/dcclip/leaderboard.html. 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.635, + 0.884, + 0.665 + ], + "angle": 0, + "content": "[EFM24] Logan Engstrom, Axel Feldmann, and Aleksander Madry. \"DsDm: Model-Aware Dataset Selection with Datamodels\". In: 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.67, + 0.884, + 0.702 + ], + "angle": 0, + "content": "[EVW+10] M. Everingham, L. Van Gool, C. K. I. Williams, J. Winn, and A. Zisserman. \"The Pascal Visual Object Classes (VOC) Challenge\". In: International Journal of Computer Vision. 2010." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.706, + 0.884, + 0.75 + ], + "angle": 0, + "content": "[FAL17] Chelsea Finn, Pieter Abbeel, and Sergey Levine. \"Model-agnostic meta-learning for fast adaptation of deep networks\". In: International conference on machine learning. PMLR. 2017, pp. 1126-1135." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.755, + 0.884, + 0.802 + ], + "angle": 0, + "content": "[FDF+17] Luca Franceschi, Michele Donini, Paolo Frasconi, and Massimiliano Pontil. \"Forward and reverse gradient-based hyperparameter optimization\". In: International Conference on Machine Learning (ICML). 2017." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.807, + 0.884, + 0.853 + ], + "angle": 0, + "content": "[FFP04] Li Fei-Fei, Rob Fergus, and Pietro Perona. \"Learning generative visual models from few training examples: An incremental bayesian approach tested on 101 object categories\". In: 2004 conference on computer vision and pattern recognition workshop. IEEE. 2004, pp. 178-178." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.857, + 0.884, + 0.903 + ], + "angle": 0, + "content": "[FIW+22] Alex Fang, Gabriel Ilharco, Mitchell Wortsman, Yuhao Wan, Vaishaal Shankar, Achal Dave, and Ludwig Schmidt. \"Data Determines Distributional Robustness in Contrastive Language Image Pre-training (CLIP)\". In: ICML. 2022." + }, + { + "type": "list", + "bbox": [ + 0.114, + 0.09, + 0.89, + 0.903 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.09, + 0.885, + 0.138 + ], + "angle": 0, + "content": "[GBA+23] Roger Grosse, Juhan Bae, Cem Anil, Nelson Elhage, Alex Tamkin, Amirhossein Tajdini, Benoit Steiner, Dustin Li, Esin Durmus, Ethan Perez, et al. \"Studying large language model generalization with influence functions\". In: arXiv preprint arXiv:2308.03296 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.141, + 0.885, + 0.173 + ], + "angle": 0, + "content": "[GDG17] Tianyu Gu, Brendan Dolan-Gavitt, and Siddharth Garg. \"Badnets: Identifying Vulnerabilities in the Machine Learning Model Supply Chain\". In: arXiv preprint arXiv:1708.06733 (2017)." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.175, + 0.885, + 0.238 + ], + "angle": 0, + "content": "[GIF+24] Samir Yitzhak Gadre, Gabriel Ilharco, Alex Fang, Jonathan Hayase, Georgios Smyrnis, Thao Nguyen, Ryan Marten, Mitchell Wortsman, Dhruba Ghosh, Jieyu Zhang, et al. 
\"DataComp: In search of the next generation of multimodal datasets\". In: Advances in Neural Information Processing Systems. 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.241, + 0.885, + 0.288 + ], + "angle": 0, + "content": "[GLU12] Andreas Geiger, Philip Lenz, and Raquel Urtasun. \"Are we ready for autonomous driving? The KITTI vision benchmark suite\". In: 2012 IEEE conference on computer vision and pattern recognition. 2012." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.291, + 0.885, + 0.322 + ], + "angle": 0, + "content": "[GW08] Andreas Griewank and Andrea Walther. Evaluating derivatives: principles and techniques of algorithmic differentiation. SIAM, 2008." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.326, + 0.885, + 0.373 + ], + "angle": 0, + "content": "[HAM+21] Timothy Hospedales, Antreas Antoniou, Paul Micaelli, and Amos Storkey. \"Meta-learning in neural networks: A survey\". In: IEEE transactions on pattern analysis and machine intelligence 44.9 (2021), pp. 5149-5169." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.375, + 0.885, + 0.422 + ], + "angle": 0, + "content": "[HBB+20] Dan Hendrycks, Collin Burns, Steven Basart, Andy Zou, Mantas Mazeika, Dawn Song, and Jacob Steinhardt. \"Measuring massive multitask language understanding\". In: arXiv preprint arXiv:2009.03300 (2020)." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.426, + 0.885, + 0.473 + ], + "angle": 0, + "content": "[HBD+19] Patrick Helber, Benjamin Bischke, Andreas Dengel, and Damian Borth. \"EuroSAT: A novel dataset and deep learning benchmark for land use and land cover classification\". In: IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing. 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.476, + 0.905, + 0.523 + ], + "angle": 0, + "content": "[HBK+21] Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. 
\"Measuring Mathematical Problem Solving With the MATH Dataset\". In: NeurIPS (2021)." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.526, + 0.885, + 0.587 + ], + "angle": 0, + "content": "[HBM+20] Dan Hendrycks, Steven Basart, Norman Mu, Saurav Kadavath, Frank Wang, Evan Dorundo, Rahul Desai, Tyler Zhu, Samyak Parajuli, Mike Guo, Dawn Song, Jacob Steinhardt, and Justin Gilmer. The Many Faces of Robustness: A Critical Analysis of Out-of-Distribution Generalization. 2020. arXiv: 2006.16241 [cs.CV]." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.591, + 0.885, + 0.638 + ], + "angle": 0, + "content": "[HBM+22] Jordan Hoffmann, Sebastian Borgeaud, Arthur Mensch, Elena Buchatskaya, Trevor Cai, Eliza Rutherford, Diego de Las Casas, Lisa Anne Hendricks, Johannes Welbl, Aidan Clark, et al. \"Training compute-optimal large language models\". In: arXiv preprint arXiv:2203.15556. 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.641, + 0.885, + 0.673 + ], + "angle": 0, + "content": "[HNM19] Satoshi Hara, Atsushi Nitanda, and Takanori Maehara. \"Data cleansing for models trained with SGD\". In: Advances in Neural Information Processing Systems 32 (2019)." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.676, + 0.885, + 0.707 + ], + "angle": 0, + "content": "[Hub64] Peter J. Huber. \"Robust estimation of a location parameter\". In: The Annals of Mathematical Statistics. 1964." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.711, + 0.885, + 0.744 + ], + "angle": 0, + "content": "[HY20] Jiaoyang Huang and Horng-Tzer Yau. \"Dynamics of Deep Neural Networks and Neural Tangent Hierarchy\". In: Proceedings of the 37th International Conference on Machine Learning. 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.746, + 0.885, + 0.778 + ], + "angle": 0, + "content": "[HZB+19] Dan Hendrycks, Kevin Zhao, Steven Basart, Jacob Steinhardt, and Dawn Song. \"Natural adversarial examples\". In: arXiv preprint arXiv:1907.07174 (2019)." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.781, + 0.885, + 0.841 + ], + "angle": 0, + "content": "[JHV+17] Justin Johnson, Bharath Hariharan, Laurens Van Der Maaten, Li Fei-Fei, C Lawrence Zitnick, and Ross Girshick. \"CLEVR: A diagnostic dataset for compositional language and elementary visual reasoning\". In: Proceedings of the IEEE conference on computer vision and pattern recognition. 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.846, + 0.885, + 0.864 + ], + "angle": 0, + "content": "[Kor24] Keller Jordan. \"94 percent on CIFAR-10 in 3.29 Seconds on a Single GPU\". In: (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.867, + 0.9, + 0.914 + ], + "angle": 0, + "content": "[JS08] Yaochu Jin and Bernhard Sendhoff. \"Pareto-based multiobjective machine learning: An overview and case studies\". In: IEEE Transactions on Systems, Man, and Cybernetics, Part C (Applications and Reviews) 38.3 (2008), pp. 397-415." + }, + { + "type": "list", + "bbox": [ + 0.114, + 0.09, + 0.905, + 0.914 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.51, + 0.948 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.09, + 0.885, + 0.123 + ], + "angle": 0, + "content": "[KB15] Diederik P. Kingma and Jimmy Ba. \"Adam: A Method for Stochastic Optimization\". In: International Conference on Learning Representations (ICLR). 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.126, + 0.885, + 0.158 + ], + "angle": 0, + "content": "[KDJ20] MJ Zico Kolter, David Duvenaud, and Matt Johnson. \"Deep implicit layers-neural odes, deep equilibrium models, and beyond, 2020\". In: NeurIPS Tutorial (2020)." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.161, + 0.885, + 0.224 + ], + "angle": 0, + "content": "[KKR+24] Andreas Köpf, Yannic Kilcher, Dimitri von Rütte, Sotiris Anagnostidis, Zhi Rui Tam, Keith Stevens, Abdullah Barhoum, Duc Nguyen, Oliver Stanley, Richard Nagyfi, et al. \"Open-assistant conversations-democratizing large language model alignment\". In: Advances in Neural Information Processing Systems 36 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.226, + 0.885, + 0.259 + ], + "angle": 0, + "content": "[Pang Wei Koh and Percy Liang. \"Understanding Black-box Predictions via Influence Functions\". In: International Conference on Machine Learning. 2017.]" + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.261, + 0.885, + 0.294 + ], + "angle": 0, + "content": "[Kri09] Alex Krizhevsky. \"Learning Multiple Layers of Features from Tiny Images\". In: Technical report. 2009." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.297, + 0.885, + 0.343 + ], + "angle": 0, + "content": "[KSD+13] Jonathan Krause, Michael Stark, Jia Deng, and Li Fei-Fei. \"3d object representations for fine-grained categorization\". In: Proceedings of the IEEE international conference on computer vision workshops. 2013." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.346, + 0.885, + 0.409 + ], + "angle": 0, + "content": "[KSM+20] Pang Wei Koh, Shiori Sagawa, Henrik Marklund, Sang Michael Xie, Marvin Zhang, Akshay Balsubramani, Weihua Hu, Michihiro Yasunaga, Richard Lanas Phillips, Sara Beery, et al. \"WILDS: A Benchmark of in-the-Wild Distribution Shifts\". In: arXiv preprint arXiv:2012.07421 (2020)." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.412, + 0.874, + 0.43 + ], + "angle": 0, + "content": "[LA19] Zhiyuan Li and Sanjeev Arora. An Exponential Learning Rate Schedule for Deep Learning. 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.433, + 0.818, + 0.45 + ], + "angle": 0, + "content": "[LeC98] Yann LeCun. 
\"The MNIST database of handwritten digits\". In: Technical report. 1998." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.453, + 0.885, + 0.5 + ], + "angle": 0, + "content": "[LFX+24] Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, et al. \"Deepseek-v3 technical report\". In: arXiv preprint arXiv:2412.19437. 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.503, + 0.885, + 0.565 + ], + "angle": 0, + "content": "[LHV+23] Shayne Longpre, Le Hou, Tu Vu, Albert Webson, Hyung Won Chung, Yi Tay, Denny Zhou, Quoc V Le, Barret Zoph, Jason Wei, et al. \"The flan collection: Designing data and methods for effective instruction tuning\". In: International Conference on Machine Learning. PMLR. 2023, pp. 22631-22648." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.568, + 0.885, + 0.601 + ], + "angle": 0, + "content": "[LIE+22] Guillaume Leclerc, Andrew Ilyas, Logan Engstrom, Sung Min Park, Hadi Salman, and Aleksander Madry. ffcv. https://github.com/libffcv/ffcv/. 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.604, + 0.885, + 0.636 + ], + "angle": 0, + "content": "[LKY22] Yiwei Lu, Gautam Kamath, and Yaoliang Yu. \"Indiscriminate Data Poisoning Attacks on Neural Networks\". In: arXiv preprint arXiv:2204.09092 (2022)." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.639, + 0.885, + 0.687 + ], + "angle": 0, + "content": "[LY23] Yiwei Lu, Gautam Kamath, and Yaoliang Yu. \"Exploring the limits of model-targeted indiscriminate data poisoning attacks\". In: International Conference on Machine Learning. PMLR. 2023, pp. 22856-22879." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.689, + 0.885, + 0.722 + ], + "angle": 0, + "content": "[LM20] Guillaume Leclerc and Aleksander Madry. \"The two regimes of deep network training\". In: arXiv preprint arXiv:2002.10376. 2020." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.724, + 0.885, + 0.771 + ], + "angle": 0, + "content": "[LMB+14] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dollár, and C Lawrence Zitnick. \"Microsoft coco: Common objects in context\". In: European conference on computer vision (ECCV). 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.774, + 0.885, + 0.807 + ], + "angle": 0, + "content": "[LSY18] Hanxiao Liu, Karen Simonyan, and Yiming Yang. \"Darts: Differentiable architecture search\". In: arXiv preprint arXiv:1806.09055 (2018)." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.81, + 0.885, + 0.858 + ], + "angle": 0, + "content": "[LVD20] Jonathan Lorraine, Paul Vicol, and David Duvenaud. \"Optimizing millions of hyperparameters by implicit differentiation\". In: International conference on artificial intelligence and statistics. PMLR. 2020, pp. 1540-1552." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.861, + 0.885, + 0.906 + ], + "angle": 0, + "content": "[MDA15] Dougal Maclaurin, David Duvenaud, and Ryan Adams. \"Gradient-based hyperparameter optimization through reversible learning\". In: International conference on machine learning (ICML). 2015." + }, + { + "type": "list", + "bbox": [ + 0.114, + 0.09, + 0.885, + 0.906 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.51, + 0.948 + ], + "angle": 0, + "content": "23" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.09, + 0.885, + 0.123 + ], + "angle": 0, + "content": "[MRK+13] Subhransu Maji, Esa Rahtu, Juho Kannala, Matthew Blaschko, and Andrea Vedaldi. \"Fine-grained visual classification of aircraft\". In: arXiv preprint arXiv:1306.5151 (2013)." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.124, + 0.885, + 0.158 + ], + "angle": 0, + "content": "[MS21] Paul Micaelli and Amos J Storkey. 
\"Gradient-based hyperparameter optimization over long horizons\". In: Advances in Neural Information Processing Systems 34 (2021), pp. 10798-10809." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.159, + 0.885, + 0.193 + ], + "angle": 0, + "content": "[Nau08] Uwe Naumann. \"Optimal Jacobian accumulation is NP-complete\". In: Math. Program. 112.2 (Apr. 2008), pp. 427-441. ISSN: 0025-5610." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.194, + 0.885, + 0.242 + ], + "angle": 0, + "content": "[NWC+11] Yuval Netzer, Tao Wang, Adam Coates, Alessandro Bissacco, Baolin Wu, Andrew Y Ng, et al. \"Reading digits in natural images with unsupervised feature learning\". In: NIPS workshop on deep learning and unsupervised feature learning. 2011." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.243, + 0.885, + 0.292 + ], + "angle": 0, + "content": "[NZ08] Maria-Elena Nilsback and Andrew Zisserman. \"Automated flower classification over a large number of classes\". In: 2008 Sixth Indian Conference on Computer Vision, Graphics & Image Processing. 2008." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.293, + 0.885, + 0.326 + ], + "angle": 0, + "content": "[OR00] James M Ortega and Werner C Rheinboldt. Iterative solution of nonlinear equations in several variables. SIAM, 2000." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.328, + 0.915, + 0.36 + ], + "angle": 0, + "content": "[Pag18] David Page. CIFAR-10 Fast. GitHub Repository. Oct. 2018. URL: https://github.com/davidcpage/cifar10-fast." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.362, + 0.885, + 0.395 + ], + "angle": 0, + "content": "[Pea96] Barak A Pearlmutter. \"An investigation of the gradient descent process in neural networks\". In: PhD thesis, Carnegie Mellon University. 1996." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.397, + 0.885, + 0.431 + ], + "angle": 0, + "content": "[PGI+23] Sung Min Park, Kristian Georgiev, Andrew Ilyas, Guillaume Leclerc, and Aleksander Madry. \"TRAK: Attributing Model Behavior at Scale\". In: *Arxiv preprint arXiv:2303.14186*. 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.432, + 0.885, + 0.465 + ], + "angle": 0, + "content": "[PVZ+12] Omkar M Parkhi, Andrea Vedaldi, Andrew Zisserman, and CV Jawahar. \"Cats and dogs\". In: 2012 IEEE conference on computer vision and pattern recognition. IEEE. 2012, pp. 3498-3505." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.467, + 0.885, + 0.53 + ], + "angle": 0, + "content": "[RDK+22] William A Gaviria Rojas, Sudnya Diamos, Keertan Ranjan Kini, David Kanter, Vijay Janapa Reddi, and Cody Coleman. \"The dollar street dataset: Images representing the geographic and socioeconomic diversity of the world\". In: Thirty-sixth Conference on Neural Information Processing Systems Datasets and Benchmarks Track. 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.532, + 0.885, + 0.565 + ], + "angle": 0, + "content": "[RFK+19] Aravind Rajeswaran, Chelsea Finn, Sham M Kakade, and Sergey Levine. \"Meta-learning with implicit gradients\". In: Advances in neural information processing systems 32 (2019)." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.567, + 0.885, + 0.614 + ], + "angle": 0, + "content": "[RKH+21] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. \"Learning transferable visual models from natural language supervision\". In: arXiv preprint arXiv:2103.00020. 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.616, + 0.885, + 0.665 + ], + "angle": 0, + "content": "[RLZ+24] Vikram V Ramaswamy, Sing Yu Lin, Dora Zhao, Aaron Adcock, Laurens van der Maaten, Deepti Ghadiyaram, and Olga Russakovsky. 
\"GeoDE: a geographically diverse evaluation dataset for object recognition\". In: Advances in Neural Information Processing Systems. 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.667, + 0.885, + 0.713 + ], + "angle": 0, + "content": "[RRS+19] Benjamin Recht, Rebecca Roelofs, Ludwig Schmidt, and Vaishaal Shankar. \"Do ImageNet Classifiers Generalize to ImageNet?\" In: International Conference on Machine Learning (ICML). 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.715, + 0.885, + 0.763 + ], + "angle": 0, + "content": "[SGB+22] Damien Scieur, Gauthier Gidel, Quentin Bertrand, and Fabian Pedregosa. \"The curse of un-rolling: Rate of differentiating through optimization\". In: Advances in Neural Information Processing Systems 35 (2022), pp. 17133–17145." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.766, + 0.885, + 0.799 + ], + "angle": 0, + "content": "[SN17] Leslie N. Smith Smith and Topin Nicholay. \"Super-Convergence: Very Fast Training of Neural Networks Using Large Learning Rates\". In: ArXiv preprint arXiv:1708.07120. 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.801, + 0.885, + 0.863 + ], + "angle": 0, + "content": "[SRR+22] Aarohi Srivastava, Abhinav Rastogi, Abhishek Rao, Abu Awal Md Shoeb, Abubakar Abid, Adam Fisch, Adam R Brown, Adam Santoro, Aditya Gupta, Adrià Garriga-Alonso, et al. “Beyond the imitation game: Quantifying and extrapolating the capabilities of language models”. In: arXiv preprint arXiv:2206.04615 (2022)." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.865, + 0.885, + 0.913 + ], + "angle": 0, + "content": "[SSS+11] Johannes Stallkamp, Marc Schlipsing, Jan Salmen, and Christian Igel. \"The German traffic sign recognition benchmark: a multi-class classification competition\". In: The 2011 international joint conference on neural networks. 2011." 
+ }, + { + "type": "list", + "bbox": [ + 0.114, + 0.09, + 0.915, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.51, + 0.948 + ], + "angle": 0, + "content": "24" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.09, + 0.885, + 0.153 + ], + "angle": 0, + "content": "[SSS+22] Mirac Suzgun, Nathan Scales, Nathanael Scharli, Sebastian Gehrmann, Yi Tay, Hyung Won Chung, Aakanksha Chowdhery, Quoc V Le, Ed H Chi, Denny Zhou, et al. \"Challenging big-bench tasks and whether chain-of-thought can solve them\". In: arXiv preprint arXiv:2210.09261 (2022)." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.157, + 0.885, + 0.203 + ], + "angle": 0, + "content": "[TGZ+23] Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li, Carlos Guestrin, Percy Liang, and Tatsunori B. Hashimoto. Stanford Alpaca: An Instruction-following LLaMA model. https://github.com/tatsu-lab/stanford_alpaca.2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.207, + 0.885, + 0.254 + ], + "angle": 0, + "content": "[TMH+24] Gemma Team, Thomas Mesnard, Cassidy Hardin, Robert Dadashi, Surya Bhupatiraju, Shreya Pathak, Laurent Sifre, Morgane Riviere, Mihir Sanjay Kale, Juliette Love, et al. \"Gemma: Open models based on gemini research and technology\". In: arXiv preprint arXiv:2403.08295 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.257, + 0.892, + 0.303 + ], + "angle": 0, + "content": "[TSF+16] Bart Thomee, David A. Shamma, Gerald Friedland, Benjamin Elizalde, Karl Ni, Douglas Poland, Damian Borth, and Li-Jia Li. \"YFCC100M: The New Data in Multimedia Research\". In: Communications of the ACM (2016)." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.307, + 0.885, + 0.369 + ], + "angle": 0, + "content": "[VLW+18] Bastiaan S Veeling, Jasper Linmans, Jim Winkens, Taco Cohen, and Max Welling. \"Rotation equivariant CNNs for digital pathology\". 
In: Medical Image Computing and Computer Assisted Intervention-MICCAI 2018: 21st International Conference, Granada, Spain, September 16-20, 2018, Proceedings, Part II 11. 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.373, + 0.885, + 0.389 + ], + "angle": 0, + "content": "[Web24] Team Webdataset. webdataset. 2024. URL: https://www.github.com/webdataset/webdataset." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.393, + 0.885, + 0.424 + ], + "angle": 0, + "content": "[Wer90] Paul J Werbos. \"Backpropagation through time: what it does and how to do it\". In: Proceedings of the IEEE 78.10 (1990), pp. 1550-1560." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.428, + 0.885, + 0.473 + ], + "angle": 0, + "content": "[WGX+19] Haohan Wang, Songwei Ge, Eric P Xing, and Zachary C Lipton. \"Learning robust global representations by penalizing local predictive power\". In: Neural Information Processing Systems (NeurIPS) (2019)." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.478, + 0.885, + 0.525 + ], + "angle": 0, + "content": "[WWS+22] Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. \"Chain-of-thought prompting elicits reasoning in large language models\". In: Advances in neural information processing systems 35 (2022), pp. 24824-24837." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.528, + 0.899, + 0.574 + ], + "angle": 0, + "content": "[XHE+10] Jianxiong Xiao, James Hays, Krista A Ehinger, Aude Oliva, and Antonio Torralba. \"Sun database: Large-scale scene recognition from abbey to zoo\". In: Computer Vision and Pattern Recognition (CVPR). 2010." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.578, + 0.885, + 0.625 + ], + "angle": 0, + "content": "[XMG+24] Mengzhou Xia, Sadhika Malladi, Suchin Gururangan, Sanjeev Arora, and Danqi Chen. \"Less: Selecting influential data for targeted instruction tuning\". In: arXiv preprint arXiv:2402.04333 (2024)." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.629, + 0.885, + 0.676 + ], + "angle": 0, + "content": "[YLH+14] Peter Young, Alice Lai, Micah Hodosh, and Julia Hockenmaier. \"From image descriptions to visual denotations: New similarity metrics for semantic inference over event descriptions\". In: Transactions of the Association for Computational Linguistics. 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.679, + 0.885, + 0.755 + ], + "angle": 0, + "content": "[ZP00] Geoffrey Zweig and Mukund Padmanabhan. \"Exact alpha-beta computation in logarithmic space with application to MAP word graph construction\". In: Sixth International Conference on Spoken Language Processing, ICSLP 2000 / INTERSPEECH 2000, Beijing, China, October 16-20, 2000. ISCA, 2000, pp. 855-858. DOI: 10.21437/ICSLP.2000-404. URL: https://doi.org/10.21437/ICSLP.2000-404." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.76, + 0.885, + 0.821 + ], + "angle": 0, + "content": "[ZPK+19] Xiaohua Zhai, Joan Puigcerver, Alexander Kolesnikov, Pierre Ruyssen, Carlos Riquelme, Mario Lucic, Josip Djolonga, Andre Susano Pinto, Maxim Neumann, Alexey Dosovitskiy, et al. \"A large-scale study of representation learning with the visual task adaptation benchmark\". In: arXiv preprint arXiv:1910.04867. 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.825, + 0.885, + 0.872 + ], + "angle": 0, + "content": "[ZSP+21] Miao Zhang, Steven W Su, Shirui Pan, Xiaojun Chang, Ehsan M Abbasnejad, and Reza Haffari. \"idarts: Differentiable architecture search with stochastic implicit gradients\". In: International Conference on Machine Learning. PMLR. 2021, pp. 12557-12566." 
+ }, + { + "type": "list", + "bbox": [ + 0.115, + 0.09, + 0.899, + 0.872 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.935, + 0.509, + 0.948 + ], + "angle": 0, + "content": "25" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.113, + 0.089, + 0.586, + 0.11 + ], + "angle": 0, + "content": "A Calculating metagradients with REPLAY" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.121, + 0.884, + 0.152 + ], + "angle": 0, + "content": "This appendix contains supplementary material for Section 2. We describe two algorithms in detail: stepwise AD, and our own algorithm REPLAY. Refer to Section 2 for the notation used in this appendix." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.171, + 0.38, + 0.189 + ], + "angle": 0, + "content": "A.1 Warmup: Step-wise AD" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.196, + 0.885, + 0.243 + ], + "angle": 0, + "content": "We fully describe step-wise AD in Algorithm 2. The algorithm requires storing all \\( T \\) optimizer states, but requires constant memory overhead for each AD call (as each AD call is over a single step), making it feasible to compute for small setups." + }, + { + "type": "code_caption", + "bbox": [ + 0.126, + 0.263, + 0.442, + 0.279 + ], + "angle": 0, + "content": "Algorithm 2: metagradients in \\(\\mathcal{O}(T)\\) space." + }, + { + "type": "code", + "bbox": [ + 0.117, + 0.281, + 0.831, + 0.525 + ], + "angle": 0, + "content": "1 // Store each optimizer state on disk \n2 \\(\\{s_i\\}_{i=0}^T \\leftarrow\\) Train model via \\(A(z)\\) \n3 \n4 // Variables; shorthand for \\(\\frac{\\partial f(z)}{\\partial z}\\) and \\(\\frac{\\partial f(z)}{\\partial s_T}\\) \n5 \\(\\bar{z} \\gets 0\\) \n6 \\(\\bar{s}_T \\leftarrow \\frac{\\partial g(s_T)}{\\partial s_T} \\quad //\\) One reverse-mode AD call \n7 \n8 // Reverse-mode differentiate step-by-step \n9 for \\(s_i \\gets s_{T-1}\\) to \\(s_0\\) do \n10 // One reverse-mode AD call. Left: \\(\\nabla_{s_i}f\\). 
Right: contribution to \\(\\nabla_{z}f\\) at \\(i\\). \n11 \\(\\bar{s}_i \\gets \\bar{s}_{i+1} \\cdot \\frac{\\partial h_i(s_i, z)}{\\partial s_i}, \\quad \\bar{z}_i \\gets \\bar{s}_{i+1} \\cdot \\frac{\\partial h_i(s_i, z)}{\\partial z}\\) \n12 \n13 \\(\\bar{z} \\gets \\bar{z} + \\bar{z}_i \\quad //\\) Accumulate metagradient \n14 \n15 Return \\(\\bar{z}\\)" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.568, + 0.238, + 0.582 + ], + "angle": 0, + "content": "A.2 REPLAY" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.593, + 0.884, + 0.669 + ], + "angle": 0, + "content": "We now describe REPLAY, our method for calculating metagradients. For a free parameter \\( k \\in \\mathbb{N} \\), REPLAY requires storing \\( \\mathcal{O}(k\\log_k(T)) \\) optimizer states and an additional \\( \\mathcal{O}(\\log_k(T)) \\) factor of computation. The free parameter \\( k \\) controls the trade-off between storage and required compute. We fully describe REPLAY in Algorithm 3. REPLAY modifies Algorithm 2 by retrieving the optimizer states in reverse order using a \\( k \\)-ary tree structure in lieu of a list of all the stored states." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.688, + 0.28, + 0.703 + ], + "angle": 0, + "content": "A.2.1 Lazy \\(k\\)-ary tree" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.711, + 0.885, + 0.802 + ], + "angle": 0, + "content": "We now describe the \\(k\\)-ary tree structure that underlies REPLAY; for a visual reference of this tree with \\(k = 2\\), see Figure 3. For ease of analysis we parameterize the total number of states as \\(n = T + 1\\) (and therefore take \\(n - 1\\) total training steps) when describing this data structure, and assume WLOG that \\(n\\) is an integer power of \\(k\\). At a high level, traversing this tree recursively replays retraining to recover all the optimizer states in reverse order, while deleting states that are no longer needed. 
We call this tree \"lazy\" because it retrains only when required to obtain states that are not yet retrieved.
+ }, + { + "type": "text", + "bbox": [ + 0.111, + 0.212, + 0.885, + 0.351 + ], + "angle": 0, + "content": "Reverse in-order traversing this tree requires storing at most \\( k \\log_k(n) \\) optimizer states at a time, and in aggregate requires retraining the model \\( \\log_k(n) \\) times. The argument for each is straightforward. Storage: the traversal requires storing at most \\( k \\) states for each level that it descends (we store \\( k \\) states whenever we first traverse to a parent node) and we remove \\( k \\) states for each level that the traversal ascends (we remove \\( k \\) states after we are done with the left traversal of a parent). Compute: we replay training to reinstantiate the children of every parent node a single time. The \\( k^d \\) parent nodes at level \\( d \\) each require replaying \\( \\mathcal{O}(n / k^d) \\) states to reinstantiate children. Therefore, in a traversal, each level requires \\( \\mathcal{O}(n) (k^d \\cdot n / k^d) \\) optimizer steps. There are \\( \\log_k(n) \\) levels with parent nodes, which means a total of \\( \\mathcal{O}(n \\log_k(n)) \\) optimizer steps, or a multiplicative factor of \\( \\mathcal{O}(\\log_k(n)) \\) steps compared to model training." + }, + { + "type": "code_caption", + "bbox": [ + 0.126, + 0.369, + 0.561, + 0.386 + ], + "angle": 0, + "content": "Algorithm 3: REPLAY. metagradients in \\(\\mathcal{O}(k\\log_k(T))\\) space." 
+ }, + { + "type": "code", + "bbox": [ + 0.119, + 0.389, + 0.831, + 0.617 + ], + "angle": 0, + "content": "1 \\(T\\gets\\) Lazy \\(k\\) -ary tree for \\(\\mathcal{A}(z)\\) // Make lazy \\(k\\) -ary tree of Appendix A.2 \n2 \n3 // Variables; shorthand for \\(\\frac{\\partial f(z)}{\\partial z}\\) and \\(\\frac{\\partial f(z)}{\\partial s_T}\\) \n4 \\(\\bar{z}\\gets 0\\) \n5 \\(\\bar{s}_T\\gets \\frac{\\partial g(s_T)}{\\partial s_T}\\) // One reverse-mode AD call \n6 \n7 // Reverse-mode differentiate step-by-step; traverse \\(T\\) instead of stored states \n8 for \\(s_i\\gets s_{T - 1}\\) to \\(s_0\\in\\) reverse_inorder_traversal(T) do \n9 // One reverse-mode AD call. Left: \\(\\nabla_{s_i}f\\) . Right: contribution to \\(\\nabla_{z}f\\) at i. \n10 \\(\\bar{s}_i\\gets \\bar{s}_{i + 1}\\cdot \\frac{\\partial h_i(s_i,z)}{\\partial s_i},\\quad \\bar{z}_i\\gets \\bar{s}_{i + 1}\\cdot \\frac{\\partial h_i(s_i,z)}{\\partial z}\\) \n11 \n12 \\(\\bar{z}\\gets \\bar{z} +\\bar{z}_i\\) // Accumulate metagradient \n13 \n14 Return \\(\\bar{z}\\)" + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.935, + 0.509, + 0.948 + ], + "angle": 0, + "content": "27" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.114, + 0.088, + 0.415, + 0.11 + ], + "angle": 0, + "content": "B Smooth Model Training" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.121, + 0.313, + 0.141 + ], + "angle": 0, + "content": "B.1 Omitted Figures" + }, + { + "type": "image", + "bbox": [ + 0.123, + 0.159, + 0.868, + 0.302 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.112, + 0.313, + 0.884, + 0.342 + ], + "angle": 0, + "content": "Figure 11: The factors affecting metasmoothness of training a ResNet-9 on the CIFAR-10 dataset. See §3 for details." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.935, + 0.509, + 0.948 + ], + "angle": 0, + "content": "28" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.225, + 0.092, + 0.443, + 0.109 + ], + "angle": 0, + "content": "Non-smooth (Example #1118)" + }, + { + "type": "image", + "bbox": [ + 0.16, + 0.111, + 0.486, + 0.282 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.589, + 0.092, + 0.773, + 0.109 + ], + "angle": 0, + "content": "Smooth (Example #1118)" + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.111, + 0.832, + 0.281 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.225, + 0.294, + 0.443, + 0.311 + ], + "angle": 0, + "content": "Non-smooth (Example #3349)" + }, + { + "type": "image", + "bbox": [ + 0.16, + 0.314, + 0.485, + 0.482 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.59, + 0.294, + 0.773, + 0.311 + ], + "angle": 0, + "content": "Smooth (Example #3349)" + }, + { + "type": "image", + "bbox": [ + 0.509, + 0.314, + 0.832, + 0.482 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.221, + 0.497, + 0.446, + 0.514 + ], + "angle": 0, + "content": "Non-smooth (Example #10600)" + }, + { + "type": "image", + "bbox": [ + 0.16, + 0.516, + 0.485, + 0.685 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.586, + 0.497, + 0.777, + 0.514 + ], + "angle": 0, + "content": "Smooth (Example #10600)" + }, + { + "type": "image", + "bbox": [ + 0.509, + 0.516, + 0.832, + 0.685 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.221, + 0.7, + 0.446, + 0.716 + ], + "angle": 0, + "content": "Non-smooth (Example #15578)" + }, + { + "type": "image", + "bbox": [ + 0.16, + 0.719, + 0.485, + 0.887 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.586, + 0.7, + 0.777, + 0.716 + ], + "angle": 
0, + "content": "Smooth (Example #15578)" + }, + { + "type": "image", + "bbox": [ + 0.509, + 0.719, + 0.832, + 0.887 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.31, + 0.901, + 0.686, + 0.918 + ], + "angle": 0, + "content": "Figure 12: Additional loss landscape visualizations." + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "29" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.113, + 0.088, + 0.475, + 0.11 + ], + "angle": 0, + "content": "C Metagradients for DataComp" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.121, + 0.886, + 0.168 + ], + "angle": 0, + "content": "This appendix contains pseudocode for the main algorithm used to do dataset selection for DataComp. It also contains additional implementation details on how metagradients were applied to CLIP, and how they were specifically applied to the DataComp setting." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.186, + 0.438, + 0.205 + ], + "angle": 0, + "content": "C.1 Dataset Selection Using MGD" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.212, + 0.886, + 0.304 + ], + "angle": 0, + "content": "When implementing Algorithm 1, there are several differences from the pseudocode below: firstly, rather than selecting \\(\\mathbf{m}\\) fully randomly every step, we randomly select a shard comprising fraction \\(p\\) of the data and take steps on all datapoints in the shard (see Section C.2). To mitigate overfitting, we also bake a \"minibatch fraction\" \\(q\\) into our model output function \\(\\phi\\). For example, if \\(\\phi\\) calculates model loss on the ImageNet train set, each time \\(\\phi\\) is called, we randomly sample fraction \\(q\\) of the ImageNet train set to evaluate on." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.321, + 0.885, + 0.454 + ], + "angle": 0, + "content": "Adapting the CLIP loss function to our surrogate learning algorithm. 
Here, we explain how dataweights are incorporated into the CLIP loss function—the formulation given in Section 4.1 is actually slightly simplified and incorrect, as it does not account for cross terms in the CLIP contrastive loss. As a refresher, we first state the \"vanilla\" CLIP loss function, \\(\\ell\\), as it is defined in [RKH+21]. Letting \\(b\\) be the batch size and \\(d\\) be the embedding dimension, and \\(\\mathbf{x}\\) be the training batch at timestep \\(k\\). Recall that the CLIP model internally has two \"submodules\": and image embedder, and a text embedder. We then use these to obtain image embeddings \\(E_{I} \\in \\mathbb{R}^{b \\times d}\\) and text embeddings \\(E_{T} \\in \\mathbb{R}^{b \\times d}\\) from \\(\\mathbf{x}\\). We then compute the image-wise scores, or logits, for this batch as \\(S = E_{I}E_{T}^{\\top}^{3}\\). Then, we can define the CLIP loss (as a function of the logits) as" + }, + { + "type": "equation", + "bbox": [ + 0.401, + 0.454, + 0.597, + 0.483 + ], + "angle": 0, + "content": "\\[\nL (S) = \\frac {1}{2} (L _ {I} (S) + L _ {T} (S)),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.488, + 0.714, + 0.505 + ], + "angle": 0, + "content": "where \\(L_{I}\\) and \\(L_{T}\\) are row-wise and column-wise cross-entropy losses, respectively:" + }, + { + "type": "equation", + "bbox": [ + 0.237, + 0.514, + 0.758, + 0.557 + ], + "angle": 0, + "content": "\\[\nL _ {I} (S) = \\sum_ {i = 1} ^ {b} \\log \\left(\\frac {\\exp (S _ {i , i})}{\\sum_ {j = 1} ^ {b} \\exp (S _ {i , j})}\\right), \\quad L _ {T} (S) = \\sum_ {i = 1} ^ {b} \\log \\left(\\frac {\\exp (S _ {i , i})}{\\sum_ {j = 1} ^ {b} \\exp (S _ {j , i})}\\right).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.569, + 0.883, + 0.616 + ], + "angle": 0, + "content": "We now wish to relax \\(L\\) into a new function \\(L'\\) that supports an additional input \\(\\mathbf{z} \\in \\mathbb{R}^n\\), where \\(\\frac{\\partial L'}{\\partial \\mathbf{z}}\\) resembles the 
metagradients with respect to dataweights. In order to do this, we imagine expanding passing the entire dataset \\(D\\) into our embedder to obtain \\(E_I'\\) and \\(E_{T'}'\\) and take our new logits \\(S' = E_I'E_T'^{\\top} \\in \\mathbb{R}^{n \\times n}\\)." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.616, + 0.884, + 0.675 + ], + "angle": 0, + "content": "There are some additional key conditions our relaxation \\( L' \\) should satisfy. Particularly: when \\( \\mathbf{z} = \\mathbf{0}_n \\), we should recover the normal CLIP loss \\( L \\), and when \\( \\mathbf{z} \\) is all 0's except for a single entry \\( i \\), \\( L' \\) should act as if \\( i \\) had been appended to the original batch \\( \\mathbf{x} \\). In addition, \\( L' \\) should always have meaningful partials with respect to \\( \\mathbf{z} \\), even when some values in \\( \\mathbf{z} \\) are 0." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.676, + 0.884, + 0.706 + ], + "angle": 0, + "content": "Letting \\(\\mathbf{1}_{i = j}\\) and \\(\\mathbf{1}_{i\\neq j}\\) be indicator variables and letting \\(\\mathbf{1}_k\\in \\{0,1\\} ^n\\) be the indicator vector for the \\(k\\) -th batch, we find that the definition" + }, + { + "type": "equation", + "bbox": [ + 0.382, + 0.706, + 0.613, + 0.723 + ], + "angle": 0, + "content": "\\[\nL ^ {\\prime} \\left(S ^ {\\prime}, \\mathbf {z}\\right) = L _ {I} ^ {\\prime} \\left(S ^ {\\prime}, \\mathbf {z}\\right) + L _ {T} ^ {\\prime} \\left(S ^ {\\prime}, \\mathbf {z}\\right),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.73, + 0.163, + 0.743 + ], + "angle": 0, + "content": "where" + }, + { + "type": "equation", + "bbox": [ + 0.239, + 0.741, + 0.753, + 0.782 + ], + "angle": 0, + "content": "\\[\nL _ {I} ^ {\\prime} \\left(S ^ {\\prime}, \\mathbf {z}\\right) = \\sum_ {i = 1} ^ {n} \\left(z _ {i} + \\left(\\mathbf {1} _ {k}\\right) _ {i}\\right) \\log \\left(\\frac {\\exp \\left(S _ {i , i} ^ {\\prime}\\right)}{\\sum_ {j = 1} ^ {n} \\exp 
\\left(S _ {i , j} ^ {\\prime}\\right) \\left(\\mathbf {1} _ {i = j} + \\mathbf {1} _ {i \\neq j} \\left(z _ {j} + \\left(\\mathbf {1} _ {k}\\right) _ {j}\\right)\\right)}\\right)\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.788, + 0.147, + 0.801 + ], + "angle": 0, + "content": "and" + }, + { + "type": "equation", + "bbox": [ + 0.238, + 0.798, + 0.754, + 0.84 + ], + "angle": 0, + "content": "\\[\nL _ {T} ^ {\\prime} (S ^ {\\prime}, \\mathbf {z}) = \\sum_ {i = 1} ^ {b} \\left(z _ {i} + \\left(\\mathbf {1} _ {k}\\right) _ {i}\\right) \\log \\left(\\frac {\\exp \\left(S _ {i , i} ^ {\\prime}\\right)}{\\sum_ {j = 1} ^ {n} \\exp \\left(S _ {j , i} ^ {\\prime}\\right) \\left(\\mathbf {1} _ {i = j} + \\mathbf {1} _ {i \\neq j} \\left(z _ {j} + \\left(\\mathbf {1} _ {k}\\right) _ {j}\\right)\\right)}\\right)\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.845, + 0.291, + 0.86 + ], + "angle": 0, + "content": "satisfy these conditions." + }, + { + "type": "page_footnote", + "bbox": [ + 0.112, + 0.87, + 0.884, + 0.897 + ], + "angle": 0, + "content": "3The CLIP model scales these logits by a temperature parameter \\(\\tau\\) before applying the softmax. While we omit \\(\\tau\\) in our definitions, it can be easily incorporated. All our experiments use temperature scaling." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "30" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.091, + 0.887, + 0.14 + ], + "angle": 0, + "content": "Finally, we let define the loss for the entire batch \\(\\ell'\\) as a function of \\(\\mathbf{z}\\) and model parameters \\(\\theta\\) which outputs the loss calculated according to \\(L'\\) above. 
To summarize, letting \\(\\mathbf{x}^{(t)}\\) denote the \\(t\\)-th training batch, the loss function \\(\\ell_t\\) at step \\(t\\) of our surrogate learning algorithm \\(\\mathcal{A}'\\) for CLIP training is:" + }, + { + "type": "equation", + "bbox": [ + 0.391, + 0.148, + 0.607, + 0.189 + ], + "angle": 0, + "content": "\\[\n\\ell_ {t} ^ {\\prime} (\\theta) := \\left\\{ \\begin{array}{l l} \\ell (\\mathbf {x} ^ {(t)}; \\theta) & \\text {i f} t \\neq k \\\\ \\ell^ {\\prime} (\\mathbf {z}; \\theta) & \\text {i f} t = k. \\end{array} \\right.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.195, + 0.89, + 0.227 + ], + "angle": 0, + "content": "We find that this empirically works well for obtaining meaningful metagradients with respect to dataweights in the CLIP setting, and yields to strong dataset selection results." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.244, + 0.513, + 0.264 + ], + "angle": 0, + "content": "C.2 Scaling MGD for CLIP and DataComp" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.269, + 0.885, + 0.348 + ], + "angle": 0, + "content": "MGD is highly scalable, allowing it to be applied to large-scale settings like training CLIP models. In particular, computing metagratings is only up to a constant factor more expensive than training a model normally. Here, we outline challenges we faced in scaling MGD in this setting, and how they were resolved. Specifically, we will explain how we efficiently calculated metagratings for CLIP models and efficiently tracked/shuffled our dataset selection from step-to-step despite its large storage footprint." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.364, + 0.885, + 0.472 + ], + "angle": 0, + "content": "Computing metagradient. Due to the large batch size used in the CLIP contrastive loss, we implement manual gradient checkpointing to make the operations computationally feasible on our hardware. 
The most memory-intensive operations are model forward passes (and their gradients): obtaining the image and label embeddings given raw pixel data and tokens. So, we manually make gradient checkpoints before this operation, allowing us to run the embedder in minibatches to avoid memory issues. This setup also naturally lends itself to parallelization across multiple GPUs, which we make use of to further speed up our computations.
To combat this blowup, after some number of optimization steps, we choose a fixed shard size and enforce that subsequent steps must not change the size of the shards—the step size thereafter is controlled by hyperparameter \\( q \\) representing the fraction of datapoints in a shard which are incremented. We experimented both with randomly sampling which points are added or removed, and stepping on the datapoints with the top \\( q \\) and bottom \\( q \\) metagradient; the latter seems to give empirically better performance." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.762, + 0.882, + 0.837 + ], + "angle": 0, + "content": "To maintain randomness during shuffling, we implement an 8-way dataloader which would shuffle all 8 shards individually. Then, to sample a batch of \\( b \\) datapoints, we would sample \\( b / 8 \\) datapoints from each shard and concatenate them to fill our batch. This works better than simply sampling our entire batch from a single shard, as (especially in later optimization steps) shards may contain a high number of duplicate datapoints, which causes CLIP's contrastive loss function to misbehave if they appear in the same batch." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.837, + 0.884, + 0.913 + ], + "angle": 0, + "content": "To minimize disk space used, old shards can be deleted once they become \"stale\". Specifically, if shard s is rewritten into shard \\(s'\\), all future optimization steps will never read s again, and s can safely be deleted. Thus, when running MGD for a large number of steps and potentially rewriting each shard multiple times, the total disk space used by our algorithm is constant in the number of steps we take: it stores the 8 most recently written shards on disk at any given time, and any other shards are deleted to save space." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "31" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.113, + 0.09, + 0.593, + 0.109 + ], + "angle": 0, + "content": "C.3 Details Pertaining to the DataComp Benchmark" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.115, + 0.884, + 0.206 + ], + "angle": 0, + "content": "Setting. We provide a brief summary of the DataComp competition here, and we refer readers to the original paper [GIF+24]. DataComp is a framework to compare different training dataset selection techniques. Participants submit a training dataset (which, for our purposes, is a subset of a larger dataset), upon which a CLIP model is trained from scratch with a fixed learning algorithm, model architecture, and number of training steps. We focus on DataComp-small, which has a candidate pool of 12.8 million samples. The number of training steps in this case is also fixed at 12.8 million samples." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.207, + 0.884, + 0.237 + ], + "angle": 0, + "content": "We try to match the optimization hyperparameters enforced by DataComp as closely as possible. As a refresher, our ADAM[KB15] update step can be written as" + }, + { + "type": "equation", + "bbox": [ + 0.343, + 0.248, + 0.884, + 0.268 + ], + "angle": 0, + "content": "\\[\n\\theta_ {t + 1} = - \\alpha_ {t} \\cdot \\left(m _ {t} / \\left(\\sqrt {v _ {t} + \\varepsilon_ {\\mathrm {r o o t}}} + \\varepsilon\\right) + \\lambda \\theta_ {t}\\right) \\tag {12}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.277, + 0.885, + 0.352 + ], + "angle": 0, + "content": "where \\( m_{t} \\) and \\( v_{t} \\) are running estimates of the first and second moments of the gradients, respectively, \\( \\lambda \\) represents weight decay, \\( \\alpha \\) represents the learning rate, and \\( \\varepsilon \\) and \\( \\varepsilon_{\\mathrm{root}} \\) are hyperparameters to avoid blowup. 
Our training hyperparameters can be found in Table 1 and are identical to those mandated by DataComp-small, aside from a positive \\( \\varepsilon_{\\mathrm{root}} \\) added for numerical stability. The values of \\( \\varepsilon_{\\mathrm{root}} \\) and \\( k \\) (the step at which metagradients are calculated) were chosen to empirically maximize metasmoothness." + }, + { + "type": "table_caption", + "bbox": [ + 0.266, + 0.366, + 0.731, + 0.383 + ], + "angle": 0, + "content": "Table 1: Hyperparameters for the CLIP DataComp experiments." + }, + { + "type": "table", + "bbox": [ + 0.356, + 0.392, + 0.637, + 0.624 + ], + "angle": 0, + "content": "
HyperparameterValue
DataComp Scalesmall
ModelViT-B/32
Train compute (MACs)9.5 × 1016
Pool size12.8M
# samples seen12.8M
Batch size4096
Training batches3125
k2800
Learning rate5 × 10-4
AdamW β10.9
AdamW β20.98
AdamW εroot1 × 10-17
Warmup500
" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.638, + 0.884, + 0.744 + ], + "angle": 0, + "content": "Our experiments are also run on an incomplete subset of the entire DataComp candidate pool. DataComp did not store the raw image and text files when assembling their dataset; they only stored a list of URL's to download data from. Due to the nature of the internet, for various reasons, some of these URL's no longer point to the same data (or no longer point to any data at all). Thus, after ignoring these broken links, our candidate pool is only around \\(80\\%\\) of the size of the original DataComp candidate pool when it was collected in 2023. All our results are obtained by running our methods on this subset of the DataComp pool." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.763, + 0.884, + 0.868 + ], + "angle": 0, + "content": "Evaluation tasks. In order to ensure that our method is truly improving trained models' performances on the entire target distribution and not overfitting to the target set, for each of the 38 evaluation tasks used by DataComp, we attempted to separately create a disjoint target and validation set (DataComp only creates test sets for each task). Thus, metagradients were computed on the target sets and model performance was evaluated on the validation set, before submitting with the official DataComp script and evaluating on the test sets. This ensures that our method's generalization ability is being evaluated, and we are not overfitting to our target set." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.869, + 0.883, + 0.898 + ], + "angle": 0, + "content": "For various reasons, creating target splits was not possible for all 38 tasks; we summarize our setup in Table 2." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "32" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.113, + 0.089, + 0.885, + 0.121 + ], + "angle": 0, + "content": "Table 2: All DataComp evaluation tasks. The \"Target set\" column refers to whether metagradients were taken on the target set corresponding to this dataset." + }, + { + "type": "table", + "bbox": [ + 0.115, + 0.133, + 0.884, + 0.543 + ], + "angle": 0, + "content": "
DatasetTaskTest sizeTrain sizeVal sizeMain metricTarget set
Caltech-101 [FFP04]Object recognition60852754306mean per class
CIFAR-10 [Kri09]Visual recognition10000450005000accuracy
CIFAR-100 [Kri09]Visual recognition10000450005000accuracy
CLEVR Counts [JHV+17; ZPK+19]Counting15000650005000accuracy
CLEVR Distance [JHV+17; ZPK+19]Distance prediction15000650005000accuracy
Country211 [RKH+21; TSF+16]Geolocation21100379804220accuracy
DTD [CMK+14]Texture classification18803384376accuracy
EuroSAT [HBD+19; ZPK+19]Satellite imagery recognition5400194402160accuracy
FGVC Aircraft [MRK+13]Aircraft recognition33336001666mean per class
Food-101 [BGV14]Food recognition25250707505000accuracy
GTSRB [SSS+11]Traffic sign recognition12630352893920accuracy
ImageNet 1k [DDS+09]Visual recognition5000012761675000accuracy
ImageNet Sketch [WGX+19]Visual recognition50889N/AN/Aaccuracy*
ImageNet V2 [RKS+19]Visual recognition10000N/AN/Aaccuracy*
ImageNet-A [HZB+19]Visual recognition7500N/AN/Aaccuracy*
ImageNet-O [HZB+19]Visual recognition2000N/AN/Aaccuracy*
ImageNet-R [HBM+20]Visual recognition30000N/AN/Aaccuracy*
KITTI distance [GLU12; ZPK+19]Distance prediction711N/AN/Aaccuracy
MNIST [LeC98]Digit recognition10000550005000accuracy
ObjectNet [BMA+19]Visual recognition18574N/AN/Aaccuracy*
Oxford Flowers-102 [NZ08]Flower recognition61491836204mean per class
Oxford-IIIT Pet [PVZ+12; ZPK+19]Pet classification36693312368mean per class
Pascal VOC 2007 [EVW+10]Object recognition14976140961566accuracy
PatchCamelyon [VLW+18; ZPK+19]Metastatic tissue cls.327682899125000accuracy
Rendered SST2 [ZPK+19]Sentiment classification18217013779accuracy
RESISC45 [CHL17; ZPK+19]Satellite imagery recognition6300226802520accuracy
Stanford Cars [KSD+13]Vehicle recognition80417329814accuracy
STL-10 [CNL11]Visual recognition80004500500accuracy
SUN-397 [XHE+10]Scene recognition108753N/AN/Aaccuracy
SVHN [NWC+11; ZPK+19]Digit recognition26032682575000accuracy
iWildCam [BAC+21; KSM+20]Animal recognition427911470845000macro F1 score
Camelyon17 [BGM+18; KSM+20]Metastatic tissue cls.850543659005000accuracy
FMoW [CFW+18; KSM+20]Satellite imagery recognition221081032615000worst-region acc.
Dollar Street [RDK+22]Object recognition3503138421537worst-income top-5 acc.
GeoDE [RLZ+24]Object recognition12438444884943worst-region acc.
Flickr30k [YLH+14]Image and text retrieval31014N/AN/AR@1§
MSCOCO [LMB+14]Image and text retrieval5000N/AN/AR@1§
WinoGAViL [BBY+22]Commonsense association3563N/AN/AJaccard score§
" + }, + { + "type": "page_footnote", + "bbox": [ + 0.131, + 0.862, + 0.692, + 0.875 + ], + "angle": 0, + "content": "*No train or val set exists for this dataset, so we were unable to create disjoint target and val sets." + }, + { + "type": "page_footnote", + "bbox": [ + 0.133, + 0.875, + 0.503, + 0.885 + ], + "angle": 0, + "content": "We were unable to use this dataset due to technical difficulties." + }, + { + "type": "page_footnote", + "bbox": [ + 0.133, + 0.887, + 0.882, + 0.9 + ], + "angle": 0, + "content": "Both the train and val sets were used by DataComp to make their test set, so we were unable to create disjoint target and val sets." + }, + { + "type": "page_footnote", + "bbox": [ + 0.133, + 0.9, + 0.42, + 0.912 + ], + "angle": 0, + "content": "\\( {}^{S} \\) Retrieval tasks were not used for metagradients." + }, + { + "type": "list", + "bbox": [ + 0.131, + 0.862, + 0.882, + 0.912 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "33" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.114, + 0.088, + 0.357, + 0.11 + ], + "angle": 0, + "content": "D Selecting IFT data" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.12, + 0.884, + 0.152 + ], + "angle": 0, + "content": "In this section, we describe the details of the IFT setting of Xia et al. [XMG+24], as well as the details of our method." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.169, + 0.885, + 0.248 + ], + "angle": 0, + "content": "Setting. The setting contains a fixed data pool: instruction fine-tuning data from a data pool consisting of four combined IFT datasets (cf. Table 4 and Xia et al. [XMG+24] for more information). The goal is to select the data that yields the best possible task performance for a LoRA fine-tuning run. We adapt a LoRA to a Gemma-2B model (the pretraining-only Gemma-2B model) using the LoRA configuration from Xia et al. [XMG+24]." 
+ }, + { + "type": "text", + "bbox": [ + 0.111, + 0.265, + 0.884, + 0.328 + ], + "angle": 0, + "content": "Data splits. See Table 3 for a description of the available data for each task, along with the task setup details. Xia et al. [XMG+24] constructed these extra samples by drawing from the ICL samples given in the tasks originally. Note that we drop TydiQA from the original work of Xia et al. [XMG+24] as there are not enough samples to select with (there is only one from each category, for a total of 7)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.344, + 0.884, + 0.465 + ], + "angle": 0, + "content": "Method. We execute Algorithm 1 with \\( k \\) as 150 steps from the end of training and the Bernoulli parameter \\( p \\) controlling the step size as 0.2. At each step, we choose a \"minibatch\" with a size equal to half the target set and a quarter of the target set for BBH and MMLU, respectively (that is, we only select to optimize performance on a fraction of the target set at a time). We model select over iterates and hyperparameters by (a) choosing the top three steps in terms of validation loss for each run (b) selecting the best one in terms of full train set accuracy (including the part that we trained on). We perform this procedure—akin to Pareto optimization [JS08]—because the validation set is so small (as the overall set of samples is very small) that it is difficult to select models without overfitting otherwise." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.465, + 0.884, + 0.51 + ], + "angle": 0, + "content": "We compare with two baselines: training on the full dataset (i.e., training on the entirety of all the data for a single epoch), and LESS (we use the data selected according to \"LESS-T\" [XMG+24], following the recommendation of 4 epochs)." 
+ }, + { + "type": "text", + "bbox": [ + 0.111, + 0.51, + 0.885, + 0.678 + ], + "angle": 0, + "content": "For model training, we train with ADAM (\\(\\beta_{1} = 0.95\\), \\(\\beta_{2} = 0.975\\), decoupled weight decay as \\(10^{-5}\\)) and a one-cycle linear schedule starting at \\(10^{-6}\\) of the maximum learning rate, reaching the peak over \\(25\\%\\) of training, then ending at 0.1 of the maximum learning rate. We insert a positive \\(\\varepsilon_{\\mathrm{root}}\\) into the inverse square root term in the ADAM update to prevent metagradient (and to a lesser extent update) blowup (see Eq. 12). The model training is the same across selected data, except that we use \\(\\varepsilon_{\\mathrm{root}} = 10^{-7}\\) for MGD-selected data and \\(\\varepsilon_{\\mathrm{root}} = 10^{-9}\\) for the other runs (we select the optimal parameter for each class of method). We additionally hyperparameter select for the best learning rate across each baseline by minimizing validation set loss; LESS performs best with a smaller learning rate (0.00024 for BBH and 0.00012 for MMLU) than training on the full dataset or with MGD (0.0006 for both). We normalize the loss of each training sample by taking the mean across predicted tokens during training, and do not divide by the batch size (important for scaling the \\(\\varepsilon_{\\mathrm{root}}\\) term, but otherwise ADAM is invariant to the scale)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.695, + 0.884, + 0.773 + ], + "angle": 0, + "content": "Selecting smooth model training for MGD. For MGD runs, we jointly select learning rate and \\(\\varepsilon_{\\mathrm{root}}\\) using the smoothness metric of Section 3. We find that the choice of \\(\\varepsilon_{\\mathrm{root}}\\) term is important (just as the choice of \\(\\varepsilon\\) is important in standard ADAM training); choosing a much larger term results in non-smooth training. 
We also find that metagradients are sensitive to learning rate schedule; choosing a much larger or smaller maximum learning rate results in non-smooth training." + }, + { + "type": "table_caption", + "bbox": [ + 0.185, + 0.785, + 0.812, + 0.802 + ], + "angle": 0, + "content": "Table 3: Overview of datasets used in IFT dataset selection (from Xia et al. [XMG+24])." + }, + { + "type": "table", + "bbox": [ + 0.155, + 0.812, + 0.846, + 0.878 + ], + "angle": 0, + "content": "
Dataset# Shot# Tasksn_targetn_valn_testAnswer TypeType of Task
MMLU5575722818,721Letter optionsKnowledge/Recall
BBH3232346920COT and answerReasoning
" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.51, + 0.948 + ], + "angle": 0, + "content": "34" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.354, + 0.09, + 0.645, + 0.107 + ], + "angle": 0, + "content": "Table 4: Details of IFT training datasets." + }, + { + "type": "table", + "bbox": [ + 0.115, + 0.116, + 0.952, + 0.212 + ], + "angle": 0, + "content": "
Dataset# InstanceSourced fromPrompt Len.Completion Len.
FLAN V2100,000NLP datasets and human-written instructions355.731.2
CoT100,000NLP datasets and human-written CoTs26653.2
Dolly15,011Human-written from scratch118.191.3
Open Assistant 155,668Human-written from scratch34.8212.5
" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.235, + 0.2, + 0.249 + ], + "angle": 0, + "content": "IFT results" + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.26, + 0.885, + 0.322 + ], + "angle": 0, + "content": "Figure 13: MGD dataset selection improves the validation loss over metagradient steps, demonstrating our method's efficacy. However, the gap between loss on samples MGD directly optimizes on and the validation samples widens over the number of iterates, and there is overfitting depending on the number of steps taken." + }, + { + "type": "image", + "bbox": [ + 0.14, + 0.333, + 0.492, + 0.521 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.502, + 0.33, + 0.856, + 0.52 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.935, + 0.51, + 0.948 + ], + "angle": 0, + "content": "35" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.114, + 0.089, + 0.539, + 0.111 + ], + "angle": 0, + "content": "E Accuracy-degrading data poisoning" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.121, + 0.547, + 0.141 + ], + "angle": 0, + "content": "E.1 Background on Gradient Cancelling attack" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.147, + 0.884, + 0.177 + ], + "angle": 0, + "content": "We briefly review the Gradient Cancelling attack [LKY23] used as a baseline in our experiments. We refer the reader to the original paper for details. Here we highlight the key ideas." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.178, + 0.884, + 0.21 + ], + "angle": 0, + "content": "At a high level: Gradient Cancelling (GC) explicitly aims at making a specific malicious parameter configuration reachable through retraining on the poisoned dataset. The attack operates in two phases:" + }, + { + "type": "text", + "bbox": [ + 0.133, + 0.22, + 0.884, + 0.282 + ], + "angle": 0, + "content": "1. 
Parameter Generation: The attacker generates a target malicious model parameter independently, often using a direct parameter corruption method like Gradient-based Parameter Corruption (GradPC) [LKY23]. The end result of this phase is a target model parameter \\(\\theta_{p}\\) that achieves low accuracy on the test set, but is close to the original parameter \\(\\theta_0\\) derived from training on the clean dataset." + }, + { + "type": "text", + "bbox": [ + 0.132, + 0.29, + 0.885, + 0.368 + ], + "angle": 0, + "content": "2. Poison Data Crafting: In the second phase, GC finds values of the poison data that induce a near-zero gradient at the target parameter \\(\\theta_{p}\\). This is achieved by solving a gradient cancellation optimization problem: specifically, GC minimizes the total gradient of the loss function (with respect to the model parameters) evaluated over the combined (clean and poisoned) dataset, aiming to ensure that the gradient at the malicious parameter \\(\\theta_{p}\\) approaches zero." + }, + { + "type": "list", + "bbox": [ + 0.132, + 0.22, + 0.885, + 0.368 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.386, + 0.434, + 0.406 + ], + "angle": 0, + "content": "E.2 Metasmooth hyperparameters" + }, + { + "type": "table_caption", + "bbox": [ + 0.112, + 0.426, + 0.884, + 0.459 + ], + "angle": 0, + "content": "Table 5: Hyperparameters used in the ResNet-9 [Jor24] CIFAR-10 poisoning experiments. The augmentations used are normalization, random horizontal flip, and random translate (2 pixels)" + }, + { + "type": "table", + "bbox": [ + 0.309, + 0.469, + 0.688, + 0.821 + ], + "angle": 0, + "content": "
HyperparameterValue
Learning rate0.5
β10.85
Weight decay10-5
Exclude BatchNormTrue
OptimizerSGD
Batch size250
Epochs18
Starting learning rate fraction0.5
Relative min. learning rate10000
Scheduler max. iterations50000
Nesterov momentumTrue
BatchNorm ε10-5
BatchNorm momentum0.5
Final biasTrue
Width multiplier2.0
Final scale0.125
Initial scale2.0
Batchnorm locationBefore activation
Activation functionGELU
Pooling typeAverage
Test-time augmentationTrue
" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.51, + 0.949 + ], + "angle": 0, + "content": "36" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.114, + 0.088, + 0.331, + 0.11 + ], + "angle": 0, + "content": "F LR optimization" + }, + { + "type": "table_caption", + "bbox": [ + 0.151, + 0.135, + 0.846, + 0.152 + ], + "angle": 0, + "content": "Table 6: The grid search was run over all 528 combinations of the hyperparameter values below." + }, + { + "type": "table", + "bbox": [ + 0.226, + 0.162, + 0.77, + 0.257 + ], + "angle": 0, + "content": "
ParameterValues
Peak learning rate[7.0, 7.5, 8.0, 8.5, 9.0, 9.5, 10.0, 10.5, 11.0, 11.5, 12.0]
Initial LR multiplier[0.05, 0.15, 0.25, 0.35, 0.45, 0.55]
Final LR multiplier[0.05, 0.15, 0.25, 0.35, 0.45, 0.55]
LR peak time[0.25, 0.5, 0.75]
" + }, + { + "type": "image_caption", + "bbox": [ + 0.449, + 0.286, + 0.529, + 0.303 + ], + "angle": 0, + "content": "MGD step" + }, + { + "type": "image", + "bbox": [ + 0.241, + 0.31, + 0.761, + 0.543 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.326, + 0.557, + 0.672, + 0.574 + ], + "angle": 0, + "content": "Figure 14: Graphs of our learned LR schedules." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.51, + 0.948 + ], + "angle": 0, + "content": "37" + } + ] +] \ No newline at end of file diff --git a/data/2025/2503_13xxx/2503.13751/2a994aa1-2c31-48af-8c61-ad007e40c304_origin.pdf b/data/2025/2503_13xxx/2503.13751/2a994aa1-2c31-48af-8c61-ad007e40c304_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..fb8f0bf30fa7363c16eff74f8b86a61725224480 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/2a994aa1-2c31-48af-8c61-ad007e40c304_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ced0295a498f02ceb38022ab991e5d34fec0623dd2073b0a0c4a278d766807e4 +size 758040 diff --git a/data/2025/2503_13xxx/2503.13751/full.md b/data/2025/2503_13xxx/2503.13751/full.md new file mode 100644 index 0000000000000000000000000000000000000000..81d1a1c89d7a56c8ab3e55c1360f70382806c4b0 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/full.md @@ -0,0 +1,846 @@ +# Optimizing ML Training with Metagradient Descent + +Logan Engstrom\*1, Andrew Ilyas\*2†, Benjamin Chen\*1, Axel Feldmann\*1, William Moses\*3, Aleksander Madry\*1 + +*Equal contribution ${}^{1}$ MIT, ${}^{2}$ Stanford, ${}^{3}$ UIUC + +# Abstract + +A major challenge in training large-scale machine learning models is configuring the training process to maximize model performance, i.e., finding the best training setup from a vast design space. In this work, we unlock a gradient-based approach to this problem. 
We first introduce an algorithm for efficiently calculating metagradients—gradients through model training—at scale. We then introduce a "smooth model training" framework that enables effective optimization using metagradients.
From this perspective, any procedure for selecting metaparameters—including the typical practice of grid-searching over standard options—is just an optimization algorithm, whose goal is to maximize the objective function with respect to the (high-dimensional) input. + +Given that selecting metaparameters is "just" a high-dimensional optimization problem, a natural tool to consider is the gradient. After all, in many contexts, gradients offer a more effective approach to maximizing high-dimensional functions than grid search. Indeed, for a sufficiently "well-behaved" function $f(x)$ with gradient $\nabla f(x)$ , we can optimize $f$ by iteratively updating $x$ in the direction of $\nabla f(x)$ . This insight suggests a generic recipe for selecting metaparameters: first, make the objective differentiable with respect to the metaparameters; second, update via gradient steps. + +Now, the idea of using gradients to search for metaparameters is not new. Indeed, there is a substantial line of work that aims to optimize metaparameters (e.g., architectures, regularizers, or data augmentation schemes) with gradient-based methods [MDA15; LSY18; LVD20]. However, such methods have not managed to scale beyond relatively small settings. This state of affairs prompts our main question: + +Can we scalably configure model training using gradient-based methods? + +![](images/4f160909052d60c475611d729d8627a48f2ec03ab554e430d02e6ecf55122fc3.jpg) +Figure 1: Our proto-algorithm, metagradient descent (MGD), uses gradients to achieve state-of-the-art performance across a variety of applications, including data selection and data poisoning. + +![](images/ae72b147ec83b8978675affb3f8e2678e0176541836b311165c6969778223f46.jpg) + +![](images/25d0520397f487be3b99371e5ad5a60cf7c24eb09b3e0e6bcccb837ff7007b8c.jpg) + +# 1.1 Contributions + +In this work, we answer this question in the affirmative, adding "gradient descent on metaparameters" to the large-scale machine learning toolkit. 
Along the way, we will face—and address—two main challenges. +

First, existing methods for computing metagradients do not scale. In response, we devise an algorithm, REPLAY, that can take metagradients in large-scale settings. By combining reverse-mode autodifferentiation (AD) with an efficient data structure, REPLAY can calculate exact metagradients for models with billions of parameters and thousands of training steps. +

Second, we find that metagradients of standard training routines are not necessarily helpful for optimization, which we connect to non-smoothness of the metaparameter optimization landscape. Borrowing tools from convex optimization, we devise a framework for designing "metasmooth" training routines that do admit helpful metagradients. +

Addressing the challenges above unlocks a simple recipe for solving a broad range of machine learning tasks: (a) frame the task as a continuous optimization problem over metaparameters; (b) design a metasmooth training routine; (c) perform metagradient descent (MGD). Applying this recipe: +

- In the DataComp-small competition [GIF+24], we achieve state-of-the-art pre-training data selection for CLIP (2x larger performance improvement than the previous DataComp-small leader [Eco24]);
- In the context of data selection for instruction tuning (as introduced by Xia et al. [XMG+24]), we substantially improve on data selection for Gemma-2B (outperforming existing selection methods as well as full-data training);
- In the accuracy-degrading data poisoning setting (defined by Huber [Hub64] and pioneered by Lu et al. [LKY22] for deep neural networks), we improve attacks on DNNs by an order of magnitude, dropping CIFAR-10 accuracy from $92\% \rightarrow 78\%$ (the best previous attack [LKY23] only reduces accuracy to $91\%$ );
- For the task of hyperparameter optimization, we efficiently find a competitive CIFAR-10 learning rate schedule (matching the performance of a schedule found by grid search). 
+ +# 2 Scalably computing metagradients + +In this section we present REPLAY, an algorithm for computing metagradients of large-scale iterative ML algorithms. We first detail the setting, then discuss existing approaches to computing metagradients, and conclude by describing REPLAY. + +![](images/1dc990e467236d18f25ade5d515382e45fc4338efb7565274a7fd059602d5385.jpg) +Training setup +Trained model +Observed behavior +Figure 2: An illustration of the metagradient. We embed a given aspect of the training setup (e.g., the training dataset, or optimizer hyperparameters) into a continuous metaparameter vector $z \in \mathbb{R}^d$ . This metaparameter defines a model $\mathcal{A}(z)$ by way of the learning algorithm $\mathcal{A}$ , which in turn defines an output $\phi(z)$ . The metagradient $\nabla_z \phi(\mathcal{A}(z)) \in \mathbb{R}^d$ is the gradient of this model output with respect to the metaparameter. + +# 2.1 What is a metagradient? + +Training a machine learning model is a two-step process. First, we decide on a training setup—we must pick, for example, a neural network architecture, a training dataset, and an optimizer for training. Second, we apply the algorithm defined by this training setup to train a model. + +Our overall goal in this paper is to optimize model behavior as a function of the training setup (or, as we call it, the metaparameters) using gradient-based methods. To this end, we define the following notation: + +- Let $\mathbf{z} \in \mathbb{R}^n$ be a vector of continuous metaparameters representing the aspects of the training setup we aim to optimize. For example, if we only want to adjust the learning rate and weight decay of SGD then $n = 2$ . We handle discrete metaparameters (e.g., choice of training data) by finding a continuous relaxation (e.g., importance weights). 
+- Let $\mathcal{A}$ be an algorithm mapping $\mathbf{z}$ to a trained machine learning model; we assume all other aspects of the training setup outside $\mathbf{z}$ are fixed and thus part of the algorithm $\mathcal{A}$ . +- Finally, let $\phi$ be an output function mapping a model $\theta$ to a vector $\phi(\theta) \in \mathbb{R}$ . For example, $\phi(\theta)$ might represent the validation loss of the model $\theta$ . We require that $\phi$ be differentiable with respect to $\theta$ , but otherwise make no assumptions on $\phi$ . + +With this notation in place, we define the training function $f \coloneqq \phi \circ \mathcal{A}$ mapping the training setup $\mathbf{z}$ directly to the output function $\phi$ evaluated on the corresponding model. + +Finally, the metagradient is the gradient of the training function with respect to the metaparameters, $\nabla_{\mathbf{z}}f(\mathbf{z})$ . Intuitively, the metagradient defines the "direction of steepest ascent" in metaparameter space. + +Our focus: iterative algorithms. To efficiently compute the metagradient, we restrict our focus to cases where the algorithm $\mathcal{A}$ is iterative, i.e., when it can be written in the form + +$$ +\underbrace {\mathcal {A} (z) := \mathbf {s} _ {T}} _ {\text {m o d e l s t a t e a f t e r T s t e p s}}, \quad \text {w h e r e} \quad \underbrace {\mathbf {s} _ {t + 1} : = h _ {t} (\mathbf {s} _ {t} , \mathbf {z})} _ {\text {o p t i m i z e r s t e p t}}. \tag {1} +$$ + +Here, $\mathbf{s}_t$ is the optimizer state at step $t$ (with $\mathbf{s}_0$ being the initial state) and $h_t$ is the update mapping from state $t$ to state $t + 1$ . The form of (1) captures most large-scale training algorithms. 
For example, if the setup $\mathbf{z} \in \mathbb{R}^T$ is a per-step learning rate, and the algorithm $\mathcal{A}$ is full batch gradient descent, then each update $h_t$ is + +$$ +h _ {t} (\mathbf {s} _ {t}, \mathbf {z}) := \mathbf {s} _ {t} - z _ {t} \nabla \ell (\mathbf {s} _ {t}), +$$ + +where $z_{t}$ is the learning rate at step $t$ , $\ell$ is the training loss, and the state $\mathbf{s}_t$ comprises the parameters at step $t$ . For more complex algorithms like Adam [KB15], the state $\mathbf{s}_t$ includes terms like gradient moments. + +# 2.2 Warmup: Metagradients via autodifferentiation + +A key primitive we leverage to calculate metagradients is automatic differentiation (AD)—a standard tool for taking gradients through computer-defined functions. AD takes gradients by decomposing functions into elementary operations with known derivatives, then combining these derivatives using the chain rule. Concretely, AD operates in two passes: a "forward pass," which executes the function of interest and stores intermediate products for each elementary operation; and a "backward pass," which calculates the gradient by propagating chains of partial derivatives using these stored products. For the purposes of this paper, we will view AD as a black box that calculates the gradient of a many-to-one function (i.e., any $f: \mathbb{R}^d \to \mathbb{R}$ ) at a given point using only a small constant factor more time than calculating the function itself (along with the space cost of storing the necessary forward-pass products). + +What does this have to do with metagradients? Well, seeing as how training itself is a computer-defined function, AD is a natural tool for calculating the metagradient. The main challenge, as we discuss in the sequel, is that AD-based approaches to calculating the metagradient tend to be too resource-intensive for the large-scale machine learning algorithms we consider. 
In the remainder of this section we build up background before finally describing REPLAY, our algorithm for scalably computing (exact) metagradients. +

Approach #1: Direct AD. The direct approach to calculating metagradients exploits the fact that nearly any learning algorithm is itself a sequence of differentiable computer-defined operations—meaning the training function $f$ is also differentiable. +

However, operationalizing this observation to compute metagradients turns out to be challenging. The reason is that AD stores intermediate products for each operation. The amount of data stored thus scales with the number of operations in the function of interest. In the case of our training function $f$ , this number encompasses all the operations used to train a machine learning model. As a result, even in a toy scenario like MNIST training, computing metagradients with naive AD would require storing terabytes of data. +

Approach #2: Exploiting structure with step-wise AD. A more efficient method for calculating the metagradient, step-wise AD, leverages the structure of iterative learning algorithms [Wer90; MDA15; FDF+17]. Recall from (1) that such algorithms take the form +

$$
\mathcal {A} (\mathbf {z}) := \mathbf {s} _ {T}, \quad \text {w h e r e} \quad \mathbf {s} _ {t + 1} := h _ {t} (\mathbf {s} _ {t}, \mathbf {z}).
$$

Algebraic manipulation (in particular, using the chain rule, the law of the total derivative, and the identity $\mathbf{s}_t = h_{t-1}(\mathbf{s}_{t-1}, \mathbf{z})$ ) allows us to write the metagradient over an iterative algorithm as +

$$
\frac {\partial f (\mathbf {z})}{\partial \mathbf {z}} = \frac {\partial \phi (\mathcal {A} (\mathbf {z}))}{\partial \mathbf {z}} = \sum_ {t = 1} ^ {T} \underbrace {\overbrace {\frac {\partial \phi (\mathbf {s} _ {T})}{\partial \mathbf {s} _ {t}}} ^ {A _ {t}} \cdot \frac {\partial h _ {t - 1} (\mathbf {s} _ {t - 1} , \mathbf {z})}{\partial \mathbf {z}}} _ {B _ {t}}, \tag {2}
$$

where we have introduced the notation $A_{t}$ and $B_{t}$ for notational convenience. Step-wise AD computes the metagradient by calculating each term in the sum of (2) one at a time. For each term, the main challenge lies in computing $A_{t}$ , since given $A_{t}$ we can straightforwardly compute $B_{t}$ (the entire term) by differentiating through a single model update, i.e., +

$$
B _ {t} := A _ {t} \cdot \frac {\partial h _ {t - 1} (\mathbf {s} _ {t - 1} , \mathbf {z})}{\partial \mathbf {z}} = \frac {\partial (A _ {t} \cdot h _ {t - 1} (\mathbf {s} _ {t - 1} , \mathbf {z}))}{\partial \mathbf {z}},
$$

which is just a single call to our assumed "AD oracle" on the function $\mathbf{z} \mapsto A_t \cdot h_{t-1}(\mathbf{s}_{t-1}, \mathbf{z})$ . Computing the $A_t$ terms is less straightforward as we need to relate $s_t$ and $s_T$ ; to do so, we exploit the recurrence +

$$
A _ {t} := \frac {\partial \phi (\mathbf {s} _ {T})}{\partial \mathbf {s} _ {t}} = \frac {\partial \phi (\mathbf {s} _ {T})}{\partial \mathbf {s} _ {t + 1}} \cdot \frac {\partial h _ {t} (\mathbf {s} _ {t} , \mathbf {z})}{\partial \mathbf {s} _ {t}} = \frac {\partial \left(A _ {t + 1} \cdot h _ {t} (\mathbf {s} _ {t} , \mathbf {z})\right)}{\partial \mathbf {s} _ {t}}, \tag {3}
$$

making $A_{t}$ straightforward to compute (again, a single "AD oracle" call) given $A_{t+1}$ . Step-wise AD exploits this fact to successively calculate the gradient with respect to each state, from state $T$ down to state 0. +

![](images/e61aefc6d8918a5c6e6ab0e0697fcbfc36486ad10f9f7a6216b3e5fefb7518f9.jpg)
Figure 3: The lazy $k$ -ary tree structure for traversing optimizer states in reverse order, with $k = 2$ . Recall that $n$ is the number of states (parameterized such that $n = T + 1$ ). Each node represents the correspondingly numbered state. 
We give an example of the traversal using the blue arrows in the figure, which denote the traversal path up to state $s_{\frac{3n}{4} + 1}$ . The gray cylinders indicate the states that are stored when the traversal is at state $s_{\frac{3n}{4} + 1}$ ; the other states are not stored at this point in the traversal. Traversing this structure requires storing $\mathcal{O}(\log(n))$ state and computing $\mathcal{O}(n \log(n))$ optimizer steps—compared to $n$ for simply training. +

Bringing these ingredients together, the algorithm executes as follows. As a preprocessing step, it trains the model and stores all intermediate states $\mathbf{s}_0,\dots ,\mathbf{s}_T$ . Then, the algorithm calculates and sums the terms in (2). It first computes $A_{T}\coloneqq \partial \phi (\mathbf{s}_{T}) / \partial \mathbf{s}_{T}$ , the gradient of the output function $\phi$ with respect to the final state. Then, the algorithm steps through $\mathbf{s}_{T - 1},\ldots ,\mathbf{s}_0$ in reverse order, calculating (a) the gradient with respect to each state $A_{t}$ (via (3)) and (b) the gradient with respect to $\mathbf{z}$ at that step $B_{t}$ (via (2), using the previously calculated gradient with respect to that state). AD calculates both quantities—each requires differentiating over only one train step. Finally, the algorithm returns the final metagradient as the sum of the terms. +

Despite improving storage overhead compared to "direct AD", step-wise AD is still too space-intensive at scale. After all, this algorithm saves every optimizer state. +

# 2.3 REPLAY +

REPLAY is our algorithm for efficiently and exactly computing metagradients. It uses $\mathcal{O}(k\log_k(T))$ space and requires running the learning algorithm $\mathcal{A}$ a total of $1 + \log_{k}(T)$ times, with $k$ a user-chosen constant. The main idea is to make the space-intensive subroutine of step-wise AD—a reverse-order traversal of the optimizer states at each step—much more efficient. 
After all, step-wise AD stores all the states to reverse traverse them. REPLAY modifies step-wise AD to traverse states in less space by exploiting a simple observation: when training is deterministic, one can reinstantiate an optimizer state $\mathbf{s}_t$ by "replaying" training from a fixed point $t' < t$ at the compute cost of $t - t'$ training steps. For example, one simple scheme saves every other state, then "replays" the remaining states when (reverse) traversing; this routine stores $T/2$ states but computes an extra $T/2$ model updates compared to storing all the states. +

REPLAY performs a reverse-order traversal of the optimizer states while balancing the compute cost of "replaying" training with the storage cost of saving states. We use a combination of deterministic training (fixing data ordering, data augmentation, and any other randomness in the training process) and an efficient data structure (similar to a segment tree; see Figure 3) to reverse-order traverse the optimizer states with $\mathcal{O}(k\log_k(T))$ space and an additional $T\log_k(T)$ model steps. +

Specifically, REPLAY recursively saves and replays training states. The algorithm splits the training trajectory into $k$ segments, performs the full training routine while saving only the start of each segment, then recurses into each segment (in reverse) to retrieve the states in reverse-order. The recursion depth bottoms out at $\log_k(T)$ , at which point the algorithm has $k$ consecutive optimizer states in memory; the algorithm then backpropagates along this segment, before deleting all these states from memory and then reinstantiating the next $k$ -length segment of optimizer states. We provide additional details on the algorithm in Appendix A.2. REPLAY unlocks computing large-scale metagradients by requiring only logarithmic storage and additional compute time. +

Remark 1 (Connection to rematerialization). 
In a broad sense, both REPLAY and step-wise AD above can be viewed as special cases of a classical approach in AD (and computing broadly) known as rematerialization [CAC+81; BCT92; ZP00; GW08; CXZ+16]. To our knowledge, however, REPLAY is the first application of this particular rematerialization technique to the problem of computing metagradients through model training. +

Remark 2 (Reversible learning). An alternative approach to calculating metagradients that does not save any state is reversible learning [MDA15], for which one can "invert" previous training states from future ones. We focus here on general (non-reversible) learning algorithms for two reasons: first, even simple algorithms such as SGD without momentum are non-reversible; second, reversibility in practice introduces numerical precision issues. +

# 3 Designing metasmooth training routines +

Given a training function $f$ , REPLAY enables us to compute metagradients $\nabla f(\mathbf{z})$ for any setup $\mathbf{z}$ . Can we immediately use these metagradients to optimize model training setups? The answer is (generally) no: we find that applying REPLAY to a function $f$ representing a standard model training and evaluation routine yields metagradients that are often $\pm \infty$ -valued and generally unhelpful for optimization. Indeed, previous work has observed similar issues optimizing over even (very) small-scale training [BSF94; Pea96; MDA15]. +

In this section, we show that an underlying source of the issue is the landscape of the metaparameter optimization problem. We then present a framework for modifying standard learning algorithms to admit useful metagradients, i.e., to be metasmooth. To use a familiar analogy: just as residual connections and improved initialization schemes can improve optimization in standard deep learning algorithms, our framework introduces an analogous set of modifications to enable optimization with metagradients. 
+ +# 3.1 The metaparameter optimization landscape + +We first review the notion of smoothness from optimization theory, and then adapt it to the setting of metagradients. The resulting metasmoothness metric allows us to quantify (and later, improve) the amenability of the metaparameter optimization problem to gradient-based methods. + +Smoothness. In optimization theory, the basic property of a function that controls how effectively it can be optimized with first-order methods is smoothness. Specifically, a function $f(\mathbf{z})$ is $\beta$ -smooth at a point $\mathbf{z}$ if its gradient $\nabla f$ satisfies the property that + +$$ +\left\| \nabla f (\mathbf {z}) - \nabla f \left(\mathbf {z} ^ {\prime}\right) \right\| \leq \beta \cdot \left\| \mathbf {z} - \mathbf {z} ^ {\prime} \right\| \quad \text {f o r a l l} \mathbf {z} ^ {\prime}, \tag {4} +$$ + +or in other words, if its gradient does not change too quickly around $\mathbf{z}$ . To motivate this definition: if a function $f$ is $\beta$ -smooth at $\mathbf{z}$ , then a step of gradient descent with step size $1 / \beta$ will successfully decrease the value of the function: + +$$ +f \left(\mathbf {z} - \frac {1}{\beta} \nabla f (\mathbf {z})\right) \leq f (\mathbf {z}) - \frac {1}{2 \beta} \| \nabla f (\mathbf {z}) \| ^ {2}. +$$ + +This guarantee makes $\beta$ -smoothness a good measure of gradient utility. + +Metasmoothness. There are two main challenges in adapting the smoothness property to the metagradient setting. First, evaluating (4) requires a search over all possible $\mathbf{z}'$ , which is infeasible. Second, even if we could exactly evaluate the left-hand side of (4), it would be difficult to disentangle non-smoothness of the training function $f$ from potential error in metagradient computation (e.g., a numerically unstable operation in REPLAY). + +To sidestep these issues, we propose a metric called metasmoothness, given in Definition 1. 
Metasmoothness is cheap to compute—requiring only three evaluations of the training function—and does not rely on metagradient computation. For the remainder of this section, we fix a small constant $h > 0$ , and define the corresponding finite-differences estimator of the directional derivative $\Delta_f$ as +

$$
\Delta_ {f} (\mathbf {z}; \mathbf {v}) := \frac {f (\mathbf {z} + h \mathbf {v}) - f (\mathbf {z})}{h}.
$$

Definition 1 (Metasmoothness of $f$ at $\mathbf{z}$ towards $\mathbf{v}$ ). Consider a training function $f$ mapping metaparameters $\mathbf{z} \in \mathbb{R}^n$ to model output $f(\mathbf{z}) \in \mathbb{R}$ . Given a metaparameter $\mathbf{z}$ and a vector $\mathbf{v} \in \mathbb{R}^n$ , the metasmoothness of $f$ at $\mathbf{z}$ towards $\mathbf{v}$ is given by +

$$
S _ {h, \mathbf {v}} (f; \mathbf {z}) := \left| \frac {\Delta_ {f} (\mathbf {z} + h \mathbf {v}; \mathbf {v}) - \Delta_ {f} (\mathbf {z}; \mathbf {v})}{h} \right|. \tag {5}
$$

Definition 1 measures the rate of change of the derivative of $f(\mathbf{z})$ in the direction of a given vector $\mathbf{v}$ , and is therefore related to $\beta$ -smoothness in that: +

(a) If $f$ is $\beta$ -smooth at $\mathbf{z}$ , then $S_{h,\mathbf{v}}(f;\mathbf{z}) \leq \beta$ for any $(h,\mathbf{v})$ (so Definition 1 is necessary for smoothness).
(b) If $\lim_{h\to 0}S_{h,\mathbf{v}}(f;\mathbf{z})\leq \beta$ for all $\mathbf{z}\in \mathbb{R}^n$ and $\mathbf{v}\in \mathbb{S}^{n - 1}$ , then $f$ is $\beta$ -smooth everywhere (so a global version of Definition 1 is sufficient for smoothness). +

Empirical metasmoothness. Definition 1 lets us measure the meta-smoothness of a training function $f$ at a particular metaparameter $\mathbf{z}$ (towards a direction $\mathbf{v}$ ). This definition, however, has two shortcomings. 
First, recall that the training function $f$ is a composition of a learning algorithm $\mathcal{A}$ and an output function $\phi$ , so the smoothness of $f$ depends on that of both $\mathcal{A}$ and $\phi$ (in particular, $\frac{\partial f}{\partial \mathbf{z}} = \frac{\partial \phi}{\partial \mathcal{A}} \cdot \frac{\partial \mathcal{A}}{\partial \mathbf{z}}$ ). Since the output function $\phi$ might be unknown ahead of time, we are most interested in measuring the overall metasmoothness of a learning algorithm $\mathcal{A}$ . Second, while the result of (5) does have a concrete basis in optimization theory, it may not be easy to interpret in practice (e.g., what does $S = 200$ mean?). We address both issues simultaneously by (a) proposing an interpretable "binarized" version of Definition 1, and (b) studying metasmoothness in the space of model parameters $\theta$ , instead of the output space. + +Definition 2 (Empirical metasmoothness of $\mathcal{A}$ ). Let $\mathcal{A}$ be a learning algorithm which maps metaparameters $\mathbf{z} \in \mathbb{R}^n$ to model parameters $\theta \in \mathbb{R}^d$ , let $\mathbf{z}$ be a metaparameter vector, and let $\mathbf{v}$ be a given direction. Let $\mathbf{d} \in \mathbb{R}^d$ be the per-coordinate variation in $\theta$ , i.e., + +$$ +\mathbf {d} = \left| \mathcal {A} (\mathbf {z} + 2 h \mathbf {v}) - \mathcal {A} (\mathbf {z}) \right| +$$ + +The empirical $(h,\mathbf{v})$ -metasmoothness of $\mathcal{A}$ at $\mathbf{z}$ is given by + +$$ +\widehat {S} _ {h, \mathbf {v}} (\mathcal {A}; \mathbf {z}) = \operatorname {s i g n} \left(\Delta_ {\mathcal {A}} (\mathbf {z}; \mathbf {v})\right) ^ {\top} \cdot \operatorname {d i a g} \left(\frac {\mathbf {d}}{\| \mathbf {d} \| _ {1}}\right) \cdot \operatorname {s i g n} \left(\Delta_ {\mathcal {A}} (\mathbf {z} + h \mathbf {v}; \mathbf {v})\right), \tag {6} +$$ + +weights each parameter by its range. 
+ +Intuitively, (6) measures the agreement in sign between the (finite-difference approximation of the) metagradient in the direction of $\mathbf{v}$ at $\mathbf{z}$ and at $\mathbf{z} + h\mathbf{v}$ , averaged across parameter coordinates and weighted by the variation in each coordinate. Taking a weighted average of sign agreements ensures that $\widehat{S} \in [-1,1]$ (making it easier to interpret than Definition 1). The $\mathrm{diag}(\mathsf{d} / \| \mathsf{d}\|_1)$ term weights each agreement proportionally to the scale of the corresponding parameter change (downweighting, e.g., coordinates $i$ that are essentially constant). Finally, observe that Definition 2 is efficient to compute in practice: it requires only three calls to the learning algorithm $\mathcal{A}$ . + +![](images/8903e4b2221d100768bfe268b5a001dd442b4e5c27be2741e58072ab69f0a150.jpg) +(a) + +![](images/1a484312f1ed10309ea34d6652f551a0d6047e126a3f781ebb74cc68734bab99.jpg) +Figure 4: (a) For a variety of training configurations of a ResNet-9 model, we plot metasmoothness (Def. 2) against test accuracy. Strategies such as increasing width, placing batch normalization before activations, and scaling down network outputs consistently improve metasmoothness, at a minor cost to accuracy. (b) Smoother training configurations can be optimized via metagradients more effectively. Here, as in Section 4.3, we use metagradients to gradient ascend on validation loss. + +![](images/a19a4da2f67b18c44fb26d6e2a9cceb669d036ffc05618b89b9c68b99f1485ad.jpg) +(b) + +Remark 3. Ideally, recalling the smoothness definition (4), we would evaluate metasmoothness in all possible directions $\mathbf{v}$ and all points $\mathbf{z}$ . Empirically, we find in the sequel (Section 3.2) that this single-direction approximation at a single point $\mathbf{z}$ still yields a useful estimate of metasmoothness (e.g., one that correlates with metagradients utility). 
+ +# 3.2 Estimating and improving metasmoothness + +Having established a method for quantifying metasmoothness, we turn to the practical question: how can we design learning algorithms that are amenable to metagradient optimization? To answer this question, we introduce a straightforward framework: given a learning algorithm, explore a fixed menu of possible modifications to the training setup, and choose the combination that maximizes empirical metasmoothness. In practice, we find that this framework allows us to slightly modify learning algorithms in a way that makes them amenable to first-order methods. + +As a case study, we study the task of training ResNet-9 on the CIFAR-10 dataset [Kri09]. We let the metaparameters $\mathbf{z}$ be a perturbation to the pixels of 1000 random training images (so $\mathbf{z} \in \mathbb{R}^{1000 \times 32 \times 32 \times 3}$ ). We estimate the empirical metasmoothness of different learning algorithms $\mathcal{A}$ at $\mathbf{z} = \mathbf{0}$ using Definition 2. Concretely, we proceed as follows for each learning algorithm $\mathcal{A}$ : + +1. Let $\mathbf{z}_0 = \mathbf{0}$ be the metaparameter corresponding to the original dataset. +2. Sample a random perturbation vector $\mathbf{v} \sim \mathcal{N}(0,1)$ . +3. Compute the empirical metasmoothness (6), i.e., + +(a) Let $\theta_0\coloneqq \mathcal{A}(\mathbf{z}_0)$ , $\theta_h\coloneqq \mathcal{A}(\mathbf{z}_0 + h\cdot \mathbf{v})$ , and $\theta_{2h}\coloneqq \mathcal{A}(\mathbf{z}_0 + 2h\cdot \mathbf{v})$ be the model parameters that result from training with training dataset perturbations $\mathbf{z}_0,\mathbf{z}_0 + h\mathbf{v}$ , and $\mathbf{z}_0 + 2h\mathbf{v}$ , respectively. +(b) Compute the approximate derivatives + +$$ +\Delta_ {\mathcal {A}} (\mathbf {z} _ {0}; \mathbf {v}) = \left(\theta_ {h} - \theta_ {0}\right) / h, \quad \Delta_ {\mathcal {A}} (\mathbf {z} _ {0} + h \mathbf {v}; \mathbf {v}) = \left(\theta_ {2 h} - \theta_ {h}\right) / h. 
+$$ + +(c) Compute the weighting vector $\mathbf{d} = |\theta_{2h} - \theta_0|$ , and compute the average metasmoothness (6), i.e., + +$$ +\widehat {S} _ {h, \mathbf {v}} (\mathcal {A}; z _ {0}) = \operatorname {s i g n} \left(\Delta_ {\mathcal {A}} \left(\mathbf {z} _ {0} + h \mathbf {v}; \mathbf {v}\right)\right) ^ {\top} \cdot \operatorname {d i a g} \left(\frac {\mathbf {d}}{\| \mathbf {d} \| _ {1}}\right) \cdot \operatorname {s i g n} \left(\Delta_ {\mathcal {A}} \left(\mathbf {z} _ {0}; \mathbf {v}\right)\right). +$$ + +![](images/95cf8c6f67f7a1ff5f5908ae786adfca0bdc4e43fa7de5fa95c29ea9182013b3.jpg) + +![](images/8ef162573b94d345eed4da85496a669bb5b67c2e40af2ad7caf94d5e5efc2d81.jpg) + +![](images/65efa3e7039ec2bf0c17df31db0fea551abb1e8ebec13cd70da4376df689dec2.jpg) +Figure 5: The effect of metasmoothness on the optimization landscape. Each plot above visualizes the loss landscape of a (deterministic) learning algorithm $\mathcal{A}$ , with the $x$ - and $y$ -axes representing additive perturbations to 1000 examples in the training set and the $z$ -axis representing the resulting model's loss on the test example given in the title. In each row, the left plot is a non-smooth algorithm, and the right plot is a smooth algorithm (as per Definition 2) evaluated on the same example. Overall, empirical metasmoothness seems to strongly correlate with qualitative landscape smoothness. See Figure 12 for more examples. + +![](images/b822311a6fc4b5b0c4d7fcafd79dd0e3e2f03c4bdeeb8e5787a92fec3738137b.jpg) + +Metasmooth learning algorithms. We apply the procedure above to estimate the metasmoothness of learning algorithms induced by different design choices (batch size, network width, BatchNorm placement, gradient scaling), and report the results in Figure 4 (left). On one hand, "standard" learning algorithms (i.e., those designed without metasmoothness in mind) are not metasmooth. On the other hand, our investigation reveals central factors driving metasmoothness. 
In addition to "standard" hyperparameters such as batch size and network width playing a role, we find that placing Batch Normalization layers prior to nonlinearities (instead of after) and scaling the final layer output are both crucial to metasmoothness. Note that the modifications we consider above are not exhaustive—see Appendix E for the full training setup. + +Finally, in Figure 5, we plot the optimization landscape of both metasmooth (right) and non-metasmooth (left) models. We find that the landscapes of metasmooth models are much smoother and—qualitatively—more straightforward to optimize. + +Metasmoothness/performance tradeoffs? Figure 4 (left) relates metasmoothness to model accuracy for the considered learning algorithms. While there is no clear trend, the top-performing learning algorithms are not always metasmooth. However, the trade-off is not too severe: the most metasmooth algorithms still achieve near-optimal accuracy. Furthermore, it is possible that with additional searching we could identify even more accurate metasmooth models. Taken together with our previous experiment, our results suggest that jointly searching over metasmoothness and model accuracy is a general recipe for designing learning algorithms that are both performant and metasmooth. Finally, as we discuss in Section 5, a fruitful avenue + +for future work may be to design metasmooth learning algorithms directly, i.e., without relying on stability heuristics or grid search. + +Does metasmoothness aid downstream optimization? Recall that our motivation for studying metasmoothness is to develop learning algorithms that we can optimize the metaparameters of via metagradients (using first-order methods). We started with the notion of $\beta$ -smoothness from optimization theory, and we adapted it to the setting of metagradients by making a series of approximations and modifications. 
The final question we address is: does our final notion of metasmoothness actually predict the utility of metagradients for optimization? Figure 4 (right) demonstrates that metasmoothness strongly predicts our ability to optimize the metaparameters of a given learning algorithm. We use metagradients (computed by REPLAY) to gradient ascend on validation loss with respect to the metaparameters $\mathbf{z}$ , and measure the change in model loss. +

# 4 Applications +

In this section, we apply metagradients to three problems in machine learning: selecting training data, poisoning training data, and searching for hyperparameters. In each setting we follow the same recipe: we frame the task as an optimization problem, modify the learning algorithm of interest to be smooth, then solve by first-order optimizing with meta-gradients—which we refer to, in a catch-all manner across algorithms, as metagradient descent (MGD). In particular: we substantially improve on existing dataset selection methods (Section 4.1, Section 4.2), perform the first effective accuracy-degrading data poisoning attack (Section 4.3), and discover one-cycle learning rate schedules with MGD (Section 4.4). +

# 4.1 Selecting multimodal training data +

Curating a training dataset from a mass of unfiltered data is a necessary and influential step in any large-scale machine learning pipeline. Deciding how to curate such a dataset is a challenging problem that has attracted substantial recent interest [FiW+22; ATS+23; EFM24; GIF+24]. In this section, we frame pre-training data selection as an optimization problem, and then solve this problem by first-order optimizing with metagradients. Applying our method to the DataComp-small benchmark [GIF+24], we greatly improve on the state-of-the-art (our improvement over state-of-the-art is roughly the same as the improvement of state-of-the-art over training on random data). 
+ +# 4.1.1 Setup + +The goal of dataset selection is to choose a training data subset (out of a broad pool of data) that maximizes trained machine learning model performance. Given this goal, dataset selection has a natural interpretation as a combinatorial metaparameter optimization problem. In particular, in the language of Section 2.1, for a training set of size $n$ , let + +(a) the metaparameters $\mathbf{c} \in \mathcal{C} \coloneqq \mathbb{Z}_{\geq 0}^{n}$ be non-negative data counts representing the number of times each training sample repeats in the training data; +(b) the algorithm $\mathcal{A}$ be a standard large-scale learning procedure, which runs on a training set comprising $c_{i}$ copies of each sample $i$ for $i\in [n]$ ; +(c) the output function $\phi$ be the loss of the trained model on a target distribution $D$ . + +Then, defining $f(\mathbf{c}) \coloneqq \phi(\mathcal{A}(\mathbf{c}))$ (as in Section 2.1), our goal is to find the data counts $\mathbf{c}^*$ that solve + +$$ +\mathbf {c} ^ {*} := \underset {\mathbf {c} \in \mathcal {C}} {\arg \min } f (\mathbf {c}). \tag {7} +$$ + +# 4.1.2 Gradient descent on training data + +Metagradients let us directly minimize the target task loss (7) with respect to the choice of training data. At a high level, our algorithm operates as follows: we start with a randomly chosen set of training data, then iteratively update the dataset selection using metagradients with respect to importance weights placed on each training datapoint. The specifics of our method are in Algorithm 1; we describe its core ideas below. + +Idea 1: A surrogate algorithm. We cannot use metagradients to optimize (7) directly, because the metaparameters of interest $\mathbf{c}$ are discrete counts (and so the algorithm $\mathcal{A}$ is non-differentiable with respect to $\mathbf{c}$ ). 
To circumvent this problem, we relax $\mathcal{A}$ : we define a surrogate algorithm $\mathcal{A}_{\mathbf{c}}^{\prime}$ that takes in a continuous metaparameter $\mathbf{z} \in \mathbb{R}^{n}$ , whose metagradient we can compute, then optimize using the metagradient on $\mathcal{A}_{\mathbf{c}}^{\prime}$ . + +This surrogate learning algorithm $\mathcal{A}_{\mathrm{c}}^{\prime}$ maps a metaparameter $\mathbf{z} \in \mathbb{R}^{n}$ (representing a perturbation to training data weights) to a machine learning model. The surrogate is defined by a set of counts $\mathbf{c} \in \mathbb{Z}_{+}^{n}$ , and a hyperparameter $k$ denoting a specific training iteration, both of which we bake into the surrogate algorithm itself. Given a metaparameter $\mathbf{z} \in \mathbb{R}^{n}$ , the algorithm $\mathcal{A}_{\mathrm{c}}^{\prime}$ trains a model "as usual" using the fixed counts $\mathbf{c}$ . That is, it makes $c_{i}$ copies of each training sample $i$ , shuffles and partitions the data into batches, and then at each iteration minimizes the batch loss with a step—just as the original learning algorithm $\mathcal{A}$ . At iteration $k$ , however, in addition to the original loss on the $k$ -th batch, the algorithm upweights each training sample $i$ according to the metaparameter $z_{i}$ . In other words, the objective at iteration $t$ of the surrogate algorithm $\mathcal{A}_{\mathrm{c}}^{\prime}$ is + +$$ +\ell_ {t} ^ {\prime} (\theta) := \left\{ \begin{array}{l l} \sum_ {x \in t ^ {\text {t h}} \text {b a t c h}} \ell (x; \theta) & \text {i f} t \neq k \\ \sum_ {x \in t ^ {\text {t h}} \text {b a t c h}} \ell (x; \theta) + \sum_ {i = 1} ^ {n} z _ {i} \ell (x _ {i}; \theta) & \text {i f} t = k \end{array} \right. +$$ + +where $\ell (x;\theta)$ is the training loss on example $x$ + +Observe that when $\mathbf{z} = \mathbf{0}_n$ , the algorithm $\mathcal{A}_{\mathbf{c}}'$ is identical to the standard learning algorithm $\mathcal{A}$ . 
And while $\mathcal{A}$ was a function of (nondifferentiable) discrete data counts $\mathbf{c}$ , $\mathcal{A}_{\mathbf{c}}'$ is differentiable with respect to its input $\mathbf{z}$ , and so we can compute the metagradient + +$$ +\mathbf {g} := \nabla_ {\mathbf {z}} \phi \big (\mathcal {A} _ {\mathbf {c}} ^ {\prime} (\mathbf {z}) \big) \big | _ {\mathbf {z} = \mathbf {0} _ {n}}. +$$ + +Intuitively, the entries of the metagradient $\mathbf{g}$ capture the effect of adding an infinitesimal amount of each training sample $i$ to the training data at iteration $k$ . A positive entry $g_{i}$ indicates that adding an infinitesimal amount of sample $i$ to the training data would increase the loss, and a negative entry indicates that adding an infinitesimal amount of sample $i$ to the training data would decrease the loss; the slot at $i$ represents the (estimated) effect of adding a copy of sample $i$ to the training data at every batch containing the sample. + +Idea 2: Block coordinate descent. We then use the metagradient $\mathbf{g}$ to iteratively update our selected dataset. We update data counts as + +$$ +\mathbf {c} \leftarrow \mathbf {c} - \operatorname {s i g n} (\mathbf {g}) \odot \mathbf {m}, \quad \mathbf {m} \sim \text {B e r n o u l l i} (p), \tag {8} +$$ + +where $p$ is a hyperparameter controlling the fraction of sample counts to update. This algorithm resembles a block coordinate descent algorithm [OR00], with the main difference being that we take signed gradient steps with step size 1 (projected onto non-negative integers) to ensure that the counts remain well-defined. As a result, $p$ implicitly controls the algorithm's step size. + +Applying (8) concludes a single optimization step. By repeating this process of estimating the metagradient, updating our counts vector, then constructing a new training dataset, we iteratively improve the selected data. Pseudocode for our algorithm can be found in Algorithm 1. 
+ +# 4.1.3 Results + +We evaluate our data selection algorithm using DataComp [GIF+24], a standardized framework for evaluating data selection methods for multimodal models. Algorithm 1 greatly improves on the state-of-the-art for the benchmark. Below, we describe the setting, outline our method, and conclude with our results. + +Algorithm 1: Dataset selection using using metagradient descent (MGD). +Input: initial data counts $\mathbf{c}\in \mathbb{Z}_{\geq 0}^{n}$ , learning algorithm $\mathcal{A}$ output function Hyperparameters: step size $p$ # opt steps $T$ iteration number $k$ for $t\gets 1$ to $T$ do +2 $\mathbf{z}\gets \mathbf{0}_n / /$ Build input to surrogate +3 $\mathbf{g}\leftarrow \frac{\partial\phi(\mathcal{A}_c'(\mathbf{z}))}{\partial\mathbf{z}} / /$ Calculate metagradient using REPLAY +4 m<- sample from Bernoulli(p) // Sample indices to step on +5 c<- c-sign(g) $\odot$ m// Take optimization step +6 Return c// Return final data counts + +Setting. DataComp [GIF+24] is a multimodal model training competition and benchmark for evaluating dataset selection methods. DataComp provides a fixed learning algorithm chosen in advance by the organizers and a large fixed candidate pool of internet data. The goal is to choose a subset of the candidate pool—possibly with repeated datapoints—that yields the best-performing model after training with the given learning algorithm, as measured by a predetermined set of 38 benchmarks. Given a submission subset, the mean score on the evaluation datasets for a model trained with that subset is taken as the final "score." DataComp offers four separate "scales" requiring different amounts of compute; we focus on the small scale in this paper due to compute limitations. + +Method. We select data with MGD (Algorithm 1) to minimize loss on data on a "target set" that is distributionally similar to the DataComp benchmark tasks, and select hyperparameters with a held-out "validation set." 
In particular, we construct target and validation sets by taking samples from the DataComp evaluation tasks with extra samples available beyond those used in the DataComp test set (e.g., ImageNet, one of the tasks in DataComp, has a training set in addition to the test set evaluated in DataComp). See Appendix C for the exact details of the target and validation sets, the precise hyperparameters used with Algorithm 1, and a discussion on scalability (including further engineering details on executing our algorithm efficiently). + +Results. MGD greatly outperforms the current state-of-the-art: the difference in accuracy between MGD and the current best method is roughly as large as the difference between the previous state-of-the-art (EcoDatum [Eco24]) and training on randomly chosen data (cf. Figure 6). Inspecting scores over the course of the optimization in Figure 6, we find that only a few steps are necessary to outperform previous methods. + +![](images/b4f07dfbec991e71985782d6905bc8288383a71ad6a7e6b4aaf74a949165576e.jpg) +Figure 6: MGD dataset selection greatly outperforms existing methods (improving over the previous SOTA by as much as the previous SOTA improves over no filtering at all). We compare DataComp scores for MGD (over optimization steps), training on the entire candidate pool, the best baseline originally proposed by DataComp, and the previous SOTA [Eco24]. + +
MethodScoreΔ
- - - Baseline: No filtering0.13-
- - - Best baseline from [GIF+24]0.17+0.04
- - - Previous SOTA [Eco24]0.18+0.05
- - - MGD-DS (ours)0.22+0.09
+ +![](images/c5d7306e1433a0b6c6b42b22995ae8e6a7ebb4b28513e507342f46c61081428d.jpg) +Figure 7: MGD dataset selection outperforms baselines. Comparing to training on all the data: it achieves over double the margin of improvement of LESS on MMLU, and improves by $+1.5\%$ on BBH (where LESS does not improve at all). The $\Delta$ column denotes improvement over not filtering. + +
BBH [SSS+22]MMLU [HBB+20]
Acc.ΔAcc.Δ
All Data35.2%-41.2%-
■ LESS35.2%-0.0%41.8%+0.5%
■ MGD-DS36.7%+1.5%42.5%+1.3%
+ +# 4.2 Selecting instruction-tuning data + +In our second application, we select training data for instruction fine-tuning (IFT) using the same MGD-based method detailed in Algorithm 1 of Section 4.1. As with multimodal data, training on the "right" post-training data (such as the "right" IFT data) can greatly impact deployment-time model performance [LFX+24; DJP+24; TGZ+23]. MGD improves over baselines at choosing IFT data for MMLU [HBK+21], a general knowledge task, and BBH [SSS+22], a reasoning/chain-of-thought task. + +To overview this section: we start by detailing the setting, then describe the specifics of our MGD instantiation before concluding with results. + +Setting. We adopt the setting of LESS [XMG+24]. Here, the goal is to select a training data subset from four combined IFT datasets (Flan V2 [LHV+23], CoT [WWS+22], DOLLY [CHM+23], and Open Assistant 1 [KKR+24]) to maximize accuracy on a given target task. We consider two target tasks from LESS: MMLU (which comprises multiple choice questions spanning a variety of disciplines) and BBH (a 23 task subset of BIG-Bench [SRR+22]). In this setup, the data selector can access samples from each task built from the in-context learning prompts. Following Xia et al. [XMG+24], we fine-tune a 128-width LoRA [HY20] (in our work, on Gemma-2B [TMH+24]). See Appendix D for full details on the tasks and learning algorithm. + +Method. We split up the available task samples into two sets—a "target" set and a "validation" set—then select data with MGD (via Algorithm 1) by minimizing causal language modeling loss on the "target" set of samples. We select hyperparameters like step size and number of SGD iterations with the validation set; see Appendix D for more details. + +Results. Comparing with two baselines—training on all the data and training with data selected with LESS [XMG+24]—MGD yields strictly better training dataset selections for each target task (cf. Figure 7). 
MGD improves most on BBH, a reasoning task, compared to the best baseline $(+1.5\%)$ accuracy). On MMLU, a knowledge-based task, we outperform baselines by slightly less compared to the best baseline $(+0.8\%)$ ; one explanation is that selecting IFT data lends more control over reasoning than over intrinsic knowledge available in the LM. + +Beyond raw accuracy, we inspect losses across each step of the optimization process. Overall, our method improves validation loss over MGD steps (cf. Appendix Figures 13), but also exhibits signs of overfitting. Given intuition from overparameterized learning, we might expect this behavior: we optimize a total of 270,679 "weights"—each corresponding to a count for a datapoint—to minimize loss on only a handful of test samples (cf. Table 3). + +# 4.3 Accuracy-degrading (Huber) data poisoning + +The goal of an accuracy-degrading data poisoning attack is to degrade the performance of a machine learning model by corrupting a small fraction of its training data. Here, the considered threat model is as follows. + +The attacker is given a training set $\mathbf{X} = \{x_{1},\dots,x_{n}\}$ drawn from a distribution $P$ , and a function $\theta (\cdot)$ mapping training data to model parameters (representing the learning algorithm used by the victim). The attacker's goal is to return a new training set $\mathbf{X}'$ that differs from $\mathbf{X}$ in at most $\varepsilon \cdot n$ datapoints while inducing model parameters $\theta (\mathbf{X}')$ that perform as poorly as possible on a freshly drawn test set $T$ from $P$ . + +Formally, the adversary aims to solve the following optimization problem: + +$$ +\arg \max _ {\tilde {x} _ {1}, \dots , \tilde {x} _ {n _ {p}}} \mathbb {E} _ {x \sim P} [ \ell (x; \theta (\mathbf {X} ^ {\prime})) ], \tag {9} +$$ + +where $\mathbf{X}^{\prime} = \{\widetilde{x}_1,\dots ,\widetilde{x}_{n_p},x_{n_p + 1},\dots ,x_n\}$ and $n_p = \lfloor \varepsilon n\rfloor$ . 
Note that our goal is to degrade the overall model performance on a test set $\mathbf{X}_{test}$ drawn from $P$ (in particular, the test set $\mathbf{X}_{test}$ is unknown to the adversary). In this way, this setting resembles the Huber contamination model in statistics [Hub64], and is strictly more challenging than the usual data poisoning settings in deep learning (e.g., backdoor attacks [GDG17] or attacks that target specific test examples [KL17]). + +For large-scale machine learning models, finding strong adversaries has proven challenging—standard loss-minimizing learning algorithms seem quite robust to maliciously-inserted data [LKY23]. In fact, the first non-trivial accuracy degradation data poisoning attacks on deep models were pioneered by Lu et al. [LKY22] and later improved upon by the same set of authors [LKY23]. Broadly speaking, even constructing attacks that degrade the overall performance of a learning algorithm by more than the adversarial budget $\varepsilon$ has proven challenging. + +# 4.3.1 Setup + +We observe that (9) is a continuous optimization problem to which we can directly apply our metagradient framework, approximating the expectation over $P$ by a finite-sample average over a validation set $\mathbf{X}_{val}$ . In particular, given a (randomly shuffled) training set $\mathbf{X}$ and validation set $\mathbf{X}_{val}$ , we set up the following metaparameter optimization problem (see Section 2.1): + +(a) the metaparameter $\mathbf{z} \in \mathcal{X}^{n_p}$ is a tensor of $n_p = \lfloor \varepsilon n \rfloor$ poisoned samples; +(b) the algorithm $\mathcal{A}$ maps metaparameters $\mathbf{z}$ to a trained model $\mathcal{A}(\mathbf{z})$ by replacing the first $n_p$ samples in $\mathbf{X}$ with the samples in $\mathbf{z}$ and then training on the resulting dataset; +(c) the output function $\phi$ evaluates average loss on the validation set $\mathbf{X}_{val}$ . 
+ +# 4.3.2 Algorithm + +To apply our first-order methods to this problem, we start by initializing the poisoned data to be exactly the first $n_p$ samples in $\mathbf{X}$ , $\mathbf{z}^{(0)} \coloneqq \{\widetilde{x}_i^{(0)} = x_i : i \in [n_p]\}$ . Then, for $t = 1, \dots, T$ , we sample a minibatch $\mathbf{X}_{val}^{(t)}$ from $\mathbf{X}_{val}$ and use REPLAY to compute the metagradient + +$$ +\mathbf{g}_{t} = \frac{d}{d\mathbf{z}}\left(\sum_{x\in \mathbf{X}_{val}^{(t)}}\ell (x;\mathcal{A}(\mathbf{z}^{(t - 1)}))\right), +$$ + +and update the poisoned data using (projected) gradient ascent: + +$$ +\mathbf {z} ^ {(t)} = \Pi_ {\mathcal {X}} \left(\mathbf {z} ^ {(t - 1)} + \eta \cdot \operatorname {s i g n} (\mathbf {g} _ {t})\right), +$$ + +where $\Pi_{\mathcal{X}}$ is the projection operator onto the sample space $\mathcal{X}$ . (For example, when $\mathcal{X}$ is the space of image-label pairs, $\Pi_{\mathcal{X}}$ clips images' pixel values to [0, 1] and ensures labels are valid probability distributions.) + +![](images/bee2bdf66cc8b88534900202c77d8a154c55594c42b87de51729a3c9fc1551b5.jpg) +Figure 8: Examples of poisoned images from Section 4.3. + +![](images/599688cbc30a73e86945ceef1aaa6104947a8bf4fb89679b2b4e04e12dbef15b.jpg) +Figure 9: For each iteration of MGD ( $x$ -axis), we train a new model from random initialization on a randomly shuffled training set with the current iterate of poisoned data injected. We evaluate the test accuracy ( $y$ -axis), and use REPLAY to compute the metagradient. MGD outperforms the best known attack [LKY23] by an order of magnitude and (for reference) results in a model that has the same accuracy as a single-layer neural network trained on random image features [CNL11]. + +
ModelAcc.Δ
- - Original model92.0%-
- - GradCancel [LKY23]91.2%-0.80%
- MGD-DP (ours)78.1%-13.9%
1-layer NN (for reference) [CNL11]83.3%-8.7%
+ +# 4.3.3 Evaluation + +We use the CIFAR-10 dataset which consists of 60,000 total images each labeled as one of 10 classes. We partition the data into 40,000 training examples, 10,000 validation examples, and 10,000 test examples. We consider a simple 12-epoch CIFAR-10 training procedure, which reaches $92.4\%$ accuracy on the CIFAR-10 test set when applied to the 40,000 training examples. See Appendix E for training hyperparameters. + +As described above, we allow the adversary to modify (in-place) a fixed, $\varepsilon$ -fraction of the training data (in our case, $2.5\%$ ) subject to the constraint that the poisoned images still lay in the valid (normalized) image range of [0, 1]. We compare our approach—direct optimization of the data poisoning objective using metagratings—to the state-of-the-art "Gradient Cancelling" (GradCancel) method of Lu et al. [LKY23]. In short, GradCancel is a two-step method which first finds a poorly performing model, then finds poisoned data that induces this model as a minimizer of the training loss. We present the full method in Appendix E. + +Results. We find that metagradients enable state-of-the-art data poisoning attacks, degrading accuracy by $14\%$ . In particular, when allowed to corrupt 1000 of the 40,000 training samples $(2.5\%)$ , our method reduces test set accuracy to $78\%$ — for reference, the accuracy of a single-layer neural networked trained on the unmodified CIFAR-10 training set is $83\%$ . The strongest existing data poisoning attack, GradCancel, only reduces test set accuracy by less than $1\%$ . In Figure 8, we visualize the poisoned images and labels found by our method. In Figure 9, we visualize the minibatch loss at each step of the optimization process. + +Remark 4 (Poisoning non-smooth learning algorithms). Recall that to apply metagradient descent, we alter the learning algorithm $\mathcal{A}$ to be metasmooth (see Section 3.1). 
This involves making modifications such as switching out max pooling layers for average pooling layers, moving batch normalization layers before activations, and scaling down the last layer's output by a factor of 10. It is natural to ask: how much does the efficacy of our method depend on this smoothness? After all, in practice the adversary cannot control the learning algorithm. To answer this question, we take the poison samples generated by MGD and insert them into the training set of a corresponding standard (i.e., non-metasmooth) learning algorithm. We find that our method still significantly degrades the performance of the model, from $92.8\%$ to $82.6\%$ (a drop of $10.2\%$ ). + +![](images/ff507e583af2cf8d875c5175cfaf529dfdc7148e3aef2836098edd006a592450.jpg) +Figure 10: Target and test accuracies of MGD's learning rate schedule over time closely match or exceed those found by a grid search over hundreds of combinations of hyperparameters. $95\%$ confidence intervals are plotted for MGD's results. + +![](images/983c55704fec1c3570e69e9f79f79845d462ac252b5f882ff32f4fe1718609cd.jpg) + +# 4.4 Finding a learning rate schedule + +As a final application, we optimize the learning rate schedule of stochastic gradient descent (SGD) for training a CIFAR-10 classifier. By following the metagradients with respect to the learning rate at each step of training, our procedure matches grid searching over standard learning rate schedules—despite starting with naive hyperparameters (a flat learning rate). + +Unlike the other applications discussed here, metagradients do not unlock state-of-the-art performance. Instead, we discuss this application to illustrate the flexibility of REPLAY, and in particular its ability to optimize metaparameters that do not directly affect the loss landscape (i.e., that only affect the model via the optimization trajectory). As we discuss in Section 6, approximate metagradient estimators cannot apply to these metaparameters. 
+ +# 4.4.1 Setting + +To put learning rate schedule optimization into the metagradient framework, we parameterize a schedule as a vector $\eta \in \mathbb{R}^k$ comprising $k$ evenly-spaced keypoints, so that the learning rate at iteration $t$ is given by + +$$ +\eta (t) = \eta_ {\lfloor k t / T \rfloor} + \frac {k t / T - \lfloor k t / T \rfloor}{\lceil k t / T \rceil - \lfloor k t / T \rfloor} \left(\eta_ {\lceil k t / T \rceil} - \eta_ {\lfloor k t / T \rfloor}\right), \tag {10} +$$ + +i.e., a linear interpolation between the keypoints. + +(a) the metaparameter $\eta \in \mathbb{R}^k$ is a vector of $k$ keypoints; +(b) the algorithm $\mathcal{A}$ maps metaparameters $\eta$ to a trained model $\mathcal{A}(\eta)$ by training a model for $T$ iterations with the learning rate schedule defined by (10); +(c) the output function $\phi$ evaluates average loss on the validation set $\mathbf{X}_{val}$ . + +# 4.4.2 Algorithm + +Following the theme of the rest of this section, we optimize the metaparameter $\eta$ directly using MGD. In particular, we initialize the keypoints to be a flat learning rate schedule, and then update the keypoints using the metagradient with respect to the validation loss, + +$$ +\boldsymbol {\eta} ^ {(t + 1)} = \boldsymbol {\eta} ^ {(t)} - \alpha \cdot \operatorname {s i g n} \left(\nabla_ {\boldsymbol {\eta}} \phi (\mathcal {A} (\boldsymbol {\eta} ^ {(t)}))\right). +$$ + +# 4.4.3 Evaluation + +We aim to select the learning rate schedule that minimizes the expected test set loss. To do so, we reserve $90\%$ of the CIFAR-10 test set as a "validation set" on which we select hyperparameters. We then use the remaining $10\%$ as a test set. We compare the following two approaches: + +- Grid search: We construct a grid over different one cycle learning rate schedules, varying the peak learning rate, starting learning rate, ending learning rate, and peak learning rate time. In total, we consider over 1,000 different learning rate schedules. 
We use the reserved $90\%$ of the test set to select the best learning rate schedule from the grid. +- Metagradient descent (MGD): We run 50 steps of MGD starting from a highly suboptimal flat learning rate schedule, aiming to minimize loss on the reserved $90\%$ of the test set. We use the last iteration of MGD as our learned learning rate schedule. + +We evaluate the performance of each final learning rate schedule on the held-out $10\%$ test set and average the results over the same set of 5 unseen random seeds. + +Results. Comparing our learned hyperparameter schedule to grid search, as shown in Figure 10, our learned schedule using only 50 steps of MGD matches the performance of the state-of-the-art onecycle schedule found via grid search over more than 1000 configurations. An important caveat, however, is that these numbers are not directly comparable: grid search can be run in parallel across many machines, while steps of MGD must be run sequentially. + +In practice, we do not advise using MGD for optimizing low-dimensional hyperparameters, especially ones that have been thoroughly optimized by grid search (such as CIFAR-10 learning rate schedules [SN17; Pag18; LA19; Jor24]). Still, an interesting avenue for future work is to study the utility of MGD for optimizing high-dimensional hyperparameters that are less well-studied, such as per-parameter/layer learning rates/weight decays for language models, attention hyperparameters, or gradient preconditioners. + +# 5 Discussion + +In this section, we first present the main limitations of our method and outline future directions. + +Limitations. Although REPLAY is more efficient than existing methods at computing metagradients, it is still non-trivially more expensive than simply training a model once. The main reason is that metagradients require making a backwards pass over a backwards pass. 
This operation necessarily requires 2-3 times the operations of a backwards pass; furthermore, our current implementation requires float32/tensorfloat32 operations. Finally, standard training operations are often made more efficient by specialized software (e.g., via FlashAttention [DFE+22]); no such software (yet) exists for backwards-over-backwards operations. Beyond computational issues, successfully applying metagradients requires smooth model training. + +Metasmoothness: connections and future directions. While Section 3 describes a general procedure for finding metasmooth learning algorithms, an important future direction is to further explore and understand metasmoothness. This includes, for example: (a) characterizing the relationship between metasmoothness and numerical stability (and potentially using techniques from the latter to improve the former); (b) devising improved optimizers and/or architectures that lead directly to metasmooth learning algorithms (akin to skip connections or stable initialization in architecture design); (c) formalizing connections between metasmoothness and other optimization-related phenomena in deep learning [LM20; CKL+22]. A related but separate direction is to explore the possibility of using techniques from non-smooth optimization [Cla90] to perform metagradient descent on non-metasmooth learning algorithms. + +**Applying metagradients.** Our methods apply to any ML task that requires optimizing with respect to a metaparameter. These include: poisoning data (generated or simply hosted on the internet) so that it cannot be trained on without permission (i.e., by maximizing training loss with respect to the text); selecting better training data at various stages of the model training lifecycle; and designing better model training routines and architectures with first-order methods. Another direction of future work lies in mitigating the computational limitations of our algorithm. 
Both (a) small-scale proxy-models [HBM+22; EFM24] and (b) low-hanging engineering improvements can likely make calculating metagradients much more efficient. + +# 6 Related work + +We overview previous work on calculating and applying meta-gradients. + +# 6.1 Calculating metagradients + +Previous work estimates the metagradient for large-scale models via one of two broad families of methods: implicit differentiation and automatic (explicit) differentiation. Note that in previous literature, synonyms for metagradient include "hyper-gradient" and "outer gradient." + +Implicit differentiation. One family of methods aims to approximate the metagradient. To illustrate the idea behind such approaches, suppose that the learning algorithm $\mathcal{A}$ returns a model state $\theta$ that minimizes a strongly convex loss function $\mathcal{L}(z,\theta)$ . Here, the implicit function theorem tells us that + +$$ +\nabla_ {z} f (z) = \overbrace {\left(\frac {d \phi}{d \theta} \right| _ {\theta = \mathcal {A} (z)} ^ {\text {w r t . f i n a l p a r a m s}} \underbrace {\left. \left(\frac {\partial^ {2} \mathcal {L} (z , \theta)}{\partial \theta^ {2}} \right| _ {\theta = \mathcal {A} (z)}\right) ^ {- 1}} _ {p \times p \text {i n v e r s e H e s s i a n o f l o s s w r t . f i n a l p a r a m s}} ^ {1 \times p \text {g r a d i e n t o f o u t p u t w r t . f i n a l p a r a m s}} \overbrace {\left. \left(\frac {\partial^ {2} \mathcal {L} (z , \theta)}{\partial \theta \partial z} \right| _ {\theta = \mathcal {A} (z)}\right) ^ {- 1}} ^ {p \times n \text {J a c o b i a n o f l o s s g r a d i e n t w r t . m e t a p a r a m e t e r s}}. \tag {11} +$$ + +The form of (11) yields efficient and accurate estimators for metagradients of models learned by minimizing a strongly convex loss [BKB+20; BKM+22; KDJ20; BBC+22; SGB+22]. 
Such approaches can extend to estimate metagradients of large-scale, non-convex learning algorithms [Ben00; KL17; RFK+19; FAL17; LVD20; CH20; BNL+22], but lose any correctness guarantees. Indeed, applying this class of methods in large-scale settings is challenging as doing so requires (a) assuming conditions on the learning algorithm (e.g., Hessian invertibility, continuous differentiability) and (b) efficiently approximating the inverse Hessian (in practice, typically at the cost of estimate accuracy). Finally, implicit function-based approaches are fundamentally limited in that they can only differentiate with respect to metaparameters expressed in the loss function (e.g., these methods can differentiate with respect to the weight decay, but not learning rate). + +Automatic (explicit) differentiation. Beyond implicit differentiation approaches, there is a long line of work on directly calculating metagradients with AD (see Section 2). Previous work has used AD to estimate metagradients of learning algorithms ranging from those with convex objectives to small neural networks [HNM19; MDA15; FDF+17; MS21; ZSP+21; CXR+22; SGB+22]. As detailed in Section 2, the primary challenge with (reverse-mode) AD-based approaches to meta-differentiation is storing the intermediate products required for the backward pass. To circumvent this challenge, previous work either (a) only considers settings that are small enough that is possible to differentiate while requiring space that is linear in the number of iterations (i.e., 2 layer networks on MNIST), (b) uses forward-mode AD [FDF+17; MS21; CXR+22] (which requires no extra storage at the cost of additional compute that scales linearly with metaparameter dimension), (c) only approximates the metagradient by calculating over only a few training steps [LSY18; CH20; FAL17], or uses (d) a reversible learning algorithm [MDA15]. 
The fourth category is a promising direction for reducing space requirements when computing large-scale metagradients, but current approaches require (a) representing model parameters in a fixed-precision format (which current large-scale learning algorithms do not support) in addition to restricting the algorithm to be reversible (e.g., + +SGD and standard GD do not qualify). A common thread is that algorithms computing metagradient with AD often suffer from numerical instability and overflow issues [MS21; SGB+22]. In relation to previous work on AD, REPLAY (Section 2) can be seen as a strategy for choosing gradient checkpointing [CAC+81; BCT92; ZP00; GW08; CXZ+16] locations in the compute graph (an NP-complete task in general [Nau08]). + +# 6.2 Applying metagradients + +Previous work applies metagradients to optimize training setup, including distillation [MDA15; LVD20], training data selection [HNM19; EFM24], meta-learning [FAL17; RFK+19; HAM+21], learning rate/weight decay selection [MS21; CXR+22], tuning data augmentation [LVD20], and architecture search [MDA15; LSY18; ZSP+21]. Beyond optimizing metagradients, methods in data attribution apply metagradients to (Taylor) estimate the effect of dropping training data on model predictions [KL17; GBA+23; PGI+23]. To the Previous works either (a) calculate metagradients directly with AD (made feasible by working in a very small-scale learning setting) or (b) estimate the metagradient with an implicit function-based approach. + +# 7 Conclusion + +In this work we add metagradients to the large-scale machine learning toolkit. To do so, we overcome two challenges: (a) calculating metagradients at scale and (b) modifying learning algorithms to be metasmooth—i.e., to admit metagradients that locally predict model behavior. 
We then successfully calculate and apply metagradients for large-scale models (up to 2B parameters) to select data for CLIP pretraining and instruction fine-tuning, to (Huber) poison training data to decrease overall model accuracy, and to search for high-dimensional hyperparameters (per-iteration learning rates). Given the successful applications of metagradients in these settings, we are excited to see what unlocking metagradients enables in other areas of machine learning. + +# 8 Acknowledgements + +Work supported in part by the NSF grant DMS-2134108 and Open Philanthropy, and in part by NSF Grant No. 2346519. This work is also supported in part by the Alan Turing Institute, and the U.S. Department of Energy. The authors would like to thank Alex Damian, Harshay Shah, Jesse Michel, Joel Flynn, Manolis Zampetakis, Noah Moroze, Piotr Indyk, Sam Hopkins, Sung Min (Sam) Park, and Sarah Cen for helpful references as well as discussions and feedback on early versions of this work. + +# References + +[ATS+23] Amro Abbas, Kushal Tirumala, Dániel Simig, Surya Ganguli, and Ari S Morcos. "SemDeDup: Data-efficient learning at web-scale through semantic deduplication". In: arXiv preprint arXiv:2303.09540 (2023). +[BAC+21] Sara Beery, Arushi Agarwal, Elijah Cole, and Vighnesh Birodkar. "The iWildCam 2021 competition dataset". In: arXiv preprint arXiv:2105.03494. 2021. +[BBC+22] Mathieu Blondel, Quentin Berthet, Marco Cuturi, Roy Frostig, Stephan Hoyer, Felipe Llinares-López, Fabian Pedregosa, and Jean-Philippe Vert. "Efficient and modular implicit differentiation". In: Advances in neural information processing systems 35 (2022), pp. 5230-5242. +[BBY+22] Yonatan Bitton, Nitzan Bitton Guetta, Ron Yosef, Yuval Elovici, Mohit Bansal, Gabriel Stanovsky, and Roy Schwartz. "WinoGAViL: Gamified association benchmark to challenge vision-and-language models". In: Advances in Neural Information Processing Systems. 2022. +[BCT92] Preston Briggs, Keith D Cooper, and Linda Torczon. 
"Rematerialization". In: Proceedings of the ACM SIGPLAN 1992 conference on Programming language design and implementation. 1992, pp. 311-321. +[Ben00] Yoshua Bengio. "Gradient-based optimization of hyperparameters". In: Neural computation 12.8 (2000), pp. 1889-1900. +[BGM+18] Peter Bandi, Oscar Geessink, Quirine Manson, Marcory Van Dijk, Maschenka Balkenhol, Meyke Hermsen, Babak Ehteshami Bejnordi, Byungjae Lee, Kyunghyun Paeng, Aoxiao Zhong, et al. "From detection of individual metastases to classification of lymph node status at the patient level: the CAMELYON17 challenge". In: IEEE Transactions on Medical Imaging (2018). +[BGV14] Lukas Bossard, Matthieu Guillaumin, and Luc Van Gool. "Food-101-mining discriminative components with random forests". In: European conference on computer vision. 2014. +[BKB+20] Quentin Bertrand, Quentin Klopfenstein, Mathieu Blondel, Samuel Vaiter, Alexandre Gramfort, and Joseph Salmon. "Implicit differentiation of lasso-type models for hyperparameter optimization". In: International Conference on Machine Learning. PMLR. 2020, pp. 810-821. +[BKM+22] Quentin Bertrand, Quentin Klopfenstein, Mathurin Massias, Mathieu Blondel, Samuel Vaiter, Alexandre Gramfort, and Joseph Salmon. "Implicit differentiation for fast hyperparameter selection in non-smooth convex learning". In: Journal of Machine Learning Research 23.149 (2022), pp. 1-43. +[BMA+19] Andrei Barbu, David Mayo, Julian Alverio, William Luo, Christopher Wang, Dan Gutfreund, Josh Tenenbaum, and Boris Katz. "ObjectNet: A large-scale bias-controlled dataset for pushing the limits of object recognition models". In: Neural Information Processing Systems (NeurIPS). 2019. +[BNL+22] Juhan Bae, Nathan Ng, Alston Lo, Marzyeh Ghassemi, and Roger Grosse. "If Influence Functions are the Answer, Then What is the Question?" In: ArXiv preprint arXiv:2209.05364. 2022. +[BSF94] Yoshua Bengio, Patrice Simard, and Paolo Frasconi. "Learning long-term dependencies with gradient descent is difficult". 
In: IEEE Transactions on Neural Networks. 1994. +[CAC+81] Gregory J Chaitin, Marc A Auslander, Ashok K Chandra, John Cocke, Martin E Hopkins, and Peter W Markstein. "Register allocation via coloring". In: Computer languages 6.1 (1981), pp. 47-57. +[CFW+18] Gordon Christie, Neil Fendley, James Wilson, and Ryan Mukherjee. "Functional Map of the World". In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR). June 2018. +[CH20] Xiangning Chen and Cho-Jui Hsieh. "Stabilizing differentiable architecture search via perturbation-based regularization". In: International conference on machine learning. PMLR. 2020, pp. 1554-1565. + +[CHL17] Gong Cheng, Junwei Han, and Xiaoqiang Lu. "Remote sensing image scene classification: Benchmark and state of the art". In: Proceedings of the IEEE. 2017. +[CHM+23] Mike Conover, Matt Hayes, Ankit Mathur, Jianwei Xie, Jun Wan, Sam Shah, Ali Ghodsi, Patrick Wendell, Matei Zaharia, and Reynold Xin. Free Dolly: Introducing the World's First Truly Open Instruction-Tuned LLM. 2023. URL: https://www.databricks.com/blog/2023/04/12/dolly-first-open-commercially-viable-instruction-tuned-llm (visited on 06/30/2023). +[CKL+22] Jeremy M. Cohen, Simran Kaur, Yuanzhi Li, J. Zico Kolter, and Ameet Talwalkar. Gradient Descent on Neural Networks Typically Occurs at the Edge of Stability. 2022. arXiv: 2103.00065 [cs.LG]. URL: https://arxiv.org/abs/2103.00065. +[Cla90] Frank H Clarke. Optimization and nonsmooth analysis. SIAM, 1990. +[CMK+14] Mircea Cimpoi, Subhransu Maji, Iasonas Kokkinos, Sammy Mohamed, and Andrea Vedaldi. "Describing textures in the wild". In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. 2014. +[CNL11] Adam Coates, Andrew Ng, and Honglak Lee. "An analysis of single-layer networks in unsupervised feature learning". In: Proceedings of the fourteenth international conference on artificial intelligence and statistics. 2011. 
+[CXR+22] Kartik Chandra, Audrey Xie, Jonathan Ragan-Kelley, and Erik Meijer. "Gradient descent: The ultimate optimizer". In: Advances in Neural Information Processing Systems 35 (2022), pp. 8214-8225. +[ CXZ+16] Tianqi Chen, Bing Xu, Chiyuan Zhang, and Carlos Guestrin. "Training Deep Nets with Sublinear Memory Cost". In: CoRR abs/1604.06174 (2016). arXiv: 1604.06174. URL: http://arxiv.org/abs/1604.06174. +[DDS+09] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. "Imagenet: A large-scale hierarchical image database". In: Computer Vision and Pattern Recognition (CVPR). 2009. +[DFE+22] Tri Dao, Daniel Y. Fu, Stefano Ermon, Atri Rudra, and Christopher Ré. FlashAttention: Fast and Memory-Efficient Exact Attention with IO-Awareness. 2022. arXiv: 2205.14135 [cs.LG]. URL: https://arxiv.org/abs/2205.14135. +[DJP+24] Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. "The llama 3 herd of models". In: arXiv preprint arXiv:2407.21783 (2024). +[ Eco24] Team EcoDatum. EcoDatum DataComp-small submission. https://www.datacomp.ai/dcclip/leaderboard.html. 2024. +[EFM24] Logan Engstrom, Axel Feldmann, and Aleksander Madry. "DsDm: Model-Aware Dataset Selection with Datamodels". In: 2024. +[EVW+10] M. Everingham, L. Van Gool, C. K. I. Williams, J. Winn, and A. Zisserman. "The Pascal Visual Object Classes (VOC) Challenge". In: International Journal of Computer Vision. 2010. +[FAL17] Chelsea Finn, Pieter Abbeel, and Sergey Levine. "Model-agnostic meta-learning for fast adaptation of deep networks". In: International conference on machine learning. PMLR. 2017, pp. 1126-1135. +[FDF+17] Luca Franceschi, Michele Donini, Paolo Frasconi, and Massimiliano Pontil. "Forward and reverse gradient-based hyperparameter optimization". In: International Conference on Machine Learning (ICML). 2017. +[FFP04] Li Fei-Fei, Rob Fergus, and Pietro Perona. 
"Learning generative visual models from few training examples: An incremental bayesian approach tested on 101 object categories". In: 2004 conference on computer vision and pattern recognition workshop. IEEE. 2004, pp. 178-178. +[FIW+22] Alex Fang, Gabriel Ilharco, Mitchell Wortsman, Yuhao Wan, Vaishaal Shankar, Achal Dave, and Ludwig Schmidt. "Data Determines Distributional Robustness in Contrastive Language Image Pre-training (CLIP)". In: ICML. 2022. + +[GBA+23] Roger Grosse, Juhan Bae, Cem Anil, Nelson Elhage, Alex Tamkin, Amirhossein Tajdini, Benoit Steiner, Dustin Li, Esin Durmus, Ethan Perez, et al. "Studying large language model generalization with influence functions". In: arXiv preprint arXiv:2308.03296 (2023). +[GDG17] Tianyu Gu, Brendan Dolan-Gavitt, and Siddharth Garg. "Badnets: Identifying Vulnerabilities in the Machine Learning Model Supply Chain". In: arXiv preprint arXiv:1708.06733 (2017). +[GIF+24] Samir Yitzhak Gadre, Gabriel Ilharco, Alex Fang, Jonathan Hayase, Georgios Smyrnis, Thao Nguyen, Ryan Marten, Mitchell Wortsman, Dhruba Ghosh, Jieyu Zhang, et al. "DataComp: In search of the next generation of multimodal datasets". In: Advances in Neural Information Processing Systems. 2024. +[GLU12] Andreas Geiger, Philip Lenz, and Raquel Urtasun. "Are we ready for autonomous driving? The KITTI vision benchmark suite". In: 2012 IEEE conference on computer vision and pattern recognition. 2012. +[GW08] Andreas Griewank and Andrea Walther. Evaluating derivatives: principles and techniques of algorithmic differentiation. SIAM, 2008. +[HAM+21] Timothy Hospedales, Antreas Antoniou, Paul Micaelli, and Amos Storkey. "Meta-learning in neural networks: A survey". In: IEEE transactions on pattern analysis and machine intelligence 44.9 (2021), pp. 5149-5169. +[HBB+20] Dan Hendrycks, Collin Burns, Steven Basart, Andy Zou, Mantas Mazeika, Dawn Song, and Jacob Steinhardt. "Measuring massive multitask language understanding". In: arXiv preprint arXiv:2009.03300 (2020). 
+[HBD+19] Patrick Helber, Benjamin Bischke, Andreas Dengel, and Damian Borth. "EuroSAT: A novel dataset and deep learning benchmark for land use and land cover classification". In: IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing. 2019. +[HBK+21] Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. "Measuring Mathematical Problem Solving With the MATH Dataset". In: NeurIPS (2021). +[HBM+20] Dan Hendrycks, Steven Basart, Norman Mu, Saurav Kadavath, Frank Wang, Evan Dorundo, Rahul Desai, Tyler Zhu, Samyak Parajuli, Mike Guo, Dawn Song, Jacob Steinhardt, and Justin Gilmer. The Many Faces of Robustness: A Critical Analysis of Out-of-Distribution Generalization. 2020. arXiv: 2006.16241 [cs.CV]. +[HBM+22] Jordan Hoffmann, Sebastian Borgeaud, Arthur Mensch, Elena Buchatskaya, Trevor Cai, Eliza Rutherford, Diego de Las Casas, Lisa Anne Hendricks, Johannes Welbl, Aidan Clark, et al. "Training compute-optimal large language models". In: arXiv preprint arXiv:2203.15556. 2022. +[HNM19] Satoshi Hara, Atsushi Nitanda, and Takanori Maehara. "Data cleansing for models trained with SGD". In: Advances in Neural Information Processing Systems 32 (2019). +[Hub64] Peter J. Huber. "Robust estimation of a location parameter". In: The Annals of Mathematical Statistics. 1964. +[HY20] Jiaoyang Huang and Horng-Tzer Yau. "Dynamics of Deep Neural Networks and Neural Tangent Hierarchy". In: Proceedings of the 37th International Conference on Machine Learning. 2020. +[HZB+19] Dan Hendrycks, Kevin Zhao, Steven Basart, Jacob Steinhardt, and Dawn Song. "Natural adversarial examples". In: arXiv preprint arXiv:1907.07174 (2019). +[JHV+17] Justin Johnson, Bharath Hariharan, Laurens Van Der Maaten, Li Fei-Fei, C Lawrence Zitnick, and Ross Girshick. "CLEVR: A diagnostic dataset for compositional language and elementary visual reasoning". 
In: Proceedings of the IEEE conference on computer vision and pattern recognition. 2017. +[Jor24] Keller Jordan. "94 percent on CIFAR-10 in 3.29 Seconds on a Single GPU". In: (2024). +[JS08] Yaochu Jin and Bernhard Sendhoff. "Pareto-based multiobjective machine learning: An overview and case studies". In: IEEE Transactions on Systems, Man, and Cybernetics, Part C (Applications and Reviews) 38.3 (2008), pp. 397-415. + +[KB15] Diederik P. Kingma and Jimmy Ba. "Adam: A Method for Stochastic Optimization". In: International Conference on Learning Representations (ICLR). 2015. +[KDJ20] MJ Zico Kolter, David Duvenaud, and Matt Johnson. "Deep implicit layers-neural odes, deep equilibrium models, and beyond, 2020". In: NeurIPS Tutorial (2020). +[KKR+24] Andreas Köpf, Yannic Kilcher, Dimitri von Rütte, Sotiris Anagnostidis, Zhi Rui Tam, Keith Stevens, Abdullah Barhoum, Duc Nguyen, Oliver Stanley, Richard Nagyfi, et al. "Open-assistant conversations-democratizing large language model alignment". In: Advances in Neural Information Processing Systems 36 (2024). +[KL17] Pang Wei Koh and Percy Liang. "Understanding Black-box Predictions via Influence Functions". In: International Conference on Machine Learning. 2017. +[Kri09] Alex Krizhevsky. "Learning Multiple Layers of Features from Tiny Images". In: Technical report. 2009. +[KSD+13] Jonathan Krause, Michael Stark, Jia Deng, and Li Fei-Fei. "3d object representations for fine-grained categorization". In: Proceedings of the IEEE international conference on computer vision workshops. 2013. +[KSM+20] Pang Wei Koh, Shiori Sagawa, Henrik Marklund, Sang Michael Xie, Marvin Zhang, Akshay Balsubramani, Weihua Hu, Michihiro Yasunaga, Richard Lanas Phillips, Sara Beery, et al. "WILDS: A Benchmark of in-the-Wild Distribution Shifts". In: arXiv preprint arXiv:2012.07421 (2020). +[LA19] Zhiyuan Li and Sanjeev Arora. An Exponential Learning Rate Schedule for Deep Learning. 2019. +[LeC98] Yann LeCun. "The MNIST database of handwritten digits". 
In: Technical report. 1998. +[LFX+24] Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, et al. "Deepseek-v3 technical report". In: arXiv preprint arXiv:2412.19437. 2024. +[LHV+23] Shayne Longpre, Le Hou, Tu Vu, Albert Webson, Hyung Won Chung, Yi Tay, Denny Zhou, Quoc V Le, Barret Zoph, Jason Wei, et al. "The flan collection: Designing data and methods for effective instruction tuning". In: International Conference on Machine Learning. PMLR. 2023, pp. 22631-22648. +[LIE+22] Guillaume Leclerc, Andrew Ilyas, Logan Engstrom, Sung Min Park, Hadi Salman, and Aleksander Madry. ffcv. https://github.com/libffcv/ffcv/. 2022. +[LKY22] Yiwei Lu, Gautam Kamath, and Yaoliang Yu. "Indiscriminate Data Poisoning Attacks on Neural Networks". In: arXiv preprint arXiv:2204.09092 (2022). +[LY23] Yiwei Lu, Gautam Kamath, and Yaoliang Yu. "Exploring the limits of model-targeted indiscriminate data poisoning attacks". In: International Conference on Machine Learning. PMLR. 2023, pp. 22856-22879. +[LM20] Guillaume Leclerc and Aleksander Madry. "The two regimes of deep network training". In: arXiv preprint arXiv:2002.10376. 2020. +[LMB+14] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dollár, and C Lawrence Zitnick. "Microsoft coco: Common objects in context". In: European conference on computer vision (ECCV). 2014. +[LSY18] Hanxiao Liu, Karen Simonyan, and Yiming Yang. "Darts: Differentiable architecture search". In: arXiv preprint arXiv:1806.09055 (2018). +[LVD20] Jonathan Lorraine, Paul Vicol, and David Duvenaud. "Optimizing millions of hyperparameters by implicit differentiation". In: International conference on artificial intelligence and statistics. PMLR. 2020, pp. 1540-1552. +[MDA15] Dougal Maclaurin, David Duvenaud, and Ryan Adams. "Gradient-based hyperparameter optimization through reversible learning". In: International conference on machine learning (ICML). 
2015. + +[MRK+13] Subhransu Maji, Esa Rahtu, Juho Kannala, Matthew Blaschko, and Andrea Vedaldi. "Fine-grained visual classification of aircraft". In: arXiv preprint arXiv:1306.5151 (2013). +[MS21] Paul Micaelli and Amos J Storkey. "Gradient-based hyperparameter optimization over long horizons". In: Advances in Neural Information Processing Systems 34 (2021), pp. 10798-10809. +[Nau08] Uwe Naumann. "Optimal Jacobian accumulation is NP-complete". In: Math. Program. 112.2 (Apr. 2008), pp. 427-441. ISSN: 0025-5610. +[NWC+11] Yuval Netzer, Tao Wang, Adam Coates, Alessandro Bissacco, Baolin Wu, Andrew Y Ng, et al. "Reading digits in natural images with unsupervised feature learning". In: NIPS workshop on deep learning and unsupervised feature learning. 2011. +[NZ08] Maria-Elena Nilsback and Andrew Zisserman. "Automated flower classification over a large number of classes". In: 2008 Sixth Indian Conference on Computer Vision, Graphics & Image Processing. 2008. +[OR00] James M Ortega and Werner C Rheinboldt. Iterative solution of nonlinear equations in several variables. SIAM, 2000. +[Pag18] David Page. CIFAR-10 Fast. GitHub Repository. Oct. 2018. URL: https://github.com/davidcpage/cifar10-fast. +[Pea96] Barak A Pearlmutter. "An investigation of the gradient descent process in neural networks". In: PhD thesis, Carnegie Mellon University. 1996. +[PGI+23] Sung Min Park, Kristian Georgiev, Andrew Ilyas, Guillaume Leclerc, and Aleksander Madry. "TRAK: Attributing Model Behavior at Scale". In: *Arxiv preprint arXiv:2303.14186*. 2023. +[PVZ+12] Omkar M Parkhi, Andrea Vedaldi, Andrew Zisserman, and CV Jawahar. "Cats and dogs". In: 2012 IEEE conference on computer vision and pattern recognition. IEEE. 2012, pp. 3498-3505. +[RDK+22] William A Gaviria Rojas, Sudnya Diamos, Keertan Ranjan Kini, David Kanter, Vijay Janapa Reddi, and Cody Coleman. "The dollar street dataset: Images representing the geographic and socioeconomic diversity of the world". 
In: Thirty-sixth Conference on Neural Information Processing Systems Datasets and Benchmarks Track. 2022. +[RFK+19] Aravind Rajeswaran, Chelsea Finn, Sham M Kakade, and Sergey Levine. "Meta-learning with implicit gradients". In: Advances in neural information processing systems 32 (2019). +[RKH+21] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. "Learning transferable visual models from natural language supervision". In: arXiv preprint arXiv:2103.00020. 2021. +[RLZ+24] Vikram V Ramaswamy, Sing Yu Lin, Dora Zhao, Aaron Adcock, Laurens van der Maaten, Deepti Ghadiyaram, and Olga Russakovsky. "GeoDE: a geographically diverse evaluation dataset for object recognition". In: Advances in Neural Information Processing Systems. 2024. +[RRS+19] Benjamin Recht, Rebecca Roelofs, Ludwig Schmidt, and Vaishaal Shankar. "Do ImageNet Classifiers Generalize to ImageNet?" In: International Conference on Machine Learning (ICML). 2019. +[SGB+22] Damien Scieur, Gauthier Gidel, Quentin Bertrand, and Fabian Pedregosa. "The curse of un-rolling: Rate of differentiating through optimization". In: Advances in Neural Information Processing Systems 35 (2022), pp. 17133–17145. +[SN17] Leslie N. Smith and Nicholay Topin. "Super-Convergence: Very Fast Training of Neural Networks Using Large Learning Rates". In: ArXiv preprint arXiv:1708.07120. 2017. +[SRR+22] Aarohi Srivastava, Abhinav Rastogi, Abhishek Rao, Abu Awal Md Shoeb, Abubakar Abid, Adam Fisch, Adam R Brown, Adam Santoro, Aditya Gupta, Adrià Garriga-Alonso, et al. “Beyond the imitation game: Quantifying and extrapolating the capabilities of language models”. In: arXiv preprint arXiv:2206.04615 (2022). +[SSS+11] Johannes Stallkamp, Marc Schlipsing, Jan Salmen, and Christian Igel. "The German traffic sign recognition benchmark: a multi-class classification competition". 
In: The 2011 international joint conference on neural networks. 2011. + +[SSS+22] Mirac Suzgun, Nathan Scales, Nathanael Scharli, Sebastian Gehrmann, Yi Tay, Hyung Won Chung, Aakanksha Chowdhery, Quoc V Le, Ed H Chi, Denny Zhou, et al. "Challenging big-bench tasks and whether chain-of-thought can solve them". In: arXiv preprint arXiv:2210.09261 (2022). +[TGZ+23] Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li, Carlos Guestrin, Percy Liang, and Tatsunori B. Hashimoto. Stanford Alpaca: An Instruction-following LLaMA model. https://github.com/tatsu-lab/stanford_alpaca.2023. +[TMH+24] Gemma Team, Thomas Mesnard, Cassidy Hardin, Robert Dadashi, Surya Bhupatiraju, Shreya Pathak, Laurent Sifre, Morgane Riviere, Mihir Sanjay Kale, Juliette Love, et al. "Gemma: Open models based on gemini research and technology". In: arXiv preprint arXiv:2403.08295 (2024). +[TSF+16] Bart Thomee, David A. Shamma, Gerald Friedland, Benjamin Elizalde, Karl Ni, Douglas Poland, Damian Borth, and Li-Jia Li. "YFCC100M: The New Data in Multimedia Research". In: Communications of the ACM (2016). +[VLW+18] Bastiaan S Veeling, Jasper Linmans, Jim Winkens, Taco Cohen, and Max Welling. "Rotation equivariant CNNs for digital pathology". In: Medical Image Computing and Computer Assisted Intervention-MICCAI 2018: 21st International Conference, Granada, Spain, September 16-20, 2018, Proceedings, Part II 11. 2018. +[Web24] Team Webdataset. webdataset. 2024. URL: https://www.github.com/webdataset/webdataset. +[Wer90] Paul J Werbos. "Backpropagation through time: what it does and how to do it". In: Proceedings of the IEEE 78.10 (1990), pp. 1550-1560. +[WGX+19] Haohan Wang, Songwei Ge, Eric P Xing, and Zachary C Lipton. "Learning robust global representations by penalizing local predictive power". In: Neural Information Processing Systems (NeurIPS) (2019). +[WWS+22] Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. 
"Chain-of-thought prompting elicits reasoning in large language models". In: Advances in neural information processing systems 35 (2022), pp. 24824-24837. +[XHE+10] Jianxiong Xiao, James Hays, Krista A Ehinger, Aude Oliva, and Antonio Torralba. "Sun database: Large-scale scene recognition from abbey to zoo". In: Computer Vision and Pattern Recognition (CVPR). 2010. +[XMG+24] Mengzhou Xia, Sadhika Malladi, Suchin Gururangan, Sanjeev Arora, and Danqi Chen. "Less: Selecting influential data for targeted instruction tuning". In: arXiv preprint arXiv:2402.04333 (2024). +[YLH+14] Peter Young, Alice Lai, Micah Hodosh, and Julia Hockenmaier. "From image descriptions to visual denotations: New similarity metrics for semantic inference over event descriptions". In: Transactions of the Association for Computational Linguistics. 2014. +[ZP00] Geoffrey Zweig and Mukund Padmanabhan. "Exact alpha-beta computation in logarithmic space with application to MAP word graph construction". In: Sixth International Conference on Spoken Language Processing, ICSLP 2000 / INTERSPEECH 2000, Beijing, China, October 16-20, 2000. ISCA, 2000, pp. 855-858. DOI: 10.21437/ICSLP.2000-404. URL: https://doi.org/10.21437/ICSLP.2000-404. +[ZPK+19] Xiaohua Zhai, Joan Puigcerver, Alexander Kolesnikov, Pierre Ruyssen, Carlos Riquelme, Mario Lucic, Josip Djolonga, Andre Susano Pinto, Maxim Neumann, Alexey Dosovitskiy, et al. "A large-scale study of representation learning with the visual task adaptation benchmark". In: arXiv preprint arXiv:1910.04867. 2019. +[ZSP+21] Miao Zhang, Steven W Su, Shirui Pan, Xiaojun Chang, Ehsan M Abbasnejad, and Reza Haffari. "idarts: Differentiable architecture search with stochastic implicit gradients". In: International Conference on Machine Learning. PMLR. 2021, pp. 12557-12566. + +# A Calculating metagradients with REPLAY + +This appendix contains supplementary material for Section 2. We describe two algorithms in detail: stepwise AD, and our own algorithm REPLAY. 
Refer to Section 2 for the notation used in this appendix. + +# A.1 Warmup: Step-wise AD + +We fully describe step-wise AD in Algorithm 2. The algorithm requires storing all $T$ optimizer states, but requires constant memory overhead for each AD call (as each AD call is over a single step), making it feasible to compute for small setups. + +Algorithm 2: metagradients in $\mathcal{O}(T)$ space. +```txt +1 // Store each optimizer state on disk +2 $\{s_i\}_{i=0}^T \leftarrow$ Train model via $A(z)$ +3 +4 // Variables; shorthand for $\frac{\partial f(z)}{\partial z}$ and $\frac{\partial f(z)}{\partial s_T}$ +5 $\bar{z} \gets 0$ +6 $\bar{s}_T \leftarrow \frac{\partial g(s_T)}{\partial s_T} \quad //$ One reverse-mode AD call +7 +8 // Reverse-mode differentiate step-by-step +9 for $s_i \gets s_{T-1}$ to $s_0$ do +10 // One reverse-mode AD call. Left: $\nabla_{s_i}f$ . Right: contribution to $\nabla_{z}f$ at $i$ . +11 $\bar{s}_i \gets \bar{s}_{i+1} \cdot \frac{\partial h_i(s_i, z)}{\partial s_i}, \quad \bar{z}_i \gets \bar{s}_{i+1} \cdot \frac{\partial h_i(s_i, z)}{\partial z}$ +12 +13 $\bar{z} \gets \bar{z} + \bar{z}_i \quad //$ Accumulate metagradient +14 +15 Return $\bar{z}$ +``` + +# A.2 REPLAY + +We now describe REPLAY, our method for calculating metagradients. For a free parameter $k \in \mathbb{N}$ , REPLAY requires storing $\mathcal{O}(k\log_k(T))$ optimizer states and an additional $\mathcal{O}(\log_k(T))$ factor of computation. The free parameter $k$ controls the trade-off between storage and required compute. We fully describe REPLAY in Algorithm 3. REPLAY modifies Algorithm 2 by retrieving the optimizer states in reverse order using a $k$ -ary tree structure in lieu of a list of all the stored states. + +# A.2.1 Lazy $k$ -ary tree + +We now describe the $k$ -ary tree structure that underlies REPLAY; for a visual reference of this tree with $k = 2$ , see Figure 3. 
For ease of analysis we parameterize the total number of states as $n = T + 1$ (and therefore take $n - 1$ total training steps) when describing this data structure, and assume WLOG that $n$ is an integer power of $k$ . At a high level, traversing this tree recursively replays retraining to recover all the optimizer states in reverse order, while deleting states that are no longer needed. We call this tree "lazy" because it retrains only when required to obtain states that are not yet retrieved. + +The tree is a complete $k$ -ary tree with $n$ leaves (and therefore $\log_k(n)$ depth) structured as follows. We start at the root, then recursively define the rest of the tree. Every node in the tree represents a single optimizer state. The root represents state $s_0$ . To recursively define the remaining nodes: each non-leaf node $s_i$ at depth $d$ has $k$ equally spaced (in terms of state number) children starting—from left to right—at state $s_i$ and ending at $s_{i+n/k^{d+1}}$ . This means that the leaves correspond—from left to right—to the states $s_0, s_1, \ldots, s_{n-1}$ . + +We reduce the problem of iterating over the states in reverse to the problem of reverse in-order traversing this tree and yielding just the leaves—this is exactly the states in reverse order. A reverse in-order traversal for this $k$ -ary tree requires repeatedly: recursively traversing child nodes from largest to smallest, then visiting the parent node. We design the specifics of this traversal to maximize space and compute efficiency. To access the children of a parent node at traversal time, we replay model training from the smallest child state (which is stored in the parent state) to the largest child state and store all the children. We perform this operation recursively each time we traverse a node. After traversing the node's left side (i.e., after ascending from this node), we delete all its child states. 
+ +Reverse in-order traversing this tree requires storing at most $k \log_k(n)$ optimizer states at a time, and in aggregate requires retraining the model $\log_k(n)$ times. The argument for each is straightforward. Storage: the traversal requires storing at most $k$ states for each level that it descends (we store $k$ states whenever we first traverse to a parent node) and we remove $k$ states for each level that the traversal ascends (we remove $k$ states after we are done with the left traversal of a parent). Compute: we replay training to reinstantiate the children of every parent node a single time. The $k^d$ parent nodes at level $d$ each require replaying $\mathcal{O}(n / k^d)$ states to reinstantiate children. Therefore, in a traversal, each level requires $\mathcal{O}(n)$ (i.e., $k^d \cdot n / k^d$) optimizer steps. There are $\log_k(n)$ levels with parent nodes, which means a total of $\mathcal{O}(n \log_k(n))$ optimizer steps, or a multiplicative factor of $\mathcal{O}(\log_k(n))$ steps compared to model training. + +Algorithm 3: REPLAY. metagradients in $\mathcal{O}(k\log_k(T))$ space. +```txt +1 $T\gets$ Lazy $k$ -ary tree for $\mathcal{A}(z)$ // Make lazy $k$ -ary tree of Appendix A.2 +2 +3 // Variables; shorthand for $\frac{\partial f(z)}{\partial z}$ and $\frac{\partial f(z)}{\partial s_T}$ +4 $\bar{z}\gets 0$ +5 $\bar{s}_T\gets \frac{\partial g(s_T)}{\partial s_T}$ // One reverse-mode AD call +6 +7 // Reverse-mode differentiate step-by-step; traverse $T$ instead of stored states +8 for $s_i\gets s_{T - 1}$ to $s_0\in$ reverse_inorder_traversal(T) do +9 // One reverse-mode AD call. Left: $\nabla_{s_i}f$ . Right: contribution to $\nabla_{z}f$ at i. 
+10 $\bar{s}_i\gets \bar{s}_{i + 1}\cdot \frac{\partial h_i(s_i,z)}{\partial s_i},\quad \bar{z}_i\gets \bar{s}_{i + 1}\cdot \frac{\partial h_i(s_i,z)}{\partial z}$ +11 +12 $\bar{z}\gets \bar{z} +\bar{z}_i$ // Accumulate metagradient +13 +14 Return $\bar{z}$ +``` + +# B Smooth Model Training + +# B.1 Omitted Figures + +![](images/af3a8f87efacd4acaa3c56185ccfd5ffdb24bf0c973fc76594255668269e6e61.jpg) +Figure 11: The factors affecting metasmoothness of training a ResNet-9 on the CIFAR-10 dataset. See §3 for details. + +![](images/ed91df62b477bd91c24737b738d49397bea204557c82454c3f8740aacb9e13e6.jpg) +Non-smooth (Example #1118) + +![](images/b1658df434e665ee2b41168eec4c27970ddad44acffd12d60b48a59d7a89737a.jpg) +Smooth (Example #1118) + +![](images/49bd3d8ee76b74e6943123a4c519082b91d14235276199ab531f34a03a34cc78.jpg) +Non-smooth (Example #3349) + +![](images/719d47ec6f96ce91103c9fde603e154cba7baa02ad50f8e48186c6c41b3a97ff.jpg) +Smooth (Example #3349) + +![](images/72ebdc565733f1e246ef1530655df2fd0a0556ed6198f739627b7b58781f7021.jpg) +Non-smooth (Example #10600) + +![](images/7fbbbef73dee36330195f6d0fc527ad0a7c1c618fee58e5669d44bef1d5bb793.jpg) +Smooth (Example #10600) + +![](images/a5641668a6941e25c895bd575ef0e74375dbeaa1da5fcb13884916b4ecfa5360.jpg) +Non-smooth (Example #15578) +Figure 12: Additional loss landscape visualizations. + +![](images/87b96baf0a2e4bfad4e1c9d8f8d653181ea5e5f71efa5c492a82e9d1324d2dd6.jpg) +Smooth (Example #15578) + +# C Metagradients for DataComp + +This appendix contains pseudocode for the main algorithm used to do dataset selection for DataComp. It also contains additional implementation details on how metagradients were applied to CLIP, and how they were specifically applied to the DataComp setting. 
+ +# C.1 Dataset Selection Using MGD + +When implementing Algorithm 1, there are several differences from the pseudocode below: firstly, rather than selecting $\mathbf{m}$ fully randomly every step, we randomly select a shard comprising fraction $p$ of the data and take steps on all datapoints in the shard (see Section C.2). To mitigate overfitting, we also bake a "minibatch fraction" $q$ into our model output function $\phi$ . For example, if $\phi$ calculates model loss on the ImageNet train set, each time $\phi$ is called, we randomly sample fraction $q$ of the ImageNet train set to evaluate on. + +Adapting the CLIP loss function to our surrogate learning algorithm. Here, we explain how dataweights are incorporated into the CLIP loss function—the formulation given in Section 4.1 is actually slightly simplified and incorrect, as it does not account for cross terms in the CLIP contrastive loss. As a refresher, we first state the "vanilla" CLIP loss function, $\ell$ , as it is defined in [RKH+21]. Let $b$ be the batch size, $d$ be the embedding dimension, and $\mathbf{x}$ be the training batch at timestep $k$ . Recall that the CLIP model internally has two "submodules": an image embedder and a text embedder. We then use these to obtain image embeddings $E_{I} \in \mathbb{R}^{b \times d}$ and text embeddings $E_{T} \in \mathbb{R}^{b \times d}$ from $\mathbf{x}$ . We then compute the image-wise scores, or logits, for this batch as $S = E_{I}E_{T}^{\top}$ . Then, we can define the CLIP loss (as a function of the logits) as + +$$ +L (S) = \frac {1}{2} (L _ {I} (S) + L _ {T} (S)), +$$ + +where $L_{I}$ and $L_{T}$ are row-wise and column-wise cross-entropy losses, respectively: + +$$ +L _ {I} (S) = \sum_ {i = 1} ^ {b} \log \left(\frac {\exp (S _ {i , i})}{\sum_ {j = 1} ^ {b} \exp (S _ {i , j})}\right), \quad L _ {T} (S) = \sum_ {i = 1} ^ {b} \log \left(\frac {\exp (S _ {i , i})}{\sum_ {j = 1} ^ {b} \exp (S _ {j , i})}\right). 
+$$
+
+We now wish to relax $L$ into a new function $L'$ that supports an additional input $\mathbf{z} \in \mathbb{R}^n$ , where $\frac{\partial L'}{\partial \mathbf{z}}$ resembles the metagradients with respect to dataweights. In order to do this, we imagine passing the entire dataset $D$ into our embedder to obtain $E_I'$ and $E_T'$ and take our new logits $S' = E_I'E_T'^{\top} \in \mathbb{R}^{n \times n}$ .
+
+There are some additional key conditions our relaxation $L'$ should satisfy. Particularly: when $\mathbf{z} = \mathbf{0}_n$ , we should recover the normal CLIP loss $L$ , and when $\mathbf{z}$ is all 0's except for a single entry $i$ , $L'$ should act as if $i$ had been appended to the original batch $\mathbf{x}$ . In addition, $L'$ should always have meaningful partials with respect to $\mathbf{z}$ , even when some values in $\mathbf{z}$ are 0.
+
+Letting $\mathbf{1}_{i = j}$ and $\mathbf{1}_{i\neq j}$ be indicator variables and letting $\mathbf{1}_k\in \{0,1\} ^n$ be the indicator vector for the $k$ -th batch, we find that the definition
+
+$$
+L ^ {\prime} \left(S ^ {\prime}, \mathbf {z}\right) = L _ {I} ^ {\prime} \left(S ^ {\prime}, \mathbf {z}\right) + L _ {T} ^ {\prime} \left(S ^ {\prime}, \mathbf {z}\right),
+$$
+
+where
+
+$$
+L _ {I} ^ {\prime} \left(S ^ {\prime}, \mathbf {z}\right) = \sum_ {i = 1} ^ {n} \left(z _ {i} + \left(\mathbf {1} _ {k}\right) _ {i}\right) \log \left(\frac {\exp \left(S _ {i , i} ^ {\prime}\right)}{\sum_ {j = 1} ^ {n} \exp \left(S _ {i , j} ^ {\prime}\right) \left(\mathbf {1} _ {i = j} + \mathbf {1} _ {i \neq j} \left(z _ {j} + \left(\mathbf {1} _ {k}\right) _ {j}\right)\right)}\right)
+$$
+
+and
+
+$$
+L _ {T} ^ {\prime} (S ^ {\prime}, \mathbf {z}) = \sum_ {i = 1} ^ {n} \left(z _ {i} + \left(\mathbf {1} _ {k}\right) _ {i}\right) \log \left(\frac {\exp \left(S _ {i , i} ^ {\prime}\right)}{\sum_ {j = 1} ^ {n} \exp \left(S _ {j , i} ^ {\prime}\right) \left(\mathbf {1} _ {i = j} + \mathbf {1} _ {i \neq j} \left(z 
_ {j} + \left(\mathbf {1} _ {k}\right) _ {j}\right)\right)}\right)
+$$
+
+satisfy these conditions.
+
+Finally, we define the loss for the entire batch $\ell'$ as a function of $\mathbf{z}$ and model parameters $\theta$ which outputs the loss calculated according to $L'$ above. To summarize, letting $\mathbf{x}^{(t)}$ denote the $t$ -th training batch, the loss function $\ell_t$ at step $t$ of our surrogate learning algorithm $\mathcal{A}'$ for CLIP training is:
+
+$$
+\ell_ {t} ^ {\prime} (\theta) := \left\{ \begin{array}{l l} \ell (\mathbf {x} ^ {(t)}; \theta) & \text {if } t \neq k \\ \ell^ {\prime} (\mathbf {z}; \theta) & \text {if } t = k. \end{array} \right.
+$$
+
+We find that this empirically works well for obtaining meaningful metagradients with respect to dataweights in the CLIP setting, and yields strong dataset selection results.
+
+# C.2 Scaling MGD for CLIP and DataComp
+
+MGD is highly scalable, allowing it to be applied to large-scale settings like training CLIP models. In particular, computing metagradients is only up to a constant factor more expensive than training a model normally. Here, we outline challenges we faced in scaling MGD in this setting, and how they were resolved. Specifically, we will explain how we efficiently calculated metagradients for CLIP models and efficiently tracked/shuffled our dataset selection from step-to-step despite its large storage footprint.
+
+Computing metagradients. Due to the large batch size used in the CLIP contrastive loss, we implement manual gradient checkpointing to make the operations computationally feasible on our hardware. The most memory-intensive operations are model forward passes (and their gradients): obtaining the image and label embeddings given raw pixel data and tokens. So, we manually make gradient checkpoints before this operation, allowing us to run the embedder in minibatches to avoid memory issues. 
+This setup also naturally lends itself to parallelization across multiple GPUs, which we make use of to further speed up our computations.
+
+Loading, writing, and storing data. Due to the data-intensive nature of training large models like CLIP and our need to frequently produce new datasets at each optimization step, we found that using the web-dataset [Web24] format given by DataComp was restrictively slow. To circumvent this, we rewrote all data following the format of FFCV [LIE+22], allowing us to load and write data much faster. Specifically, we divided the entire candidate pool into 8 base shards. Once we have trained a model, we choose one of the 8 shards, compute metagradients corresponding to all datapoints in the shard, take a gradient step on them, and rewrite the shard. This roughly corresponds to $p = \frac{1}{8}$ in Algorithm 1, which we empirically found to work well for optimization. In following steps, we always choose one of the 8 original shards to calculate metagradients for—this ensures that points removed from the dataset in some optimization step can return if they have a negative metagradient.
+
+We also observed that always stepping on the sign causes the sizes of the shards to grow over time: stepping based on the sign of the metagradient does not decrease the weight on a positive-weight datapoint if its dataweight is already 0, so our steps are biased towards increasing the size of the shards. To combat this blowup, after some number of optimization steps, we choose a fixed shard size and enforce that subsequent steps must not change the size of the shards—the step size thereafter is controlled by hyperparameter $q$ representing the fraction of datapoints in a shard which are incremented. We experimented both with randomly sampling which points are added or removed, and stepping on the datapoints with the top $q$ and bottom $q$ metagradient; the latter seems to give empirically better performance. 
+
+To maintain randomness during shuffling, we implement an 8-way dataloader which would shuffle all 8 shards individually. Then, to sample a batch of $b$ datapoints, we would sample $b / 8$ datapoints from each shard and concatenate them to fill our batch. This works better than simply sampling our entire batch from a single shard, as (especially in later optimization steps) shards may contain a high number of duplicate datapoints, which causes CLIP's contrastive loss function to misbehave if they appear in the same batch.
+
+To minimize disk space used, old shards can be deleted once they become "stale". Specifically, if shard $s$ is rewritten into shard $s'$ , all future optimization steps will never read $s$ again, and $s$ can safely be deleted. Thus, when running MGD for a large number of steps and potentially rewriting each shard multiple times, the total disk space used by our algorithm is constant in the number of steps we take: it stores the 8 most recently written shards on disk at any given time, and any other shards are deleted to save space.
+
+# C.3 Details Pertaining to the DataComp Benchmark
+
+Setting. We provide a brief summary of the DataComp competition here, and we refer readers to the original paper [GIF+24]. DataComp is a framework to compare different training dataset selection techniques. Participants submit a training dataset (which, for our purposes, is a subset of a larger dataset), upon which a CLIP model is trained from scratch with a fixed learning algorithm, model architecture, and number of training steps. We focus on DataComp-small, which has a candidate pool of 12.8 million samples. The number of samples seen during training is also fixed at 12.8 million.
+
+We try to match the optimization hyperparameters enforced by DataComp as closely as possible. 
As a refresher, our ADAM[KB15] update step can be written as + +$$ +\theta_ {t + 1} = - \alpha_ {t} \cdot \left(m _ {t} / \left(\sqrt {v _ {t} + \varepsilon_ {\mathrm {r o o t}}} + \varepsilon\right) + \lambda \theta_ {t}\right) \tag {12} +$$ + +where $m_{t}$ and $v_{t}$ are running estimates of the first and second moments of the gradients, respectively, $\lambda$ represents weight decay, $\alpha$ represents the learning rate, and $\varepsilon$ and $\varepsilon_{\mathrm{root}}$ are hyperparameters to avoid blowup. Our training hyperparameters can be found in Table 1 and are identical to those mandated by DataComp-small, aside from a positive $\varepsilon_{\mathrm{root}}$ added for numerical stability. The values of $\varepsilon_{\mathrm{root}}$ and $k$ (the step at which metagradients are calculated) were chosen to empirically maximize metasmoothness. + +Table 1: Hyperparameters for the CLIP DataComp experiments. + +
HyperparameterValue
DataComp Scalesmall
ModelViT-B/32
Train compute (MACs)9.5 × 1016
Pool size12.8M
# samples seen12.8M
Batch size4096
Training batches3125
k2800
Learning rate5 × 10-4
AdamW β10.9
AdamW β20.98
AdamW εroot1 × 10-17
Warmup500
+ +Our experiments are also run on an incomplete subset of the entire DataComp candidate pool. DataComp did not store the raw image and text files when assembling their dataset; they only stored a list of URL's to download data from. Due to the nature of the internet, for various reasons, some of these URL's no longer point to the same data (or no longer point to any data at all). Thus, after ignoring these broken links, our candidate pool is only around $80\%$ of the size of the original DataComp candidate pool when it was collected in 2023. All our results are obtained by running our methods on this subset of the DataComp pool. + +Evaluation tasks. In order to ensure that our method is truly improving trained models' performances on the entire target distribution and not overfitting to the target set, for each of the 38 evaluation tasks used by DataComp, we attempted to separately create a disjoint target and validation set (DataComp only creates test sets for each task). Thus, metagradients were computed on the target sets and model performance was evaluated on the validation set, before submitting with the official DataComp script and evaluating on the test sets. This ensures that our method's generalization ability is being evaluated, and we are not overfitting to our target set. + +For various reasons, creating target splits was not possible for all 38 tasks; we summarize our setup in Table 2. + +Table 2: All DataComp evaluation tasks. The "Target set" column refers to whether metagradients were taken on the target set corresponding to this dataset. + +
DatasetTaskTest sizeTrain sizeVal sizeMain metricTarget set
Caltech-101 [FFP04]Object recognition60852754306mean per class
CIFAR-10 [Kri09]Visual recognition10000450005000accuracy
CIFAR-100 [Kri09]Visual recognition10000450005000accuracy
CLEVR Counts [JHV+17; ZPK+19]Counting15000650005000accuracy
CLEVR Distance [JHV+17; ZPK+19]Distance prediction15000650005000accuracy
Country211 [RKH+21; TSF+16]Geolocation21100379804220accuracy
DTD [CMK+14]Texture classification18803384376accuracy
EuroSAT [HBD+19; ZPK+19]Satellite imagery recognition5400194402160accuracy
FGVC Aircraft [MRK+13]Aircraft recognition33336001666mean per class
Food-101 [BGV14]Food recognition25250707505000accuracy
GTSRB [SSS+11]Traffic sign recognition12630352893920accuracy
ImageNet 1k [DDS+09]Visual recognition5000012761675000accuracy
ImageNet Sketch [WGX+19]Visual recognition50889N/AN/Aaccuracy*
ImageNet V2 [RKS+19]Visual recognition10000N/AN/Aaccuracy*
ImageNet-A [HZB+19]Visual recognition7500N/AN/Aaccuracy*
ImageNet-O [HZB+19]Visual recognition2000N/AN/Aaccuracy*
ImageNet-R [HBM+20]Visual recognition30000N/AN/Aaccuracy*
KITTI distance [GLU12; ZPK+19]Distance prediction711N/AN/Aaccuracy
MNIST [LeC98]Digit recognition10000550005000accuracy
ObjectNet [BMA+19]Visual recognition18574N/AN/Aaccuracy*
Oxford Flowers-102 [NZ08]Flower recognition61491836204mean per class
Oxford-IIIT Pet [PVZ+12; ZPK+19]Pet classification36693312368mean per class
Pascal VOC 2007 [EVW+10]Object recognition14976140961566accuracy
PatchCamelyon [VLW+18; ZPK+19]Metastatic tissue cls.327682899125000accuracy
Rendered SST2 [ZPK+19]Sentiment classification18217013779accuracy
RESISC45 [CHL17; ZPK+19]Satellite imagery recognition6300226802520accuracy
Stanford Cars [KSD+13]Vehicle recognition80417329814accuracy
STL-10 [CNL11]Visual recognition80004500500accuracy
SUN-397 [XHE+10]Scene recognition108753N/AN/Aaccuracy
SVHN [NWC+11; ZPK+19]Digit recognition26032682575000accuracy
iWildCam [BAC+21; KSM+20]Animal recognition427911470845000macro F1 score
Camelyon17 [BGM+18; KSM+20]Metastatic tissue cls.850543659005000accuracy
FMoW [CFW+18; KSM+20]Satellite imagery recognition221081032615000worst-region acc.
Dollar Street [RDK+22]Object recognition3503138421537worst-income top-5 acc.
GeoDE [RLZ+24]Object recognition12438444884943worst-region acc.
Flickr30k [YLH+14]Image and text retrieval31014N/AN/AR@1§
MSCOCO [LMB+14]Image and text retrieval5000N/AN/AR@1§
WinoGAViL [BBY+22]Commonsense association3563N/AN/AJaccard score§
+ +# D Selecting IFT data + +In this section, we describe the details of the IFT setting of Xia et al. [XMG+24], as well as the details of our method. + +Setting. The setting contains a fixed data pool: instruction fine-tuning data from a data pool consisting of four combined IFT datasets (cf. Table 4 and Xia et al. [XMG+24] for more information). The goal is to select the data that yields the best possible task performance for a LoRA fine-tuning run. We adapt a LoRA to a Gemma-2B model (the pretraining-only Gemma-2B model) using the LoRA configuration from Xia et al. [XMG+24]. + +Data splits. See Table 3 for a description of the available data for each task, along with the task setup details. Xia et al. [XMG+24] constructed these extra samples by drawing from the ICL samples given in the tasks originally. Note that we drop TydiQA from the original work of Xia et al. [XMG+24] as there are not enough samples to select with (there is only one from each category, for a total of 7). + +Method. We execute Algorithm 1 with $k$ as 150 steps from the end of training and the Bernoulli parameter $p$ controlling the step size as 0.2. At each step, we choose a "minibatch" with a size equal to half the target set and a quarter of the target set for BBH and MMLU, respectively (that is, we only select to optimize performance on a fraction of the target set at a time). We model select over iterates and hyperparameters by (a) choosing the top three steps in terms of validation loss for each run (b) selecting the best one in terms of full train set accuracy (including the part that we trained on). We perform this procedure—akin to Pareto optimization [JS08]—because the validation set is so small (as the overall set of samples is very small) that it is difficult to select models without overfitting otherwise. 
+ +We compare with two baselines: training on the full dataset (i.e., training on the entirety of all the data for a single epoch), and LESS (we use the data selected according to "LESS-T" [XMG+24], following the recommendation of 4 epochs). + +For model training, we train with ADAM ( $\beta_{1} = 0.95$ , $\beta_{2} = 0.975$ , decoupled weight decay as $10^{-5}$ ) and a one-cycle linear schedule starting at $10^{-6}$ of the maximum learning rate, reaching the peak over $25\%$ of training, then ending at 0.1 of the maximum learning rate. We insert a positive $\varepsilon_{\mathrm{root}}$ into the inverse square root term in the ADAM update to prevent metagradient (and to a lesser extent update) blowup (see Eq. 12). The model training is the same across selected data, except that we use $\varepsilon_{\mathrm{root}} = 10^{-7}$ for MGD-selected data and $\varepsilon_{\mathrm{root}} = 10^{-9}$ for the other runs (we select the optimal parameter for each class of method). We additionally hyperparameter select for the best learning rate across each baseline by minimizing validation set loss; LESS performs best with a smaller learning rate (0.00024 for BBH and 0.00012 for MMLU) than training on the full dataset or with MGD (0.0006 for both). We normalize the loss of each training sample by taking the mean across predicted tokens during training, and do not divide by the batch size (important for scaling the $\varepsilon_{\mathrm{root}}$ term, but otherwise ADAM is invariant to the scale). + +Selecting smooth model training for MGD. For MGD runs, we jointly select learning rate and $\varepsilon_{\mathrm{root}}$ using the smoothness metric of Section 3. We find that the choice of $\varepsilon_{\mathrm{root}}$ term is important (just as the choice of $\varepsilon$ is important in standard ADAM training); choosing a much larger term results in non-smooth training. 
We also find that metagradients are sensitive to learning rate schedule; choosing a much larger or smaller maximum learning rate results in non-smooth training. + +Table 3: Overview of datasets used in IFT dataset selection (from Xia et al. [XMG+24]). + +
Dataset# Shot# Tasksn_targetn_valn_testAnswer TypeType of Task
MMLU5575722818,721Letter optionsKnowledge/Recall
BBH3232346920COT and answerReasoning
+ +Table 4: Details of IFT training datasets. + +
Dataset# InstanceSourced fromPrompt Len.Completion Len.
FLAN V2100,000NLP datasets and human-written instructions355.731.2
CoT100,000NLP datasets and human-written CoTs26653.2
Dolly15,011Human-written from scratch118.191.3
Open Assistant 155,668Human-written from scratch34.8212.5
+ +# IFT results + +![](images/ab6a03663afe6a2c4a405104df1bd0065c08dd9c6f6cb7f26c3a6a34d3c3783e.jpg) + +![](images/36df9944129f621ff0e1ecdf764695eb6b36f0e8d3eaba57ccb6745917a7a037.jpg) +Figure 13: MGD dataset selection improves the validation loss over metagradient steps, demonstrating our method's efficacy. However, the gap between loss on samples MGD directly optimizes on and the validation samples widens over the number of iterates, and there is overfitting depending on the number of steps taken. + +# E Accuracy-degrading data poisoning + +# E.1 Background on Gradient Cancelling attack + +We briefly review the Gradient Cancelling attack [LKY23] used as a baseline in our experiments. We refer the reader to the original paper for details. Here we highlight the key ideas. + +At a high level: Gradient Cancelling (GC) explicitly aims at making a specific malicious parameter configuration reachable through retraining on the poisoned dataset. The attack operates in two phases: + +1. Parameter Generation: The attacker generates a target malicious model parameter independently, often using a direct parameter corruption method like Gradient-based Parameter Corruption (GradPC) [LKY23]. The end result of this phase is a target model parameter $\theta_{p}$ that achieves low accuracy on the test set, but is close to the original parameter $\theta_0$ derived from training on the clean dataset. +2. Poison Data Crafting: In the second phase, GC finds values of the poison data that induce a near-zero gradient at the target parameter $\theta_{p}$ . This is achieved by solving a gradient cancellation optimization problem: specifically, GC minimizes the total gradient of the loss function (with respect to the model parameters) evaluated over the combined (clean and poisoned) dataset, aiming to ensure that the gradient at the malicious parameter $\theta_{p}$ approaches zero. 
+ +# E.2 Metasmooth hyperparameters + +Table 5: Hyperparameters used in the ResNet-9 [Jor24] CIFAR-10 poisoning experiments. The augmentations used are normalization, random horizontal flip, and random translate (2 pixels) + +
HyperparameterValue
Learning rate0.5
β10.85
Weight decay10-5
Exclude BatchNormTrue
OptimizerSGD
Batch size250
Epochs18
Starting learning rate fraction0.5
Relative min. learning rate10000
Scheduler max. iterations50000
Nesterov momentumTrue
BatchNorm ε10-5
BatchNorm momentum0.5
Final biasTrue
Width multiplier2.0
Final scale0.125
Initial scale2.0
Batchnorm locationBefore activation
Activation functionGELU
Pooling typeAverage
Test-time augmentationTrue
+ +# F LR optimization + +Table 6: The grid search was run over all 528 combinations of the hyperparameter values below. + +
ParameterValues
Peak learning rate[7.0, 7.5, 8.0, 8.5, 9.0, 9.5, 10.0, 10.5, 11.0, 11.5, 12.0]
Initial LR multiplier[0.05, 0.15, 0.25, 0.35, 0.45, 0.55]
Final LR multiplier[0.05, 0.15, 0.25, 0.35, 0.45, 0.55]
LR peak time[0.25, 0.5, 0.75]
+ +![](images/fec6b010bfcae93afb53b0c60c137838b889e707f0941f8e9d65d3df201207b2.jpg) +MGD step +Figure 14: Graphs of our learned LR schedules. \ No newline at end of file diff --git a/data/2025/2503_13xxx/2503.13751/images/07f6be6f9521f926d80bf16a7c01a17b4fdf20a667bdd384b75d05b3321c9ece.jpg b/data/2025/2503_13xxx/2503.13751/images/07f6be6f9521f926d80bf16a7c01a17b4fdf20a667bdd384b75d05b3321c9ece.jpg new file mode 100644 index 0000000000000000000000000000000000000000..aafcec73ef6289c4f28cc7ac5de81e2983fde47b --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/07f6be6f9521f926d80bf16a7c01a17b4fdf20a667bdd384b75d05b3321c9ece.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bb2e02f069335c7cc0aa6c925f123a193065c4438e1f8522fc698d74ab94574c +size 11389 diff --git a/data/2025/2503_13xxx/2503.13751/images/0de69f09eb0ea62160d63ffd2008ef1c27d809b2adc1eb7b7e9d86299c20f1de.jpg b/data/2025/2503_13xxx/2503.13751/images/0de69f09eb0ea62160d63ffd2008ef1c27d809b2adc1eb7b7e9d86299c20f1de.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2da512b431ce3131c5f0db47fdef39f7a1ea6f21 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/0de69f09eb0ea62160d63ffd2008ef1c27d809b2adc1eb7b7e9d86299c20f1de.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d5349a337f2bcf234dd432aa5305fa1432ec631e6a4d3f4a47137241c6eac62a +size 6043 diff --git a/data/2025/2503_13xxx/2503.13751/images/0ee01eb4fa01b3734087808afd61cf8ee91b915d35fec2c214b7e2bfd450099d.jpg b/data/2025/2503_13xxx/2503.13751/images/0ee01eb4fa01b3734087808afd61cf8ee91b915d35fec2c214b7e2bfd450099d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..967f8abfa37868fe0a909d61894a51cddc660e3f --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/0ee01eb4fa01b3734087808afd61cf8ee91b915d35fec2c214b7e2bfd450099d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:f7bc7724b75fc62a3c14b49167799e52b28506528caa49ea6cc8835218f09dd0 +size 32753 diff --git a/data/2025/2503_13xxx/2503.13751/images/168b7e1a3985608aa32a7de01e5cada2b24b954a4e045cde1ad9fe397a71c91e.jpg b/data/2025/2503_13xxx/2503.13751/images/168b7e1a3985608aa32a7de01e5cada2b24b954a4e045cde1ad9fe397a71c91e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..264c3db0edefa2b7738c3a0803564565f0676726 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/168b7e1a3985608aa32a7de01e5cada2b24b954a4e045cde1ad9fe397a71c91e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:95e7e91eed3da79b7b0c5440f247dae5a5c3d0eb4335d0005f67b9def2f16d0f +size 12927 diff --git a/data/2025/2503_13xxx/2503.13751/images/1a484312f1ed10309ea34d6652f551a0d6047e126a3f781ebb74cc68734bab99.jpg b/data/2025/2503_13xxx/2503.13751/images/1a484312f1ed10309ea34d6652f551a0d6047e126a3f781ebb74cc68734bab99.jpg new file mode 100644 index 0000000000000000000000000000000000000000..78d36b56336b140d3ddf3976b34700291857cef4 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/1a484312f1ed10309ea34d6652f551a0d6047e126a3f781ebb74cc68734bab99.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5b85cb8ef098d6a4bfa255ec3dc79cf1a1e1ff7f4f09e3de155f6ca9cd4f2a2d +size 21312 diff --git a/data/2025/2503_13xxx/2503.13751/images/1d53986854c1c4e6619d16a37b54e29fc0b88bdf3520d589c05111d30ec1c831.jpg b/data/2025/2503_13xxx/2503.13751/images/1d53986854c1c4e6619d16a37b54e29fc0b88bdf3520d589c05111d30ec1c831.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fd950f8b59441d91f7bef2d1c3ce548fa39c1e88 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/1d53986854c1c4e6619d16a37b54e29fc0b88bdf3520d589c05111d30ec1c831.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:baf0277e2419ed971a556c39c87bfe6f9adbc1433a034a66430906ae246bb8fe +size 40453 diff --git 
a/data/2025/2503_13xxx/2503.13751/images/1dc990e467236d18f25ade5d515382e45fc4338efb7565274a7fd059602d5385.jpg b/data/2025/2503_13xxx/2503.13751/images/1dc990e467236d18f25ade5d515382e45fc4338efb7565274a7fd059602d5385.jpg new file mode 100644 index 0000000000000000000000000000000000000000..521b10940b1b535d0d54b30fff421f7722842c76 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/1dc990e467236d18f25ade5d515382e45fc4338efb7565274a7fd059602d5385.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c70fdfc736024ebdcbeafe6ef46cbd35ef9d1eeebebf36b9ee3c6a0cf1c830f0 +size 10156 diff --git a/data/2025/2503_13xxx/2503.13751/images/25d0520397f487be3b99371e5ad5a60cf7c24eb09b3e0e6bcccb837ff7007b8c.jpg b/data/2025/2503_13xxx/2503.13751/images/25d0520397f487be3b99371e5ad5a60cf7c24eb09b3e0e6bcccb837ff7007b8c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e404353215d231ed4c69d4c74d809b8a0e49b220 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/25d0520397f487be3b99371e5ad5a60cf7c24eb09b3e0e6bcccb837ff7007b8c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:580310f46da771eaacd4c1c6256dedfb4141310b0f6b9f2bace7263e3b65a3f8 +size 16290 diff --git a/data/2025/2503_13xxx/2503.13751/images/272461c815d26446577a8316099aea75fcb7fa56b9420f851e8478f829ed1718.jpg b/data/2025/2503_13xxx/2503.13751/images/272461c815d26446577a8316099aea75fcb7fa56b9420f851e8478f829ed1718.jpg new file mode 100644 index 0000000000000000000000000000000000000000..92cb79ea0a2c888a66d392458793d4c74bb41837 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/272461c815d26446577a8316099aea75fcb7fa56b9420f851e8478f829ed1718.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3e1676a0c9168d5a5d05b2958fcb9141716929dcbb95dbcb463336f322572b8e +size 7880 diff --git a/data/2025/2503_13xxx/2503.13751/images/2d84dbd682390cd2231744dd8853e9648b3b71fba726778d07a6907663c26341.jpg 
b/data/2025/2503_13xxx/2503.13751/images/2d84dbd682390cd2231744dd8853e9648b3b71fba726778d07a6907663c26341.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9cc3e0eb7b2584bc13f32e4a33f6321069695045 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/2d84dbd682390cd2231744dd8853e9648b3b71fba726778d07a6907663c26341.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ca697141875ae93d48c97ccb4866bb202dd5bbf69e0a39947205e6d350de7cf2 +size 4107 diff --git a/data/2025/2503_13xxx/2503.13751/images/36df9944129f621ff0e1ecdf764695eb6b36f0e8d3eaba57ccb6745917a7a037.jpg b/data/2025/2503_13xxx/2503.13751/images/36df9944129f621ff0e1ecdf764695eb6b36f0e8d3eaba57ccb6745917a7a037.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ba90d65dbc3d6c2a910ff4d6fad4974e3f046866 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/36df9944129f621ff0e1ecdf764695eb6b36f0e8d3eaba57ccb6745917a7a037.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:afe4a85900822e7db85522ff0c3de2b341cb4ba6982e85b9cc875b1e43d471ac +size 25091 diff --git a/data/2025/2503_13xxx/2503.13751/images/42f5fec1da7593b8a9cc14368f981db0362526c501634b3dc3bde2e396ff808c.jpg b/data/2025/2503_13xxx/2503.13751/images/42f5fec1da7593b8a9cc14368f981db0362526c501634b3dc3bde2e396ff808c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d9962cdfb360e379d212d459ccc753801c1e9a41 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/42f5fec1da7593b8a9cc14368f981db0362526c501634b3dc3bde2e396ff808c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bbd10d863640e4253250d71fae499a0488348ccacf06d8686adf0493d223fc67 +size 6061 diff --git a/data/2025/2503_13xxx/2503.13751/images/456f1b12f52ed44c1047241fa414546f77f22a232c79c5193cb9f8a81eb78364.jpg b/data/2025/2503_13xxx/2503.13751/images/456f1b12f52ed44c1047241fa414546f77f22a232c79c5193cb9f8a81eb78364.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..8ea3f6f18e102112911c20141f9a468838332fb7 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/456f1b12f52ed44c1047241fa414546f77f22a232c79c5193cb9f8a81eb78364.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7caa1be4781a78335b36e2cc1e86b04994913f68462326464f6900a0428b48a7 +size 9958 diff --git a/data/2025/2503_13xxx/2503.13751/images/47d23c287ed5f746ff659469e2ecd37d7dffa6ecd055b8a1e70ea3974fc87d26.jpg b/data/2025/2503_13xxx/2503.13751/images/47d23c287ed5f746ff659469e2ecd37d7dffa6ecd055b8a1e70ea3974fc87d26.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e60d848f9fd11a2a386e9078eb917bcbd5661f81 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/47d23c287ed5f746ff659469e2ecd37d7dffa6ecd055b8a1e70ea3974fc87d26.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1cc12e5eb464c62f7e13046b49d2ce96701a5fda99b2a54849fcf8dbb0a8e1df +size 6488 diff --git a/data/2025/2503_13xxx/2503.13751/images/49bd3d8ee76b74e6943123a4c519082b91d14235276199ab531f34a03a34cc78.jpg b/data/2025/2503_13xxx/2503.13751/images/49bd3d8ee76b74e6943123a4c519082b91d14235276199ab531f34a03a34cc78.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2a0ada67418480cac213ab002f5886c4cbd542ad --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/49bd3d8ee76b74e6943123a4c519082b91d14235276199ab531f34a03a34cc78.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f70fcb6db9292792cbb322d7dcb677392b7082e437641a936a51d5713ed686e9 +size 28771 diff --git a/data/2025/2503_13xxx/2503.13751/images/49f0724ed3d310be3c3308e27e733b61346b7751b4796215b2241423c2690a76.jpg b/data/2025/2503_13xxx/2503.13751/images/49f0724ed3d310be3c3308e27e733b61346b7751b4796215b2241423c2690a76.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e3a2cffe793a67623e1fa6185578f62442a072e4 --- /dev/null +++ 
b/data/2025/2503_13xxx/2503.13751/images/49f0724ed3d310be3c3308e27e733b61346b7751b4796215b2241423c2690a76.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d8884d0a3af7837c5848e62c176ea2515f891ccd7ce305a66df41f5a9df7a2df +size 13763 diff --git a/data/2025/2503_13xxx/2503.13751/images/4f160909052d60c475611d729d8627a48f2ec03ab554e430d02e6ecf55122fc3.jpg b/data/2025/2503_13xxx/2503.13751/images/4f160909052d60c475611d729d8627a48f2ec03ab554e430d02e6ecf55122fc3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..809f177320576d12683eab1d485a7888290825fa --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/4f160909052d60c475611d729d8627a48f2ec03ab554e430d02e6ecf55122fc3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e0eae88c253519e3b7a6ec9dcff22cc43095ca88bf915f9f7d7ff02c9134bed6 +size 19173 diff --git a/data/2025/2503_13xxx/2503.13751/images/51721ea96c8faaf6536328d7c77afa7a0d6214fa87ac1be8d44a523335321046.jpg b/data/2025/2503_13xxx/2503.13751/images/51721ea96c8faaf6536328d7c77afa7a0d6214fa87ac1be8d44a523335321046.jpg new file mode 100644 index 0000000000000000000000000000000000000000..078cc2b4bb1d24756427d0de686614572bb73ff5 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/51721ea96c8faaf6536328d7c77afa7a0d6214fa87ac1be8d44a523335321046.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:66d73a81dfa62caa19fd1f648aa672db06a0e6c25fec71c176c20b76a0fe1aab +size 10910 diff --git a/data/2025/2503_13xxx/2503.13751/images/526431798295effe5a381666eea095e355d33e12f099452c4e2cca80fa48e073.jpg b/data/2025/2503_13xxx/2503.13751/images/526431798295effe5a381666eea095e355d33e12f099452c4e2cca80fa48e073.jpg new file mode 100644 index 0000000000000000000000000000000000000000..37c415205ed6511e996013abfc919ef6afa81bf2 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/526431798295effe5a381666eea095e355d33e12f099452c4e2cca80fa48e073.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:cded938fb350313de2e0818bf9af771d9395fa2aa8962120399bea7e8b264ab6 +size 38445 diff --git a/data/2025/2503_13xxx/2503.13751/images/59597a6db89b529053f6ff247a1734fb9cd12de6133d89b59beca3f9f784e818.jpg b/data/2025/2503_13xxx/2503.13751/images/59597a6db89b529053f6ff247a1734fb9cd12de6133d89b59beca3f9f784e818.jpg new file mode 100644 index 0000000000000000000000000000000000000000..918319d0b9b7a6f17ae97af689d2dcb0820556ec --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/59597a6db89b529053f6ff247a1734fb9cd12de6133d89b59beca3f9f784e818.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ee91fe0910f74510a6f162323f31d161c5ce9c920d134a3825f723b7dbb95794 +size 10392 diff --git a/data/2025/2503_13xxx/2503.13751/images/599688cbc30a73e86945ceef1aaa6104947a8bf4fb89679b2b4e04e12dbef15b.jpg b/data/2025/2503_13xxx/2503.13751/images/599688cbc30a73e86945ceef1aaa6104947a8bf4fb89679b2b4e04e12dbef15b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5acfecf8d2106644ea7f51e35dbba3ddef3d9f86 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/599688cbc30a73e86945ceef1aaa6104947a8bf4fb89679b2b4e04e12dbef15b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bcf08e1e8ed6befe8eaa0c8dae80a1edf100257992e2a575e37fa0e74aa57bbb +size 30667 diff --git a/data/2025/2503_13xxx/2503.13751/images/5f56febcc07b1551f91d149adb7bbceab2088e31bcd8e08cd237fa6bf1e3b1d6.jpg b/data/2025/2503_13xxx/2503.13751/images/5f56febcc07b1551f91d149adb7bbceab2088e31bcd8e08cd237fa6bf1e3b1d6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..40d604d4008cded7367eafeb64107f6f553db27b --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/5f56febcc07b1551f91d149adb7bbceab2088e31bcd8e08cd237fa6bf1e3b1d6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3314796c0047add6ba4e0f1615f7cca94de421b3aaac0afa877cd1f5aa46fa3c +size 29000 diff --git 
a/data/2025/2503_13xxx/2503.13751/images/6148db51da6bfd8bdedea2bdec794c882a9e35cd0cacac66509df7f4a72d5cdb.jpg b/data/2025/2503_13xxx/2503.13751/images/6148db51da6bfd8bdedea2bdec794c882a9e35cd0cacac66509df7f4a72d5cdb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..792cfa3b11bb08b3475631df59459edd8c31fc64 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/6148db51da6bfd8bdedea2bdec794c882a9e35cd0cacac66509df7f4a72d5cdb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:835a7bcc9f013b0469987831e2ec44884ba3b0b31864e28f7bb1d80706e35af6 +size 5624 diff --git a/data/2025/2503_13xxx/2503.13751/images/6446a270cb605f4ff6886a2c76dbd72b6bd1dfa21e1692651ea1fd016399cc45.jpg b/data/2025/2503_13xxx/2503.13751/images/6446a270cb605f4ff6886a2c76dbd72b6bd1dfa21e1692651ea1fd016399cc45.jpg new file mode 100644 index 0000000000000000000000000000000000000000..12704e29116d2fc206b5721033f65f1be7ef4e21 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/6446a270cb605f4ff6886a2c76dbd72b6bd1dfa21e1692651ea1fd016399cc45.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3e04abb4b2a9dc8adb3e5c056029e66d3df6498a355b8d75433de7554431b9b0 +size 10824 diff --git a/data/2025/2503_13xxx/2503.13751/images/650d90f816cfb3285c323b9ac068f712a628be317829f072d7c2a7dc29c4c8a2.jpg b/data/2025/2503_13xxx/2503.13751/images/650d90f816cfb3285c323b9ac068f712a628be317829f072d7c2a7dc29c4c8a2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dbbaad1a02240f48dcbda4fab6c9c5f7be4f02d6 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/650d90f816cfb3285c323b9ac068f712a628be317829f072d7c2a7dc29c4c8a2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:67a008c3ead15d5442a298addd8f18a1f72b436751a39f46316ec09899bcd7a9 +size 6397 diff --git a/data/2025/2503_13xxx/2503.13751/images/65a1e9d072408c3c3d25eda4daa1b1e739d7581616f6c83ad9fe1690f7769bee.jpg 
b/data/2025/2503_13xxx/2503.13751/images/65a1e9d072408c3c3d25eda4daa1b1e739d7581616f6c83ad9fe1690f7769bee.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2d462bdfb5641d973de2178616690eb7aaf6ca28 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/65a1e9d072408c3c3d25eda4daa1b1e739d7581616f6c83ad9fe1690f7769bee.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bf98a1aab816d1d0d9788c0fc5ef394aeb4ba881468774921ba9aff18d63b8b6 +size 4644 diff --git a/data/2025/2503_13xxx/2503.13751/images/65efa3e7039ec2bf0c17df31db0fea551abb1e8ebec13cd70da4376df689dec2.jpg b/data/2025/2503_13xxx/2503.13751/images/65efa3e7039ec2bf0c17df31db0fea551abb1e8ebec13cd70da4376df689dec2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d67ebe4a093991905f5640fa41855afe5d5af2f8 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/65efa3e7039ec2bf0c17df31db0fea551abb1e8ebec13cd70da4376df689dec2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4c6df41ae880050032449f5e166a5a0b918f1e84fe307f5d4c0db33c96f58d5f +size 34195 diff --git a/data/2025/2503_13xxx/2503.13751/images/694c0324b89bfbec08aca913604248313960367318fa0dc1e2995eee02ca775c.jpg b/data/2025/2503_13xxx/2503.13751/images/694c0324b89bfbec08aca913604248313960367318fa0dc1e2995eee02ca775c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1741e0c80a788bef10e8a0877a29f1edbc810e72 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/694c0324b89bfbec08aca913604248313960367318fa0dc1e2995eee02ca775c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:913ff0465a8f1a62864f4a776be4fa72559ded7dbf2e0da549da0485155e02a4 +size 4656 diff --git a/data/2025/2503_13xxx/2503.13751/images/6a46b8d6a21defcc70331aed694730c6feb8745fa2314f3686b50930a1b8c3ab.jpg b/data/2025/2503_13xxx/2503.13751/images/6a46b8d6a21defcc70331aed694730c6feb8745fa2314f3686b50930a1b8c3ab.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..c0627ab832e3e0c2e6b7ea41ade8a6ab543dfb08 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/6a46b8d6a21defcc70331aed694730c6feb8745fa2314f3686b50930a1b8c3ab.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f2177d33fdeb31ba2c0a78946a32c306bb19fd7773685b84df135e474178a259 +size 5791 diff --git a/data/2025/2503_13xxx/2503.13751/images/6c10546608a77c03434a358377fe9f99d67f5f521f84b43c195a9db66ce3fe3a.jpg b/data/2025/2503_13xxx/2503.13751/images/6c10546608a77c03434a358377fe9f99d67f5f521f84b43c195a9db66ce3fe3a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5748b7290928e340e5b34b20304287298defb041 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/6c10546608a77c03434a358377fe9f99d67f5f521f84b43c195a9db66ce3fe3a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2540c0ee9106c089eff321dbf49a5f6982822b46322ea5fa0f84367f0bd1fc14 +size 13621 diff --git a/data/2025/2503_13xxx/2503.13751/images/6c1c5eac1041ae1fb22568a46b928a626853285ad2b25a2389f8d63a9ec083a3.jpg b/data/2025/2503_13xxx/2503.13751/images/6c1c5eac1041ae1fb22568a46b928a626853285ad2b25a2389f8d63a9ec083a3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..23840b1c2bf4f7a7195c6b55115c59bf90174931 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/6c1c5eac1041ae1fb22568a46b928a626853285ad2b25a2389f8d63a9ec083a3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6dbc92e2461b174cb90740ccf1e5ea2c9ee29fb129813cd48b9cefa95e0f02fe +size 6398 diff --git a/data/2025/2503_13xxx/2503.13751/images/7192b26eb08c7aecbd682eeca7dfd8fe1edf185435ebe5ea3482c61644ac07e2.jpg b/data/2025/2503_13xxx/2503.13751/images/7192b26eb08c7aecbd682eeca7dfd8fe1edf185435ebe5ea3482c61644ac07e2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..90b286bace542f7f52ee03108850a3e8ca0ac3a2 --- /dev/null +++ 
b/data/2025/2503_13xxx/2503.13751/images/7192b26eb08c7aecbd682eeca7dfd8fe1edf185435ebe5ea3482c61644ac07e2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:247579aa3abe55b593b5e0439051c795f2d5f81899cf44691785356e5d252e5d +size 4568 diff --git a/data/2025/2503_13xxx/2503.13751/images/719d47ec6f96ce91103c9fde603e154cba7baa02ad50f8e48186c6c41b3a97ff.jpg b/data/2025/2503_13xxx/2503.13751/images/719d47ec6f96ce91103c9fde603e154cba7baa02ad50f8e48186c6c41b3a97ff.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7079ee5f949fa892c9158dfc233f61e561cb8868 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/719d47ec6f96ce91103c9fde603e154cba7baa02ad50f8e48186c6c41b3a97ff.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb47e98f14f80f7618fbcf7d614602ecc6683ac1b659639c77f0ed7caa672a99 +size 26247 diff --git a/data/2025/2503_13xxx/2503.13751/images/72ebdc565733f1e246ef1530655df2fd0a0556ed6198f739627b7b58781f7021.jpg b/data/2025/2503_13xxx/2503.13751/images/72ebdc565733f1e246ef1530655df2fd0a0556ed6198f739627b7b58781f7021.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b4bb899ead3caacf4e33022dc0fc8f9dca394f2e --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/72ebdc565733f1e246ef1530655df2fd0a0556ed6198f739627b7b58781f7021.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8469f80696d00b3de8c08c4b5aa72cf7c66b3420a685d4dfee66fcc9f385e9ea +size 28717 diff --git a/data/2025/2503_13xxx/2503.13751/images/7fbbbef73dee36330195f6d0fc527ad0a7c1c618fee58e5669d44bef1d5bb793.jpg b/data/2025/2503_13xxx/2503.13751/images/7fbbbef73dee36330195f6d0fc527ad0a7c1c618fee58e5669d44bef1d5bb793.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bb5073fbd9e7063d1563b615d371ab1a4477763b --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/7fbbbef73dee36330195f6d0fc527ad0a7c1c618fee58e5669d44bef1d5bb793.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:52a42174024b6b993774eea52407d54b98324a46fbd71f5c33c650b944380a94 +size 24105 diff --git a/data/2025/2503_13xxx/2503.13751/images/87b96baf0a2e4bfad4e1c9d8f8d653181ea5e5f71efa5c492a82e9d1324d2dd6.jpg b/data/2025/2503_13xxx/2503.13751/images/87b96baf0a2e4bfad4e1c9d8f8d653181ea5e5f71efa5c492a82e9d1324d2dd6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..457b78f100c7174dac7735202baac18f5253e8d8 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/87b96baf0a2e4bfad4e1c9d8f8d653181ea5e5f71efa5c492a82e9d1324d2dd6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6545246ca1a1d1db70ef5c0dd8c8adaa1d68637daac93f6e095d54f5e6ef3427 +size 24984 diff --git a/data/2025/2503_13xxx/2503.13751/images/8903e4b2221d100768bfe268b5a001dd442b4e5c27be2741e58072ab69f0a150.jpg b/data/2025/2503_13xxx/2503.13751/images/8903e4b2221d100768bfe268b5a001dd442b4e5c27be2741e58072ab69f0a150.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a330194c0209be1459a928ab3343d0e5cb25edbb --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/8903e4b2221d100768bfe268b5a001dd442b4e5c27be2741e58072ab69f0a150.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4bc97696208b114ce1c5d6c24901bf0968625a2528ae73d31575e068bfa3797c +size 26693 diff --git a/data/2025/2503_13xxx/2503.13751/images/8ef162573b94d345eed4da85496a669bb5b67c2e40af2ad7caf94d5e5efc2d81.jpg b/data/2025/2503_13xxx/2503.13751/images/8ef162573b94d345eed4da85496a669bb5b67c2e40af2ad7caf94d5e5efc2d81.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a3d4139bdeb18d6a52764c8acf8617ec99f2f811 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/8ef162573b94d345eed4da85496a669bb5b67c2e40af2ad7caf94d5e5efc2d81.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c5eb5dbab928c4c78ebfc719476b6cb80c029482a1223fe93730b64bea08f0f8 +size 30223 diff --git 
a/data/2025/2503_13xxx/2503.13751/images/8f3d63cc5291ccdaf6cff091223516c32e3aa9bda6f62ef719868f5a4defa364.jpg b/data/2025/2503_13xxx/2503.13751/images/8f3d63cc5291ccdaf6cff091223516c32e3aa9bda6f62ef719868f5a4defa364.jpg new file mode 100644 index 0000000000000000000000000000000000000000..df3f2ccec9576a5f5ff783a28d2b363f230807c8 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/8f3d63cc5291ccdaf6cff091223516c32e3aa9bda6f62ef719868f5a4defa364.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ccd3aa36325f5369da73e80f1d8e147f805c208dd4788e933fc20e13e58fe326 +size 6484 diff --git a/data/2025/2503_13xxx/2503.13751/images/93a1935980d353a47f17bc2136d959a36b3a20b7b844279a9d48017c1ff7548b.jpg b/data/2025/2503_13xxx/2503.13751/images/93a1935980d353a47f17bc2136d959a36b3a20b7b844279a9d48017c1ff7548b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4ca2a64c58187ecfe5f19181f529712699e1cb99 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/93a1935980d353a47f17bc2136d959a36b3a20b7b844279a9d48017c1ff7548b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:24e9ed4c2c97998c7251a6cf9ec526e841cde91b38d2fca50d3108e49f41027c +size 25680 diff --git a/data/2025/2503_13xxx/2503.13751/images/95cf8c6f67f7a1ff5f5908ae786adfca0bdc4e43fa7de5fa95c29ea9182013b3.jpg b/data/2025/2503_13xxx/2503.13751/images/95cf8c6f67f7a1ff5f5908ae786adfca0bdc4e43fa7de5fa95c29ea9182013b3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4b4371967040bdf8c8145fa6a4eea7978d318ff5 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/95cf8c6f67f7a1ff5f5908ae786adfca0bdc4e43fa7de5fa95c29ea9182013b3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9619bcf8264a81b41a8a323b5f75a45e13b54074e45c0a0d98c82c76f9659ced +size 33929 diff --git a/data/2025/2503_13xxx/2503.13751/images/983c55704fec1c3570e69e9f79f79845d462ac252b5f882ff32f4fe1718609cd.jpg 
b/data/2025/2503_13xxx/2503.13751/images/983c55704fec1c3570e69e9f79f79845d462ac252b5f882ff32f4fe1718609cd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..49df801f017cc268b33d6b4c04005a0c246045f8 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/983c55704fec1c3570e69e9f79f79845d462ac252b5f882ff32f4fe1718609cd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:33d808e657568029c09142fac1de03cd55dfd6750d14b49102510fbb9baed7ac +size 13132 diff --git a/data/2025/2503_13xxx/2503.13751/images/98d486f097013fcbcfaad06f1ed5faf6e5842378185fdbcaf8203beac42a2a71.jpg b/data/2025/2503_13xxx/2503.13751/images/98d486f097013fcbcfaad06f1ed5faf6e5842378185fdbcaf8203beac42a2a71.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5daf0d6c767c2ed5ae3f226b717966e0e05f3c45 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/98d486f097013fcbcfaad06f1ed5faf6e5842378185fdbcaf8203beac42a2a71.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5d86d188f4e647678e2cba20b092a3dab60ad75080efa246ebb507712f46906b +size 6253 diff --git a/data/2025/2503_13xxx/2503.13751/images/9a290638571b9b78e345f3a543e518242910b82563f2b50d1dbb4658ebbf5b4c.jpg b/data/2025/2503_13xxx/2503.13751/images/9a290638571b9b78e345f3a543e518242910b82563f2b50d1dbb4658ebbf5b4c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1237b59d32e538dfe995e1e3f29baa5ce72c06e1 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/9a290638571b9b78e345f3a543e518242910b82563f2b50d1dbb4658ebbf5b4c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e8c9fd5fe5866ec297f56c47a2b5beee8a64d52be3fd539ba0f2e8b43aab0d43 +size 14811 diff --git a/data/2025/2503_13xxx/2503.13751/images/a19a4da2f67b18c44fb26d6e2a9cceb669d036ffc05618b89b9c68b99f1485ad.jpg b/data/2025/2503_13xxx/2503.13751/images/a19a4da2f67b18c44fb26d6e2a9cceb669d036ffc05618b89b9c68b99f1485ad.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..4a7b2ba4a6ad0003ff9cbb10967a2cb8c2d61dd4 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/a19a4da2f67b18c44fb26d6e2a9cceb669d036ffc05618b89b9c68b99f1485ad.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f958b80a410bcc152a112430465490deb9517b45854b86039152325d9c288655 +size 26621 diff --git a/data/2025/2503_13xxx/2503.13751/images/a44549b712996dcbf7a6e5d33eb30b9b8a67f96ff55c88e1ef64e805c6a5238e.jpg b/data/2025/2503_13xxx/2503.13751/images/a44549b712996dcbf7a6e5d33eb30b9b8a67f96ff55c88e1ef64e805c6a5238e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ad3e19f45589ccefc756521d5bdfd3fefd09a3b0 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/a44549b712996dcbf7a6e5d33eb30b9b8a67f96ff55c88e1ef64e805c6a5238e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3d0516494972a5a8bb004459b28987f6d2f5df5dad840086c23e7c0f4d3e9ea9 +size 4136 diff --git a/data/2025/2503_13xxx/2503.13751/images/a5641668a6941e25c895bd575ef0e74375dbeaa1da5fcb13884916b4ecfa5360.jpg b/data/2025/2503_13xxx/2503.13751/images/a5641668a6941e25c895bd575ef0e74375dbeaa1da5fcb13884916b4ecfa5360.jpg new file mode 100644 index 0000000000000000000000000000000000000000..68ffed603b257a6d2d683a34546b84ae97c8fa04 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/a5641668a6941e25c895bd575ef0e74375dbeaa1da5fcb13884916b4ecfa5360.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ca289d9c05203cf41ab577a4c72da486198b8ab81c57ff14dfd87bfc7f3bc525 +size 30076 diff --git a/data/2025/2503_13xxx/2503.13751/images/ab6a03663afe6a2c4a405104df1bd0065c08dd9c6f6cb7f26c3a6a34d3c3783e.jpg b/data/2025/2503_13xxx/2503.13751/images/ab6a03663afe6a2c4a405104df1bd0065c08dd9c6f6cb7f26c3a6a34d3c3783e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bb9050295d471a1e62c4a33f5c2b0f33e93e788c --- /dev/null +++ 
b/data/2025/2503_13xxx/2503.13751/images/ab6a03663afe6a2c4a405104df1bd0065c08dd9c6f6cb7f26c3a6a34d3c3783e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f16b19d8a6cddc2f81e811bc8f59100aff5439e7bde11fd8a669abce092a9e0c +size 28183 diff --git a/data/2025/2503_13xxx/2503.13751/images/ae72b147ec83b8978675affb3f8e2678e0176541836b311165c6969778223f46.jpg b/data/2025/2503_13xxx/2503.13751/images/ae72b147ec83b8978675affb3f8e2678e0176541836b311165c6969778223f46.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0a52bf4e3a31f196bd7ae89bc6fd4be00414eb25 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/ae72b147ec83b8978675affb3f8e2678e0176541836b311165c6969778223f46.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b3eeaced154ca8cf51a5d5ecaf79e11df10355f57ee465c46f20d981563655e +size 16889 diff --git a/data/2025/2503_13xxx/2503.13751/images/af3a8f87efacd4acaa3c56185ccfd5ffdb24bf0c973fc76594255668269e6e61.jpg b/data/2025/2503_13xxx/2503.13751/images/af3a8f87efacd4acaa3c56185ccfd5ffdb24bf0c973fc76594255668269e6e61.jpg new file mode 100644 index 0000000000000000000000000000000000000000..740d6f28ea1c723e452af189efd7ec38616c7b18 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/af3a8f87efacd4acaa3c56185ccfd5ffdb24bf0c973fc76594255668269e6e61.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5d83e72137499a486e65e0a3092652c666b4980964683e173ae5ac6476904af5 +size 45374 diff --git a/data/2025/2503_13xxx/2503.13751/images/b096a068d0c0ca04838893d62a64c9f0a5b5e122b9e5dcd36bf25ef95f656ef2.jpg b/data/2025/2503_13xxx/2503.13751/images/b096a068d0c0ca04838893d62a64c9f0a5b5e122b9e5dcd36bf25ef95f656ef2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b460499f750b179d8eff5a20d44143cb4e49c7b3 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/b096a068d0c0ca04838893d62a64c9f0a5b5e122b9e5dcd36bf25ef95f656ef2.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:4872508efbec1c6cfa541b1a61aaeb7a098e72daa9c368ad7d48834ec85747d4 +size 29842 diff --git a/data/2025/2503_13xxx/2503.13751/images/b1658df434e665ee2b41168eec4c27970ddad44acffd12d60b48a59d7a89737a.jpg b/data/2025/2503_13xxx/2503.13751/images/b1658df434e665ee2b41168eec4c27970ddad44acffd12d60b48a59d7a89737a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4a318bb8762243f768d194e1703782914d2c1665 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/b1658df434e665ee2b41168eec4c27970ddad44acffd12d60b48a59d7a89737a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7f3a3b17a9c5f6cf5dd722084832156c38485d22e47f74b5f262698ae6c95b22 +size 26716 diff --git a/data/2025/2503_13xxx/2503.13751/images/b4f07dfbec991e71985782d6905bc8288383a71ad6a7e6b4aaf74a949165576e.jpg b/data/2025/2503_13xxx/2503.13751/images/b4f07dfbec991e71985782d6905bc8288383a71ad6a7e6b4aaf74a949165576e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8f530fe51de547e11d35691ef5ea0e245ce62fc5 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/b4f07dfbec991e71985782d6905bc8288383a71ad6a7e6b4aaf74a949165576e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fb72ebe05792081c77593cec2978d61c339c4c21422cc472aeaf6509b4a153a5 +size 24340 diff --git a/data/2025/2503_13xxx/2503.13751/images/b60c76d8a268a432841f42e53cfdbdc3bf20196ce9a9c75b40fd38fb39467449.jpg b/data/2025/2503_13xxx/2503.13751/images/b60c76d8a268a432841f42e53cfdbdc3bf20196ce9a9c75b40fd38fb39467449.jpg new file mode 100644 index 0000000000000000000000000000000000000000..00321cc6aaf302f8228260da0866fc2598fdca5f --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/b60c76d8a268a432841f42e53cfdbdc3bf20196ce9a9c75b40fd38fb39467449.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c17199549739cc5744a88a98326b06feb94f583052b8d488b0ad8f06b6e76154 +size 71742 diff --git 
a/data/2025/2503_13xxx/2503.13751/images/b822311a6fc4b5b0c4d7fcafd79dd0e3e2f03c4bdeeb8e5787a92fec3738137b.jpg b/data/2025/2503_13xxx/2503.13751/images/b822311a6fc4b5b0c4d7fcafd79dd0e3e2f03c4bdeeb8e5787a92fec3738137b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2614933ef32744bfbe6331eee74d8a3429700b65 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/b822311a6fc4b5b0c4d7fcafd79dd0e3e2f03c4bdeeb8e5787a92fec3738137b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d7160d6d1fc63c318e67d173616c762adceb0f6cb4893a9d544fb1818a032732 +size 29388 diff --git a/data/2025/2503_13xxx/2503.13751/images/b9cd9d037ae04f90c0fa195e0fdf750f69e4a1769765dee90905597cb4f0828f.jpg b/data/2025/2503_13xxx/2503.13751/images/b9cd9d037ae04f90c0fa195e0fdf750f69e4a1769765dee90905597cb4f0828f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4257a015cfa368d6c6e477cc2f5e0441d9fb305a --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/b9cd9d037ae04f90c0fa195e0fdf750f69e4a1769765dee90905597cb4f0828f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5019fca08fc0a5fa51b017ad4b094ec88b5ed7b30c6f26634105ec09eb61e12e +size 3534 diff --git a/data/2025/2503_13xxx/2503.13751/images/bbd757943fe4c1860ea2472611269a98d62befd2854367d7ed52cbad920bad35.jpg b/data/2025/2503_13xxx/2503.13751/images/bbd757943fe4c1860ea2472611269a98d62befd2854367d7ed52cbad920bad35.jpg new file mode 100644 index 0000000000000000000000000000000000000000..75b2bbaa89273ec8a6aa6dd24540940dca738d2e --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/bbd757943fe4c1860ea2472611269a98d62befd2854367d7ed52cbad920bad35.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ebfbf4f06e25538aef7440de8225067b3c9c92b48231404593540fc2a3cca176 +size 247195 diff --git a/data/2025/2503_13xxx/2503.13751/images/bee2bdf66cc8b88534900202c77d8a154c55594c42b87de51729a3c9fc1551b5.jpg 
b/data/2025/2503_13xxx/2503.13751/images/bee2bdf66cc8b88534900202c77d8a154c55594c42b87de51729a3c9fc1551b5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7a7fd20b91b50cd24df9cf8a59911ef69ede4706 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/bee2bdf66cc8b88534900202c77d8a154c55594c42b87de51729a3c9fc1551b5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2c3ad629dd2298332f4c797d5d8753bf2eb37ddb0101e6e092196a9e63957864 +size 46127 diff --git a/data/2025/2503_13xxx/2503.13751/images/c5d7306e1433a0b6c6b42b22995ae8e6a7ebb4b28513e507342f46c61081428d.jpg b/data/2025/2503_13xxx/2503.13751/images/c5d7306e1433a0b6c6b42b22995ae8e6a7ebb4b28513e507342f46c61081428d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..45fd9b62c3f19a69faa694940843916d4e4b7048 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/c5d7306e1433a0b6c6b42b22995ae8e6a7ebb4b28513e507342f46c61081428d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:66b19e481211fa7ae2ebc526f65895db6c2e14509569fb4afdd7bccc27d14c83 +size 16563 diff --git a/data/2025/2503_13xxx/2503.13751/images/c8f8786e61d5ba9158e9658ee2ecf4f21676cb7dc45a56ce3520a97a215a4a5d.jpg b/data/2025/2503_13xxx/2503.13751/images/c8f8786e61d5ba9158e9658ee2ecf4f21676cb7dc45a56ce3520a97a215a4a5d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0703a15acd289ddaf4f8964fca7a04abe4ffe214 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/c8f8786e61d5ba9158e9658ee2ecf4f21676cb7dc45a56ce3520a97a215a4a5d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2a6ebb6e652f7d849187e2c9997d38cae463a1744b248a25e5f7ff415707755d +size 3187 diff --git a/data/2025/2503_13xxx/2503.13751/images/d6a5bb359b722b6d4e8665b965c7178e9855ad2c66247b2e37784d296598f439.jpg b/data/2025/2503_13xxx/2503.13751/images/d6a5bb359b722b6d4e8665b965c7178e9855ad2c66247b2e37784d296598f439.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..0ff318040ef1cd3ee85e17a950ff730194d1d52f --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/d6a5bb359b722b6d4e8665b965c7178e9855ad2c66247b2e37784d296598f439.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:428b0df243cd9635610a24687e5591b682ee90effb5a47affab8fd9485d47412 +size 3470 diff --git a/data/2025/2503_13xxx/2503.13751/images/e494c3a358667dae09d159f9d7b5c69ee3c58c7df4714b8e48b91a4d500be0c1.jpg b/data/2025/2503_13xxx/2503.13751/images/e494c3a358667dae09d159f9d7b5c69ee3c58c7df4714b8e48b91a4d500be0c1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..661bee42ec7fa1e9351f8ad0d8e2d51f7305c661 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/e494c3a358667dae09d159f9d7b5c69ee3c58c7df4714b8e48b91a4d500be0c1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b1953bab74940214765b16309d6a6bfec1160ff3c2d745d5a368828aa1a68db7 +size 4700 diff --git a/data/2025/2503_13xxx/2503.13751/images/e61aefc6d8918a5c6e6ab0e0697fcbfc36486ad10f9f7a6216b3e5fefb7518f9.jpg b/data/2025/2503_13xxx/2503.13751/images/e61aefc6d8918a5c6e6ab0e0697fcbfc36486ad10f9f7a6216b3e5fefb7518f9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a7b4eaeebfb99d242cf13daa3eddf155840142d6 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/e61aefc6d8918a5c6e6ab0e0697fcbfc36486ad10f9f7a6216b3e5fefb7518f9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0aa89044c411ca128367d36930ec262076972b55b5d4a82a088d7f325ae4591e +size 53249 diff --git a/data/2025/2503_13xxx/2503.13751/images/e7446e43ec48b02a63f4846fa903ecdbc189ae9e1d3baeea28d3ab4349f67b7b.jpg b/data/2025/2503_13xxx/2503.13751/images/e7446e43ec48b02a63f4846fa903ecdbc189ae9e1d3baeea28d3ab4349f67b7b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c0ffda6c565b6d9b9420e83b3fd1d02ed4521280 --- /dev/null +++ 
b/data/2025/2503_13xxx/2503.13751/images/e7446e43ec48b02a63f4846fa903ecdbc189ae9e1d3baeea28d3ab4349f67b7b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5bbf94e7d0f53efb3e25de46bf8b8a4246bad57e636b18c7766ecd91513c45b1 +size 6831 diff --git a/data/2025/2503_13xxx/2503.13751/images/ed91df62b477bd91c24737b738d49397bea204557c82454c3f8740aacb9e13e6.jpg b/data/2025/2503_13xxx/2503.13751/images/ed91df62b477bd91c24737b738d49397bea204557c82454c3f8740aacb9e13e6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bd3d19b790c3540117306c9e8669e37cff2da2d9 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/ed91df62b477bd91c24737b738d49397bea204557c82454c3f8740aacb9e13e6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:28cb83155f22573a630088c4473a46887a66a79876463aa837d40de9e248b557 +size 30124 diff --git a/data/2025/2503_13xxx/2503.13751/images/f2d9ea4d2966d2c562e632e151129164699c105fdb85a742fbc656549ac3eaca.jpg b/data/2025/2503_13xxx/2503.13751/images/f2d9ea4d2966d2c562e632e151129164699c105fdb85a742fbc656549ac3eaca.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f11dad06ea9f65eceab30c1159df53a84ca6035b --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/f2d9ea4d2966d2c562e632e151129164699c105fdb85a742fbc656549ac3eaca.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2fa59b21fcefd4ec31636722f5d3bf358a72c9c22a2c20a2a394170c2212a044 +size 9574 diff --git a/data/2025/2503_13xxx/2503.13751/images/f7cdf24f42ff7038688bd05478168802f6a6fa09213f5ffad00db62ef65980fa.jpg b/data/2025/2503_13xxx/2503.13751/images/f7cdf24f42ff7038688bd05478168802f6a6fa09213f5ffad00db62ef65980fa.jpg new file mode 100644 index 0000000000000000000000000000000000000000..17194ea0c07480b622cf8744d4c544524d100390 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/f7cdf24f42ff7038688bd05478168802f6a6fa09213f5ffad00db62ef65980fa.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:a11630b29672c49090eef2ac0ca51f52431520b5ebd0e5de760040d5d5aa188f +size 24912 diff --git a/data/2025/2503_13xxx/2503.13751/images/fec6b010bfcae93afb53b0c60c137838b889e707f0941f8e9d65d3df201207b2.jpg b/data/2025/2503_13xxx/2503.13751/images/fec6b010bfcae93afb53b0c60c137838b889e707f0941f8e9d65d3df201207b2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..61702d652b646471c4db03376e0b543bcde8bbf3 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/fec6b010bfcae93afb53b0c60c137838b889e707f0941f8e9d65d3df201207b2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c61b6821b82de3f601d88419ba9ba003637baafecabd379db55b7f75fc135655 +size 39548 diff --git a/data/2025/2503_13xxx/2503.13751/images/ff507e583af2cf8d875c5175cfaf529dfdc7148e3aef2836098edd006a592450.jpg b/data/2025/2503_13xxx/2503.13751/images/ff507e583af2cf8d875c5175cfaf529dfdc7148e3aef2836098edd006a592450.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e315e2bcd58c0cd1a3f028d8f6253b28192ecd71 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/ff507e583af2cf8d875c5175cfaf529dfdc7148e3aef2836098edd006a592450.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5576fedae190ad9791878ce6d88930ba9d3899f110624df213622776d878830c +size 31943 diff --git a/data/2025/2503_13xxx/2503.13751/images/ff814a44df13818d805fb28d14ad88facd4bb1f5a2e442555905552236576ed4.jpg b/data/2025/2503_13xxx/2503.13751/images/ff814a44df13818d805fb28d14ad88facd4bb1f5a2e442555905552236576ed4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..49e62adb40c7f84cf5639f8c77f33b4c07495b67 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/images/ff814a44df13818d805fb28d14ad88facd4bb1f5a2e442555905552236576ed4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1eb829c5ecd7bb17a23475b177ba249dfd053ff8087f46b4188912029bbfe995 +size 52086 diff --git 
a/data/2025/2503_13xxx/2503.13751/layout.json b/data/2025/2503_13xxx/2503.13751/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..e63a354b4e9a172e026021314dc3f50c12ca0e38 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13751/layout.json @@ -0,0 +1,27318 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 104, + 107, + 506, + 130 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 107, + 506, + 130 + ], + "spans": [ + { + "bbox": [ + 104, + 107, + 506, + 130 + ], + "type": "text", + "content": "Optimizing ML Training with Metagradient Descent" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 157, + 147, + 452, + 178 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 157, + 147, + 452, + 178 + ], + "spans": [ + { + "bbox": [ + 157, + 147, + 452, + 178 + ], + "type": "text", + "content": "Logan Engstrom\\*1, Andrew Ilyas\\*2†, Benjamin Chen\\*1, Axel Feldmann\\*1, William Moses\\*3, Aleksander Madry\\*1" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 196, + 183, + 414, + 198 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 196, + 183, + 414, + 198 + ], + "spans": [ + { + "bbox": [ + 196, + 183, + 414, + 198 + ], + "type": "text", + "content": "*Equal contribution " + }, + { + "bbox": [ + 196, + 183, + 414, + 198 + ], + "type": "inline_equation", + "content": "{}^{1}" + }, + { + "bbox": [ + 196, + 183, + 414, + 198 + ], + "type": "text", + "content": " MIT, " + }, + { + "bbox": [ + 196, + 183, + 414, + 198 + ], + "type": "inline_equation", + "content": "{}^{2}" + }, + { + "bbox": [ + 196, + 183, + 414, + 198 + ], + "type": "text", + "content": " Stanford, " + }, + { + "bbox": [ + 196, + 183, + 414, + 198 + ], + "type": "inline_equation", + "content": "{}^{3}" + }, + { + "bbox": [ + 196, + 183, + 414, + 198 + ], + "type": "text", + "content": " UIUC" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 286, + 221, + 324, + 232 + ], + "type": "title", + "angle": 
0, + "lines": [ + { + "bbox": [ + 286, + 221, + 324, + 232 + ], + "spans": [ + { + "bbox": [ + 286, + 221, + 324, + 232 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 93, + 236, + 517, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 236, + 517, + 316 + ], + "spans": [ + { + "bbox": [ + 93, + 236, + 517, + 316 + ], + "type": "text", + "content": "A major challenge in training large-scale machine learning models is configuring the training process to maximize model performance, i.e., finding the best training setup from a vast design space. In this work, we unlock a gradient-based approach to this problem. We first introduce an algorithm for efficiently calculating metagradient gradients through model training at scale. We then introduce a \"smooth model training\" framework that enables effective optimization using metagradient. With metagradient descent (MGD), we greatly improve on existing dataset selection methods, outperform accuracy-degrading data poisoning attacks by an order of magnitude, and automatically find competitive learning rate schedules." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 333, + 178, + 348 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 333, + 178, + 348 + ], + "spans": [ + { + "bbox": [ + 69, + 333, + 178, + 348 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 358, + 541, + 442 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 358, + 541, + 442 + ], + "spans": [ + { + "bbox": [ + 67, + 358, + 541, + 442 + ], + "type": "text", + "content": "How should I clean my data? What architecture should I use? Training large-scale (i.e., deep) machine learning models entails making many design decisions. When making such decisions, typical practice is to exhaustively search over a small set of standard options. 
For example, we might try a few well-known data cleaning heuristics, construct a grid over a hyperparameters, and choose the options that yield the best models. However, given that this process explores only a small part of the overall design space (e.g., one can construct " + }, + { + "bbox": [ + 67, + 358, + 541, + 442 + ], + "type": "inline_equation", + "content": "2^{n}" + }, + { + "bbox": [ + 67, + 358, + 541, + 442 + ], + "type": "text", + "content": " possible training datasets from a pool of " + }, + { + "bbox": [ + 67, + 358, + 541, + 442 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 67, + 358, + 541, + 442 + ], + "type": "text", + "content": " candidate datapoints), it is unlikely that this approach really yields the optimal training configuration." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 443, + 541, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 443, + 541, + 563 + ], + "spans": [ + { + "bbox": [ + 67, + 443, + 541, + 563 + ], + "type": "text", + "content": "How can we find optimal (or at least, better) training configurations? To do so, we take the optimization perspective on designing model training. From this well-studied perspective, deciding on a training configuration—or as we will call it, a set of metaparameters—is just a high-dimensional optimization problem. The input space of this problem comprises all possible metaparameter choices, including which datapoints to train on, what model architecture to use, and how to initialize model weights. The objective function takes in a set of metaparameters, trains a machine learning model according to those metaparameters, and then returns a target metric evaluated on that model (e.g., test accuracy). 
From this perspective, any procedure for selecting metaparameters—including the typical practice of grid-searching over standard options—is just an optimization algorithm, whose goal is to maximize the objective function with respect to the (high-dimensional) input." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 562, + 539, + 634 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 562, + 539, + 634 + ], + "spans": [ + { + "bbox": [ + 67, + 562, + 539, + 634 + ], + "type": "text", + "content": "Given that selecting metaparameters is \"just\" a high-dimensional optimization problem, a natural tool to consider is the gradient. After all, in many contexts, gradients offer a more effective approach to maximizing high-dimensional functions than grid search. Indeed, for a sufficiently \"well-behaved\" function " + }, + { + "bbox": [ + 67, + 562, + 539, + 634 + ], + "type": "inline_equation", + "content": "f(x)" + }, + { + "bbox": [ + 67, + 562, + 539, + 634 + ], + "type": "text", + "content": " with gradient " + }, + { + "bbox": [ + 67, + 562, + 539, + 634 + ], + "type": "inline_equation", + "content": "\\nabla f(x)" + }, + { + "bbox": [ + 67, + 562, + 539, + 634 + ], + "type": "text", + "content": ", we can optimize " + }, + { + "bbox": [ + 67, + 562, + 539, + 634 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 67, + 562, + 539, + 634 + ], + "type": "text", + "content": " by iteratively updating " + }, + { + "bbox": [ + 67, + 562, + 539, + 634 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 67, + 562, + 539, + 634 + ], + "type": "text", + "content": " in the direction of " + }, + { + "bbox": [ + 67, + 562, + 539, + 634 + ], + "type": "inline_equation", + "content": "\\nabla f(x)" + }, + { + "bbox": [ + 67, + 562, + 539, + 634 + ], + "type": "text", + "content": ". 
This insight suggests a generic recipe for selecting metaparameters: first, make the objective differentiable with respect to the metaparameters; second, update via gradient steps." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 634, + 539, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 634, + 539, + 682 + ], + "spans": [ + { + "bbox": [ + 67, + 634, + 539, + 682 + ], + "type": "text", + "content": "Now, the idea of using gradients to search for metaparameters is not new. Indeed, there is a substantial line of work that aims to optimize metaparameters (e.g., architectures, regularizers, or data augmentation schemes) with gradient-based methods [MDA15; LSY18; LVD20]. However, such methods have not managed to scale beyond relatively small settings. This state of affairs prompts our main question:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 154, + 686, + 453, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 154, + 686, + 453, + 700 + ], + "spans": [ + { + "bbox": [ + 154, + 686, + 453, + 700 + ], + "type": "text", + "content": "Can we scalably configure model training using gradient-based methods?" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 202, + 35, + 568 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 202, + 35, + 568 + ], + "spans": [ + { + "bbox": [ + 14, + 202, + 35, + 568 + ], + "type": "text", + "content": "arXiv:2503.13751v1 [stat.ML] 17 Mar 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 80, + 711, + 387, + 722 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 711, + 387, + 722 + ], + "spans": [ + { + "bbox": [ + 80, + 711, + 387, + 722 + ], + "type": "inline_equation", + "content": "^{\\dagger}" + }, + { + "bbox": [ + 80, + 711, + 387, + 722 + ], + "type": "text", + "content": "Work done at MIT EECS. Correspondence to {engstrom,ailyas,benchen}@mit.edu." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 82, + 73, + 225, + 223 + ], + "blocks": [ + { + "bbox": [ + 82, + 73, + 225, + 223 + ], + "lines": [ + { + "bbox": [ + 82, + 73, + 225, + 223 + ], + "spans": [ + { + "bbox": [ + 82, + 73, + 225, + 223 + ], + "type": "image", + "image_path": "4f160909052d60c475611d729d8627a48f2ec03ab554e430d02e6ecf55122fc3.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 68, + 235, + 541, + 260 + ], + "lines": [ + { + "bbox": [ + 68, + 235, + 541, + 260 + ], + "spans": [ + { + "bbox": [ + 68, + 235, + 541, + 260 + ], + "type": "text", + "content": "Figure 1: Our proto-algorithm, metagradient descent (MGD), uses gradients to achieve state-of-the-art performance across a variety of applications, including data selection and data poisoning." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 228, + 74, + 379, + 223 + ], + "blocks": [ + { + "bbox": [ + 228, + 74, + 379, + 223 + ], + "lines": [ + { + "bbox": [ + 228, + 74, + 379, + 223 + ], + "spans": [ + { + "bbox": [ + 228, + 74, + 379, + 223 + ], + "type": "image", + "image_path": "ae72b147ec83b8978675affb3f8e2678e0176541836b311165c6969778223f46.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 383, + 72, + 531, + 222 + ], + "blocks": [ + { + "bbox": [ + 383, + 72, + 531, + 222 + ], + "lines": [ + { + "bbox": [ + 383, + 72, + 531, + 222 + ], + "spans": [ + { + "bbox": [ + 383, + 72, + 531, + 222 + ], + "type": "image", + "image_path": "25d0520397f487be3b99371e5ad5a60cf7c24eb09b3e0e6bcccb837ff7007b8c.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 279, + 176, + 292 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 279, + 176, + 292 + ], + "spans": [ + { + "bbox": [ + 69, + 279, + 176, + 292 + ], + "type": "text", + "content": "1.1 Contributions" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 68, + 299, + 541, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 299, + 541, + 323 + ], + "spans": [ + { + "bbox": [ + 68, + 299, + 541, + 323 + ], + "type": "text", + "content": "In this work, we answer this question in the affirmative, adding \"gradient descent on metaparameters\" to the large-scale machine learning toolkit. Along the way, we will face—and address—two main challenges." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 68, + 323, + 541, + 371 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 323, + 541, + 371 + ], + "spans": [ + { + "bbox": [ + 68, + 323, + 541, + 371 + ], + "type": "text", + "content": "First, existing methods for computing metagradients do not scale. In response, we devise an algorithm, REPLAY, that can take metagradients in large-scale settings. By combining reverse-mode autodifferentiation (AD) with an efficient data structure, REPLAY can calculate exact metagradients for models with billions of parameters and thousands of training steps." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 68, + 371, + 541, + 419 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 371, + 541, + 419 + ], + "spans": [ + { + "bbox": [ + 68, + 371, + 541, + 419 + ], + "type": "text", + "content": "Second, we find that metagradients of standard training routines are not necessarily helpful for optimization, which we connect to non-smoothness of the metaparameter optimization landscape. Borrowing tools from convex optimization, we devise a framework for designing \"metasmooth\" training routines that do admit helpful metagradients." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 68, + 419, + 540, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 419, + 540, + 456 + ], + "spans": [ + { + "bbox": [ + 68, + 419, + 540, + 456 + ], + "type": "text", + "content": "Addressing the challenges above unlocks a simple recipe for solving a broad range of machine learning tasks: (a) frame the task as a continuous optimization problem over metaparameters; (b) design a metasmooth training routine; (c) perform metagradient descent (MGD). 
Applying this recipe:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 83, + 463, + 539, + 607 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 83, + 463, + 538, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 463, + 538, + 487 + ], + "spans": [ + { + "bbox": [ + 83, + 463, + 538, + 487 + ], + "type": "text", + "content": "- In the DataComp-small11 competition [GIF+24], we achieve state-of-the-art pre-training data selection for CLIP (2x larger performance improvement than the previous DataComp-small1 leader [Eco24]);" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 83, + 495, + 539, + 531 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 495, + 539, + 531 + ], + "spans": [ + { + "bbox": [ + 83, + 495, + 539, + 531 + ], + "type": "text", + "content": "- In the context of data selection for instruction tuning (as introduced by Xia et al. [XMG+24]), we substantially improve on data selection for Gemma-2B (outperforming existing selection methods as well as full-data training);" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 83, + 538, + 539, + 576 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 538, + 539, + 576 + ], + "spans": [ + { + "bbox": [ + 83, + 538, + 539, + 576 + ], + "type": "text", + "content": "- In the accuracy-degrading data poisoning setting (defined by Huber [Hub64] and pioneered by Lu et al. 
[LKY22] for deep neural networks), we improve attacks on DNNs by an order of magnitude, dropping CIFAR-10 accuracy from " + }, + { + "bbox": [ + 83, + 538, + 539, + 576 + ], + "type": "inline_equation", + "content": "92\\% \\rightarrow 78\\%" + }, + { + "bbox": [ + 83, + 538, + 539, + 576 + ], + "type": "text", + "content": " (the best previous attack [LKY23] only reduces accuracy to " + }, + { + "bbox": [ + 83, + 538, + 539, + 576 + ], + "type": "inline_equation", + "content": "91\\%" + }, + { + "bbox": [ + 83, + 538, + 539, + 576 + ], + "type": "text", + "content": ");" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 83, + 582, + 539, + 607 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 582, + 539, + 607 + ], + "spans": [ + { + "bbox": [ + 83, + 582, + 539, + 607 + ], + "type": "text", + "content": "- For the task of hyperparameter optimization, we efficiently find a competitive CIFAR-10 learning rate schedule (matching the performance of a schedule found by grid search)." + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 68, + 634, + 321, + 651 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 634, + 321, + 651 + ], + "spans": [ + { + "bbox": [ + 68, + 634, + 321, + 651 + ], + "type": "text", + "content": "2 Scalably computing metagradients" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 68, + 659, + 541, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 659, + 541, + 696 + ], + "spans": [ + { + "bbox": [ + 68, + 659, + 541, + 696 + ], + "type": "text", + "content": "In this section we present REPLAY, an algorithm for computing metagradients of large-scale iterative ML algorithms. We first detail the setting, then discuss existing approaches to computing metagradients, and conclude by describing REPLAY." 
+ } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 211, + 72, + 400, + 138 + ], + "blocks": [ + { + "bbox": [ + 211, + 72, + 400, + 138 + ], + "lines": [ + { + "bbox": [ + 211, + 72, + 400, + 138 + ], + "spans": [ + { + "bbox": [ + 211, + 72, + 400, + 138 + ], + "type": "image", + "image_path": "1dc990e467236d18f25ade5d515382e45fc4338efb7565274a7fd059602d5385.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 181, + 140, + 247, + 153 + ], + "lines": [ + { + "bbox": [ + 181, + 140, + 247, + 153 + ], + "spans": [ + { + "bbox": [ + 181, + 140, + 247, + 153 + ], + "type": "text", + "content": "Training setup" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 267, + 143, + 333, + 153 + ], + "lines": [ + { + "bbox": [ + 267, + 143, + 333, + 153 + ], + "spans": [ + { + "bbox": [ + 267, + 143, + 333, + 153 + ], + "type": "text", + "content": "Trained model" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 342, + 143, + 428, + 154 + ], + "lines": [ + { + "bbox": [ + 342, + 143, + 428, + 154 + ], + "spans": [ + { + "bbox": [ + 342, + 143, + 428, + 154 + ], + "type": "text", + "content": "Observed behavior" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 67, + 164, + 541, + 214 + ], + "lines": [ + { + "bbox": [ + 67, + 164, + 541, + 214 + ], + "spans": [ + { + "bbox": [ + 67, + 164, + 541, + 214 + ], + "type": "text", + "content": "Figure 2: An illustration of the metagradient. 
We embed a given aspect of the training setup (e.g., the training dataset, or optimizer hyperparameters) into a continuous metaparameter vector " + }, + { + "bbox": [ + 67, + 164, + 541, + 214 + ], + "type": "inline_equation", + "content": "z \\in \\mathbb{R}^d" + }, + { + "bbox": [ + 67, + 164, + 541, + 214 + ], + "type": "text", + "content": ". This metaparameter defines a model " + }, + { + "bbox": [ + 67, + 164, + 541, + 214 + ], + "type": "inline_equation", + "content": "\\mathcal{A}(z)" + }, + { + "bbox": [ + 67, + 164, + 541, + 214 + ], + "type": "text", + "content": " by way of the learning algorithm " + }, + { + "bbox": [ + 67, + 164, + 541, + 214 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 67, + 164, + 541, + 214 + ], + "type": "text", + "content": ", which in turn defines an output " + }, + { + "bbox": [ + 67, + 164, + 541, + 214 + ], + "type": "inline_equation", + "content": "\\phi(z)" + }, + { + "bbox": [ + 67, + 164, + 541, + 214 + ], + "type": "text", + "content": ". The metagradient " + }, + { + "bbox": [ + 67, + 164, + 541, + 214 + ], + "type": "inline_equation", + "content": "\\nabla_z \\phi(\\mathcal{A}(z)) \\in \\mathbb{R}^d" + }, + { + "bbox": [ + 67, + 164, + 541, + 214 + ], + "type": "text", + "content": " is the gradient of this model output with respect to the metaparameter." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 68, + 232, + 230, + 247 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 232, + 230, + 247 + ], + "spans": [ + { + "bbox": [ + 68, + 232, + 230, + 247 + ], + "type": "text", + "content": "2.1 What is a metagradient?" 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 253, + 541, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 253, + 541, + 289 + ], + "spans": [ + { + "bbox": [ + 67, + 253, + 541, + 289 + ], + "type": "text", + "content": "Training a machine learning model is a two-step process. First, we decide on a training setup—we must pick, for example, a neural network architecture, a training dataset, and an optimizer for training. Second, we apply the algorithm defined by this training setup to train a model." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 68, + 289, + 541, + 314 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 289, + 541, + 314 + ], + "spans": [ + { + "bbox": [ + 68, + 289, + 541, + 314 + ], + "type": "text", + "content": "Our overall goal in this paper is to optimize model behavior as a function of the training setup (or, as we call it, the metaparameters) using gradient-based methods. To this end, we define the following notation:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 83, + 320, + 539, + 443 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 83, + 320, + 538, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 320, + 538, + 369 + ], + "spans": [ + { + "bbox": [ + 83, + 320, + 538, + 369 + ], + "type": "text", + "content": "- Let " + }, + { + "bbox": [ + 83, + 320, + 538, + 369 + ], + "type": "inline_equation", + "content": "\\mathbf{z} \\in \\mathbb{R}^n" + }, + { + "bbox": [ + 83, + 320, + 538, + 369 + ], + "type": "text", + "content": " be a vector of continuous metaparameters representing the aspects of the training setup we aim to optimize. 
For example, if we only want to adjust the learning rate and weight decay of SGD then " + }, + { + "bbox": [ + 83, + 320, + 538, + 369 + ], + "type": "inline_equation", + "content": "n = 2" + }, + { + "bbox": [ + 83, + 320, + 538, + 369 + ], + "type": "text", + "content": ". We handle discrete metaparameters (e.g., choice of training data) by finding a continuous relaxation (e.g., importance weights)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 83, + 376, + 539, + 400 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 376, + 539, + 400 + ], + "spans": [ + { + "bbox": [ + 83, + 376, + 539, + 400 + ], + "type": "text", + "content": "- Let " + }, + { + "bbox": [ + 83, + 376, + 539, + 400 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 83, + 376, + 539, + 400 + ], + "type": "text", + "content": " be an algorithm mapping " + }, + { + "bbox": [ + 83, + 376, + 539, + 400 + ], + "type": "inline_equation", + "content": "\\mathbf{z}" + }, + { + "bbox": [ + 83, + 376, + 539, + 400 + ], + "type": "text", + "content": " to a trained machine learning model; we assume all other aspects of the training setup outside " + }, + { + "bbox": [ + 83, + 376, + 539, + 400 + ], + "type": "inline_equation", + "content": "\\mathbf{z}" + }, + { + "bbox": [ + 83, + 376, + 539, + 400 + ], + "type": "text", + "content": " are fixed and thus part of the algorithm " + }, + { + "bbox": [ + 83, + 376, + 539, + 400 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 83, + 376, + 539, + 400 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 83, + 407, + 539, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 407, + 539, + 443 + ], + "spans": [ + { + "bbox": [ + 83, + 407, + 539, + 443 + ], + "type": "text", + "content": "- Finally, let " + }, + { + "bbox": [ + 83, + 407, + 539, + 443 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 83, + 407, + 539, + 443 + ], + "type": "text", + "content": " be an output function mapping a model " + }, + { + "bbox": [ + 83, + 407, + 539, + 443 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 83, + 407, + 539, + 443 + ], + "type": "text", + "content": " to a vector " + }, + { + "bbox": [ + 83, + 407, + 539, + 443 + ], + "type": "inline_equation", + "content": "\\phi(\\theta) \\in \\mathbb{R}" + }, + { + "bbox": [ + 83, + 407, + 539, + 443 + ], + "type": "text", + "content": ". For example, " + }, + { + "bbox": [ + 83, + 407, + 539, + 443 + ], + "type": "inline_equation", + "content": "\\phi(\\theta)" + }, + { + "bbox": [ + 83, + 407, + 539, + 443 + ], + "type": "text", + "content": " might represent the validation loss of the model " + }, + { + "bbox": [ + 83, + 407, + 539, + 443 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 83, + 407, + 539, + 443 + ], + "type": "text", + "content": ". 
We require that " + }, + { + "bbox": [ + 83, + 407, + 539, + 443 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 83, + 407, + 539, + 443 + ], + "type": "text", + "content": " be differentiable with respect to " + }, + { + "bbox": [ + 83, + 407, + 539, + 443 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 83, + 407, + 539, + 443 + ], + "type": "text", + "content": ", but otherwise make no assumptions on " + }, + { + "bbox": [ + 83, + 407, + 539, + 443 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 83, + 407, + 539, + 443 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 68, + 452, + 541, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 452, + 541, + 475 + ], + "spans": [ + { + "bbox": [ + 68, + 452, + 541, + 475 + ], + "type": "text", + "content": "With this notation in place, we define the training function " + }, + { + "bbox": [ + 68, + 452, + 541, + 475 + ], + "type": "inline_equation", + "content": "f \\coloneqq \\phi \\circ \\mathcal{A}" + }, + { + "bbox": [ + 68, + 452, + 541, + 475 + ], + "type": "text", + "content": " mapping the training setup " + }, + { + "bbox": [ + 68, + 452, + 541, + 475 + ], + "type": "inline_equation", + "content": "\\mathbf{z}" + }, + { + "bbox": [ + 68, + 452, + 541, + 475 + ], + "type": "text", + "content": " directly to the output function " + }, + { + "bbox": [ + 68, + 452, + 541, + 475 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 68, + 452, + 541, + 475 + ], + "type": "text", + "content": " evaluated on the corresponding model." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 69, + 476, + 539, + 501 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 476, + 539, + 501 + ], + "spans": [ + { + "bbox": [ + 69, + 476, + 539, + 501 + ], + "type": "text", + "content": "Finally, the metagradient is the gradient of the training function with respect to the metaparameters, " + }, + { + "bbox": [ + 69, + 476, + 539, + 501 + ], + "type": "inline_equation", + "content": "\\nabla_{\\mathbf{z}}f(\\mathbf{z})" + }, + { + "bbox": [ + 69, + 476, + 539, + 501 + ], + "type": "text", + "content": ". Intuitively, the metagradient defines the \"direction of steepest ascent\" in metaparameter space." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 67, + 515, + 541, + 539 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 515, + 541, + 539 + ], + "spans": [ + { + "bbox": [ + 67, + 515, + 541, + 539 + ], + "type": "text", + "content": "Our focus: iterative algorithms. To efficiently compute the metagradient, we restrict our focus to cases where the algorithm " + }, + { + "bbox": [ + 67, + 515, + 541, + 539 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 67, + 515, + 541, + 539 + ], + "type": "text", + "content": " is iterative, i.e., when it can be written in the form" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 199, + 548, + 541, + 578 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 199, + 548, + 541, + 578 + ], + "spans": [ + { + "bbox": [ + 199, + 548, + 541, + 578 + ], + "type": "interline_equation", + "content": "\\underbrace {\\mathcal {A} (z) := \\mathbf {s} _ {T}} _ {\\text {m o d e l s t a t e a f t e r T s t e p s}}, \\quad \\text {w h e r e} \\quad \\underbrace {\\mathbf {s} _ {t + 1} : = h _ {t} (\\mathbf {s} _ {t} , \\mathbf {z})} _ {\\text {o p t i m i z e r s t e p t}}. 
\\tag {1}", + "image_path": "6446a270cb605f4ff6886a2c76dbd72b6bd1dfa21e1692651ea1fd016399cc45.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 67, + 586, + 541, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 586, + 541, + 624 + ], + "spans": [ + { + "bbox": [ + 67, + 586, + 541, + 624 + ], + "type": "text", + "content": "Here, " + }, + { + "bbox": [ + 67, + 586, + 541, + 624 + ], + "type": "inline_equation", + "content": "\\mathbf{s}_t" + }, + { + "bbox": [ + 67, + 586, + 541, + 624 + ], + "type": "text", + "content": " is the optimizer state at step " + }, + { + "bbox": [ + 67, + 586, + 541, + 624 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 67, + 586, + 541, + 624 + ], + "type": "text", + "content": " (with " + }, + { + "bbox": [ + 67, + 586, + 541, + 624 + ], + "type": "inline_equation", + "content": "\\mathbf{s}_0" + }, + { + "bbox": [ + 67, + 586, + 541, + 624 + ], + "type": "text", + "content": " being the initial state) and " + }, + { + "bbox": [ + 67, + 586, + 541, + 624 + ], + "type": "inline_equation", + "content": "h_t" + }, + { + "bbox": [ + 67, + 586, + 541, + 624 + ], + "type": "text", + "content": " is the update mapping from state " + }, + { + "bbox": [ + 67, + 586, + 541, + 624 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 67, + 586, + 541, + 624 + ], + "type": "text", + "content": " to state " + }, + { + "bbox": [ + 67, + 586, + 541, + 624 + ], + "type": "inline_equation", + "content": "t + 1" + }, + { + "bbox": [ + 67, + 586, + 541, + 624 + ], + "type": "text", + "content": ". The form of (1) captures most large-scale training algorithms. 
For example, if the setup " + }, + { + "bbox": [ + 67, + 586, + 541, + 624 + ], + "type": "inline_equation", + "content": "\\mathbf{z} \\in \\mathbb{R}^T" + }, + { + "bbox": [ + 67, + 586, + 541, + 624 + ], + "type": "text", + "content": " is a per-step learning rate, and the algorithm " + }, + { + "bbox": [ + 67, + 586, + 541, + 624 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 67, + 586, + 541, + 624 + ], + "type": "text", + "content": " is full batch gradient descent, then each update " + }, + { + "bbox": [ + 67, + 586, + 541, + 624 + ], + "type": "inline_equation", + "content": "h_t" + }, + { + "bbox": [ + 67, + 586, + 541, + 624 + ], + "type": "text", + "content": " is" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 247, + 632, + 361, + 645 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 247, + 632, + 361, + 645 + ], + "spans": [ + { + "bbox": [ + 247, + 632, + 361, + 645 + ], + "type": "interline_equation", + "content": "h _ {t} (\\mathbf {s} _ {t}, \\mathbf {z}) := \\mathbf {s} _ {t} - z _ {t} \\nabla \\ell (\\mathbf {s} _ {t}),", + "image_path": "d6a5bb359b722b6d4e8665b965c7178e9855ad2c66247b2e37784d296598f439.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 67, + 654, + 541, + 680 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 654, + 541, + 680 + ], + "spans": [ + { + "bbox": [ + 67, + 654, + 541, + 680 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 67, + 654, + 541, + 680 + ], + "type": "inline_equation", + "content": "z_{t}" + }, + { + "bbox": [ + 67, + 654, + 541, + 680 + ], + "type": "text", + "content": " is the learning rate at step " + }, + { + "bbox": [ + 67, + 654, + 541, + 680 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 67, + 654, + 541, + 680 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 67, + 654, + 541, + 680 + ], + "type": "inline_equation", + "content": 
"\\ell" + }, + { + "bbox": [ + 67, + 654, + 541, + 680 + ], + "type": "text", + "content": " is the training loss, and the state " + }, + { + "bbox": [ + 67, + 654, + 541, + 680 + ], + "type": "inline_equation", + "content": "\\mathbf{s}_t" + }, + { + "bbox": [ + 67, + 654, + 541, + 680 + ], + "type": "text", + "content": " comprises the parameters at step " + }, + { + "bbox": [ + 67, + 654, + 541, + 680 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 67, + 654, + 541, + 680 + ], + "type": "text", + "content": ". For more complex algorithms like Adam [KB15], the state " + }, + { + "bbox": [ + 67, + 654, + 541, + 680 + ], + "type": "inline_equation", + "content": "\\mathbf{s}_t" + }, + { + "bbox": [ + 67, + 654, + 541, + 680 + ], + "type": "text", + "content": " includes terms like gradient moments." + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 71, + 358, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 71, + 358, + 85 + ], + "spans": [ + { + "bbox": [ + 69, + 71, + 358, + 85 + ], + "type": "text", + "content": "2.2 Warmup: Metagradients via autodifferentiation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 91, + 541, + 199 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 91, + 541, + 199 + ], + "spans": [ + { + "bbox": [ + 67, + 91, + 541, + 199 + ], + "type": "text", + "content": "A key primitive we leverage to calculate metagradients is automatic differentiation (AD)—a standard tool for taking gradients through computer-defined functions. 
AD takes gradients by decomposing functions into elementary operations with known derivatives, then combining these derivatives using the chain rule. Concretely, AD operates in two passes: a \"forward pass,\" which executes the function of interest and stores intermediate products for each elementary operation; and a \"backward pass,\" which calculates the gradient by propagating chains of partial derivatives using these stored products. For the purposes of this paper, we will view AD as a black box that calculates the gradient of a many-to-one function (i.e., any " + }, + { + "bbox": [ + 67, + 91, + 541, + 199 + ], + "type": "inline_equation", + "content": "f: \\mathbb{R}^d \\to \\mathbb{R}" + }, + { + "bbox": [ + 67, + 91, + 541, + 199 + ], + "type": "text", + "content": ") at a given point using only a small constant factor more time than calculating the function itself (along with the space cost of storing the necessary forward-pass products)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 199, + 541, + 259 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 199, + 541, + 259 + ], + "spans": [ + { + "bbox": [ + 67, + 199, + 541, + 259 + ], + "type": "text", + "content": "What does this have to do with metagradients? Well, seeing as how training itself is a computer-defined function, AD is a natural tool for calculating the metagradient. The main challenge, as we discuss in the sequel, is that AD-based approaches to calculating the metagradient tend to be too resource-intensive for the large-scale machine learning algorithms we consider. In the remainder of this section we build up background before finally describing REPLAY, our algorithm for scalably computing (exact) metagradients." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 274, + 541, + 309 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 274, + 541, + 309 + ], + "spans": [ + { + "bbox": [ + 67, + 274, + 541, + 309 + ], + "type": "text", + "content": "Approach #1: Direct AD. The direct approach to calculating metagradients exploits the fact that nearly any learning algorithm is itself a sequence of differentiable computer-defined operations—meaning the training function " + }, + { + "bbox": [ + 67, + 274, + 541, + 309 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 67, + 274, + 541, + 309 + ], + "type": "text", + "content": " is also differentiable." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 310, + 541, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 310, + 541, + 370 + ], + "spans": [ + { + "bbox": [ + 67, + 310, + 541, + 370 + ], + "type": "text", + "content": "However, operationalizing this observation to compute metagradients turns out to be challenging. The reason is that AD stores intermediate products for each operation. The amount of data stored thus scales with the number of operations in the function of interest. In the case of our training function " + }, + { + "bbox": [ + 67, + 310, + 541, + 370 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 67, + 310, + 541, + 370 + ], + "type": "text", + "content": ", this number encompasses all the operations used to train a machine learning model. As a result, even in a toy scenario like MNIST training, computing metagradients with naive AD would require storing terabytes of data." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 384, + 541, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 384, + 541, + 421 + ], + "spans": [ + { + "bbox": [ + 67, + 384, + 541, + 421 + ], + "type": "text", + "content": "Approach #2: Exploiting structure with step-wise AD. 
A more efficient method for calculating the metagradient, step-wise AD, leverages the structure of iterative learning algorithms [Wer90; MDA15; FDF+17]. Recall from (1) that such algorithms take the form" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 217, + 429, + 391, + 442 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 217, + 429, + 391, + 442 + ], + "spans": [ + { + "bbox": [ + 217, + 429, + 391, + 442 + ], + "type": "interline_equation", + "content": "\\mathcal {A} (\\mathbf {z}) := \\mathbf {s} _ {T}, \\quad \\text {w h e r e} \\quad \\mathbf {s} _ {t + 1} := h _ {t} (\\mathbf {s} _ {t}, \\mathbf {z}).", + "image_path": "e494c3a358667dae09d159f9d7b5c69ee3c58c7df4714b8e48b91a4d500be0c1.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 449, + 541, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 449, + 541, + 475 + ], + "spans": [ + { + "bbox": [ + 67, + 449, + 541, + 475 + ], + "type": "text", + "content": "Algebraic manipulation (in particular, using the chain rule, the law of the total derivative, and the identity " + }, + { + "bbox": [ + 67, + 449, + 541, + 475 + ], + "type": "inline_equation", + "content": "\\mathbf{s}_t = h_{t-1}(\\mathbf{s}_{t-1}, \\mathbf{z})" + }, + { + "bbox": [ + 67, + 449, + 541, + 475 + ], + "type": "text", + "content": ") allows us to write the metagradient over an iterative algorithm as" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 194, + 483, + 541, + 537 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 194, + 483, + 541, + 537 + ], + "spans": [ + { + "bbox": [ + 194, + 483, + 541, + 537 + ], + "type": "interline_equation", + "content": "\\frac {\\partial f (\\mathbf {z})}{\\partial \\mathbf {z}} = \\frac {\\partial \\phi (\\mathcal {A} (\\mathbf {z}))}{\\partial \\mathbf {z}} = \\sum_ {t = 1} ^ {T} \\underbrace {\\overbrace {\\frac {\\partial \\phi (\\mathbf {s} _ {T})}{\\partial \\mathbf {s} _ {t}}} ^ {A _ {t}} \\cdot 
\\frac {\\partial h _ {t - 1} (\\mathbf {s} _ {t - 1}, \\mathbf {z})}{\\partial \\mathbf {z}}} _ {B _ {t}}, \\tag {2}", + "image_path": "49f0724ed3d310be3c3308e27e733b61346b7751b4796215b2241423c2690a76.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 544, + 541, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 544, + 541, + 594 + ], + "spans": [ + { + "bbox": [ + 67, + 544, + 541, + 594 + ], + "type": "text", + "content": "where we have introduced the notation " + }, + { + "bbox": [ + 67, + 544, + 541, + 594 + ], + "type": "inline_equation", + "content": "A_{t}" + }, + { + "bbox": [ + 67, + 544, + 541, + 594 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 544, + 541, + 594 + ], + "type": "inline_equation", + "content": "B_{t}" + }, + { + "bbox": [ + 67, + 544, + 541, + 594 + ], + "type": "text", + "content": " for notational convenience. Step-wise AD computes the metagradient by calculating each term in the sum of (2) one at a time. 
For each term, the main challenge lies in computing " + }, + { + "bbox": [ + 67, + 544, + 541, + 594 + ], + "type": "inline_equation", + "content": "A_{t}" + }, + { + "bbox": [ + 67, + 544, + 541, + 594 + ], + "type": "text", + "content": ", since given " + }, + { + "bbox": [ + 67, + 544, + 541, + 594 + ], + "type": "inline_equation", + "content": "A_{t}" + }, + { + "bbox": [ + 67, + 544, + 541, + 594 + ], + "type": "text", + "content": " we can straightforwardly compute " + }, + { + "bbox": [ + 67, + 544, + 541, + 594 + ], + "type": "inline_equation", + "content": "B_{t}" + }, + { + "bbox": [ + 67, + 544, + 541, + 594 + ], + "type": "text", + "content": " (the entire term) by differentiating through a single model update, i.e.," + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 197, + 601, + 411, + 626 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 197, + 601, + 411, + 626 + ], + "spans": [ + { + "bbox": [ + 197, + 601, + 411, + 626 + ], + "type": "interline_equation", + "content": "B _ {t} := A _ {t} \\cdot \\frac {\\partial h _ {t - 1} (\\mathbf {s} _ {t - 1} , \\mathbf {z})}{\\partial \\mathbf {z}} = \\frac {\\partial (A _ {t} \\cdot h _ {t - 1} (\\mathbf {s} _ {t - 1} , \\mathbf {z}))}{\\partial \\mathbf {z}},", + "image_path": "272461c815d26446577a8316099aea75fcb7fa56b9420f851e8478f829ed1718.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 632, + 541, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 632, + 541, + 658 + ], + "spans": [ + { + "bbox": [ + 67, + 632, + 541, + 658 + ], + "type": "text", + "content": "which is just a single call to our assumed \"AD oracle\" on the function " + }, + { + "bbox": [ + 67, + 632, + 541, + 658 + ], + "type": "inline_equation", + "content": "\\mathbf{z} \\mapsto A_t \\cdot h_{t-1}(\\mathbf{s}_{t-1}, \\mathbf{z})" + }, + { + "bbox": [ + 67, + 632, + 541, + 658 + ], + "type": "text", + "content": ". 
Computing the " + }, + { + "bbox": [ + 67, + 632, + 541, + 658 + ], + "type": "inline_equation", + "content": "A_t" + }, + { + "bbox": [ + 67, + 632, + 541, + 658 + ], + "type": "text", + "content": " terms is less straightforward as we need to relate " + }, + { + "bbox": [ + 67, + 632, + 541, + 658 + ], + "type": "inline_equation", + "content": "s_t" + }, + { + "bbox": [ + 67, + 632, + 541, + 658 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 632, + 541, + 658 + ], + "type": "inline_equation", + "content": "s_T" + }, + { + "bbox": [ + 67, + 632, + 541, + 658 + ], + "type": "text", + "content": "; to do so, we exploit the recurrence" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 178, + 664, + 541, + 691 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 178, + 664, + 541, + 691 + ], + "spans": [ + { + "bbox": [ + 178, + 664, + 541, + 691 + ], + "type": "interline_equation", + "content": "A _ {t} := \\frac {\\partial \\phi (\\mathbf {s} _ {T})}{\\partial \\mathbf {s} _ {t}} = \\frac {\\partial \\phi (\\mathbf {s} _ {T})}{\\partial \\mathbf {s} _ {t + 1}} \\cdot \\frac {\\partial h _ {t} (\\mathbf {s} _ {t} , \\mathbf {z})}{\\partial \\mathbf {s} _ {t}} = \\frac {\\partial \\left(A _ {t + 1} \\cdot h _ {t} (\\mathbf {s} _ {t} , \\mathbf {z})\\right)}{\\partial \\mathbf {s} _ {t}}, \\tag {3}", + "image_path": "07f6be6f9521f926d80bf16a7c01a17b4fdf20a667bdd384b75d05b3321c9ece.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 67, + 698, + 541, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 698, + 541, + 723 + ], + "spans": [ + { + "bbox": [ + 67, + 698, + 541, + 723 + ], + "type": "text", + "content": "making " + }, + { + "bbox": [ + 67, + 698, + 541, + 723 + ], + "type": "inline_equation", + "content": "A_{t}" + }, + { + "bbox": [ + 67, + 698, + 541, + 723 + ], + "type": "text", + "content": " straightforward to compute (again, a single \"AD oracle\" call) given " 
+ }, + { + "bbox": [ + 67, + 698, + 541, + 723 + ], + "type": "inline_equation", + "content": "A_{t+1}" + }, + { + "bbox": [ + 67, + 698, + 541, + 723 + ], + "type": "text", + "content": ". Step-wise AD exploits this fact to successively calculate the gradient with respect to each state, from state " + }, + { + "bbox": [ + 67, + 698, + 541, + 723 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 67, + 698, + 541, + 723 + ], + "type": "text", + "content": " down to state 0." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 90, + 87, + 521, + 297 + ], + "blocks": [ + { + "bbox": [ + 90, + 87, + 521, + 297 + ], + "lines": [ + { + "bbox": [ + 90, + 87, + 521, + 297 + ], + "spans": [ + { + "bbox": [ + 90, + 87, + 521, + 297 + ], + "type": "image", + "image_path": "e61aefc6d8918a5c6e6ab0e0697fcbfc36486ad10f9f7a6216b3e5fefb7518f9.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 307, + 541, + 381 + ], + "lines": [ + { + "bbox": [ + 67, + 307, + 541, + 381 + ], + "spans": [ + { + "bbox": [ + 67, + 307, + 541, + 381 + ], + "type": "text", + "content": "Figure 3: The lazy " + }, + { + "bbox": [ + 67, + 307, + 541, + 381 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 67, + 307, + 541, + 381 + ], + "type": "text", + "content": "-ary tree structure for traversing optimizer states in reverse order, with " + }, + { + "bbox": [ + 67, + 307, + 541, + 381 + ], + "type": "inline_equation", + "content": "k = 2" + }, + { + "bbox": [ + 67, + 307, + 541, + 381 + ], + "type": 
"text", + "content": ". Recall that " + }, + { + "bbox": [ + 67, + 307, + 541, + 381 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 67, + 307, + 541, + 381 + ], + "type": "text", + "content": " is the number of states (parameterized such that " + }, + { + "bbox": [ + 67, + 307, + 541, + 381 + ], + "type": "inline_equation", + "content": "n = T + 1" + }, + { + "bbox": [ + 67, + 307, + 541, + 381 + ], + "type": "text", + "content": "). Each node represents the correspondingly numbered state. We give an example of the traversal using the blue arrows in the figure, which denote the traversal path up to state " + }, + { + "bbox": [ + 67, + 307, + 541, + 381 + ], + "type": "inline_equation", + "content": "s_{\\frac{3n}{4} + 1}" + }, + { + "bbox": [ + 67, + 307, + 541, + 381 + ], + "type": "text", + "content": ". The gray cylinders indicate the states that are stored when the traversal is at state " + }, + { + "bbox": [ + 67, + 307, + 541, + 381 + ], + "type": "inline_equation", + "content": "s_{\\frac{3n}{4} + 1}" + }, + { + "bbox": [ + 67, + 307, + 541, + 381 + ], + "type": "text", + "content": "; the other states are not stored at this point in the traversal. Traversing this structure requires storing " + }, + { + "bbox": [ + 67, + 307, + 541, + 381 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(\\log(n))" + }, + { + "bbox": [ + 67, + 307, + 541, + 381 + ], + "type": "text", + "content": " state and computing " + }, + { + "bbox": [ + 67, + 307, + 541, + 381 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(n \\log(n))" + }, + { + "bbox": [ + 67, + 307, + 541, + 381 + ], + "type": "text", + "content": " optimizer steps—compared to " + }, + { + "bbox": [ + 67, + 307, + 541, + 381 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 67, + 307, + 541, + 381 + ], + "type": "text", + "content": " for simply training." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 400, + 541, + 484 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 400, + 541, + 484 + ], + "spans": [ + { + "bbox": [ + 67, + 400, + 541, + 484 + ], + "type": "text", + "content": "Bringing these ingredients together, the algorithm executes as follows. As a preprocessing step, it trains the model and stores all intermediate states " + }, + { + "bbox": [ + 67, + 400, + 541, + 484 + ], + "type": "inline_equation", + "content": "\\mathbf{s}_0,\\dots ,\\mathbf{s}_T" + }, + { + "bbox": [ + 67, + 400, + 541, + 484 + ], + "type": "text", + "content": ". Then, the algorithm calculates and sums the terms in (2). It first computes " + }, + { + "bbox": [ + 67, + 400, + 541, + 484 + ], + "type": "inline_equation", + "content": "A_{T}\\coloneqq \\partial \\phi (\\mathbf{s}_{T}) / \\partial \\mathbf{s}_{T}" + }, + { + "bbox": [ + 67, + 400, + 541, + 484 + ], + "type": "text", + "content": ", the gradient of the output function " + }, + { + "bbox": [ + 67, + 400, + 541, + 484 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 67, + 400, + 541, + 484 + ], + "type": "text", + "content": " with respect to the final state. 
Then, the algorithm steps through " + }, + { + "bbox": [ + 67, + 400, + 541, + 484 + ], + "type": "inline_equation", + "content": "\\mathbf{s}_{T - 1},\\ldots ,\\mathbf{s}_0" + }, + { + "bbox": [ + 67, + 400, + 541, + 484 + ], + "type": "text", + "content": " in reverse order, calculating (a) the gradient with respect to each state " + }, + { + "bbox": [ + 67, + 400, + 541, + 484 + ], + "type": "inline_equation", + "content": "A_{t}" + }, + { + "bbox": [ + 67, + 400, + 541, + 484 + ], + "type": "text", + "content": " (via (3)) and (b) the gradient with respect to " + }, + { + "bbox": [ + 67, + 400, + 541, + 484 + ], + "type": "inline_equation", + "content": "\\mathbf{z}" + }, + { + "bbox": [ + 67, + 400, + 541, + 484 + ], + "type": "text", + "content": " at that step " + }, + { + "bbox": [ + 67, + 400, + 541, + 484 + ], + "type": "inline_equation", + "content": "B_{t}" + }, + { + "bbox": [ + 67, + 400, + 541, + 484 + ], + "type": "text", + "content": " (via (2), using the previously calculated gradient with respect to that state). AD calculates both quantities--each requires differentiating over only one train step. Finally, the algorithm returns the final metagradient as the sum of the terms." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 484, + 541, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 484, + 541, + 510 + ], + "spans": [ + { + "bbox": [ + 67, + 484, + 541, + 510 + ], + "type": "text", + "content": "Despite improving storage overhead compared to \"direct AD\", step-wise AD is still too space-intensive at scale. After all, this algorithm saves every optimizer state." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 525, + 142, + 537 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 525, + 142, + 537 + ], + "spans": [ + { + "bbox": [ + 69, + 525, + 142, + 537 + ], + "type": "text", + "content": "2.3 REPLAY" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 544, + 541, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 544, + 541, + 652 + ], + "spans": [ + { + "bbox": [ + 67, + 544, + 541, + 652 + ], + "type": "text", + "content": "REPLAY is our algorithm for efficiently and exactly computing metagradients. It uses " + }, + { + "bbox": [ + 67, + 544, + 541, + 652 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(k\\log_k(T))" + }, + { + "bbox": [ + 67, + 544, + 541, + 652 + ], + "type": "text", + "content": " space and requires running the learning algorithm " + }, + { + "bbox": [ + 67, + 544, + 541, + 652 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 67, + 544, + 541, + 652 + ], + "type": "text", + "content": " a total of " + }, + { + "bbox": [ + 67, + 544, + 541, + 652 + ], + "type": "inline_equation", + "content": "1 + \\log_{k}(T)" + }, + { + "bbox": [ + 67, + 544, + 541, + 652 + ], + "type": "text", + "content": " times, with " + }, + { + "bbox": [ + 67, + 544, + 541, + 652 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 67, + 544, + 541, + 652 + ], + "type": "text", + "content": " a user-chosen constant. The main idea is to make the space-intensive subroutine of step-wise AD—a reverse-order traversal of the optimizer states at each step—much more efficient. After all, step-wise AD stores all the states to reverse traverse them. 
REPLAY modifies step-wise AD to traverse states in less space by exploiting a simple observation: when training is deterministic, one can reinstantiate an optimizer state " + }, + { + "bbox": [ + 67, + 544, + 541, + 652 + ], + "type": "inline_equation", + "content": "\\mathbf{s}_t" + }, + { + "bbox": [ + 67, + 544, + 541, + 652 + ], + "type": "text", + "content": " by \"replaying\" training from a fixed point " + }, + { + "bbox": [ + 67, + 544, + 541, + 652 + ], + "type": "inline_equation", + "content": "t' < t" + }, + { + "bbox": [ + 67, + 544, + 541, + 652 + ], + "type": "text", + "content": " at the compute cost of " + }, + { + "bbox": [ + 67, + 544, + 541, + 652 + ], + "type": "inline_equation", + "content": "t - t'" + }, + { + "bbox": [ + 67, + 544, + 541, + 652 + ], + "type": "text", + "content": " training steps. For example, one simple scheme saves every other state, then \"replays\" the remaining states when (reverse) traversing; this routine stores " + }, + { + "bbox": [ + 67, + 544, + 541, + 652 + ], + "type": "inline_equation", + "content": "T/2" + }, + { + "bbox": [ + 67, + 544, + 541, + 652 + ], + "type": "text", + "content": " states but computes an extra " + }, + { + "bbox": [ + 67, + 544, + 541, + 652 + ], + "type": "inline_equation", + "content": "T/2" + }, + { + "bbox": [ + 67, + 544, + 541, + 652 + ], + "type": "text", + "content": " model updates compared to storing all the states." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 652, + 541, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 652, + 541, + 715 + ], + "spans": [ + { + "bbox": [ + 67, + 652, + 541, + 715 + ], + "type": "text", + "content": "REPLAY performs a reverse-order traversal of the optimizer states while balancing the compute cost of \"replaying\" training with the storage cost of saving states. 
We use a combination of deterministic training (fixing data ordering, data augmentation, and any other randomness in the training process) and an efficient data structure (similar to a segment tree; see Figure 3) to reverse-order traverse the optimizer states with " + }, + { + "bbox": [ + 67, + 652, + 541, + 715 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(k\\log_k(T))" + }, + { + "bbox": [ + 67, + 652, + 541, + 715 + ], + "type": "text", + "content": " space and an additional " + }, + { + "bbox": [ + 67, + 652, + 541, + 715 + ], + "type": "inline_equation", + "content": "T\\log_k(T)" + }, + { + "bbox": [ + 67, + 652, + 541, + 715 + ], + "type": "text", + "content": " model steps." + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 543, + 169 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 543, + 169 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 543, + 169 + ], + "type": "text", + "content": "Specifically, REPLAY recursively saves and replays training states. The algorithm splits the training trajectory into " + }, + { + "bbox": [ + 67, + 72, + 543, + 169 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 67, + 72, + 543, + 169 + ], + "type": "text", + "content": " segments, performs the full training routine while saving only the start of each segment, then recurses into each segment (in reverse) to retrieve the states in reverse-order. 
The recursion depth bottoms out at " + }, + { + "bbox": [ + 67, + 72, + 543, + 169 + ], + "type": "inline_equation", + "content": "\\log_k(T)" + }, + { + "bbox": [ + 67, + 72, + 543, + 169 + ], + "type": "text", + "content": ", at which point the algorithm has " + }, + { + "bbox": [ + 67, + 72, + 543, + 169 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 67, + 72, + 543, + 169 + ], + "type": "text", + "content": " consecutive optimizer states in memory; the algorithm then backpropagates along this segment, before deleting all these states from memory and then reinstantiating the next " + }, + { + "bbox": [ + 67, + 72, + 543, + 169 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 67, + 72, + 543, + 169 + ], + "type": "text", + "content": "-length segment of optimizer states. We provide additional details on the algorithm in Appendix A.2. REPLAY unlocks computing large-scale metagradients by requiring only logarithmic storage and additional compute time." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 175, + 541, + 225 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 175, + 541, + 225 + ], + "spans": [ + { + "bbox": [ + 67, + 175, + 541, + 225 + ], + "type": "text", + "content": "Remark 1 (Connection to rematerialization). In a broad sense, both REPLAY and step-wise AD above can be viewed as special cases of a classical approach in AD (and computing broadly) known as rematerialization [CAC+81; BCT92; ZP00; GW08; CXZ+16]. To our knowledge, however, REPLAY is the first application of this particular rematerialization technique to the problem of computing metagradients through model training." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 232, + 541, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 232, + 541, + 281 + ], + "spans": [ + { + "bbox": [ + 67, + 232, + 541, + 281 + ], + "type": "text", + "content": "Remark 2 (Reversible learning). An alternative approach to calculating metagradients that does not save any state is reversible learning [MDA15], for which one can \"invert\" previous training states from future ones. We focus here on general (non-reversible) learning algorithms for two reasons: first, even simple algorithms such as SGD without momentum are non-reversible; second, reversibility in practice introduces numerical precision issues." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 299, + 361, + 316 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 299, + 361, + 316 + ], + "spans": [ + { + "bbox": [ + 68, + 299, + 361, + 316 + ], + "type": "text", + "content": "3 Designing metasmooth training routines" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 324, + 541, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 324, + 541, + 384 + ], + "spans": [ + { + "bbox": [ + 67, + 324, + 541, + 384 + ], + "type": "text", + "content": "Given a training function " + }, + { + "bbox": [ + 67, + 324, + 541, + 384 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 67, + 324, + 541, + 384 + ], + "type": "text", + "content": ", REPLAY enables us to compute metagradients " + }, + { + "bbox": [ + 67, + 324, + 541, + 384 + ], + "type": "inline_equation", + "content": "\\nabla f(\\mathbf{z})" + }, + { + "bbox": [ + 67, + 324, + 541, + 384 + ], + "type": "text", + "content": " for any setup " + }, + { + "bbox": [ + 67, + 324, + 541, + 384 + ], + "type": "inline_equation", + "content": "\\mathbf{z}" + }, + { + "bbox": [ + 67, + 324, + 541, + 384 + ], + "type": "text", + "content": ". 
Can we immediately use these metagradients to optimize model training setups? The answer is (generally) no: we find that applying REPLAY to a function " + }, + { + "bbox": [ + 67, + 324, + 541, + 384 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 67, + 324, + 541, + 384 + ], + "type": "text", + "content": " representing a standard model training and evaluation routine yields metagradients that are often " + }, + { + "bbox": [ + 67, + 324, + 541, + 384 + ], + "type": "inline_equation", + "content": "\\pm \\infty" + }, + { + "bbox": [ + 67, + 324, + 541, + 384 + ], + "type": "text", + "content": "-valued and generally unhelpful for optimization. Indeed, previous work has observed similar issues optimizing over even (very) small-scale training [BSF94; Pea96; MDA15]." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 384, + 541, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 384, + 541, + 445 + ], + "spans": [ + { + "bbox": [ + 67, + 384, + 541, + 445 + ], + "type": "text", + "content": "In this section, we show that an underlying source of the issue is the landscape of the metaparameter optimization problem. We then present a framework for modifying standard learning algorithms to admit useful metagradient, i.e., to be metasmooth. To use a familiar analogy: just as residual connections and improved initialization schemes can improve optimization in standard deep learning algorithms, our framework introduces an analogous set of modifications to enable optimization with metagradient." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 68, + 460, + 336, + 475 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 460, + 336, + 475 + ], + "spans": [ + { + "bbox": [ + 68, + 460, + 336, + 475 + ], + "type": "text", + "content": "3.1 The metaparameter optimization landscape" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 480, + 542, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 480, + 542, + 518 + ], + "spans": [ + { + "bbox": [ + 67, + 480, + 542, + 518 + ], + "type": "text", + "content": "We first review the notion of smoothness from optimization theory, and then adapt it to the setting of metagradients. The resulting metasmoothness metric allows us to quantify (and later, improve) the amenability of the metaparameter optimization problem to gradient-based methods." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 531, + 541, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 531, + 541, + 568 + ], + "spans": [ + { + "bbox": [ + 67, + 531, + 541, + 568 + ], + "type": "text", + "content": "Smoothness. In optimization theory, the basic property of a function that controls how effectively it can be optimized with first-order methods is smoothness. 
Specifically, a function " + }, + { + "bbox": [ + 67, + 531, + 541, + 568 + ], + "type": "inline_equation", + "content": "f(\\mathbf{z})" + }, + { + "bbox": [ + 67, + 531, + 541, + 568 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 67, + 531, + 541, + 568 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 67, + 531, + 541, + 568 + ], + "type": "text", + "content": "-smooth at a point " + }, + { + "bbox": [ + 67, + 531, + 541, + 568 + ], + "type": "inline_equation", + "content": "\\mathbf{z}" + }, + { + "bbox": [ + 67, + 531, + 541, + 568 + ], + "type": "text", + "content": " if its gradient " + }, + { + "bbox": [ + 67, + 531, + 541, + 568 + ], + "type": "inline_equation", + "content": "\\nabla f" + }, + { + "bbox": [ + 67, + 531, + 541, + 568 + ], + "type": "text", + "content": " satisfies the property that" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 200, + 575, + 541, + 590 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 200, + 575, + 541, + 590 + ], + "spans": [ + { + "bbox": [ + 200, + 575, + 541, + 590 + ], + "type": "interline_equation", + "content": "\\left\\| \\nabla f (\\mathbf {z}) - \\nabla f \\left(\\mathbf {z} ^ {\\prime}\\right) \\right\\| \\leq \\beta \\cdot \\left\\| \\mathbf {z} - \\mathbf {z} ^ {\\prime} \\right\\| \\quad \\text {f o r a l l} \\mathbf {z} ^ {\\prime}, \\tag {4}", + "image_path": "6c1c5eac1041ae1fb22568a46b928a626853285ad2b25a2389f8d63a9ec083a3.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 599, + 540, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 599, + 540, + 635 + ], + "spans": [ + { + "bbox": [ + 67, + 599, + 540, + 635 + ], + "type": "text", + "content": "or in other words, if its gradient does not change too quickly around " + }, + { + "bbox": [ + 67, + 599, + 540, + 635 + ], + "type": "inline_equation", + "content": "\\mathbf{z}" + }, + { + "bbox": [ + 67, + 599, + 540, + 635 + ], + 
"type": "text", + "content": ". To motivate this definition: if a function " + }, + { + "bbox": [ + 67, + 599, + 540, + 635 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 67, + 599, + 540, + 635 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 67, + 599, + 540, + 635 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 67, + 599, + 540, + 635 + ], + "type": "text", + "content": "-smooth at " + }, + { + "bbox": [ + 67, + 599, + 540, + 635 + ], + "type": "inline_equation", + "content": "\\mathbf{z}" + }, + { + "bbox": [ + 67, + 599, + 540, + 635 + ], + "type": "text", + "content": ", then a step of gradient descent with step size " + }, + { + "bbox": [ + 67, + 599, + 540, + 635 + ], + "type": "inline_equation", + "content": "1 / \\beta" + }, + { + "bbox": [ + 67, + 599, + 540, + 635 + ], + "type": "text", + "content": " will successfully decrease the value of the function:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 212, + 641, + 397, + 669 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 212, + 641, + 397, + 669 + ], + "spans": [ + { + "bbox": [ + 212, + 641, + 397, + 669 + ], + "type": "interline_equation", + "content": "f \\left(\\mathbf {z} - \\frac {1}{\\beta} \\nabla f (\\mathbf {z})\\right) \\leq f (\\mathbf {z}) - \\frac {1}{2 \\beta} \\| \\nabla f (\\mathbf {z}) \\| ^ {2}.", + "image_path": "650d90f816cfb3285c323b9ac068f712a628be317829f072d7c2a7dc29c4c8a2.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 676, + 387, + 690 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 676, + 387, + 690 + ], + "spans": [ + { + "bbox": [ + 69, + 676, + 387, + 690 + ], + "type": "text", + "content": "This guarantee makes " + }, + { + "bbox": [ + 69, + 676, + 387, + 690 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 69, + 676, + 387, + 690 + ], + "type": "text", + "content": "-smoothness a good 
measure of gradient utility." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 539, + 131 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 539, + 131 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 539, + 131 + ], + "type": "text", + "content": "Metasmoothness. There are two main challenges in adapting the smoothness property to the metagradient setting. First, evaluating (4) requires a search over all possible " + }, + { + "bbox": [ + 67, + 72, + 539, + 131 + ], + "type": "inline_equation", + "content": "\\mathbf{z}'" + }, + { + "bbox": [ + 67, + 72, + 539, + 131 + ], + "type": "text", + "content": ", which is infeasible. Second, even if we could exactly evaluate the left-hand side of (4), it would be difficult to disentangle non-smoothness of the training function " + }, + { + "bbox": [ + 67, + 72, + 539, + 131 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 67, + 72, + 539, + 131 + ], + "type": "text", + "content": " from potential error in metagradient computation (e.g., a numerically unstable operation in REPLAY)." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 133, + 539, + 181 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 133, + 539, + 181 + ], + "spans": [ + { + "bbox": [ + 67, + 133, + 539, + 181 + ], + "type": "text", + "content": "To sidestep these issues, we propose a metric called metasmoothness, given in Definition 1. Metasmoothness is cheap to compute—requiring only three evaluations of the training function—and does not rely on metagradient computation. 
For the remainder of this section, we fix a small constant " + }, + { + "bbox": [ + 67, + 133, + 539, + 181 + ], + "type": "inline_equation", + "content": "h > 0" + }, + { + "bbox": [ + 67, + 133, + 539, + 181 + ], + "type": "text", + "content": ", and define the corresponding finite-differences estimator of the directional derivative " + }, + { + "bbox": [ + 67, + 133, + 539, + 181 + ], + "type": "inline_equation", + "content": "\\Delta_f" + }, + { + "bbox": [ + 67, + 133, + 539, + 181 + ], + "type": "text", + "content": " as" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 238, + 189, + 372, + 213 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 238, + 189, + 372, + 213 + ], + "spans": [ + { + "bbox": [ + 238, + 189, + 372, + 213 + ], + "type": "interline_equation", + "content": "\\Delta_ {f} (\\mathbf {z}; \\mathbf {v}) := \\frac {f (\\mathbf {z} + h \\mathbf {v}) - f (\\mathbf {z})}{h}.", + "image_path": "694c0324b89bfbec08aca913604248313960367318fa0dc1e2995eee02ca775c.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 220, + 539, + 256 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 220, + 539, + 256 + ], + "spans": [ + { + "bbox": [ + 67, + 220, + 539, + 256 + ], + "type": "text", + "content": "Definition 1 (Metasmoothness of " + }, + { + "bbox": [ + 67, + 220, + 539, + 256 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 67, + 220, + 539, + 256 + ], + "type": "text", + "content": " at " + }, + { + "bbox": [ + 67, + 220, + 539, + 256 + ], + "type": "inline_equation", + "content": "\\mathbf{z}" + }, + { + "bbox": [ + 67, + 220, + 539, + 256 + ], + "type": "text", + "content": " towards " + }, + { + "bbox": [ + 67, + 220, + 539, + 256 + ], + "type": "inline_equation", + "content": "\\mathbf{v}" + }, + { + "bbox": [ + 67, + 220, + 539, + 256 + ], + "type": "text", + "content": "). 
Consider a training function " + }, + { + "bbox": [ + 67, + 220, + 539, + 256 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 67, + 220, + 539, + 256 + ], + "type": "text", + "content": " mapping metaparameters " + }, + { + "bbox": [ + 67, + 220, + 539, + 256 + ], + "type": "inline_equation", + "content": "\\mathbf{z} \\in \\mathbb{R}^n" + }, + { + "bbox": [ + 67, + 220, + 539, + 256 + ], + "type": "text", + "content": " to model output " + }, + { + "bbox": [ + 67, + 220, + 539, + 256 + ], + "type": "inline_equation", + "content": "f(\\mathbf{z}) \\in \\mathbb{R}" + }, + { + "bbox": [ + 67, + 220, + 539, + 256 + ], + "type": "text", + "content": ". Given a metaparameter " + }, + { + "bbox": [ + 67, + 220, + 539, + 256 + ], + "type": "inline_equation", + "content": "\\mathbf{z}" + }, + { + "bbox": [ + 67, + 220, + 539, + 256 + ], + "type": "text", + "content": " and a vector " + }, + { + "bbox": [ + 67, + 220, + 539, + 256 + ], + "type": "inline_equation", + "content": "\\mathbf{v} \\in \\mathbb{R}^n" + }, + { + "bbox": [ + 67, + 220, + 539, + 256 + ], + "type": "text", + "content": ", the metasmoothness of " + }, + { + "bbox": [ + 67, + 220, + 539, + 256 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 67, + 220, + 539, + 256 + ], + "type": "text", + "content": " at " + }, + { + "bbox": [ + 67, + 220, + 539, + 256 + ], + "type": "inline_equation", + "content": "\\mathbf{z}" + }, + { + "bbox": [ + 67, + 220, + 539, + 256 + ], + "type": "text", + "content": " towards " + }, + { + "bbox": [ + 67, + 220, + 539, + 256 + ], + "type": "inline_equation", + "content": "\\mathbf{v}" + }, + { + "bbox": [ + 67, + 220, + 539, + 256 + ], + "type": "text", + "content": " is given by" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 225, + 263, + 539, + 291 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 225, + 263, + 539, + 291 + ], + "spans": [ + { + "bbox": [ + 225, + 263, + 539, + 291 + ], 
+ "type": "interline_equation", + "content": "S _ {h, \\mathbf {v}} (f; \\mathbf {z}) := \\left| \\frac {\\Delta_ {f} (\\mathbf {z} + h \\mathbf {v}; \\mathbf {v}) - \\Delta_ {f} (\\mathbf {z}; \\mathbf {v})}{h} \\right|. \\tag {5}", + "image_path": "98d486f097013fcbcfaad06f1ed5faf6e5842378185fdbcaf8203beac42a2a71.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 298, + 539, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 298, + 539, + 323 + ], + "spans": [ + { + "bbox": [ + 67, + 298, + 539, + 323 + ], + "type": "text", + "content": "Definition 1 measures the rate of change of the derivative of " + }, + { + "bbox": [ + 67, + 298, + 539, + 323 + ], + "type": "inline_equation", + "content": "f(\\mathbf{z})" + }, + { + "bbox": [ + 67, + 298, + 539, + 323 + ], + "type": "text", + "content": " in the direction of a given vector " + }, + { + "bbox": [ + 67, + 298, + 539, + 323 + ], + "type": "inline_equation", + "content": "\\mathbf{v}" + }, + { + "bbox": [ + 67, + 298, + 539, + 323 + ], + "type": "text", + "content": ", and is therefore related to " + }, + { + "bbox": [ + 67, + 298, + 539, + 323 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 67, + 298, + 539, + 323 + ], + "type": "text", + "content": "-smoothness in that:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 76, + 328, + 538, + 373 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 76, + 328, + 526, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 328, + 526, + 342 + ], + "spans": [ + { + "bbox": [ + 76, + 328, + 526, + 342 + ], + "type": "text", + "content": "(a) If " + }, + { + "bbox": [ + 76, + 328, + 526, + 342 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 76, + 328, + 526, + 342 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 76, + 328, + 526, + 342 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 76, + 328, + 
526, + 342 + ], + "type": "text", + "content": "-smooth at " + }, + { + "bbox": [ + 76, + 328, + 526, + 342 + ], + "type": "inline_equation", + "content": "\\mathbf{z}" + }, + { + "bbox": [ + 76, + 328, + 526, + 342 + ], + "type": "text", + "content": ", then " + }, + { + "bbox": [ + 76, + 328, + 526, + 342 + ], + "type": "inline_equation", + "content": "S_{h,\\mathbf{v}}(f;\\mathbf{z}) \\leq \\beta" + }, + { + "bbox": [ + 76, + 328, + 526, + 342 + ], + "type": "text", + "content": " for any " + }, + { + "bbox": [ + 76, + 328, + 526, + 342 + ], + "type": "inline_equation", + "content": "(h,\\mathbf{v})" + }, + { + "bbox": [ + 76, + 328, + 526, + 342 + ], + "type": "text", + "content": " (so Definition 1 is necessary for smoothness)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 76, + 349, + 538, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 349, + 538, + 373 + ], + "spans": [ + { + "bbox": [ + 76, + 349, + 538, + 373 + ], + "type": "text", + "content": "(b) If " + }, + { + "bbox": [ + 76, + 349, + 538, + 373 + ], + "type": "inline_equation", + "content": "\\lim_{h\\to 0}S_{h,\\mathbf{v}}(f;\\mathbf{z})\\leq \\beta" + }, + { + "bbox": [ + 76, + 349, + 538, + 373 + ], + "type": "text", + "content": " for all " + }, + { + "bbox": [ + 76, + 349, + 538, + 373 + ], + "type": "inline_equation", + "content": "\\mathbf{z}\\in \\mathbb{R}^n" + }, + { + "bbox": [ + 76, + 349, + 538, + 373 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 76, + 349, + 538, + 373 + ], + "type": "inline_equation", + "content": "\\mathbf{v}\\in \\mathbb{S}^{n - 1}" + }, + { + "bbox": [ + 76, + 349, + 538, + 373 + ], + "type": "text", + "content": ", then " + }, + { + "bbox": [ + 76, + 349, + 538, + 373 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 76, + 349, + 538, + 373 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 76, + 349, + 538, + 373 + ], + "type": "inline_equation", + "content": 
"\\beta" + }, + { + "bbox": [ + 76, + 349, + 538, + 373 + ], + "type": "text", + "content": "-smooth everywhere (so a global version of Definition 1 is sufficient for smoothness)." + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 67, + 388, + 539, + 497 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 388, + 539, + 497 + ], + "spans": [ + { + "bbox": [ + 67, + 388, + 539, + 497 + ], + "type": "text", + "content": "Empirical metasmoothness. Definition 1 lets us measure the meta-smoothness of a training function " + }, + { + "bbox": [ + 67, + 388, + 539, + 497 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 67, + 388, + 539, + 497 + ], + "type": "text", + "content": " at a particular metaparameter " + }, + { + "bbox": [ + 67, + 388, + 539, + 497 + ], + "type": "inline_equation", + "content": "\\mathbf{z}" + }, + { + "bbox": [ + 67, + 388, + 539, + 497 + ], + "type": "text", + "content": " (towards a direction " + }, + { + "bbox": [ + 67, + 388, + 539, + 497 + ], + "type": "inline_equation", + "content": "\\mathbf{v}" + }, + { + "bbox": [ + 67, + 388, + 539, + 497 + ], + "type": "text", + "content": "). This definition, however, has two shortcomings. 
First, recall that the training function " + }, + { + "bbox": [ + 67, + 388, + 539, + 497 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 67, + 388, + 539, + 497 + ], + "type": "text", + "content": " is a composition of a learning algorithm " + }, + { + "bbox": [ + 67, + 388, + 539, + 497 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 67, + 388, + 539, + 497 + ], + "type": "text", + "content": " and an output function " + }, + { + "bbox": [ + 67, + 388, + 539, + 497 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 67, + 388, + 539, + 497 + ], + "type": "text", + "content": ", so the smoothness of " + }, + { + "bbox": [ + 67, + 388, + 539, + 497 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 67, + 388, + 539, + 497 + ], + "type": "text", + "content": " depends on that of both " + }, + { + "bbox": [ + 67, + 388, + 539, + 497 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 67, + 388, + 539, + 497 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 388, + 539, + 497 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 67, + 388, + 539, + 497 + ], + "type": "text", + "content": " (in particular, " + }, + { + "bbox": [ + 67, + 388, + 539, + 497 + ], + "type": "inline_equation", + "content": "\\frac{\\partial f}{\\partial \\mathbf{z}} = \\frac{\\partial \\phi}{\\partial \\mathcal{A}} \\cdot \\frac{\\partial \\mathcal{A}}{\\partial \\mathbf{z}}" + }, + { + "bbox": [ + 67, + 388, + 539, + 497 + ], + "type": "text", + "content": "). 
Since the output function " + }, + { + "bbox": [ + 67, + 388, + 539, + 497 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 67, + 388, + 539, + 497 + ], + "type": "text", + "content": " might be unknown ahead of time, we are most interested in measuring the overall metasmoothness of a learning algorithm " + }, + { + "bbox": [ + 67, + 388, + 539, + 497 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 67, + 388, + 539, + 497 + ], + "type": "text", + "content": ". Second, while the result of (5) does have a concrete basis in optimization theory, it may not be easy to interpret in practice (e.g., what does " + }, + { + "bbox": [ + 67, + 388, + 539, + 497 + ], + "type": "inline_equation", + "content": "S = 200" + }, + { + "bbox": [ + 67, + 388, + 539, + 497 + ], + "type": "text", + "content": " mean?). We address both issues simultaneously by (a) proposing an interpretable \"binarized\" version of Definition 1, and (b) studying metasmoothness in the space of model parameters " + }, + { + "bbox": [ + 67, + 388, + 539, + 497 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 67, + 388, + 539, + 497 + ], + "type": "text", + "content": ", instead of the output space." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 502, + 539, + 538 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 502, + 539, + 538 + ], + "spans": [ + { + "bbox": [ + 67, + 502, + 539, + 538 + ], + "type": "text", + "content": "Definition 2 (Empirical metasmoothness of " + }, + { + "bbox": [ + 67, + 502, + 539, + 538 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 67, + 502, + 539, + 538 + ], + "type": "text", + "content": "). 
Let " + }, + { + "bbox": [ + 67, + 502, + 539, + 538 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 67, + 502, + 539, + 538 + ], + "type": "text", + "content": " be a learning algorithm which maps metaparameters " + }, + { + "bbox": [ + 67, + 502, + 539, + 538 + ], + "type": "inline_equation", + "content": "\\mathbf{z} \\in \\mathbb{R}^n" + }, + { + "bbox": [ + 67, + 502, + 539, + 538 + ], + "type": "text", + "content": " to model parameters " + }, + { + "bbox": [ + 67, + 502, + 539, + 538 + ], + "type": "inline_equation", + "content": "\\theta \\in \\mathbb{R}^d" + }, + { + "bbox": [ + 67, + 502, + 539, + 538 + ], + "type": "text", + "content": ", let " + }, + { + "bbox": [ + 67, + 502, + 539, + 538 + ], + "type": "inline_equation", + "content": "\\mathbf{z}" + }, + { + "bbox": [ + 67, + 502, + 539, + 538 + ], + "type": "text", + "content": " be a metaparameter vector, and let " + }, + { + "bbox": [ + 67, + 502, + 539, + 538 + ], + "type": "inline_equation", + "content": "\\mathbf{v}" + }, + { + "bbox": [ + 67, + 502, + 539, + 538 + ], + "type": "text", + "content": " be a given direction. 
Let " + }, + { + "bbox": [ + 67, + 502, + 539, + 538 + ], + "type": "inline_equation", + "content": "\\mathbf{d} \\in \\mathbb{R}^d" + }, + { + "bbox": [ + 67, + 502, + 539, + 538 + ], + "type": "text", + "content": " be the per-coordinate variation in " + }, + { + "bbox": [ + 67, + 502, + 539, + 538 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 67, + 502, + 539, + 538 + ], + "type": "text", + "content": ", i.e.," + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 247, + 546, + 360, + 559 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 247, + 546, + 360, + 559 + ], + "spans": [ + { + "bbox": [ + 247, + 546, + 360, + 559 + ], + "type": "interline_equation", + "content": "\\mathbf {d} = \\left| \\mathcal {A} (\\mathbf {z} + 2 h \\mathbf {v}) - \\mathcal {A} (\\mathbf {z}) \\right|", + "image_path": "c8f8786e61d5ba9158e9658ee2ecf4f21676cb7dc45a56ce3520a97a215a4a5d.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 567, + 307, + 580 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 567, + 307, + 580 + ], + "spans": [ + { + "bbox": [ + 69, + 567, + 307, + 580 + ], + "type": "text", + "content": "The empirical " + }, + { + "bbox": [ + 69, + 567, + 307, + 580 + ], + "type": "inline_equation", + "content": "(h,\\mathbf{v})" + }, + { + "bbox": [ + 69, + 567, + 307, + 580 + ], + "type": "text", + "content": "-metasmoothness of " + }, + { + "bbox": [ + 69, + 567, + 307, + 580 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 69, + 567, + 307, + 580 + ], + "type": "text", + "content": " at " + }, + { + "bbox": [ + 69, + 567, + 307, + 580 + ], + "type": "inline_equation", + "content": "\\mathbf{z}" + }, + { + "bbox": [ + 69, + 567, + 307, + 580 + ], + "type": "text", + "content": " is given by" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 156, + 587, + 541, + 613 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + 
{ + "bbox": [ + 156, + 587, + 541, + 613 + ], + "spans": [ + { + "bbox": [ + 156, + 587, + 541, + 613 + ], + "type": "interline_equation", + "content": "\\widehat {S} _ {h, \\mathbf {v}} (\\mathcal {A}; \\mathbf {z}) = \\operatorname {s i g n} \\left(\\Delta_ {\\mathcal {A}} (\\mathbf {z}; \\mathbf {v})\\right) ^ {\\top} \\cdot \\operatorname {d i a g} \\left(\\frac {\\mathbf {d}}{\\| \\mathbf {d} \\| _ {1}}\\right) \\cdot \\operatorname {s i g n} \\left(\\Delta_ {\\mathcal {A}} (\\mathbf {z} + h \\mathbf {v}; \\mathbf {v})\\right), \\tag {6}", + "image_path": "59597a6db89b529053f6ff247a1734fb9cd12de6133d89b59beca3f9f784e818.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 69, + 620, + 217, + 633 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 620, + 217, + 633 + ], + "spans": [ + { + "bbox": [ + 69, + 620, + 217, + 633 + ], + "type": "text", + "content": "weights each parameter by its range." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 67, + 639, + 539, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 639, + 539, + 723 + ], + "spans": [ + { + "bbox": [ + 67, + 639, + 539, + 723 + ], + "type": "text", + "content": "Intuitively, (6) measures the agreement in sign between the (finite-difference approximation of the) metagradient in the direction of " + }, + { + "bbox": [ + 67, + 639, + 539, + 723 + ], + "type": "inline_equation", + "content": "\\mathbf{v}" + }, + { + "bbox": [ + 67, + 639, + 539, + 723 + ], + "type": "text", + "content": " at " + }, + { + "bbox": [ + 67, + 639, + 539, + 723 + ], + "type": "inline_equation", + "content": "\\mathbf{z}" + }, + { + "bbox": [ + 67, + 639, + 539, + 723 + ], + "type": "text", + "content": " and at " + }, + { + "bbox": [ + 67, + 639, + 539, + 723 + ], + "type": "inline_equation", + "content": "\\mathbf{z} + h\\mathbf{v}" + }, + { + "bbox": [ + 67, + 639, + 539, + 723 + ], + "type": "text", + "content": ", averaged across parameter coordinates and 
weighted by the variation in each coordinate. Taking a weighted average of sign agreements ensures that " + }, + { + "bbox": [ + 67, + 639, + 539, + 723 + ], + "type": "inline_equation", + "content": "\\widehat{S} \\in [-1,1]" + }, + { + "bbox": [ + 67, + 639, + 539, + 723 + ], + "type": "text", + "content": " (making it easier to interpret than Definition 1). The " + }, + { + "bbox": [ + 67, + 639, + 539, + 723 + ], + "type": "inline_equation", + "content": "\\mathrm{diag}(\\mathbf{d} / \\| \\mathbf{d}\\|_1)" + }, + { + "bbox": [ + 67, + 639, + 539, + 723 + ], + "type": "text", + "content": " term weights each agreement proportionally to the scale of the corresponding parameter change (downweighting, e.g., coordinates " + }, + { + "bbox": [ + 67, + 639, + 539, + 723 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 67, + 639, + 539, + 723 + ], + "type": "text", + "content": " that are essentially constant). Finally, observe that Definition 2 is efficient to compute in practice: it requires only three calls to the learning algorithm " + }, + { + "bbox": [ + 67, + 639, + 539, + 723 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 67, + 639, + 539, + 723 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 76, + 68, + 237, + 248 + ], + "blocks": [ + { + "bbox": [ + 76, + 68, + 237, + 248 + ], + "lines": [ + { + "bbox": [ + 76, + 68, + 237, + 248 + ], + "spans": [ + { + "bbox": [ + 76, + 68, + 237, + 248 + ], + "type": "image", + "image_path": "8903e4b2221d100768bfe268b5a001dd442b4e5c27be2741e58072ab69f0a150.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 157, + 256, + 170, + 266 + ], + "lines": [ + { + "bbox": [ + 157, + 256, + 170, + 266 + ], + "spans": [ + { + "bbox": [ + 157, + 256, + 170, + 266 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 244, + 69, + 360, + 223 + ], + "blocks": [ + { + "bbox": [ + 244, + 69, + 360, + 223 + ], + "lines": [ + { + "bbox": [ + 244, + 69, + 360, + 223 + ], + "spans": [ + { + "bbox": [ + 244, + 69, + 360, + 223 + ], + "type": "image", + "image_path": "1a484312f1ed10309ea34d6652f551a0d6047e126a3f781ebb74cc68734bab99.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 276, + 541, + 338 + ], + "lines": [ + { + "bbox": [ + 67, + 276, + 541, + 338 + ], + "spans": [ + { + "bbox": [ + 67, + 276, + 541, + 338 + ], + "type": "text", + "content": "Figure 4: (a) For a variety of training configurations of a ResNet-9 model, we plot metasmoothness (Def. 2) against test accuracy. 
Strategies such as increasing width, placing batch normalization before activations, and scaling down network outputs consistently improve metasmoothness, at a minor cost to accuracy. (b) Smoother training configurations can be optimized via metagradients more effectively. Here, as in Section 4.3, we use metagradients to gradient ascend on validation loss." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 368, + 68, + 536, + 247 + ], + "blocks": [ + { + "bbox": [ + 368, + 68, + 536, + 247 + ], + "lines": [ + { + "bbox": [ + 368, + 68, + 536, + 247 + ], + "spans": [ + { + "bbox": [ + 368, + 68, + 536, + 247 + ], + "type": "image", + "image_path": "a19a4da2f67b18c44fb26d6e2a9cceb669d036ffc05618b89b9c68b99f1485ad.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 429, + 256, + 443, + 266 + ], + "lines": [ + { + "bbox": [ + 429, + 256, + 443, + 266 + ], + "spans": [ + { + "bbox": [ + 429, + 256, + 443, + 266 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 354, + 541, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 354, + 541, + 392 + ], + "spans": [ + { + "bbox": [ + 67, + 354, + 541, + 392 + ], + "type": "text", + "content": "Remark 3. Ideally, recalling the smoothness definition (4), we would evaluate metasmoothness in all possible directions " + }, + { + "bbox": [ + 67, + 354, + 541, + 392 + ], + "type": "inline_equation", + "content": "\\mathbf{v}" + }, + { + "bbox": [ + 67, + 354, + 541, + 392 + ], + "type": "text", + "content": " and all points " + }, + { + "bbox": [ + 67, + 354, + 541, + 392 + ], + "type": "inline_equation", + "content": "\\mathbf{z}" + }, + { + "bbox": [ + 67, + 354, + 541, + 392 + ], + "type": "text", + "content": ". 
Empirically, we find in the sequel (Section 3.2) that this single-direction approximation at a single point " + }, + { + "bbox": [ + 67, + 354, + 541, + 392 + ], + "type": "inline_equation", + "content": "\\mathbf{z}" + }, + { + "bbox": [ + 67, + 354, + 541, + 392 + ], + "type": "text", + "content": " still yields a useful estimate of metasmoothness (e.g., one that correlates with metagradients utility)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 68, + 410, + 336, + 424 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 410, + 336, + 424 + ], + "spans": [ + { + "bbox": [ + 68, + 410, + 336, + 424 + ], + "type": "text", + "content": "3.2 Estimating and improving metasmoothness" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 430, + 541, + 501 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 430, + 541, + 501 + ], + "spans": [ + { + "bbox": [ + 67, + 430, + 541, + 501 + ], + "type": "text", + "content": "Having established a method for quantifying metasmoothness, we turn to the practical question: how can we design learning algorithms that are amenable to metagradient optimization? To answer this question, we introduce a straightforward framework: given a learning algorithm, explore a fixed menu of possible modifications to the training setup, and choose the combination that maximizes empirical metasmoothness. In practice, we find that this framework allows us to slightly modify learning algorithms in a way that makes them amenable to first-order methods." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 502, + 541, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 502, + 541, + 550 + ], + "spans": [ + { + "bbox": [ + 67, + 502, + 541, + 550 + ], + "type": "text", + "content": "As a case study, we study the task of training ResNet-9 on the CIFAR-10 dataset [Kri09]. 
We let the metaparameters " + }, + { + "bbox": [ + 67, + 502, + 541, + 550 + ], + "type": "inline_equation", + "content": "\\mathbf{z}" + }, + { + "bbox": [ + 67, + 502, + 541, + 550 + ], + "type": "text", + "content": " be a perturbation to the pixels of 1000 random training images (so " + }, + { + "bbox": [ + 67, + 502, + 541, + 550 + ], + "type": "inline_equation", + "content": "\\mathbf{z} \\in \\mathbb{R}^{1000 \\times 32 \\times 32 \\times 3}" + }, + { + "bbox": [ + 67, + 502, + 541, + 550 + ], + "type": "text", + "content": "). We estimate the empirical metasmoothness of different learning algorithms " + }, + { + "bbox": [ + 67, + 502, + 541, + 550 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 67, + 502, + 541, + 550 + ], + "type": "text", + "content": " at " + }, + { + "bbox": [ + 67, + 502, + 541, + 550 + ], + "type": "inline_equation", + "content": "\\mathbf{z} = \\mathbf{0}" + }, + { + "bbox": [ + 67, + 502, + 541, + 550 + ], + "type": "text", + "content": " using Definition 2. Concretely, we proceed as follows for each learning algorithm " + }, + { + "bbox": [ + 67, + 502, + 541, + 550 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 67, + 502, + 541, + 550 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 80, + 553, + 408, + 603 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 80, + 553, + 408, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 553, + 408, + 567 + ], + "spans": [ + { + "bbox": [ + 80, + 553, + 408, + 567 + ], + "type": "text", + "content": "1. Let " + }, + { + "bbox": [ + 80, + 553, + 408, + 567 + ], + "type": "inline_equation", + "content": "\\mathbf{z}_0 = \\mathbf{0}" + }, + { + "bbox": [ + 80, + 553, + 408, + 567 + ], + "type": "text", + "content": " be the metaparameter corresponding to the original dataset." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 80, + 571, + 320, + 585 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 571, + 320, + 585 + ], + "spans": [ + { + "bbox": [ + 80, + 571, + 320, + 585 + ], + "type": "text", + "content": "2. Sample a random perturbation vector " + }, + { + "bbox": [ + 80, + 571, + 320, + 585 + ], + "type": "inline_equation", + "content": "\\mathbf{v} \\sim \\mathcal{N}(0,1)" + }, + { + "bbox": [ + 80, + 571, + 320, + 585 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 80, + 590, + 310, + 603 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 590, + 310, + 603 + ], + "spans": [ + { + "bbox": [ + 80, + 590, + 310, + 603 + ], + "type": "text", + "content": "3. Compute the empirical metasmoothness (6), i.e.," + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 98, + 608, + 541, + 647 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 98, + 608, + 541, + 633 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 608, + 541, + 633 + ], + "spans": [ + { + "bbox": [ + 98, + 608, + 541, + 633 + ], + "type": "text", + "content": "(a) Let " + }, + { + "bbox": [ + 98, + 608, + 541, + 633 + ], + "type": "inline_equation", + "content": "\\theta_0\\coloneqq \\mathcal{A}(\\mathbf{z}_0)" + }, + { + "bbox": [ + 98, + 608, + 541, + 633 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 98, + 608, + 541, + 633 + ], + "type": "inline_equation", + "content": "\\theta_h\\coloneqq \\mathcal{A}(\\mathbf{z}_0 + h\\cdot \\mathbf{v})" + }, + { + "bbox": [ + 98, + 608, + 541, + 633 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 98, + 608, + 541, + 633 + ], + "type": "inline_equation", + "content": "\\theta_{2h}\\coloneqq \\mathcal{A}(\\mathbf{z}_0 + 2h\\cdot \\mathbf{v})" + }, + { + "bbox": [ + 98, + 608, + 541, + 633 + ], + "type": "text", + 
"content": " be the model parameters that result from training with training dataset perturbations " + }, + { + "bbox": [ + 98, + 608, + 541, + 633 + ], + "type": "inline_equation", + "content": "\\mathbf{z}_0,\\mathbf{z}_0 + h\\mathbf{v}" + }, + { + "bbox": [ + 98, + 608, + 541, + 633 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 98, + 608, + 541, + 633 + ], + "type": "inline_equation", + "content": "\\mathbf{z}_0 + 2h\\mathbf{v}" + }, + { + "bbox": [ + 98, + 608, + 541, + 633 + ], + "type": "text", + "content": ", respectively." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 98, + 634, + 288, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 634, + 288, + 647 + ], + "spans": [ + { + "bbox": [ + 98, + 634, + 288, + 647 + ], + "type": "text", + "content": "(b) Compute the approximate derivatives" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 196, + 651, + 459, + 666 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 196, + 651, + 459, + 666 + ], + "spans": [ + { + "bbox": [ + 196, + 651, + 459, + 666 + ], + "type": "interline_equation", + "content": "\\Delta_ {\\mathcal {A}} (\\mathbf {z} _ {0}; \\mathbf {v}) = \\left(\\theta_ {h} - \\theta_ {0}\\right) / h, \\quad \\Delta_ {\\mathcal {A}} (\\mathbf {z} _ {0} + h \\mathbf {v}; \\mathbf {v}) = \\left(\\theta_ {2 h} - \\theta_ {h}\\right) / h.", + "image_path": "e7446e43ec48b02a63f4846fa903ecdbc189ae9e1d3baeea28d3ab4349f67b7b.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 99, + 670, + 541, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 670, + 541, + 684 + ], + "spans": [ + { + "bbox": [ + 99, + 670, + 541, + 684 + ], + "type": "text", + "content": "(c) Compute the weighting vector " + }, + { + "bbox": [ + 99, + 670, + 541, + 684 + ], + "type": "inline_equation", + "content": "\\mathbf{d} = |\\theta_{2h} - \\theta_0|" + }, + { + "bbox": [ + 99, + 
670, + 541, + 684 + ], + "type": "text", + "content": ", and compute the average metasmoothness (6), i.e.," + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 173, + 687, + 481, + 714 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 173, + 687, + 481, + 714 + ], + "spans": [ + { + "bbox": [ + 173, + 687, + 481, + 714 + ], + "type": "interline_equation", + "content": "\\widehat {S} _ {h, \\mathbf {v}} (\\mathcal {A}; z _ {0}) = \\operatorname {s i g n} \\left(\\Delta_ {\\mathcal {A}} \\left(\\mathbf {z} _ {0} + h \\mathbf {v}; \\mathbf {v}\\right)\\right) ^ {\\top} \\cdot \\operatorname {d i a g} \\left(\\frac {\\mathbf {d}}{\\| \\mathbf {d} \\| _ {1}}\\right) \\cdot \\operatorname {s i g n} \\left(\\Delta_ {\\mathcal {A}} \\left(\\mathbf {z} _ {0}; \\mathbf {v}\\right)\\right).", + "image_path": "456f1b12f52ed44c1047241fa414546f77f22a232c79c5193cb9f8a81eb78364.jpg" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 93, + 71, + 299, + 224 + ], + "blocks": [ + { + "bbox": [ + 93, + 71, + 299, + 224 + ], + "lines": [ + { + "bbox": [ + 93, + 71, + 299, + 224 + ], + "spans": [ + { + "bbox": [ + 93, + 71, + 299, + 224 + ], + "type": "image", + "image_path": "95cf8c6f67f7a1ff5f5908ae786adfca0bdc4e43fa7de5fa95c29ea9182013b3.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 307, + 72, + 512, + 223 + ], + "blocks": [ + { + "bbox": [ + 307, + 72, + 512, + 223 + ], + "lines": [ + { + "bbox": [ + 307, + 72, + 512, + 223 + ], + "spans": [ + { + "bbox": 
[ + 307, + 72, + 512, + 223 + ], + "type": "image", + "image_path": "8ef162573b94d345eed4da85496a669bb5b67c2e40af2ad7caf94d5e5efc2d81.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 93, + 230, + 299, + 380 + ], + "blocks": [ + { + "bbox": [ + 93, + 230, + 299, + 380 + ], + "lines": [ + { + "bbox": [ + 93, + 230, + 299, + 380 + ], + "spans": [ + { + "bbox": [ + 93, + 230, + 299, + 380 + ], + "type": "image", + "image_path": "65efa3e7039ec2bf0c17df31db0fea551abb1e8ebec13cd70da4376df689dec2.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 391, + 541, + 464 + ], + "lines": [ + { + "bbox": [ + 67, + 391, + 541, + 464 + ], + "spans": [ + { + "bbox": [ + 67, + 391, + 541, + 464 + ], + "type": "text", + "content": "Figure 5: The effect of metasmoothness on the optimization landscape. Each plot above visualizes the loss landscape of a (deterministic) learning algorithm " + }, + { + "bbox": [ + 67, + 391, + 541, + 464 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 67, + 391, + 541, + 464 + ], + "type": "text", + "content": ", with the " + }, + { + "bbox": [ + 67, + 391, + 541, + 464 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 67, + 391, + 541, + 464 + ], + "type": "text", + "content": "- and " + }, + { + "bbox": [ + 67, + 391, + 541, + 464 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 67, + 391, + 541, + 464 + ], + "type": "text", + "content": "-axes representing additive perturbations to 1000 examples in the training set and the " + }, + { + "bbox": [ + 67, + 391, + 541, + 464 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 67, + 391, + 541, + 464 + ], + "type": "text", + "content": "-axis representing the resulting model's loss on the test example given in the title. 
In each row, the left plot is a non-smooth algorithm, and the right plot is a smooth algorithm (as per Definition 2) evaluated on the same example. Overall, empirical metasmoothness seems to strongly correlate with qualitative landscape smoothness. See Figure 12 for more examples." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 306, + 230, + 513, + 380 + ], + "blocks": [ + { + "bbox": [ + 306, + 230, + 513, + 380 + ], + "lines": [ + { + "bbox": [ + 306, + 230, + 513, + 380 + ], + "spans": [ + { + "bbox": [ + 306, + 230, + 513, + 380 + ], + "type": "image", + "image_path": "b822311a6fc4b5b0c4d7fcafd79dd0e3e2f03c4bdeeb8e5787a92fec3738137b.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 484, + 541, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 484, + 541, + 579 + ], + "spans": [ + { + "bbox": [ + 67, + 484, + 541, + 579 + ], + "type": "text", + "content": "Metasmooth learning algorithms. We apply the procedure above to estimate the metasmoothness of learning algorithms induced by different design choices (batch size, network width, BatchNorm placement, gradient scaling), and report the results in Figure 4 (left). On one hand, \"standard\" learning algorithms (i.e., those designed without metasmoothness in mind) are not metasmooth. On the other hand, our investigation reveals central factors driving metasmoothness. In addition to \"standard\" hyperparameters such as batch size and network width playing a role, we find that placing Batch Normalization layers prior to nonlinearities (instead of after) and scaling the final layer output are both crucial to metasmoothness. Note that the modifications we consider above are not exhaustive—see Appendix E for the full training setup." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 68, + 580, + 541, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 580, + 541, + 616 + ], + "spans": [ + { + "bbox": [ + 68, + 580, + 541, + 616 + ], + "type": "text", + "content": "Finally, in Figure 5, we plot the optimization landscape of both metasmooth (right) and non-metasmooth (left) models. We find that the landscapes of metasmooth models are much smoother and—qualitatively—more straightforward to optimize." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 631, + 541, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 631, + 541, + 715 + ], + "spans": [ + { + "bbox": [ + 67, + 631, + 541, + 715 + ], + "type": "text", + "content": "Metasmoothness/performance tradeoffs? Figure 4 (left) relates metasmoothness to model accuracy for the considered learning algorithms. While there is no clear trend, the top-performing learning algorithms are not always metasmooth. However, the trade-off is not too severe: the most metasmooth algorithms still achieve near-optimal accuracy. Furthermore, it is possible that with additional searching we could identify even more accurate metasmooth models. Taken together with our previous experiment, our results suggest that jointly searching over metasmoothness and model accuracy is a general recipe for designing learning algorithms that are both performant and metasmooth. 
Finally, as we discuss in Section 5, a fruitful avenue" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 542, + 97 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 542, + 97 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 542, + 97 + ], + "type": "text", + "content": "for future work may be to design metasmooth learning algorithms directly, i.e., without relying on stability heuristics or grid search." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 111, + 541, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 111, + 541, + 220 + ], + "spans": [ + { + "bbox": [ + 67, + 111, + 541, + 220 + ], + "type": "text", + "content": "Does metasmoothness aid downstream optimization? Recall that our motivation for studying metasmoothness is to develop learning algorithms that we can optimize the metaparameters of via metagradients (using first-order methods). We started with the notion of " + }, + { + "bbox": [ + 67, + 111, + 541, + 220 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 67, + 111, + 541, + 220 + ], + "type": "text", + "content": "-smoothness from optimization theory, and we adapted it to the setting of metagradients by making a series of approximations and modifications. The final question we address is: does our final notion of metasmoothness actually predict the utility of metagradients for optimization? Figure 4 (right) demonstrates that metasmoothness strongly predicts our ability to optimize the metaparameters of a given learning algorithm. 
We use metagradients (computed by REPLAY) to gradient ascend on validation loss with respect to the metaparameters " + }, + { + "bbox": [ + 67, + 111, + 541, + 220 + ], + "type": "inline_equation", + "content": "\\mathbf{z}" + }, + { + "bbox": [ + 67, + 111, + 541, + 220 + ], + "type": "text", + "content": ", and measure the change in model loss." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 238, + 179, + 255 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 238, + 179, + 255 + ], + "spans": [ + { + "bbox": [ + 69, + 238, + 179, + 255 + ], + "type": "text", + "content": "4 Applications" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 263, + 541, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 263, + 541, + 350 + ], + "spans": [ + { + "bbox": [ + 67, + 263, + 541, + 350 + ], + "type": "text", + "content": "In this section, apply metagradients to three problems in machine learning: selecting training data, poisoning training data, and searching for hyperparameters. In each setting we follow the same recipe: we frame the task as an optimization problem, modify the learning algorithm of interest to be smooth, then solve by first-order optimizing with meta-gradients—which we refer to, in a catch-all manner across algorithms, as metagradient descent (MGD). In particular: we substantially improve on existing dataset selection methods (Section 4.1, Section 4.2), perform the first effective accuracy-degrading data poisoning attack (Section 4.3), and discover one-cycle learning rate schedules with MGD (Section 4.4)." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 363, + 291, + 378 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 363, + 291, + 378 + ], + "spans": [ + { + "bbox": [ + 67, + 363, + 291, + 378 + ], + "type": "text", + "content": "4.1 Selecting multimodal training data" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 384, + 541, + 469 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 384, + 541, + 469 + ], + "spans": [ + { + "bbox": [ + 67, + 384, + 541, + 469 + ], + "type": "text", + "content": "Curating a training dataset from a mass of unfiltered data is a necessary and influential step in any large-scale machine learning pipeline. Deciding how to curate such a dataset is a challenging problem that has attracted substantial recent interest [FiW+22; ATS+23; EFM24; GIF+24]. In this section, we frame pre-training data selection as an optimization problem, and then solve this problem by first-order optimizing with metagradient. Applying our method to the DataComp-small benchmark [GIF+24], we greatly improve on the state-of-the-art (our improvement over state-of-the-art is roughly the same as the improvement of state-ofthe-art over training on random data)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 483, + 129, + 496 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 483, + 129, + 496 + ], + "spans": [ + { + "bbox": [ + 69, + 483, + 129, + 496 + ], + "type": "text", + "content": "4.1.1 Setup" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 501, + 541, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 501, + 541, + 552 + ], + "spans": [ + { + "bbox": [ + 67, + 501, + 541, + 552 + ], + "type": "text", + "content": "The goal of dataset selection is to choose a training data subset (out of a broad pool of data) that maximizes trained machine learning model performance. 
Given this goal, dataset selection has a natural interpretation as a combinatorial metaparameter optimization problem. In particular, in the language of Section 2.1, for a training set of size " + }, + { + "bbox": [ + 67, + 501, + 541, + 552 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 67, + 501, + 541, + 552 + ], + "type": "text", + "content": ", let" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 76, + 556, + 539, + 633 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 77, + 556, + 539, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 556, + 539, + 582 + ], + "spans": [ + { + "bbox": [ + 77, + 556, + 539, + 582 + ], + "type": "text", + "content": "(a) the metaparameters " + }, + { + "bbox": [ + 77, + 556, + 539, + 582 + ], + "type": "inline_equation", + "content": "\\mathbf{c} \\in \\mathcal{C} \\coloneqq \\mathbb{Z}_{\\geq 0}^{n}" + }, + { + "bbox": [ + 77, + 556, + 539, + 582 + ], + "type": "text", + "content": " be non-negative data counts representing the number of times each training sample repeats in the training data;" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 76, + 589, + 539, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 589, + 539, + 613 + ], + "spans": [ + { + "bbox": [ + 76, + 589, + 539, + 613 + ], + "type": "text", + "content": "(b) the algorithm " + }, + { + "bbox": [ + 76, + 589, + 539, + 613 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 76, + 589, + 539, + 613 + ], + "type": "text", + "content": " be a standard large-scale learning procedure, which runs on a training set comprising " + }, + { + "bbox": [ + 76, + 589, + 539, + 613 + ], + "type": "inline_equation", + "content": "c_{i}" + }, + { + "bbox": [ + 76, + 589, + 539, + 613 + ], + "type": "text", + "content": " copies of each sample " + }, + { + "bbox": [ + 76, + 589, + 539, + 613 + ], + "type": 
"inline_equation", + "content": "i" + }, + { + "bbox": [ + 76, + 589, + 539, + 613 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 76, + 589, + 539, + 613 + ], + "type": "inline_equation", + "content": "i\\in [n]" + }, + { + "bbox": [ + 76, + 589, + 539, + 613 + ], + "type": "text", + "content": ";" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 77, + 621, + 449, + 633 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 621, + 449, + 633 + ], + "spans": [ + { + "bbox": [ + 77, + 621, + 449, + 633 + ], + "type": "text", + "content": "(c) the output function " + }, + { + "bbox": [ + 77, + 621, + 449, + 633 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 77, + 621, + 449, + 633 + ], + "type": "text", + "content": " be the loss of the trained model on a target distribution " + }, + { + "bbox": [ + 77, + 621, + 449, + 633 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 77, + 621, + 449, + 633 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 69, + 640, + 500, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 640, + 500, + 654 + ], + "spans": [ + { + "bbox": [ + 69, + 640, + 500, + 654 + ], + "type": "text", + "content": "Then, defining " + }, + { + "bbox": [ + 69, + 640, + 500, + 654 + ], + "type": "inline_equation", + "content": "f(\\mathbf{c}) \\coloneqq \\phi(\\mathcal{A}(\\mathbf{c}))" + }, + { + "bbox": [ + 69, + 640, + 500, + 654 + ], + "type": "text", + "content": " (as in Section 2.1), our goal is to find the data counts " + }, + { + "bbox": [ + 69, + 640, + 500, + 654 + ], + "type": "inline_equation", + "content": "\\mathbf{c}^*" + }, + { + "bbox": [ + 69, + 640, + 500, + 654 + ], + "type": "text", + "content": " that solve" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 261, + 662, + 541, + 685 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 261, + 662, + 541, + 685 + ], + "spans": [ + { + "bbox": [ + 261, + 662, + 541, + 685 + ], + "type": "interline_equation", + "content": "\\mathbf {c} ^ {*} := \\underset {\\mathbf {c} \\in \\mathcal {C}} {\\arg \\min } f (\\mathbf {c}). 
\\tag {7}", + "image_path": "a44549b712996dcbf7a6e5d33eb30b9b8a67f96ff55c88e1ef64e805c6a5238e.jpg" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 740, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 740, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 740, + 312, + 750 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 255, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 72, + 255, + 85 + ], + "spans": [ + { + "bbox": [ + 69, + 72, + 255, + 85 + ], + "type": "text", + "content": "4.1.2 Gradient descent on training data" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 68, + 91, + 541, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 91, + 541, + 140 + ], + "spans": [ + { + "bbox": [ + 68, + 91, + 541, + 140 + ], + "type": "text", + "content": "Metagradients let us directly minimize the target task loss (7) with respect to the choice of training data. At a high level, our algorithm operates as follows: we start with a randomly chosen set of training data, then iteratively update the dataset selection using metagradients with respect to importance weights placed on each training datapoint. The specifics of our method are in Algorithm 1; we describe its core ideas below." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 68, + 154, + 541, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 154, + 541, + 202 + ], + "spans": [ + { + "bbox": [ + 68, + 154, + 541, + 202 + ], + "type": "text", + "content": "Idea 1: A surrogate algorithm. 
We cannot use metagradients to optimize (7) directly, because the metaparameters of interest " + }, + { + "bbox": [ + 68, + 154, + 541, + 202 + ], + "type": "inline_equation", + "content": "\\mathbf{c}" + }, + { + "bbox": [ + 68, + 154, + 541, + 202 + ], + "type": "text", + "content": " are discrete counts (and so the algorithm " + }, + { + "bbox": [ + 68, + 154, + 541, + 202 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 68, + 154, + 541, + 202 + ], + "type": "text", + "content": " is non-differentiable with respect to " + }, + { + "bbox": [ + 68, + 154, + 541, + 202 + ], + "type": "inline_equation", + "content": "\\mathbf{c}" + }, + { + "bbox": [ + 68, + 154, + 541, + 202 + ], + "type": "text", + "content": "). To circumvent this problem, we relax " + }, + { + "bbox": [ + 68, + 154, + 541, + 202 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 68, + 154, + 541, + 202 + ], + "type": "text", + "content": ": we define a surrogate algorithm " + }, + { + "bbox": [ + 68, + 154, + 541, + 202 + ], + "type": "inline_equation", + "content": "\\mathcal{A}_{\\mathbf{c}}^{\\prime}" + }, + { + "bbox": [ + 68, + 154, + 541, + 202 + ], + "type": "text", + "content": " that takes in a continuous metaparameter " + }, + { + "bbox": [ + 68, + 154, + 541, + 202 + ], + "type": "inline_equation", + "content": "\\mathbf{z} \\in \\mathbb{R}^{n}" + }, + { + "bbox": [ + 68, + 154, + 541, + 202 + ], + "type": "text", + "content": ", whose metagradient we can compute, then optimize using the metagradient on " + }, + { + "bbox": [ + 68, + 154, + 541, + 202 + ], + "type": "inline_equation", + "content": "\\mathcal{A}_{\\mathbf{c}}^{\\prime}" + }, + { + "bbox": [ + 68, + 154, + 541, + 202 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 202, + 541, + 310 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 202, + 541, + 310 + ], + "spans": [ + { + "bbox": [ + 68, + 202, + 541, + 310 + ], + "type": "text", + "content": "This surrogate learning algorithm " + }, + { + "bbox": [ + 68, + 202, + 541, + 310 + ], + "type": "inline_equation", + "content": "\\mathcal{A}_{\\mathrm{c}}^{\\prime}" + }, + { + "bbox": [ + 68, + 202, + 541, + 310 + ], + "type": "text", + "content": " maps a metaparameter " + }, + { + "bbox": [ + 68, + 202, + 541, + 310 + ], + "type": "inline_equation", + "content": "\\mathbf{z} \\in \\mathbb{R}^{n}" + }, + { + "bbox": [ + 68, + 202, + 541, + 310 + ], + "type": "text", + "content": " (representing a perturbation to training data weights) to a machine learning model. The surrogate is defined by a set of counts " + }, + { + "bbox": [ + 68, + 202, + 541, + 310 + ], + "type": "inline_equation", + "content": "\\mathbf{c} \\in \\mathbb{Z}_{+}^{n}" + }, + { + "bbox": [ + 68, + 202, + 541, + 310 + ], + "type": "text", + "content": ", and a hyperparameter " + }, + { + "bbox": [ + 68, + 202, + 541, + 310 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 68, + 202, + 541, + 310 + ], + "type": "text", + "content": " denoting a specific training iteration, both of which we bake into the surrogate algorithm itself. 
Given a metaparameter " + }, + { + "bbox": [ + 68, + 202, + 541, + 310 + ], + "type": "inline_equation", + "content": "\\mathbf{z} \\in \\mathbb{R}^{n}" + }, + { + "bbox": [ + 68, + 202, + 541, + 310 + ], + "type": "text", + "content": ", the algorithm " + }, + { + "bbox": [ + 68, + 202, + 541, + 310 + ], + "type": "inline_equation", + "content": "\\mathcal{A}_{\\mathrm{c}}^{\\prime}" + }, + { + "bbox": [ + 68, + 202, + 541, + 310 + ], + "type": "text", + "content": " trains a model \"as usual\" using the fixed counts " + }, + { + "bbox": [ + 68, + 202, + 541, + 310 + ], + "type": "inline_equation", + "content": "\\mathbf{c}" + }, + { + "bbox": [ + 68, + 202, + 541, + 310 + ], + "type": "text", + "content": ". That is, it makes " + }, + { + "bbox": [ + 68, + 202, + 541, + 310 + ], + "type": "inline_equation", + "content": "c_{i}" + }, + { + "bbox": [ + 68, + 202, + 541, + 310 + ], + "type": "text", + "content": " copies of each training sample " + }, + { + "bbox": [ + 68, + 202, + 541, + 310 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 68, + 202, + 541, + 310 + ], + "type": "text", + "content": ", shuffles and partitions the data into batches, and then at each iteration minimizes the batch loss with a step—just as the original learning algorithm " + }, + { + "bbox": [ + 68, + 202, + 541, + 310 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 68, + 202, + 541, + 310 + ], + "type": "text", + "content": ". 
At iteration " + }, + { + "bbox": [ + 68, + 202, + 541, + 310 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 68, + 202, + 541, + 310 + ], + "type": "text", + "content": ", however, in addition to the original loss on the " + }, + { + "bbox": [ + 68, + 202, + 541, + 310 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 68, + 202, + 541, + 310 + ], + "type": "text", + "content": "-th batch, the algorithm upweights each training sample " + }, + { + "bbox": [ + 68, + 202, + 541, + 310 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 68, + 202, + 541, + 310 + ], + "type": "text", + "content": " according to the metaparameter " + }, + { + "bbox": [ + 68, + 202, + 541, + 310 + ], + "type": "inline_equation", + "content": "z_{i}" + }, + { + "bbox": [ + 68, + 202, + 541, + 310 + ], + "type": "text", + "content": ". In other words, the objective at iteration " + }, + { + "bbox": [ + 68, + 202, + 541, + 310 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 68, + 202, + 541, + 310 + ], + "type": "text", + "content": " of the surrogate algorithm " + }, + { + "bbox": [ + 68, + 202, + 541, + 310 + ], + "type": "inline_equation", + "content": "\\mathcal{A}_{\\mathrm{c}}^{\\prime}" + }, + { + "bbox": [ + 68, + 202, + 541, + 310 + ], + "type": "text", + "content": " is" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 186, + 310, + 424, + 342 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 186, + 310, + 424, + 342 + ], + "spans": [ + { + "bbox": [ + 186, + 310, + 424, + 342 + ], + "type": "interline_equation", + "content": "\\ell_ {t} ^ {\\prime} (\\theta) := \\left\\{ \\begin{array}{l l} \\sum_ {x \\in t ^ {\\text {t h}} \\text {b a t c h}} \\ell (x; \\theta) & \\text {i f} t \\neq k \\\\ \\sum_ {x \\in t ^ {\\text {t h}} \\text {b a t c h}} \\ell (x; \\theta) + \\sum_ {i = 1} ^ {n} z _ {i} \\ell (x _ {i}; \\theta) & \\text {i f} t = k 
\\end{array} \\right.", + "image_path": "51721ea96c8faaf6536328d7c77afa7a0d6214fa87ac1be8d44a523335321046.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 346, + 277, + 358 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 346, + 277, + 358 + ], + "spans": [ + { + "bbox": [ + 69, + 346, + 277, + 358 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 69, + 346, + 277, + 358 + ], + "type": "inline_equation", + "content": "\\ell (x;\\theta)" + }, + { + "bbox": [ + 69, + 346, + 277, + 358 + ], + "type": "text", + "content": " is the training loss on example " + }, + { + "bbox": [ + 69, + 346, + 277, + 358 + ], + "type": "inline_equation", + "content": "x" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 68, + 358, + 541, + 395 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 358, + 541, + 395 + ], + "spans": [ + { + "bbox": [ + 68, + 358, + 541, + 395 + ], + "type": "text", + "content": "Observe that when " + }, + { + "bbox": [ + 68, + 358, + 541, + 395 + ], + "type": "inline_equation", + "content": "\\mathbf{z} = \\mathbf{0}_n" + }, + { + "bbox": [ + 68, + 358, + 541, + 395 + ], + "type": "text", + "content": ", the algorithm " + }, + { + "bbox": [ + 68, + 358, + 541, + 395 + ], + "type": "inline_equation", + "content": "\\mathcal{A}_{\\mathbf{c}}'" + }, + { + "bbox": [ + 68, + 358, + 541, + 395 + ], + "type": "text", + "content": " is identical to the standard learning algorithm " + }, + { + "bbox": [ + 68, + 358, + 541, + 395 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 68, + 358, + 541, + 395 + ], + "type": "text", + "content": ". 
And while " + }, + { + "bbox": [ + 68, + 358, + 541, + 395 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 68, + 358, + 541, + 395 + ], + "type": "text", + "content": " was a function of (nondifferentiable) discrete data counts " + }, + { + "bbox": [ + 68, + 358, + 541, + 395 + ], + "type": "inline_equation", + "content": "\\mathbf{c}" + }, + { + "bbox": [ + 68, + 358, + 541, + 395 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 68, + 358, + 541, + 395 + ], + "type": "inline_equation", + "content": "\\mathcal{A}_{\\mathbf{c}}'" + }, + { + "bbox": [ + 68, + 358, + 541, + 395 + ], + "type": "text", + "content": " is differentiable with respect to its input " + }, + { + "bbox": [ + 68, + 358, + 541, + 395 + ], + "type": "inline_equation", + "content": "\\mathbf{z}" + }, + { + "bbox": [ + 68, + 358, + 541, + 395 + ], + "type": "text", + "content": ", and so we can compute the metagradient" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 251, + 403, + 358, + 420 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 251, + 403, + 358, + 420 + ], + "spans": [ + { + "bbox": [ + 251, + 403, + 358, + 420 + ], + "type": "interline_equation", + "content": "\\mathbf {g} := \\nabla_ {\\mathbf {z}} \\phi \\big (\\mathcal {A} _ {\\mathbf {c}} ^ {\\prime} (\\mathbf {z}) \\big) \\big | _ {\\mathbf {z} = \\mathbf {0} _ {n}}.", + "image_path": "b9cd9d037ae04f90c0fa195e0fdf750f69e4a1769765dee90905597cb4f0828f.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 68, + 427, + 541, + 489 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 427, + 541, + 489 + ], + "spans": [ + { + "bbox": [ + 68, + 427, + 541, + 489 + ], + "type": "text", + "content": "Intuitively, the entries of the metagradient " + }, + { + "bbox": [ + 68, + 427, + 541, + 489 + ], + "type": "inline_equation", + "content": "\\mathbf{g}" + }, + { + "bbox": [ + 68, + 427, + 541, + 489 + ], + "type": "text", + 
"content": " capture the effect of adding an infinitesimal amount of each training sample " + }, + { + "bbox": [ + 68, + 427, + 541, + 489 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 68, + 427, + 541, + 489 + ], + "type": "text", + "content": " to the training data at iteration " + }, + { + "bbox": [ + 68, + 427, + 541, + 489 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 68, + 427, + 541, + 489 + ], + "type": "text", + "content": ". A positive entry " + }, + { + "bbox": [ + 68, + 427, + 541, + 489 + ], + "type": "inline_equation", + "content": "g_{i}" + }, + { + "bbox": [ + 68, + 427, + 541, + 489 + ], + "type": "text", + "content": " indicates that adding an infinitesimal amount of sample " + }, + { + "bbox": [ + 68, + 427, + 541, + 489 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 68, + 427, + 541, + 489 + ], + "type": "text", + "content": " to the training data would increase the loss, and a negative entry indicates that adding an infinitesimal amount of sample " + }, + { + "bbox": [ + 68, + 427, + 541, + 489 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 68, + 427, + 541, + 489 + ], + "type": "text", + "content": " to the training data would decrease the loss; the slot at " + }, + { + "bbox": [ + 68, + 427, + 541, + 489 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 68, + 427, + 541, + 489 + ], + "type": "text", + "content": " represents the (estimated) effect of adding a copy of sample " + }, + { + "bbox": [ + 68, + 427, + 541, + 489 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 68, + 427, + 541, + 489 + ], + "type": "text", + "content": " to the training data at every batch containing the sample." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 68, + 502, + 541, + 527 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 502, + 541, + 527 + ], + "spans": [ + { + "bbox": [ + 68, + 502, + 541, + 527 + ], + "type": "text", + "content": "Idea 2: Block coordinate descent. We then use the metagradient " + }, + { + "bbox": [ + 68, + 502, + 541, + 527 + ], + "type": "inline_equation", + "content": "\\mathbf{g}" + }, + { + "bbox": [ + 68, + 502, + 541, + 527 + ], + "type": "text", + "content": " to iteratively update our selected dataset. We update data counts as" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 209, + 536, + 541, + 550 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 209, + 536, + 541, + 550 + ], + "spans": [ + { + "bbox": [ + 209, + 536, + 541, + 550 + ], + "type": "interline_equation", + "content": "\\mathbf {c} \\leftarrow \\mathbf {c} - \\operatorname {s i g n} (\\mathbf {g}) \\odot \\mathbf {m}, \\quad \\mathbf {m} \\sim \\text {B e r n o u l l i} (p), \\tag {8}", + "image_path": "6a46b8d6a21defcc70331aed694730c6feb8745fa2314f3686b50930a1b8c3ab.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 68, + 558, + 541, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 558, + 541, + 606 + ], + "spans": [ + { + "bbox": [ + 68, + 558, + 541, + 606 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 68, + 558, + 541, + 606 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 68, + 558, + 541, + 606 + ], + "type": "text", + "content": " is a hyperparameter controlling the fraction of sample counts to update. This algorithm resembles a block coordinate descent algorithm [OR00], with the main difference being that we take signed gradient steps with step size 1 (projected onto non-negative integers) to ensure that the counts remain well-defined. 
As a result, " + }, + { + "bbox": [ + 68, + 558, + 541, + 606 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 68, + 558, + 541, + 606 + ], + "type": "text", + "content": " implicitly controls the algorithm's step size." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 68, + 606, + 541, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 606, + 541, + 643 + ], + "spans": [ + { + "bbox": [ + 68, + 606, + 541, + 643 + ], + "type": "text", + "content": "Applying (8) concludes a single optimization step. By repeating this process of estimating the metagradient, updating our counts vector, then constructing a new training dataset, we iteratively improve the selected data. Pseudocode for our algorithm can be found in Algorithm 1." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 69, + 657, + 136, + 668 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 657, + 136, + 668 + ], + "spans": [ + { + "bbox": [ + 69, + 657, + 136, + 668 + ], + "type": "text", + "content": "4.1.3 Results" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 68, + 676, + 541, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 676, + 541, + 712 + ], + "spans": [ + { + "bbox": [ + 68, + 676, + 541, + 712 + ], + "type": "text", + "content": "We evaluate our data selection algorithm using DataComp [GIF+24], a standardized framework for evaluating data selection methods for multimodal models. Algorithm 1 greatly improves on the state-of-the-art for the benchmark. Below, we describe the setting, outline our method, and conclude with our results." 
+ } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 73, + 89, + 405, + 198 + ], + "blocks": [ + { + "bbox": [ + 77, + 75, + 401, + 87 + ], + "lines": [ + { + "bbox": [ + 77, + 75, + 401, + 87 + ], + "spans": [ + { + "bbox": [ + 77, + 75, + 401, + 87 + ], + "type": "text", + "content": "Algorithm 1: Dataset selection using using metagradient descent (MGD)." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 73, + 89, + 405, + 198 + ], + "lines": [ + { + "bbox": [ + 73, + 89, + 405, + 198 + ], + "spans": [ + { + "bbox": [ + 73, + 89, + 405, + 198 + ], + "type": "text", + "content": "Input: initial data counts " + }, + { + "bbox": [ + 73, + 89, + 405, + 198 + ], + "type": "inline_equation", + "content": "\\mathbf{c}\\in \\mathbb{Z}_{\\geq 0}^{n}" + }, + { + "bbox": [ + 73, + 89, + 405, + 198 + ], + "type": "text", + "content": " , learning algorithm " + }, + { + "bbox": [ + 73, + 89, + 405, + 198 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 73, + 89, + 405, + 198 + ], + "type": "text", + "content": " output function Hyperparameters: step size " + }, + { + "bbox": [ + 73, + 89, + 405, + 198 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 73, + 89, + 405, + 198 + ], + "type": "text", + "content": " # opt steps " + }, + { + "bbox": [ + 73, + 89, + 405, + 198 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 73, + 89, + 405, + 198 + ], + "type": "text", + "content": " iteration number " + }, + { + "bbox": [ + 73, + 89, + 405, + 198 + ], 
+ "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 73, + 89, + 405, + 198 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 73, + 89, + 405, + 198 + ], + "type": "inline_equation", + "content": "t\\gets 1" + }, + { + "bbox": [ + 73, + 89, + 405, + 198 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 73, + 89, + 405, + 198 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 73, + 89, + 405, + 198 + ], + "type": "text", + "content": " do \n2 " + }, + { + "bbox": [ + 73, + 89, + 405, + 198 + ], + "type": "inline_equation", + "content": "\\mathbf{z}\\gets \\mathbf{0}_n / /" + }, + { + "bbox": [ + 73, + 89, + 405, + 198 + ], + "type": "text", + "content": " Build input to surrogate \n3 " + }, + { + "bbox": [ + 73, + 89, + 405, + 198 + ], + "type": "inline_equation", + "content": "\\mathbf{g}\\leftarrow \\frac{\\partial\\phi(\\mathcal{A}_c'(\\mathbf{z}))}{\\partial\\mathbf{z}} / /" + }, + { + "bbox": [ + 73, + 89, + 405, + 198 + ], + "type": "text", + "content": " Calculate metagradient using REPLAY \n4 m<- sample from Bernoulli(p) // Sample indices to step on \n5 c<- c-sign(g) " + }, + { + "bbox": [ + 73, + 89, + 405, + 198 + ], + "type": "inline_equation", + "content": "\\odot" + }, + { + "bbox": [ + 73, + 89, + 405, + 198 + ], + "type": "text", + "content": " m// Take optimization step \n6 Return c// Return final data counts" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "code_body" + } + ], + "index": 1, + "sub_type": "algorithm" + }, + { + "bbox": [ + 67, + 223, + 541, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 223, + 541, + 319 + ], + "spans": [ + { + "bbox": [ + 67, + 223, + 541, + 319 + ], + "type": "text", + "content": "Setting. DataComp [GIF+24] is a multimodal model training competition and benchmark for evaluating dataset selection methods. 
DataComp provides a fixed learning algorithm chosen in advance by the organizers and a large fixed candidate pool of internet data. The goal is to choose a subset of the candidate pool—possibly with repeated datapoints—that yields the best-performing model after training with the given learning algorithm, as measured by a predetermined set of 38 benchmarks. Given a submission subset, the mean score on the evaluation datasets for a model trained with that subset is taken as the final \"score.\" DataComp offers four separate \"scales\" requiring different amounts of compute; we focus on the small scale in this paper due to compute limitations." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 334, + 541, + 419 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 334, + 541, + 419 + ], + "spans": [ + { + "bbox": [ + 67, + 334, + 541, + 419 + ], + "type": "text", + "content": "Method. We select data with MGD (Algorithm 1) to minimize loss on data on a \"target set\" that is distributionally similar to the DataComp benchmark tasks, and select hyperparameters with a held-out \"validation set.\" In particular, we construct target and validation sets by taking samples from the DataComp evaluation tasks with extra samples available beyond those used in the DataComp test set (e.g., ImageNet, one of the tasks in DataComp, has a training set in addition to the test set evaluated in DataComp). See Appendix C for the exact details of the target and validation sets, the precise hyperparameters used with Algorithm 1, and a discussion on scalability (including further engineering details on executing our algorithm efficiently)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 433, + 541, + 482 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 433, + 541, + 482 + ], + "spans": [ + { + "bbox": [ + 67, + 433, + 541, + 482 + ], + "type": "text", + "content": "Results. 
MGD greatly outperforms the current state-of-the-art: the difference in accuracy between MGD and the current best method is roughly as large as the difference between the previous state-of-the-art (EcoDatum [Eco24]) and training on randomly chosen data (cf. Figure 6). Inspecting scores over the course of the optimization in Figure 6, we find that only a few steps are necessary to outperform previous methods." + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 78, + 491, + 291, + 613 + ], + "blocks": [ + { + "bbox": [ + 78, + 491, + 291, + 613 + ], + "lines": [ + { + "bbox": [ + 78, + 491, + 291, + 613 + ], + "spans": [ + { + "bbox": [ + 78, + 491, + 291, + 613 + ], + "type": "image", + "image_path": "b4f07dfbec991e71985782d6905bc8288383a71ad6a7e6b4aaf74a949165576e.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 624, + 541, + 674 + ], + "lines": [ + { + "bbox": [ + 67, + 624, + 541, + 674 + ], + "spans": [ + { + "bbox": [ + 67, + 624, + 541, + 674 + ], + "type": "text", + "content": "Figure 6: MGD dataset selection greatly outperforms existing methods (improving over the previous SOTA by as much as the previous SOTA improves over no filtering at all). We compare DataComp scores for MGD (over optimization steps), training on the entire candidate pool, the best baseline originally proposed by DataComp, and the previous SOTA [Eco24]." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 303, + 492, + 524, + 566 + ], + "blocks": [ + { + "bbox": [ + 303, + 492, + 524, + 566 + ], + "lines": [ + { + "bbox": [ + 303, + 492, + 524, + 566 + ], + "spans": [ + { + "bbox": [ + 303, + 492, + 524, + 566 + ], + "type": "table", + "html": "
MethodScoreΔ
- - - Baseline: No filtering0.13-
- - - Best baseline from [GIF+24]0.17+0.04
- - - Previous SOTA [Eco24]0.18+0.05
- - - MGD-DS (ours)0.22+0.09
", + "image_path": "f7cdf24f42ff7038688bd05478168802f6a6fa09213f5ffad00db62ef65980fa.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 740, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 740, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 740, + 312, + 750 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 86, + 72, + 294, + 185 + ], + "blocks": [ + { + "bbox": [ + 86, + 72, + 294, + 185 + ], + "lines": [ + { + "bbox": [ + 86, + 72, + 294, + 185 + ], + "spans": [ + { + "bbox": [ + 86, + 72, + 294, + 185 + ], + "type": "image", + "image_path": "c5d7306e1433a0b6c6b42b22995ae8e6a7ebb4b28513e507342f46c61081428d.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 194, + 541, + 232 + ], + "lines": [ + { + "bbox": [ + 67, + 194, + 541, + 232 + ], + "spans": [ + { + "bbox": [ + 67, + 194, + 541, + 232 + ], + "type": "text", + "content": "Figure 7: MGD dataset selection outperforms baselines. Comparing to training on all the data: it achieves over double the margin of improvement of LESS on MMLU, and improves by " + }, + { + "bbox": [ + 67, + 194, + 541, + 232 + ], + "type": "inline_equation", + "content": "+1.5\\%" + }, + { + "bbox": [ + 67, + 194, + 541, + 232 + ], + "type": "text", + "content": " on BBH (where LESS does not improve at all). The " + }, + { + "bbox": [ + 67, + 194, + 541, + 232 + ], + "type": "inline_equation", + "content": "\\Delta" + }, + { + "bbox": [ + 67, + 194, + 541, + 232 + ], + "type": "text", + "content": " column denotes improvement over not filtering." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 302, + 72, + 539, + 155 + ], + "blocks": [ + { + "bbox": [ + 302, + 72, + 539, + 155 + ], + "lines": [ + { + "bbox": [ + 302, + 72, + 539, + 155 + ], + "spans": [ + { + "bbox": [ + 302, + 72, + 539, + 155 + ], + "type": "table", + "html": "
BBH [SSS+22]MMLU [HBB+20]
Acc.ΔAcc.Δ
All Data35.2%-41.2%-
■ LESS35.2%-0.0%41.8%+0.5%
■ MGD-DS36.7%+1.5%42.5%+1.3%
", + "image_path": "93a1935980d353a47f17bc2136d959a36b3a20b7b844279a9d48017c1ff7548b.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 68, + 250, + 280, + 264 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 250, + 280, + 264 + ], + "spans": [ + { + "bbox": [ + 68, + 250, + 280, + 264 + ], + "type": "text", + "content": "4.2 Selecting instruction-tuning data" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 270, + 545, + 330 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 270, + 545, + 330 + ], + "spans": [ + { + "bbox": [ + 67, + 270, + 545, + 330 + ], + "type": "text", + "content": "In our second application, we select training data for instruction fine-tuning (IFT) using the same MGD-based method detailed in Algorithm 1 of Section 4.1. As with multimodal data, training on the \"right\" post-training data (such as the \"right\" IFT data) can greatly impact deployment-time model performance [LFX+24; DJP+24; TGZ+23]. MGD improves over baselines at choosing IFT data for MMLU [HBK+21], a general knowledge task, and BBH [SSS+22], a reasoning/chain-of-thought task." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 68, + 330, + 542, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 330, + 542, + 354 + ], + "spans": [ + { + "bbox": [ + 68, + 330, + 542, + 354 + ], + "type": "text", + "content": "To overview this section: we start by detailing the setting, then describe the specifics of our MGD instantiation before concluding with results." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 369, + 541, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 369, + 541, + 453 + ], + "spans": [ + { + "bbox": [ + 67, + 369, + 541, + 453 + ], + "type": "text", + "content": "Setting. We adopt the setting of LESS [XMG+24]. 
Here, the goal is to select a training data subset from four combined IFT datasets (Flan V2 [LHV+23], CoT [WWS+22], DOLLY [CHM+23], and Open Assistant 1 [KKR+24]) to maximize accuracy on a given target task. We consider two target tasks from LESS: MMLU (which comprises multiple choice questions spanning a variety of disciplines) and BBH (a 23 task subset of BIG-Bench [SRR+22]). In this setup, the data selector can access samples from each task built from the in-context learning prompts. Following Xia et al. [XMG+24], we fine-tune a 128-width LoRA [HY20] (in our work, on Gemma-2B [TMH+24]). See Appendix D for full details on the tasks and learning algorithm." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 468, + 541, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 468, + 541, + 517 + ], + "spans": [ + { + "bbox": [ + 67, + 468, + 541, + 517 + ], + "type": "text", + "content": "Method. We split up the available task samples into two sets—a \"target\" set and a \"validation\" set—then select data with MGD (via Algorithm 1) by minimizing causal language modeling loss on the \"target\" set of samples. We select hyperparameters like step size and number of SGD iterations with the validation set; see Appendix D for more details." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 530, + 541, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 530, + 541, + 601 + ], + "spans": [ + { + "bbox": [ + 67, + 530, + 541, + 601 + ], + "type": "text", + "content": "Results. Comparing with two baselines—training on all the data and training with data selected with LESS [XMG+24]—MGD yields strictly better training dataset selections for each target task (cf. Figure 7). 
MGD improves most on BBH, a reasoning task, compared to the best baseline " + }, + { + "bbox": [ + 67, + 530, + 541, + 601 + ], + "type": "inline_equation", + "content": "(+1.5\\%)" + }, + { + "bbox": [ + 67, + 530, + 541, + 601 + ], + "type": "text", + "content": " accuracy). On MMLU, a knowledge-based task, we outperform baselines by slightly less compared to the best baseline " + }, + { + "bbox": [ + 67, + 530, + 541, + 601 + ], + "type": "inline_equation", + "content": "(+0.8\\%)" + }, + { + "bbox": [ + 67, + 530, + 541, + 601 + ], + "type": "text", + "content": "; one explanation is that selecting IFT data lends more control over reasoning than over intrinsic knowledge available in the LM." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 603, + 541, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 603, + 541, + 662 + ], + "spans": [ + { + "bbox": [ + 67, + 603, + 541, + 662 + ], + "type": "text", + "content": "Beyond raw accuracy, we inspect losses across each step of the optimization process. Overall, our method improves validation loss over MGD steps (cf. Appendix Figures 13), but also exhibits signs of overfitting. Given intuition from overparameterized learning, we might expect this behavior: we optimize a total of 270,679 \"weights\"—each corresponding to a count for a datapoint—to minimize loss on only a handful of test samples (cf. Table 3)." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 68, + 678, + 340, + 693 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 678, + 340, + 693 + ], + "spans": [ + { + "bbox": [ + 68, + 678, + 340, + 693 + ], + "type": "text", + "content": "4.3 Accuracy-degrading (Huber) data poisoning" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 698, + 541, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 698, + 541, + 723 + ], + "spans": [ + { + "bbox": [ + 67, + 698, + 541, + 723 + ], + "type": "text", + "content": "The goal of an accuracy-degrading data poisoning attack is to degrade the performance of a machine learning model by corrupting a small fraction of its training data. Here, the considered threat model is as follows." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 72, + 539, + 119 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 72, + 539, + 119 + ], + "spans": [ + { + "bbox": [ + 68, + 72, + 539, + 119 + ], + "type": "text", + "content": "The attacker is given a training set " + }, + { + "bbox": [ + 68, + 72, + 539, + 119 + ], + "type": "inline_equation", + "content": "\\mathbf{X} = \\{x_{1},\\dots,x_{n}\\}" + }, + { + "bbox": [ + 68, + 72, + 539, + 119 + ], + "type": "text", + "content": " drawn from a distribution " + }, + { + "bbox": [ + 68, + 72, + 539, + 119 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 68, + 72, + 539, + 119 + ], + "type": "text", + "content": ", and a function " + }, + { + "bbox": [ + 68, + 72, + 539, + 119 + ], + "type": 
"inline_equation", + "content": "\\theta (\\cdot)" + }, + { + "bbox": [ + 68, + 72, + 539, + 119 + ], + "type": "text", + "content": " mapping training data to model parameters (representing the learning algorithm used by the victim). The attacker's goal is to return a new training set " + }, + { + "bbox": [ + 68, + 72, + 539, + 119 + ], + "type": "inline_equation", + "content": "\\mathbf{X}'" + }, + { + "bbox": [ + 68, + 72, + 539, + 119 + ], + "type": "text", + "content": " that differs from " + }, + { + "bbox": [ + 68, + 72, + 539, + 119 + ], + "type": "inline_equation", + "content": "\\mathbf{X}" + }, + { + "bbox": [ + 68, + 72, + 539, + 119 + ], + "type": "text", + "content": " in at most " + }, + { + "bbox": [ + 68, + 72, + 539, + 119 + ], + "type": "inline_equation", + "content": "\\varepsilon \\cdot n" + }, + { + "bbox": [ + 68, + 72, + 539, + 119 + ], + "type": "text", + "content": " datapoints while inducing model parameters " + }, + { + "bbox": [ + 68, + 72, + 539, + 119 + ], + "type": "inline_equation", + "content": "\\theta (\\mathbf{X}')" + }, + { + "bbox": [ + 68, + 72, + 539, + 119 + ], + "type": "text", + "content": " that perform as poorly as possible on a freshly drawn test set " + }, + { + "bbox": [ + 68, + 72, + 539, + 119 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 68, + 72, + 539, + 119 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 68, + 72, + 539, + 119 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 68, + 72, + 539, + 119 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 83, + 120, + 414, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 120, + 414, + 133 + ], + "spans": [ + { + "bbox": [ + 83, + 120, + 414, + 133 + ], + "type": "text", + "content": "Formally, the adversary aims to solve the following optimization problem:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 239, + 140, + 541, + 163 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 239, + 140, + 541, + 163 + ], + "spans": [ + { + "bbox": [ + 239, + 140, + 541, + 163 + ], + "type": "interline_equation", + "content": "\\arg \\max _ {\\tilde {x} _ {1}, \\dots , \\tilde {x} _ {n _ {p}}} \\mathbb {E} _ {x \\sim P} [ \\ell (x; \\theta (\\mathbf {X} ^ {\\prime})) ], \\tag {9}", + "image_path": "0de69f09eb0ea62160d63ffd2008ef1c27d809b2adc1eb7b7e9d86299c20f1de.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 172, + 541, + 233 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 172, + 541, + 233 + ], + "spans": [ + { + "bbox": [ + 67, + 172, + 541, + 233 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 67, + 172, + 541, + 233 + ], + "type": "inline_equation", + "content": "\\mathbf{X}^{\\prime} = \\{\\widetilde{x}_1,\\dots ,\\widetilde{x}_{n_p},x_{n_p + 1},\\dots ,x_n\\}" + }, + { + "bbox": [ + 67, + 172, + 541, + 233 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 172, + 541, + 233 + ], + "type": "inline_equation", + "content": "n_p = \\lfloor \\varepsilon n\\rfloor" + }, + { + "bbox": [ + 67, + 172, + 541, + 233 + ], + "type": "text", + "content": " . 
Note that our goal is to degrade the overall model performance on a test set " + }, + { + "bbox": [ + 67, + 172, + 541, + 233 + ], + "type": "inline_equation", + "content": "\\mathbf{X}_{test}" + }, + { + "bbox": [ + 67, + 172, + 541, + 233 + ], + "type": "text", + "content": " drawn from " + }, + { + "bbox": [ + 67, + 172, + 541, + 233 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 67, + 172, + 541, + 233 + ], + "type": "text", + "content": " (in particular, the test set " + }, + { + "bbox": [ + 67, + 172, + 541, + 233 + ], + "type": "inline_equation", + "content": "\\mathbf{X}_{test}" + }, + { + "bbox": [ + 67, + 172, + 541, + 233 + ], + "type": "text", + "content": " is unknown to the adversary). In this way, this setting resembles the Huber contamination model in statistics [Hub64], and is strictly more challenging than the usual data poisoning settings in deep learning (e.g., backdoor attacks [GDG17] or attacks that target specific test examples [KL17])." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 68, + 233, + 541, + 305 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 233, + 541, + 305 + ], + "spans": [ + { + "bbox": [ + 68, + 233, + 541, + 305 + ], + "type": "text", + "content": "For large-scale machine learning models, finding strong adversaries has proven challenging—standard loss-minimizing learning algorithms seem quite robust to maliciously-inserted data [LKY23]. In fact, the first non-trivial accuracy degradation data poisoning attacks on deep models were pioneered by Lu et al. [LKY22] and later improved upon by the same set of authors [LKY23]. 
Broadly speaking, even constructing attacks that degrade the overall performance of a learning algorithm by more than the adversarial budget " + }, + { + "bbox": [ + 68, + 233, + 541, + 305 + ], + "type": "inline_equation", + "content": "\\varepsilon" + }, + { + "bbox": [ + 68, + 233, + 541, + 305 + ], + "type": "text", + "content": " has proven challenging." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 319, + 129, + 332 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 319, + 129, + 332 + ], + "spans": [ + { + "bbox": [ + 69, + 319, + 129, + 332 + ], + "type": "text", + "content": "4.3.1 Setup" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 68, + 338, + 541, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 338, + 541, + 388 + ], + "spans": [ + { + "bbox": [ + 68, + 338, + 541, + 388 + ], + "type": "text", + "content": "We observe that (9) is a continuous optimization problem to which we can directly apply our metagradient framework, approximating the expectation over " + }, + { + "bbox": [ + 68, + 338, + 541, + 388 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 68, + 338, + 541, + 388 + ], + "type": "text", + "content": " by a finite-sample average over a validation set " + }, + { + "bbox": [ + 68, + 338, + 541, + 388 + ], + "type": "inline_equation", + "content": "\\mathbf{X}_{val}" + }, + { + "bbox": [ + 68, + 338, + 541, + 388 + ], + "type": "text", + "content": ". 
In particular, given a (randomly shuffled) training set " + }, + { + "bbox": [ + 68, + 338, + 541, + 388 + ], + "type": "inline_equation", + "content": "\\mathbf{X}" + }, + { + "bbox": [ + 68, + 338, + 541, + 388 + ], + "type": "text", + "content": " and validation set " + }, + { + "bbox": [ + 68, + 338, + 541, + 388 + ], + "type": "inline_equation", + "content": "\\mathbf{X}_{val}" + }, + { + "bbox": [ + 68, + 338, + 541, + 388 + ], + "type": "text", + "content": ", we set up the following metaparameter optimization problem (see Section 2.1):" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 76, + 394, + 540, + 460 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 77, + 394, + 408, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 394, + 408, + 407 + ], + "spans": [ + { + "bbox": [ + 77, + 394, + 408, + 407 + ], + "type": "text", + "content": "(a) the metaparameter " + }, + { + "bbox": [ + 77, + 394, + 408, + 407 + ], + "type": "inline_equation", + "content": "\\mathbf{z} \\in \\mathcal{X}^{n_p}" + }, + { + "bbox": [ + 77, + 394, + 408, + 407 + ], + "type": "text", + "content": " is a tensor of " + }, + { + "bbox": [ + 77, + 394, + 408, + 407 + ], + "type": "inline_equation", + "content": "n_p = \\lfloor \\varepsilon n \\rfloor" + }, + { + "bbox": [ + 77, + 394, + 408, + 407 + ], + "type": "text", + "content": " poisoned samples;" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 76, + 415, + 540, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 415, + 540, + 441 + ], + "spans": [ + { + "bbox": [ + 76, + 415, + 540, + 441 + ], + "type": "text", + "content": "(b) the algorithm " + }, + { + "bbox": [ + 76, + 415, + 540, + 441 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 76, + 415, + 540, + 441 + ], + "type": "text", + "content": " maps metaparameters " + }, + { + "bbox": [ + 76, + 415, + 540, + 441 + ], + "type": 
"inline_equation", + "content": "\\mathbf{z}" + }, + { + "bbox": [ + 76, + 415, + 540, + 441 + ], + "type": "text", + "content": " to a trained model " + }, + { + "bbox": [ + 76, + 415, + 540, + 441 + ], + "type": "inline_equation", + "content": "\\mathcal{A}(\\mathbf{z})" + }, + { + "bbox": [ + 76, + 415, + 540, + 441 + ], + "type": "text", + "content": " by replacing the first " + }, + { + "bbox": [ + 76, + 415, + 540, + 441 + ], + "type": "inline_equation", + "content": "n_p" + }, + { + "bbox": [ + 76, + 415, + 540, + 441 + ], + "type": "text", + "content": " samples in " + }, + { + "bbox": [ + 76, + 415, + 540, + 441 + ], + "type": "inline_equation", + "content": "\\mathbf{X}" + }, + { + "bbox": [ + 76, + 415, + 540, + 441 + ], + "type": "text", + "content": " with the samples in " + }, + { + "bbox": [ + 76, + 415, + 540, + 441 + ], + "type": "inline_equation", + "content": "\\mathbf{z}" + }, + { + "bbox": [ + 76, + 415, + 540, + 441 + ], + "type": "text", + "content": " and then training on the resulting dataset;" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 77, + 448, + 405, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 448, + 405, + 460 + ], + "spans": [ + { + "bbox": [ + 77, + 448, + 405, + 460 + ], + "type": "text", + "content": "(c) the output function " + }, + { + "bbox": [ + 77, + 448, + 405, + 460 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 77, + 448, + 405, + 460 + ], + "type": "text", + "content": " evaluates average loss on the validation set " + }, + { + "bbox": [ + 77, + 448, + 405, + 460 + ], + "type": "inline_equation", + "content": "\\mathbf{X}_{val}" + }, + { + "bbox": [ + 77, + 448, + 405, + 460 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 69, + 475, + 149, + 487 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 475, + 149, + 487 + ], + "spans": [ + { + "bbox": [ + 69, + 475, + 149, + 487 + ], + "type": "text", + "content": "4.3.2 Algorithm" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 68, + 494, + 541, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 494, + 541, + 534 + ], + "spans": [ + { + "bbox": [ + 68, + 494, + 541, + 534 + ], + "type": "text", + "content": "To apply our first-order methods to this problem, we start by initializing the poisoned data to be exactly the first " + }, + { + "bbox": [ + 68, + 494, + 541, + 534 + ], + "type": "inline_equation", + "content": "n_p" + }, + { + "bbox": [ + 68, + 494, + 541, + 534 + ], + "type": "text", + "content": " samples in " + }, + { + "bbox": [ + 68, + 494, + 541, + 534 + ], + "type": "inline_equation", + "content": "\\mathbf{X}" + }, + { + "bbox": [ + 68, + 494, + 541, + 534 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 68, + 494, + 541, + 534 + ], + "type": "inline_equation", + "content": "\\mathbf{z}^{(0)} \\coloneqq \\{\\widetilde{x}_i^{(0)} = x_i : i \\in [n_p]\\}" + }, + { + "bbox": [ + 68, + 494, + 541, + 534 + ], + "type": "text", + "content": ". 
Then, for " + }, + { + "bbox": [ + 68, + 494, + 541, + 534 + ], + "type": "inline_equation", + "content": "t = 1, \\dots, T" + }, + { + "bbox": [ + 68, + 494, + 541, + 534 + ], + "type": "text", + "content": ", we sample a minibatch " + }, + { + "bbox": [ + 68, + 494, + 541, + 534 + ], + "type": "inline_equation", + "content": "\\mathbf{X}_{val}^{(t)}" + }, + { + "bbox": [ + 68, + 494, + 541, + 534 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 68, + 494, + 541, + 534 + ], + "type": "inline_equation", + "content": "\\mathbf{X}_{val}" + }, + { + "bbox": [ + 68, + 494, + 541, + 534 + ], + "type": "text", + "content": " and use REPLAY to compute the metagradient" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 228, + 542, + 380, + 586 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 228, + 542, + 380, + 586 + ], + "spans": [ + { + "bbox": [ + 228, + 542, + 380, + 586 + ], + "type": "interline_equation", + "content": "\\mathbf{g}_{t} = \\frac{d}{d\\mathbf{z}}\\left(\\sum_{x\\in \\mathbf{X}_{val}^{(t)}}\\ell (x;\\mathcal{A}(\\mathbf{z}^{(t - 1)}))\\right),", + "image_path": "47d23c287ed5f746ff659469e2ecd37d7dffa6ecd055b8a1e70ea3974fc87d26.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 68, + 594, + 354, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 594, + 354, + 608 + ], + "spans": [ + { + "bbox": [ + 68, + 594, + 354, + 608 + ], + "type": "text", + "content": "and update the poisoned data using (projected) gradient ascent:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 228, + 616, + 380, + 636 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 228, + 616, + 380, + 636 + ], + "spans": [ + { + "bbox": [ + 228, + 616, + 380, + 636 + ], + "type": "interline_equation", + "content": "\\mathbf {z} ^ {(t)} = \\Pi_ {\\mathcal {X}} \\left(\\mathbf {z} ^ {(t - 1)} + \\eta \\cdot \\operatorname {s i g n} (\\mathbf {g} _ {t})\\right),", + 
"image_path": "65a1e9d072408c3c3d25eda4daa1b1e739d7581616f6c83ad9fe1690f7769bee.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 68, + 644, + 541, + 670 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 644, + 541, + 670 + ], + "spans": [ + { + "bbox": [ + 68, + 644, + 541, + 670 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 68, + 644, + 541, + 670 + ], + "type": "inline_equation", + "content": "\\Pi_{\\mathcal{X}}" + }, + { + "bbox": [ + 68, + 644, + 541, + 670 + ], + "type": "text", + "content": " is the projection operator onto the sample space " + }, + { + "bbox": [ + 68, + 644, + 541, + 670 + ], + "type": "inline_equation", + "content": "\\mathcal{X}" + }, + { + "bbox": [ + 68, + 644, + 541, + 670 + ], + "type": "text", + "content": ". (For example, when " + }, + { + "bbox": [ + 68, + 644, + 541, + 670 + ], + "type": "inline_equation", + "content": "\\mathcal{X}" + }, + { + "bbox": [ + 68, + 644, + 541, + 670 + ], + "type": "text", + "content": " is the space of image-label pairs, " + }, + { + "bbox": [ + 68, + 644, + 541, + 670 + ], + "type": "inline_equation", + "content": "\\Pi_{\\mathcal{X}}" + }, + { + "bbox": [ + 68, + 644, + 541, + 670 + ], + "type": "text", + "content": " clips images' pixel values to [0, 1] and ensures labels are valid probability distributions.)" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 79, + 676, + 492, + 688 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 676, + 492, + 688 + ], + "spans": [ + { + "bbox": [ + 79, + 676, + 492, + 688 + ], + "type": "text", + "content": "In principle, the adversary can also decide which samples to poison, but for simplicity we consider this \"fixed\" case." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 72, + 68, + 541, + 130 + ], + "blocks": [ + { + "bbox": [ + 72, + 68, + 541, + 130 + ], + "lines": [ + { + "bbox": [ + 72, + 68, + 541, + 130 + ], + "spans": [ + { + "bbox": [ + 72, + 68, + 541, + 130 + ], + "type": "image", + "image_path": "bee2bdf66cc8b88534900202c77d8a154c55594c42b87de51729a3c9fc1551b5.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 179, + 138, + 430, + 152 + ], + "lines": [ + { + "bbox": [ + 179, + 138, + 430, + 152 + ], + "spans": [ + { + "bbox": [ + 179, + 138, + 430, + 152 + ], + "type": "text", + "content": "Figure 8: Examples of poisoned images from Section 4.3." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 79, + 162, + 296, + 286 + ], + "blocks": [ + { + "bbox": [ + 79, + 162, + 296, + 286 + ], + "lines": [ + { + "bbox": [ + 79, + 162, + 296, + 286 + ], + "spans": [ + { + "bbox": [ + 79, + 162, + 296, + 286 + ], + "type": "image", + "image_path": "599688cbc30a73e86945ceef1aaa6104947a8bf4fb89679b2b4e04e12dbef15b.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 297, + 541, + 360 + ], + "lines": [ + { + "bbox": [ + 67, + 297, + 541, + 360 + ], + "spans": [ + { + "bbox": [ + 67, + 297, + 541, + 360 + ], + "type": "text", + "content": "Figure 9: For each iteration of MGD (" + }, + { + "bbox": [ + 67, + 297, + 541, + 360 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 67, + 297, + 541, + 360 + ], + "type": "text", + "content": "-axis), we train a new model from random initialization on a randomly shuffled training set with the current iterate of poisoned data injected. We evaluate the test accuracy (" + }, + { + "bbox": [ + 67, + 297, + 541, + 360 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 67, + 297, + 541, + 360 + ], + "type": "text", + "content": "-axis), and use REPLAY to compute the metagradient. MGD outperforms the best known attack [LKY23] by an order of magnitude and (for reference) results in a model that has the same accuracy as a single-layer neural network trained on random image features [CNL11]." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 299, + 161, + 543, + 240 + ], + "blocks": [ + { + "bbox": [ + 299, + 161, + 543, + 240 + ], + "lines": [ + { + "bbox": [ + 299, + 161, + 543, + 240 + ], + "spans": [ + { + "bbox": [ + 299, + 161, + 543, + 240 + ], + "type": "table", + "html": "
ModelAcc.Δ
- - Original model92.0%-
- - GradCancel [LKY23]91.2%-0.80%
- MGD-DP (ours)78.1%-13.9%
1-layer NN (for reference) [CNL11]83.3%-8.7%
", + "image_path": "5f56febcc07b1551f91d149adb7bbceab2088e31bcd8e08cd237fa6bf1e3b1d6.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 378, + 151, + 389 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 378, + 151, + 389 + ], + "spans": [ + { + "bbox": [ + 69, + 378, + 151, + 389 + ], + "type": "text", + "content": "4.3.3 Evaluation" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 396, + 541, + 444 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 396, + 541, + 444 + ], + "spans": [ + { + "bbox": [ + 67, + 396, + 541, + 444 + ], + "type": "text", + "content": "We use the CIFAR-10 dataset which consists of 60,000 total images each labeled as one of 10 classes. We partition the data into 40,000 training examples, 10,000 validation examples, and 10,000 test examples. We consider a simple 12-epoch CIFAR-10 training procedure, which reaches " + }, + { + "bbox": [ + 67, + 396, + 541, + 444 + ], + "type": "inline_equation", + "content": "92.4\\%" + }, + { + "bbox": [ + 67, + 396, + 541, + 444 + ], + "type": "text", + "content": " accuracy on the CIFAR-10 test set when applied to the 40,000 training examples. See Appendix E for training hyperparameters." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 445, + 541, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 445, + 541, + 518 + ], + "spans": [ + { + "bbox": [ + 67, + 445, + 541, + 518 + ], + "type": "text", + "content": "As described above, we allow the adversary to modify (in-place) a fixed, " + }, + { + "bbox": [ + 67, + 445, + 541, + 518 + ], + "type": "inline_equation", + "content": "\\varepsilon" + }, + { + "bbox": [ + 67, + 445, + 541, + 518 + ], + "type": "text", + "content": "-fraction of the training data (in our case, " + }, + { + "bbox": [ + 67, + 445, + 541, + 518 + ], + "type": "inline_equation", + "content": "2.5\\%" + }, + { + "bbox": [ + 67, + 445, + 541, + 518 + ], + "type": "text", + "content": ") subject to the constraint that the poisoned images still lay in the valid (normalized) image range of [0, 1]. We compare our approach—direct optimization of the data poisoning objective using metagratings—to the state-of-the-art \"Gradient Cancelling\" (GradCancel) method of Lu et al. [LKY23]. In short, GradCancel is a two-step method which first finds a poorly performing model, then finds poisoned data that induces this model as a minimizer of the training loss. We present the full method in Appendix E." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 532, + 541, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 532, + 541, + 605 + ], + "spans": [ + { + "bbox": [ + 67, + 532, + 541, + 605 + ], + "type": "text", + "content": "Results. We find that metagradients enable state-of-the-art data poisoning attacks, degrading accuracy by " + }, + { + "bbox": [ + 67, + 532, + 541, + 605 + ], + "type": "inline_equation", + "content": "14\\%" + }, + { + "bbox": [ + 67, + 532, + 541, + 605 + ], + "type": "text", + "content": ". 
In particular, when allowed to corrupt 1000 of the 40,000 training samples " + }, + { + "bbox": [ + 67, + 532, + 541, + 605 + ], + "type": "inline_equation", + "content": "(2.5\\%)" + }, + { + "bbox": [ + 67, + 532, + 541, + 605 + ], + "type": "text", + "content": ", our method reduces test set accuracy to " + }, + { + "bbox": [ + 67, + 532, + 541, + 605 + ], + "type": "inline_equation", + "content": "78\\%" + }, + { + "bbox": [ + 67, + 532, + 541, + 605 + ], + "type": "text", + "content": " — for reference, the accuracy of a single-layer neural networked trained on the unmodified CIFAR-10 training set is " + }, + { + "bbox": [ + 67, + 532, + 541, + 605 + ], + "type": "inline_equation", + "content": "83\\%" + }, + { + "bbox": [ + 67, + 532, + 541, + 605 + ], + "type": "text", + "content": ". The strongest existing data poisoning attack, GradCancel, only reduces test set accuracy by less than " + }, + { + "bbox": [ + 67, + 532, + 541, + 605 + ], + "type": "inline_equation", + "content": "1\\%" + }, + { + "bbox": [ + 67, + 532, + 541, + 605 + ], + "type": "text", + "content": ". In Figure 8, we visualize the poisoned images and labels found by our method. In Figure 9, we visualize the minibatch loss at each step of the optimization process." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 611, + 541, + 707 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 611, + 541, + 707 + ], + "spans": [ + { + "bbox": [ + 67, + 611, + 541, + 707 + ], + "type": "text", + "content": "Remark 4 (Poisoning non-smooth learning algorithms). Recall that to apply metagradient descent, we alter the learning algorithm " + }, + { + "bbox": [ + 67, + 611, + 541, + 707 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 67, + 611, + 541, + 707 + ], + "type": "text", + "content": " to be metasmooth (see Section 3.1). 
This involves making modifications such as switching out max pooling layers for average pooling layers, moving batch normalization layers before activations, and scaling down the last layer's output by a factor of 10. It is natural to ask: how much does the efficacy of our method depend on this smoothness? After all, in practice the adversary cannot control the learning algorithm. To answer this question, we take the poison samples generated by MGD and insert them into the training set of a corresponding standard (i.e., non-metasmooth) learning algorithm. We find that our method still significantly degrades the performance of the model, from " + }, + { + "bbox": [ + 67, + 611, + 541, + 707 + ], + "type": "inline_equation", + "content": "92.8\\%" + }, + { + "bbox": [ + 67, + 611, + 541, + 707 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 67, + 611, + 541, + 707 + ], + "type": "inline_equation", + "content": "82.6\\%" + }, + { + "bbox": [ + 67, + 611, + 541, + 707 + ], + "type": "text", + "content": " (a drop of " + }, + { + "bbox": [ + 67, + 611, + 541, + 707 + ], + "type": "inline_equation", + "content": "10.2\\%" + }, + { + "bbox": [ + 67, + 611, + 541, + 707 + ], + "type": "text", + "content": ")." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 79, + 711, + 540, + 723 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 711, + 540, + 723 + ], + "spans": [ + { + "bbox": [ + 79, + 711, + 540, + 723 + ], + "type": "text", + "content": "2Lu et al. [LKY23] report a larger drop; the discrepancy is due to our constraint that poisoned data are valid bounded RGB images." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 94, + 68, + 375, + 204 + ], + "blocks": [ + { + "bbox": [ + 94, + 68, + 375, + 204 + ], + "lines": [ + { + "bbox": [ + 94, + 68, + 375, + 204 + ], + "spans": [ + { + "bbox": [ + 94, + 68, + 375, + 204 + ], + "type": "image", + "image_path": "ff507e583af2cf8d875c5175cfaf529dfdc7148e3aef2836098edd006a592450.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 213, + 541, + 251 + ], + "lines": [ + { + "bbox": [ + 67, + 213, + 541, + 251 + ], + "spans": [ + { + "bbox": [ + 67, + 213, + 541, + 251 + ], + "type": "text", + "content": "Figure 10: Target and test accuracies of MGD's learning rate schedule over time closely match or exceed those found by a grid search over hundreds of combinations of hyperparameters. " + }, + { + "bbox": [ + 67, + 213, + 541, + 251 + ], + "type": "inline_equation", + "content": "95\\%" + }, + { + "bbox": [ + 67, + 213, + 541, + 251 + ], + "type": "text", + "content": " confidence intervals are plotted for MGD's results." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 395, + 70, + 517, + 135 + ], + "blocks": [ + { + "bbox": [ + 395, + 70, + 517, + 135 + ], + "lines": [ + { + "bbox": [ + 395, + 70, + 517, + 135 + ], + "spans": [ + { + "bbox": [ + 395, + 70, + 517, + 135 + ], + "type": "image", + "image_path": "983c55704fec1c3570e69e9f79f79845d462ac252b5f882ff32f4fe1718609cd.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 270, + 276, + 285 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 270, + 276, + 285 + ], + "spans": [ + { + "bbox": [ + 69, + 270, + 276, + 285 + ], + "type": "text", + "content": "4.4 Finding a learning rate schedule" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 290, + 541, + 339 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 290, + 541, + 339 + ], + "spans": [ + { + "bbox": [ + 67, + 290, + 541, + 339 + ], + "type": "text", + "content": "As a final application, we optimize the learning rate schedule of stochastic gradient descent (SGD) for training a CIFAR-10 classifier. By following the metagradients with respect to the learning rate at each step of training, our procedure matches grid searching over standard learning rate schedules—despite starting with naive hyperparameters (a flat learning rate)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 339, + 541, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 339, + 541, + 399 + ], + "spans": [ + { + "bbox": [ + 67, + 339, + 541, + 399 + ], + "type": "text", + "content": "Unlike the other applications discussed here, metagradients do not unlock state-of-the-art performance. 
Instead, we discuss this application to illustrate the flexibility of REPLAY, and in particular its ability to optimize metaparameters that do not directly affect the loss landscape (i.e., that only affect the model via the optimization trajectory). As we discuss in Section 6, approximate metagradient estimators cannot apply to these metaparameters." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 413, + 135, + 426 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 413, + 135, + 426 + ], + "spans": [ + { + "bbox": [ + 69, + 413, + 135, + 426 + ], + "type": "text", + "content": "4.4.1 Setting" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 432, + 541, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 432, + 541, + 458 + ], + "spans": [ + { + "bbox": [ + 67, + 432, + 541, + 458 + ], + "type": "text", + "content": "To put learning rate schedule optimization into the metagradient framework, we parameterize a schedule as a vector " + }, + { + "bbox": [ + 67, + 432, + 541, + 458 + ], + "type": "inline_equation", + "content": "\\eta \\in \\mathbb{R}^k" + }, + { + "bbox": [ + 67, + 432, + 541, + 458 + ], + "type": "text", + "content": " comprising " + }, + { + "bbox": [ + 67, + 432, + 541, + 458 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 67, + 432, + 541, + 458 + ], + "type": "text", + "content": " evenly-spaced keypoints, so that the learning rate at iteration " + }, + { + "bbox": [ + 67, + 432, + 541, + 458 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 67, + 432, + 541, + 458 + ], + "type": "text", + "content": " is given by" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 186, + 465, + 541, + 495 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 186, + 465, + 541, + 495 + ], + "spans": [ + { + "bbox": [ + 186, + 465, + 541, + 495 + ], + "type": "interline_equation", + "content": "\\eta (t) = \\eta_ 
{\\lfloor k t / T \\rfloor} + \\frac {k t / T - \\lfloor k t / T \\rfloor}{\\lceil k t / T \\rceil - \\lfloor k t / T \\rfloor} \\left(\\eta_ {\\lceil k t / T \\rceil} - \\eta_ {\\lfloor k t / T \\rfloor}\\right), \\tag {10}", + "image_path": "f2d9ea4d2966d2c562e632e151129164699c105fdb85a742fbc656549ac3eaca.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 500, + 288, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 500, + 288, + 514 + ], + "spans": [ + { + "bbox": [ + 67, + 500, + 288, + 514 + ], + "type": "text", + "content": "i.e., a linear interpolation between the keypoints." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 76, + 519, + 541, + 585 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 77, + 519, + 328, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 519, + 328, + 533 + ], + "spans": [ + { + "bbox": [ + 77, + 519, + 328, + 533 + ], + "type": "text", + "content": "(a) the metaparameter " + }, + { + "bbox": [ + 77, + 519, + 328, + 533 + ], + "type": "inline_equation", + "content": "\\eta \\in \\mathbb{R}^k" + }, + { + "bbox": [ + 77, + 519, + 328, + 533 + ], + "type": "text", + "content": " is a vector of " + }, + { + "bbox": [ + 77, + 519, + 328, + 533 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 77, + 519, + 328, + 533 + ], + "type": "text", + "content": " keypoints;" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 76, + 540, + 541, + 564 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 540, + 541, + 564 + ], + "spans": [ + { + "bbox": [ + 76, + 540, + 541, + 564 + ], + "type": "text", + "content": "(b) the algorithm " + }, + { + "bbox": [ + 76, + 540, + 541, + 564 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 76, + 540, + 541, + 564 + ], + "type": "text", + "content": " maps metaparameters " + }, + { + "bbox": [ + 76, + 540, + 541, + 564 
+ ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 76, + 540, + 541, + 564 + ], + "type": "text", + "content": " to a trained model " + }, + { + "bbox": [ + 76, + 540, + 541, + 564 + ], + "type": "inline_equation", + "content": "\\mathcal{A}(\\eta)" + }, + { + "bbox": [ + 76, + 540, + 541, + 564 + ], + "type": "text", + "content": " by training a model for " + }, + { + "bbox": [ + 76, + 540, + 541, + 564 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 76, + 540, + 541, + 564 + ], + "type": "text", + "content": " iterations with the learning rate schedule defined by (10);" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 77, + 571, + 405, + 585 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 571, + 405, + 585 + ], + "spans": [ + { + "bbox": [ + 77, + 571, + 405, + 585 + ], + "type": "text", + "content": "(c) the output function " + }, + { + "bbox": [ + 77, + 571, + 405, + 585 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 77, + 571, + 405, + 585 + ], + "type": "text", + "content": " evaluates average loss on the validation set " + }, + { + "bbox": [ + 77, + 571, + 405, + 585 + ], + "type": "inline_equation", + "content": "\\mathbf{X}_{val}" + }, + { + "bbox": [ + 77, + 571, + 405, + 585 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 12 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 69, + 599, + 149, + 612 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 599, + 149, + 612 + ], + "spans": [ + { + "bbox": [ + 69, + 599, + 149, + 612 + ], + "type": "text", + "content": "4.4.2 Algorithm" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 67, + 617, + 541, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 617, + 541, + 654 + ], + "spans": [ + { + "bbox": [ + 67, + 617, + 541, + 654 + ], + "type": "text", + "content": "Following the theme of the rest of this section, we optimize the metaparameter " + }, + { + "bbox": [ + 67, + 617, + 541, + 654 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 67, + 617, + 541, + 654 + ], + "type": "text", + "content": " directly using MGD. In particular, we initialize the keypoints to be a flat learning rate schedule, and then update the keypoints using the metagradient with respect to the validation loss," + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 214, + 663, + 394, + 684 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 214, + 663, + 394, + 684 + ], + "spans": [ + { + "bbox": [ + 214, + 663, + 394, + 684 + ], + "type": "interline_equation", + "content": "\\boldsymbol {\\eta} ^ {(t + 1)} = \\boldsymbol {\\eta} ^ {(t)} - \\alpha \\cdot \\operatorname {s i g n} \\left(\\nabla_ {\\boldsymbol {\\eta}} \\phi (\\mathcal {A} (\\boldsymbol {\\eta} ^ {(t)}))\\right).", + "image_path": "6148db51da6bfd8bdedea2bdec794c882a9e35cd0cacac66509df7f4a72d5cdb.jpg" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 17 + } + ], + "page_size": 
[ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 151, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 72, + 151, + 83 + ], + "spans": [ + { + "bbox": [ + 69, + 72, + 151, + 83 + ], + "type": "text", + "content": "4.4.3 Evaluation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 91, + 541, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 91, + 541, + 128 + ], + "spans": [ + { + "bbox": [ + 67, + 91, + 541, + 128 + ], + "type": "text", + "content": "We aim to select the learning rate schedule that minimizes the expected test set loss. To do so, we reserve " + }, + { + "bbox": [ + 67, + 91, + 541, + 128 + ], + "type": "inline_equation", + "content": "90\\%" + }, + { + "bbox": [ + 67, + 91, + 541, + 128 + ], + "type": "text", + "content": " of the CIFAR-10 test set as a \"validation set\" on which we select hyperparameters. We then use the remaining " + }, + { + "bbox": [ + 67, + 91, + 541, + 128 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 67, + 91, + 541, + 128 + ], + "type": "text", + "content": " as a test set. We compare the following two approaches:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 83, + 135, + 538, + 227 + ], + "type": "list", + "angle": 0, + "index": 4, + "blocks": [ + { + "bbox": [ + 83, + 135, + 538, + 183 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 135, + 538, + 183 + ], + "spans": [ + { + "bbox": [ + 83, + 135, + 538, + 183 + ], + "type": "text", + "content": "- Grid search: We construct a grid over different one cycle learning rate schedules, varying the peak learning rate, starting learning rate, ending learning rate, and peak learning rate time. In total, we consider over 1,000 different learning rate schedules. 
We use the reserved " + }, + { + "bbox": [ + 83, + 135, + 538, + 183 + ], + "type": "inline_equation", + "content": "90\\%" + }, + { + "bbox": [ + 83, + 135, + 538, + 183 + ], + "type": "text", + "content": " of the test set to select the best learning rate schedule from the grid." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 83, + 190, + 538, + 227 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 190, + 538, + 227 + ], + "spans": [ + { + "bbox": [ + 83, + 190, + 538, + 227 + ], + "type": "text", + "content": "- Metagradient descent (MGD): We run 50 steps of MGD starting from a highly suboptimal flat learning rate schedule, aiming to minimize loss on the reserved " + }, + { + "bbox": [ + 83, + 190, + 538, + 227 + ], + "type": "inline_equation", + "content": "90\\%" + }, + { + "bbox": [ + 83, + 190, + 538, + 227 + ], + "type": "text", + "content": " of the test set. We use the last iteration of MGD as our learned learning rate schedule." + } + ] + } + ], + "index": 3 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 67, + 234, + 541, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 234, + 541, + 258 + ], + "spans": [ + { + "bbox": [ + 67, + 234, + 541, + 258 + ], + "type": "text", + "content": "We evaluate the performance of each final learning rate schedule on the held-out " + }, + { + "bbox": [ + 67, + 234, + 541, + 258 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 67, + 234, + 541, + 258 + ], + "type": "text", + "content": " test set and average the results over the same set of 5 unseen random seeds." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 274, + 541, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 274, + 541, + 334 + ], + "spans": [ + { + "bbox": [ + 67, + 274, + 541, + 334 + ], + "type": "text", + "content": "Results. 
Comparing our learned hyperparameter schedule to grid search, as shown in Figure 10, our learned schedule using only 50 steps of MGD matches the performance of the state-of-the-art onecycle schedule found via grid search over more than 1000 configurations. An important caveat, however, is that these numbers are not directly comparable: grid search can be run in parallel across many machines, while steps of MGD must be run sequentially." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 334, + 541, + 394 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 334, + 541, + 394 + ], + "spans": [ + { + "bbox": [ + 67, + 334, + 541, + 394 + ], + "type": "text", + "content": "In practice, we do not advise using MGD for optimizing low-dimensional hyperparameters, especially ones that have been thoroughly optimized by grid search (such as CIFAR-10 learning rate schedules [SN17; Pag18; LA19; Jor24]). Still, an interesting avenue for future work is to study the utility of MGD for optimizing high-dimensional hyperparameters that are less well-studied, such as per-parameter/layer learning rates/weight decays for language models, attention hyperparameters, or gradient preconditioners." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 413, + 167, + 427 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 413, + 167, + 427 + ], + "spans": [ + { + "bbox": [ + 69, + 413, + 167, + 427 + ], + "type": "text", + "content": "5 Discussion" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 438, + 493, + 451 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 438, + 493, + 451 + ], + "spans": [ + { + "bbox": [ + 67, + 438, + 493, + 451 + ], + "type": "text", + "content": "In this section, we first present the main limitations of our method and outline future directions." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 465, + 541, + 551 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 465, + 541, + 551 + ], + "spans": [ + { + "bbox": [ + 67, + 465, + 541, + 551 + ], + "type": "text", + "content": "Limitations. Although REPLAY is more efficient than existing methods at computing metagradients, it is still non-trivially more expensive than simply training a model once. The main reason is that metagradients require making a backwards pass over a backwards pass. This operation necessarily requires 2-3 times the operations of a backwards pass; furthermore, our current implementation requires float32/tensorfloat32 operations. Finally, standard training operations are often made more efficient by specialized software (e.g., via FlashAttention [DFE+22]); no such software (yet) exists for backwards-over-backwards operations. Beyond computational issues, successfully applying metagradients requires smooth model training." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 564, + 541, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 564, + 541, + 673 + ], + "spans": [ + { + "bbox": [ + 67, + 564, + 541, + 673 + ], + "type": "text", + "content": "Metasmoothness: connections and future directions. While Section 3 describes a general procedure for finding metasmooth learning algorithms, an important future direction is to further explore and understand metasmoothness. This includes, for example: (a) characterizing the relationship between metasmoothness and numerical stability (and potentially using techniques from the latter to improve the former); (b) devising improved optimizers and/or architectures that lead directly to metasmooth learning algorithms (akin to skip connections or stable initialization in architecture design); (c) formalizing connections between metasmoothness and other optimization-related phenomena in deep learning [LM20; CKL+22]. 
A related but separate direction is to explore the possibility of using techniques from non-smooth optimization [Cla90] to perform metagradient descent on non-metasmooth learning algorithms." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 740, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 740, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 740, + 312, + 750 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 543, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 543, + 157 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 543, + 157 + ], + "type": "text", + "content": "**Applying metagradients.** Our methods apply to any ML task that requires optimizing with respect to a metaparameter. These include: poisoning data (generated or simply hosted on the internet) so that it cannot be trained on without permission (i.e., by maximizing training loss with respect to the text); selecting better training data at various stages of the model training lifecycle; and designing better model training routines and architectures with first-order methods. Another direction of future work lies in mitigating the computational limitations of our algorithm. Both (a) small-scale proxy-models [HBM+22; EFM24] and (b) low-hanging engineering improvements can likely make calculating metagradients much more efficient." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 175, + 183, + 190 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 175, + 183, + 190 + ], + "spans": [ + { + "bbox": [ + 69, + 175, + 183, + 190 + ], + "type": "text", + "content": "6 Related work" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 68, + 201, + 394, + 215 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 201, + 394, + 215 + ], + "spans": [ + { + "bbox": [ + 68, + 201, + 394, + 215 + ], + "type": "text", + "content": "We overview previous work on calculating and applying meta-gradients." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 228, + 243, + 243 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 228, + 243, + 243 + ], + "spans": [ + { + "bbox": [ + 68, + 228, + 243, + 243 + ], + "type": "text", + "content": "6.1 Calculating metagradients" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 248, + 542, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 248, + 542, + 285 + ], + "spans": [ + { + "bbox": [ + 67, + 248, + 542, + 285 + ], + "type": "text", + "content": "Previous work estimates the metagradient for large-scale models via one of two broad families of methods: implicit differentiation and automatic (explicit) differentiation. Note that in previous literature, synonyms for metagradient include \"hyper-gradient\" and \"outer gradient.\"" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 299, + 541, + 336 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 299, + 541, + 336 + ], + "spans": [ + { + "bbox": [ + 67, + 299, + 541, + 336 + ], + "type": "text", + "content": "Implicit differentiation. One family of methods aims to approximate the metagradient. 
To illustrate the idea behind such approaches, suppose that the learning algorithm " + }, + { + "bbox": [ + 67, + 299, + 541, + 336 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 67, + 299, + 541, + 336 + ], + "type": "text", + "content": " returns a model state " + }, + { + "bbox": [ + 67, + 299, + 541, + 336 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 67, + 299, + 541, + 336 + ], + "type": "text", + "content": " that minimizes a strongly convex loss function " + }, + { + "bbox": [ + 67, + 299, + 541, + 336 + ], + "type": "inline_equation", + "content": "\\mathcal{L}(z,\\theta)" + }, + { + "bbox": [ + 67, + 299, + 541, + 336 + ], + "type": "text", + "content": ". Here, the implicit function theorem tells us that" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 140, + 343, + 542, + 425 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 343, + 542, + 425 + ], + "spans": [ + { + "bbox": [ + 140, + 343, + 542, + 425 + ], + "type": "interline_equation", + "content": "\\nabla_ {z} f (z) = \\overbrace {\\left(\\frac {d \\phi}{d \\theta} \\right| _ {\\theta = \\mathcal {A} (z)} ^ {\\text {w r t . f i n a l p a r a m s}} \\underbrace {\\left. \\left(\\frac {\\partial^ {2} \\mathcal {L} (z , \\theta)}{\\partial \\theta^ {2}} \\right| _ {\\theta = \\mathcal {A} (z)}\\right) ^ {- 1}} _ {p \\times p \\text {i n v e r s e H e s s i a n o f l o s s w r t . f i n a l p a r a m s}} ^ {1 \\times p \\text {g r a d i e n t o f o u t p u t w r t . f i n a l p a r a m s}} \\overbrace {\\left. \\left(\\frac {\\partial^ {2} \\mathcal {L} (z , \\theta)}{\\partial \\theta \\partial z} \\right| _ {\\theta = \\mathcal {A} (z)}\\right) ^ {- 1}} ^ {p \\times n \\text {J a c o b i a n o f l o s s g r a d i e n t w r t . m e t a p a r a m e t e r s}}. 
\\tag {11}", + "image_path": "b096a068d0c0ca04838893d62a64c9f0a5b5e122b9e5dcd36bf25ef95f656ef2.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 432, + 543, + 542 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 432, + 543, + 542 + ], + "spans": [ + { + "bbox": [ + 67, + 432, + 543, + 542 + ], + "type": "text", + "content": "The form of (11) yields efficient and accurate estimators for metagradients of models learned by minimizing a strongly convex loss [BKB+20; BKM+22; KDJ20; BBC+22; SGB+22]. Such approaches can extend to estimate metagradients of large-scale, non-convex learning algorithms [Ben00; KL17; RFK+19; FAL17; LVD20; CH20; BNL+22], but lose any correctness guarantees. Indeed, applying this class of methods in large-scale settings is challenging as doing so requires (a) assuming conditions on the learning algorithm (e.g., Hessian invertibility, continuous differentiability) and (b) efficiently approximating the inverse Hessian (in practice, typically at the cost of estimate accuracy). Finally, implicit function-based approaches are fundamentally limited in that they can only differentiate with respect to metaparameters expressed in the loss function (e.g., these methods can differentiate with respect to the weight decay, but not learning rate)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 555, + 542, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 555, + 542, + 723 + ], + "spans": [ + { + "bbox": [ + 67, + 555, + 542, + 723 + ], + "type": "text", + "content": "Automatic (explicit) differentiation. Beyond implicit differentiation approaches, there is a long line of work on directly calculating metagradients with AD (see Section 2). Previous work has used AD to estimate metagradients of learning algorithms ranging from those with convex objectives to small neural networks [HNM19; MDA15; FDF+17; MS21; ZSP+21; CXR+22; SGB+22]. 
As detailed in Section 2, the primary challenge with (reverse-mode) AD-based approaches to meta-differentiation is storing the intermediate products required for the backward pass. To circumvent this challenge, previous work either (a) only considers settings that are small enough that is possible to differentiate while requiring space that is linear in the number of iterations (i.e., 2 layer networks on MNIST), (b) uses forward-mode AD [FDF+17; MS21; CXR+22] (which requires no extra storage at the cost of additional compute that scales linearly with metaparameter dimension), (c) only approximates the metagradient by calculating over only a few training steps [LSY18; CH20; FAL17], or uses (d) a reversible learning algorithm [MDA15]. The fourth category is a promising direction for reducing space requirements when computing large-scale metagradients, but current approaches require (a) representing model parameters in a fixed-precision format (which current large-scale learning algorithms do not support) in addition to restricting the algorithm to be reversible (e.g.," + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 542, + 124 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 542, + 124 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 542, + 124 + ], + "type": "text", + "content": "SGD and standard GD do not qualify). A common thread is that algorithms computing metagradient with AD often suffer from numerical instability and overflow issues [MS21; SGB+22]. 
In relation to previous work on AD, REPLAY (Section 2) can be seen as a strategy for choosing gradient checkpointing [CAC+81; BCT92; ZP00; GW08; CXZ+16] locations in the compute graph (an NP-complete task in general [Nau08])." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 136, + 234, + 152 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 136, + 234, + 152 + ], + "spans": [ + { + "bbox": [ + 67, + 136, + 234, + 152 + ], + "type": "text", + "content": "6.2 Applying metagradients" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 156, + 543, + 242 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 156, + 543, + 242 + ], + "spans": [ + { + "bbox": [ + 67, + 156, + 543, + 242 + ], + "type": "text", + "content": "Previous work applies metagradients to optimize training setup, including distillation [MDA15; LVD20], training data selection [HNM19; EFM24], meta-learning [FAL17; RFK+19; HAM+21], learning rate/weight decay selection [MS21; CXR+22], tuning data augmentation [LVD20], and architecture search [MDA15; LSY18; ZSP+21]. Beyond optimizing metagradients, methods in data attribution apply metagradients to (Taylor) estimate the effect of dropping training data on model predictions [KL17; GBA+23; PGI+23]. To the Previous works either (a) calculate metagradients directly with AD (made feasible by working in a very small-scale learning setting) or (b) estimate the metagradient with an implicit function-based approach." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 259, + 170, + 274 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 259, + 170, + 274 + ], + "spans": [ + { + "bbox": [ + 67, + 259, + 170, + 274 + ], + "type": "text", + "content": "7 Conclusion" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 285, + 544, + 382 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 285, + 544, + 382 + ], + "spans": [ + { + "bbox": [ + 67, + 285, + 544, + 382 + ], + "type": "text", + "content": "In this work we add metagradients to the large-scale machine learning toolkit. To do so, we overcome two challenges: (a) calculating metagradients at scale and (b) modifying learning algorithms to be metasmooth—i.e., to admit metagradients that locally predict model behavior. We then successfully calculate and apply metagradients for large-scale models (up to 2B parameters) to select data for CLIP pretraining and instruction fine-tuning, to (Huber) poison training data to decrease overall model accuracy, and search for high-dimensional hyperparameters (per-iteration learning rates). Given the successful applications of metagradients in these settings, we are excited to see what unlocking metagradients enables in other areas of machine learning." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 400, + 226, + 417 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 400, + 226, + 417 + ], + "spans": [ + { + "bbox": [ + 67, + 400, + 226, + 417 + ], + "type": "text", + "content": "8 Acknowledgements" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 426, + 542, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 426, + 542, + 486 + ], + "spans": [ + { + "bbox": [ + 67, + 426, + 542, + 486 + ], + "type": "text", + "content": "Work supported in part by the NSF grant DMS-2134108 and Open Philanthropy, and in part by NSF Grant No. 2346519. 
This work is also supported in part by the Alan Turing Institute, and the U.S. Department of Energy. The authors would like to thank Alex Damian, Harshay Shah, Jesse Michel, Joel Flynn, Manolis Zampetakis, Noah Moroze, Piotr Indyk, Sam Hopkins, Sung Min (Sam) Park, and Sarah Cen for helpful references as well as discussions and feedback on early versions of this work." + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 740, + 312, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 740, + 312, + 751 + ], + "spans": [ + { + "bbox": [ + 299, + 740, + 312, + 751 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 70, + 69, + 145, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 69, + 145, + 84 + ], + "spans": [ + { + "bbox": [ + 70, + 69, + 145, + 84 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 95, + 561, + 704 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 70, + 95, + 583, + 132 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 95, + 583, + 132 + ], + "spans": [ + { + "bbox": [ + 70, + 95, + 583, + 132 + ], + "type": "text", + "content": "[ATS+23] Amro Abbas, Kushal Tirumala, Dániel Simig, Surya Ganguli, and Ari S Morcos. \"SemDeDup: Data-efficient learning at web-scale through semantic dedduplication\". In: arXiv preprint arXiv:2303.09540 (2023)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 135, + 541, + 159 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 135, + 541, + 159 + ], + "spans": [ + { + "bbox": [ + 70, + 135, + 541, + 159 + ], + "type": "text", + "content": "[BAC+21] Sara Beery, Arushi Agarwal, Elijah Cole, and Vighnesh Birodkar. 
\"The iWildCam 2021 competition dataset\". In: arXiv preprint arXiv:2105.03494. 2021." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 162, + 541, + 201 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 162, + 541, + 201 + ], + "spans": [ + { + "bbox": [ + 70, + 162, + 541, + 201 + ], + "type": "text", + "content": "[BBC+22] Mathieu Blondel, Quentin Berthet, Marco Cuturi, Roy Frostig, Stephan Hoyer, Felipe Llinares-López, Fabian Pedregosa, and Jean-Philippe Vert. \"Efficient and modular implicit differentiation\". In: Advances in neural information processing systems 35 (2022), pp. 5230-5242." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 203, + 548, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 203, + 548, + 239 + ], + "spans": [ + { + "bbox": [ + 70, + 203, + 548, + 239 + ], + "type": "text", + "content": "[BBY+22] Yonatan Bitton, Nitzan Bitton Guetta, Ron Yosef, Yuval Elovici, Mohit Bansal, Gabriel Stanovsky, and Roy Schwartz. \"WinoGAViL: Gamified association benchmark to challenge vision-and-language models\". In: Advances in Neural Information Processing Systems. 2022." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 243, + 541, + 280 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 243, + 541, + 280 + ], + "spans": [ + { + "bbox": [ + 70, + 243, + 541, + 280 + ], + "type": "text", + "content": "[BCT92] Preston Briggs, Keith D Cooper, and Linda Torczon. \"Rematerialization\". In: Proceedings of the ACM SIGPLAN 1992 conference on Programming language design and implementation. 1992, pp. 311-321." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 282, + 541, + 307 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 282, + 541, + 307 + ], + "spans": [ + { + "bbox": [ + 70, + 282, + 541, + 307 + ], + "type": "text", + "content": "[Ben00] Yoshua Bengio. 
\"Gradient-based optimization of hyperparameters\". In: Neural computation 12.8 (2000), pp. 1889-1900." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 310, + 544, + 360 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 310, + 544, + 360 + ], + "spans": [ + { + "bbox": [ + 70, + 310, + 544, + 360 + ], + "type": "text", + "content": "[BGM+18] Peter Bandi, Oscar Geessink, Quirine Manson, Marcory Van Dijk, Maschenka Balkenhol, Meyke Hermsen, Babak Ehteshami Bejnordi, Byungjae Lee, Kyunghyun Paeng, Aoxiao Zhong, et al. \"From detection of individual metastases to classification of lymph node status at the patient level: the CAMELYON17 challenge\". In: IEEE Transactions on Medical Imaging (2018)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 362, + 541, + 387 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 362, + 541, + 387 + ], + "spans": [ + { + "bbox": [ + 70, + 362, + 541, + 387 + ], + "type": "text", + "content": "[BGV14] Lukas Bossard, Matthieu Guillaumin, and Luc Van Gool. \"Food-101-mining discriminative components with random forests\". In: European conference on computer vision. 2014." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 390, + 541, + 427 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 390, + 541, + 427 + ], + "spans": [ + { + "bbox": [ + 70, + 390, + 541, + 427 + ], + "type": "text", + "content": "[BKB+20] Quentin Bertrand, Quentin Klopfenstein, Mathieu Blondel, Samuel Vaiter, Alexandre Gramfort, and Joseph Salmon. \"Implicit differentiation of lasso-type models for hyperparameter optimization\". In: International Conference on Machine Learning. PMLR. 2020, pp. 810-821." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 430, + 541, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 430, + 541, + 479 + ], + "spans": [ + { + "bbox": [ + 70, + 430, + 541, + 479 + ], + "type": "text", + "content": "[BKM+22] Quentin Bertrand, Quentin Klopfenstein, Mathurin Massias, Mathieu Blondel, Samuel Vaiter, Alexandre Gramfort, and Joseph Salmon. \"Implicit differentiation for fast hyperparameter selection in non-smooth convex learning\". In: Journal of Machine Learning Research 23.149 (2022), pp. 1-43." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 70, + 482, + 541, + 529 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 482, + 541, + 529 + ], + "spans": [ + { + "bbox": [ + 70, + 482, + 541, + 529 + ], + "type": "text", + "content": "[BMA+19] Andrei Barbu, David Mayo, Julian Alverio, William Luo, Christopher Wang, Dan Gutfreund, Josh Tenenbaum, and Boris Katz. \"ObjectNet: A large-scale bias-controlled dataset for pushing the limits of object recognition models\". In: Neural Information Processing Systems (NeurIPS). 2019." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 70, + 533, + 541, + 558 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 533, + 541, + 558 + ], + "spans": [ + { + "bbox": [ + 70, + 533, + 541, + 558 + ], + "type": "text", + "content": "[BNL+22] Juhan Bae, Nathan Ng, Alston Lo, Marzyeh Ghassemi, and Roger Grosse. \"If Influence Functions are the Answer, Then What is the Question?\" In: ArXiv preprint arXiv:2209.05364. 2022." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 70, + 561, + 541, + 586 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 561, + 541, + 586 + ], + "spans": [ + { + "bbox": [ + 70, + 561, + 541, + 586 + ], + "type": "text", + "content": "[BSF94] Yoshua Bengio, Patrice Simard, and Paolo Frasconi. \"Learning long-term dependencies with gradient descent is difficult\". 
In: IEEE Transactions on Neural Networks. 1994." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 70, + 589, + 541, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 589, + 541, + 624 + ], + "spans": [ + { + "bbox": [ + 70, + 589, + 541, + 624 + ], + "type": "text", + "content": "[CAC+81] Gregory J Chaitin, Marc A Auslander, Ashok K Chandra, John Cocke, Martin E Hopkins, and Peter W Markstein. \"Register allocation via coloring\". In: Computer languages 6.1 (1981), pp. 47-57." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 70, + 628, + 541, + 666 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 628, + 541, + 666 + ], + "spans": [ + { + "bbox": [ + 70, + 628, + 541, + 666 + ], + "type": "text", + "content": "[CFW+18] Gordon Christie, Neil Fendley, James Wilson, and Ryan Mukherjee. \"Functional Map of the World\". In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR). June 2018." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 70, + 669, + 561, + 704 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 669, + 561, + 704 + ], + "spans": [ + { + "bbox": [ + 70, + 669, + 561, + 704 + ], + "type": "text", + "content": "[CH20] Xiangning Chen and Cho-Jui Hsieh. \"Stabilizing differentiable architecture search via perturbation-based regularization\". In: International conference on machine learning. PMLR. 2020, pp. 1554-1565." 
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 71, + 544, + 715 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 69, + 71, + 541, + 97 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 71, + 541, + 97 + ], + "spans": [ + { + "bbox": [ + 69, + 71, + 541, + 97 + ], + "type": "text", + "content": "[CHL17] Gong Cheng, Junwei Han, and Xiaoqiang Lu. \"Remote sensing image scene classification: Benchmark and state of the art\". In: Proceedings of the IEEE. 2017." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 99, + 544, + 148 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 99, + 544, + 148 + ], + "spans": [ + { + "bbox": [ + 69, + 99, + 544, + 148 + ], + "type": "text", + "content": "[CHM+23] Mike Conover, Matt Hayes, Ankit Mathur, Jianwei Xie, Jun Wan, Sam Shah, Ali Ghodsi, Patrick Wendell, Matei Zaharia, and Reynold Xin. Free Dolly: Introducing the World's First Truly Open Instruction-Tuned LLM. 2023. URL: https://www.databricks.com/blog/2023/04/12/dolly-first-open-commercially-viable-instruction-tuned-llm (visited on 06/30/2023)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 152, + 543, + 188 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 152, + 543, + 188 + ], + "spans": [ + { + "bbox": [ + 70, + 152, + 543, + 188 + ], + "type": "text", + "content": "[CKL+22] Jeremy M. Cohen, Simran Kaur, Yuanzhi Li, J. Zico Kolter, and Ameet Talwalkar. 
Gradient Descent on Neural Networks Typically Occurs at the Edge of Stability. 2022. arXiv: 2103.00065 [cs.LG]. URL: https://arxiv.org/abs/2103.00065." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 191, + 417, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 191, + 417, + 205 + ], + "spans": [ + { + "bbox": [ + 70, + 191, + 417, + 205 + ], + "type": "text", + "content": "[Cla90] Frank H Clarke. Optimization and nonsmooth analysis. SIAM, 1990." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 208, + 541, + 244 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 208, + 541, + 244 + ], + "spans": [ + { + "bbox": [ + 70, + 208, + 541, + 244 + ], + "type": "text", + "content": "[CMK+14] Mircea Cimpoi, Subhransu Maji, Iasonas Kokkinos, Sammy Mohamed, and Andrea Vedaldi. \"Describing textures in the wild\". In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. 2014." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 247, + 541, + 283 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 247, + 541, + 283 + ], + "spans": [ + { + "bbox": [ + 70, + 247, + 541, + 283 + ], + "type": "text", + "content": "[CNL11] Adam Coates, Andrew Ng, and Honglak Lee. \"An analysis of single-layer networks in unsupervised feature learning\". In: Proceedings of the fourteenth international conference on artificial intelligence and statistics. 2011." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 287, + 541, + 322 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 287, + 541, + 322 + ], + "spans": [ + { + "bbox": [ + 70, + 287, + 541, + 322 + ], + "type": "text", + "content": "[CXR+22] Kartik Chandra, Audrey Xie, Jonathan Ragan-Kelley, and Erik Meijer. \"Gradient descent: The ultimate optimizer\". In: Advances in Neural Information Processing Systems 35 (2022), pp. 8214-8225." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 327, + 541, + 364 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 327, + 541, + 364 + ], + "spans": [ + { + "bbox": [ + 70, + 327, + 541, + 364 + ], + "type": "text", + "content": "[ CXZ+16] Tianqi Chen, Bing Xu, Chiyuan Zhang, and Carlos Guestrin. \"Training Deep Nets with Sublinear Memory Cost\". In: CoRR abs/1604.06174 (2016). arXiv: 1604.06174. URL: http://arxiv.org/abs/1604.06174." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 367, + 541, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 367, + 541, + 392 + ], + "spans": [ + { + "bbox": [ + 70, + 367, + 541, + 392 + ], + "type": "text", + "content": "[DDS+09] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. \"Imagenet: A large-scale hierarchical image database\". In: Computer Vision and Pattern Recognition (CVPR). 2009." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 395, + 541, + 432 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 395, + 541, + 432 + ], + "spans": [ + { + "bbox": [ + 70, + 395, + 541, + 432 + ], + "type": "text", + "content": "[DFE+22] Tri Dao, Daniel Y. Fu, Stefano Ermon, Atri Rudra, and Christopher Ré. FlashAttention: Fast and Memory-Efficient Exact Attention with IO-Awareness. 2022. arXiv: 2205.14135 [cs.LG]. URL: https://arxiv.org/abs/2205.14135." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 435, + 541, + 471 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 435, + 541, + 471 + ], + "spans": [ + { + "bbox": [ + 70, + 435, + 541, + 471 + ], + "type": "text", + "content": "[DJP+24] Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. \"The llama 3 herd of models\". In: arXiv preprint arXiv:2407.21783 (2024)." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 70, + 475, + 541, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 475, + 541, + 498 + ], + "spans": [ + { + "bbox": [ + 70, + 475, + 541, + 498 + ], + "type": "text", + "content": "[ Eco24] Team EcoDatum. EcoDatum DataComp-small submission. https://www.datacomp.ai/dcclip/leaderboard.html. 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 70, + 502, + 541, + 526 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 502, + 541, + 526 + ], + "spans": [ + { + "bbox": [ + 70, + 502, + 541, + 526 + ], + "type": "text", + "content": "[EFM24] Logan Engstrom, Axel Feldmann, and Aleksander Madry. \"DsDm: Model-Aware Dataset Selection with Datamodels\". In: 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 70, + 530, + 541, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 530, + 541, + 555 + ], + "spans": [ + { + "bbox": [ + 70, + 530, + 541, + 555 + ], + "type": "text", + "content": "[EVW+10] M. Everingham, L. Van Gool, C. K. I. Williams, J. Winn, and A. Zisserman. \"The Pascal Visual Object Classes (VOC) Challenge\". In: International Journal of Computer Vision. 2010." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 70, + 559, + 541, + 594 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 559, + 541, + 594 + ], + "spans": [ + { + "bbox": [ + 70, + 559, + 541, + 594 + ], + "type": "text", + "content": "[FAL17] Chelsea Finn, Pieter Abbeel, and Sergey Levine. \"Model-agnostic meta-learning for fast adaptation of deep networks\". In: International conference on machine learning. PMLR. 2017, pp. 1126-1135." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 70, + 597, + 541, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 597, + 541, + 635 + ], + "spans": [ + { + "bbox": [ + 70, + 597, + 541, + 635 + ], + "type": "text", + "content": "[FDF+17] Luca Franceschi, Michele Donini, Paolo Frasconi, and Massimiliano Pontil. \"Forward and reverse gradient-based hyperparameter optimization\". In: International Conference on Machine Learning (ICML). 2017." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 70, + 639, + 541, + 675 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 639, + 541, + 675 + ], + "spans": [ + { + "bbox": [ + 70, + 639, + 541, + 675 + ], + "type": "text", + "content": "[FFP04] Li Fei-Fei, Rob Fergus, and Pietro Perona. \"Learning generative visual models from few training examples: An incremental bayesian approach tested on 101 object categories\". In: 2004 conference on computer vision and pattern recognition workshop. IEEE. 2004, pp. 178-178." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 70, + 678, + 541, + 715 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 678, + 541, + 715 + ], + "spans": [ + { + "bbox": [ + 70, + 678, + 541, + 715 + ], + "type": "text", + "content": "[FIW+22] Alex Fang, Gabriel Ilharco, Mitchell Wortsman, Yuhao Wan, Vaishaal Shankar, Achal Dave, and Ludwig Schmidt. \"Data Determines Distributional Robustness in Contrastive Language Image Pre-training (CLIP)\". In: ICML. 2022." 
+ } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 71, + 553, + 723 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 69, + 71, + 541, + 109 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 71, + 541, + 109 + ], + "spans": [ + { + "bbox": [ + 69, + 71, + 541, + 109 + ], + "type": "text", + "content": "[GBA+23] Roger Grosse, Juhan Bae, Cem Anil, Nelson Elhage, Alex Tamkin, Amirhossein Tajdini, Benoit Steiner, Dustin Li, Esin Durmus, Ethan Perez, et al. \"Studying large language model generalization with influence functions\". In: arXiv preprint arXiv:2308.03296 (2023)." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 111, + 541, + 137 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 111, + 541, + 137 + ], + "spans": [ + { + "bbox": [ + 69, + 111, + 541, + 137 + ], + "type": "text", + "content": "[GDG17] Tianyu Gu, Brendan Dolan-Gavitt, and Siddharth Garg. \"Badnets: Identifying Vulnerabilities in the Machine Learning Model Supply Chain\". In: arXiv preprint arXiv:1708.06733 (2017)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 138, + 541, + 188 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 138, + 541, + 188 + ], + "spans": [ + { + "bbox": [ + 69, + 138, + 541, + 188 + ], + "type": "text", + "content": "[GIF+24] Samir Yitzhak Gadre, Gabriel Ilharco, Alex Fang, Jonathan Hayase, Georgios Smyrnis, Thao Nguyen, Ryan Marten, Mitchell Wortsman, Dhruba Ghosh, Jieyu Zhang, et al. 
\"DataComp: In search of the next generation of multimodal datasets\". In: Advances in Neural Information Processing Systems. 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 190, + 541, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 190, + 541, + 228 + ], + "spans": [ + { + "bbox": [ + 69, + 190, + 541, + 228 + ], + "type": "text", + "content": "[GLU12] Andreas Geiger, Philip Lenz, and Raquel Urtasun. \"Are we ready for autonomous driving? The KITTI vision benchmark suite\". In: 2012 IEEE conference on computer vision and pattern recognition. 2012." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 230, + 541, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 230, + 541, + 255 + ], + "spans": [ + { + "bbox": [ + 70, + 230, + 541, + 255 + ], + "type": "text", + "content": "[GW08] Andreas Griewank and Andrea Walther. Evaluating derivatives: principles and techniques of algorithmic differentiation. SIAM, 2008." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 258, + 541, + 295 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 258, + 541, + 295 + ], + "spans": [ + { + "bbox": [ + 70, + 258, + 541, + 295 + ], + "type": "text", + "content": "[HAM+21] Timothy Hospedales, Antreas Antoniou, Paul Micaelli, and Amos Storkey. \"Meta-learning in neural networks: A survey\". In: IEEE transactions on pattern analysis and machine intelligence 44.9 (2021), pp. 5149-5169." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 297, + 541, + 334 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 297, + 541, + 334 + ], + "spans": [ + { + "bbox": [ + 70, + 297, + 541, + 334 + ], + "type": "text", + "content": "[HBB+20] Dan Hendrycks, Collin Burns, Steven Basart, Andy Zou, Mantas Mazeika, Dawn Song, and Jacob Steinhardt. \"Measuring massive multitask language understanding\". In: arXiv preprint arXiv:2009.03300 (2020)." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 337, + 541, + 374 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 337, + 541, + 374 + ], + "spans": [ + { + "bbox": [ + 70, + 337, + 541, + 374 + ], + "type": "text", + "content": "[HBD+19] Patrick Helber, Benjamin Bischke, Andreas Dengel, and Damian Borth. \"EuroSAT: A novel dataset and deep learning benchmark for land use and land cover classification\". In: IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing. 2019." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 376, + 553, + 414 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 376, + 553, + 414 + ], + "spans": [ + { + "bbox": [ + 70, + 376, + 553, + 414 + ], + "type": "text", + "content": "[HBK+21] Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. \"Measuring Mathematical Problem Solving With the MATH Dataset\". In: NeurIPS (2021)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 416, + 541, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 416, + 541, + 464 + ], + "spans": [ + { + "bbox": [ + 70, + 416, + 541, + 464 + ], + "type": "text", + "content": "[HBM+20] Dan Hendrycks, Steven Basart, Norman Mu, Saurav Kadavath, Frank Wang, Evan Dorundo, Rahul Desai, Tyler Zhu, Samyak Parajuli, Mike Guo, Dawn Song, Jacob Steinhardt, and Justin Gilmer. The Many Faces of Robustness: A Critical Analysis of Out-of-Distribution Generalization. 2020. arXiv: 2006.16241 [cs.CV]." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 468, + 541, + 505 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 468, + 541, + 505 + ], + "spans": [ + { + "bbox": [ + 70, + 468, + 541, + 505 + ], + "type": "text", + "content": "[HBM+22] Jordan Hoffmann, Sebastian Borgeaud, Arthur Mensch, Elena Buchatskaya, Trevor Cai, Eliza Rutherford, Diego de Las Casas, Lisa Anne Hendricks, Johannes Welbl, Aidan Clark, et al. \"Training compute-optimal large language models\". In: arXiv preprint arXiv:2203.15556. 2022." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 507, + 541, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 507, + 541, + 533 + ], + "spans": [ + { + "bbox": [ + 69, + 507, + 541, + 533 + ], + "type": "text", + "content": "[HNM19] Satoshi Hara, Atsushi Nitanda, and Takanori Maehara. \"Data cleansing for models trained with SGD\". In: Advances in Neural Information Processing Systems 32 (2019)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 70, + 535, + 541, + 559 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 535, + 541, + 559 + ], + "spans": [ + { + "bbox": [ + 70, + 535, + 541, + 559 + ], + "type": "text", + "content": "[Hub64] Peter J. Huber. \"Robust estimation of a location parameter\". In: The Annals of Mathematical Statistics. 1964." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 70, + 563, + 541, + 589 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 563, + 541, + 589 + ], + "spans": [ + { + "bbox": [ + 70, + 563, + 541, + 589 + ], + "type": "text", + "content": "[HY20] Jiaoyang Huang and Horng-Tzer Yau. \"Dynamics of Deep Neural Networks and Neural Tangent Hierarchy\". In: Proceedings of the 37th International Conference on Machine Learning. 2020." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 69, + 590, + 541, + 616 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 590, + 541, + 616 + ], + "spans": [ + { + "bbox": [ + 69, + 590, + 541, + 616 + ], + "type": "text", + "content": "[HZB+19] Dan Hendrycks, Kevin Zhao, Steven Basart, Jacob Steinhardt, and Dawn Song. \"Natural adversarial examples\". In: arXiv preprint arXiv:1907.07174 (2019)." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 69, + 618, + 541, + 666 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 618, + 541, + 666 + ], + "spans": [ + { + "bbox": [ + 69, + 618, + 541, + 666 + ], + "type": "text", + "content": "[JHV+17] Justin Johnson, Bharath Hariharan, Laurens Van Der Maaten, Li Fei-Fei, C Lawrence Zitnick, and Ross Girshick. \"CLEVR: A diagnostic dataset for compositional language and elementary visual reasoning\". In: Proceedings of the IEEE conference on computer vision and pattern recognition. 2017." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 69, + 670, + 541, + 684 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 670, + 541, + 684 + ], + "spans": [ + { + "bbox": [ + 69, + 670, + 541, + 684 + ], + "type": "text", + "content": "[Kor24] Keller Jordan. \"94 percent on CIFAR-10 in 3.29 Seconds on a Single GPU\". In: (2024)." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 69, + 686, + 550, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 686, + 550, + 723 + ], + "spans": [ + { + "bbox": [ + 69, + 686, + 550, + 723 + ], + "type": "text", + "content": "[JS08] Yaochu Jin and Bernhard Sendhoff. \"Pareto-based multiobjective machine learning: An overview and case studies\". In: IEEE Transactions on Systems, Man, and Cybernetics, Part C (Applications and Reviews) 38.3 (2008), pp. 397-415." 
+ } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 71, + 541, + 717 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 69, + 71, + 541, + 97 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 71, + 541, + 97 + ], + "spans": [ + { + "bbox": [ + 69, + 71, + 541, + 97 + ], + "type": "text", + "content": "[KB15] Diederik P. Kingma and Jimmy Ba. \"Adam: A Method for Stochastic Optimization\". In: International Conference on Learning Representations (ICLR). 2015." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 99, + 541, + 125 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 99, + 541, + 125 + ], + "spans": [ + { + "bbox": [ + 69, + 99, + 541, + 125 + ], + "type": "text", + "content": "[KDJ20] MJ Zico Kolter, David Duvenaud, and Matt Johnson. \"Deep implicit layers-neural odes, deep equilibrium models, and beyond, 2020\". In: NeurIPS Tutorial (2020)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 127, + 541, + 177 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 127, + 541, + 177 + ], + "spans": [ + { + "bbox": [ + 69, + 127, + 541, + 177 + ], + "type": "text", + "content": "[KKR+24] Andreas Köpf, Yannic Kilcher, Dimitri von Rütte, Sotiris Anagnostidis, Zhi Rui Tam, Keith Stevens, Abdullah Barhoum, Duc Nguyen, Oliver Stanley, Richard Nagyfi, et al. \"Open-assistant conversations-democratizing large language model alignment\". In: Advances in Neural Information Processing Systems 36 (2024)." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 178, + 541, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 178, + 541, + 205 + ], + "spans": [ + { + "bbox": [ + 70, + 178, + 541, + 205 + ], + "type": "text", + "content": "[Pang Wei Koh and Percy Liang. \"Understanding Black-box Predictions via Influence Functions\". In: International Conference on Machine Learning. 2017.]" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 206, + 541, + 232 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 206, + 541, + 232 + ], + "spans": [ + { + "bbox": [ + 70, + 206, + 541, + 232 + ], + "type": "text", + "content": "[Kri09] Alex Krizhevsky. \"Learning Multiple Layers of Features from Tiny Images\". In: Technical report. 2009." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 235, + 541, + 271 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 235, + 541, + 271 + ], + "spans": [ + { + "bbox": [ + 70, + 235, + 541, + 271 + ], + "type": "text", + "content": "[KSD+13] Jonathan Krause, Michael Stark, Jia Deng, and Li Fei-Fei. \"3d object representations for fine-grained categorization\". In: Proceedings of the IEEE international conference on computer vision workshops. 2013." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 274, + 541, + 323 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 274, + 541, + 323 + ], + "spans": [ + { + "bbox": [ + 70, + 274, + 541, + 323 + ], + "type": "text", + "content": "[KSM+20] Pang Wei Koh, Shiori Sagawa, Henrik Marklund, Sang Michael Xie, Marvin Zhang, Akshay Balsubramani, Weihua Hu, Michihiro Yasunaga, Richard Lanas Phillips, Sara Beery, et al. \"WILDS: A Benchmark of in-the-Wild Distribution Shifts\". In: arXiv preprint arXiv:2012.07421 (2020)." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 326, + 534, + 340 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 326, + 534, + 340 + ], + "spans": [ + { + "bbox": [ + 70, + 326, + 534, + 340 + ], + "type": "text", + "content": "[LA19] Zhiyuan Li and Sanjeev Arora. An Exponential Learning Rate Schedule for Deep Learning. 2019." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 342, + 500, + 356 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 342, + 500, + 356 + ], + "spans": [ + { + "bbox": [ + 70, + 342, + 500, + 356 + ], + "type": "text", + "content": "[LeC98] Yann LeCun. \"The MNIST database of handwritten digits\". In: Technical report. 1998." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 358, + 541, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 358, + 541, + 396 + ], + "spans": [ + { + "bbox": [ + 70, + 358, + 541, + 396 + ], + "type": "text", + "content": "[LFX+24] Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, et al. \"Deepseek-v3 technical report\". In: arXiv preprint arXiv:2412.19437. 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 398, + 541, + 447 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 398, + 541, + 447 + ], + "spans": [ + { + "bbox": [ + 70, + 398, + 541, + 447 + ], + "type": "text", + "content": "[LHV+23] Shayne Longpre, Le Hou, Tu Vu, Albert Webson, Hyung Won Chung, Yi Tay, Denny Zhou, Quoc V Le, Barret Zoph, Jason Wei, et al. \"The flan collection: Designing data and methods for effective instruction tuning\". In: International Conference on Machine Learning. PMLR. 2023, pp. 22631-22648." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 70, + 449, + 541, + 475 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 449, + 541, + 475 + ], + "spans": [ + { + "bbox": [ + 70, + 449, + 541, + 475 + ], + "type": "text", + "content": "[LIE+22] Guillaume Leclerc, Andrew Ilyas, Logan Engstrom, Sung Min Park, Hadi Salman, and Aleksander Madry. ffcv. https://github.com/libffcv/ffcv/. 2022." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 70, + 478, + 541, + 503 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 478, + 541, + 503 + ], + "spans": [ + { + "bbox": [ + 70, + 478, + 541, + 503 + ], + "type": "text", + "content": "[LKY22] Yiwei Lu, Gautam Kamath, and Yaoliang Yu. \"Indiscriminate Data Poisoning Attacks on Neural Networks\". In: arXiv preprint arXiv:2204.09092 (2022)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 69, + 506, + 541, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 506, + 541, + 544 + ], + "spans": [ + { + "bbox": [ + 69, + 506, + 541, + 544 + ], + "type": "text", + "content": "[LY23] Yiwei Lu, Gautam Kamath, and Yaoliang Yu. \"Exploring the limits of model-targeted indiscriminate data poisoning attacks\". In: International Conference on Machine Learning. PMLR. 2023, pp. 22856-22879." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 69, + 545, + 541, + 571 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 545, + 541, + 571 + ], + "spans": [ + { + "bbox": [ + 69, + 545, + 541, + 571 + ], + "type": "text", + "content": "[LM20] Guillaume Leclerc and Aleksander Madry. \"The two regimes of deep network training\". In: arXiv preprint arXiv:2002.10376. 2020." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 69, + 573, + 541, + 610 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 573, + 541, + 610 + ], + "spans": [ + { + "bbox": [ + 69, + 573, + 541, + 610 + ], + "type": "text", + "content": "[LMB+14] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dollár, and C Lawrence Zitnick. \"Microsoft coco: Common objects in context\". In: European conference on computer vision (ECCV). 2014." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 70, + 613, + 541, + 639 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 613, + 541, + 639 + ], + "spans": [ + { + "bbox": [ + 70, + 613, + 541, + 639 + ], + "type": "text", + "content": "[LSY18] Hanxiao Liu, Karen Simonyan, and Yiming Yang. \"Darts: Differentiable architecture search\". In: arXiv preprint arXiv:1806.09055 (2018)." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 69, + 641, + 541, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 641, + 541, + 679 + ], + "spans": [ + { + "bbox": [ + 69, + 641, + 541, + 679 + ], + "type": "text", + "content": "[LVD20] Jonathan Lorraine, Paul Vicol, and David Duvenaud. \"Optimizing millions of hyperparameters by implicit differentiation\". In: International conference on artificial intelligence and statistics. PMLR. 2020, pp. 1540-1552." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 69, + 681, + 541, + 717 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 681, + 541, + 717 + ], + "spans": [ + { + "bbox": [ + 69, + 681, + 541, + 717 + ], + "type": "text", + "content": "[MDA15] Dougal Maclaurin, David Duvenaud, and Ryan Adams. \"Gradient-based hyperparameter optimization through reversible learning\". In: International conference on machine learning (ICML). 2015." 
+ } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 71, + 559, + 723 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 69, + 71, + 541, + 97 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 71, + 541, + 97 + ], + "spans": [ + { + "bbox": [ + 69, + 71, + 541, + 97 + ], + "type": "text", + "content": "[MRK+13] Subhransu Maji, Esa Rahtu, Juho Kannala, Matthew Blaschko, and Andrea Vedaldi. \"Fine-grained visual classification of aircraft\". In: arXiv preprint arXiv:1306.5151 (2013)." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 98, + 541, + 125 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 98, + 541, + 125 + ], + "spans": [ + { + "bbox": [ + 69, + 98, + 541, + 125 + ], + "type": "text", + "content": "[MS21] Paul Micaelli and Amos J Storkey. \"Gradient-based hyperparameter optimization over long horizons\". In: Advances in Neural Information Processing Systems 34 (2021), pp. 10798-10809." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 125, + 541, + 152 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 125, + 541, + 152 + ], + "spans": [ + { + "bbox": [ + 70, + 125, + 541, + 152 + ], + "type": "text", + "content": "[Nau08] Uwe Naumann. \"Optimal Jacobian accumulation is NP-complete\". In: Math. Program. 112.2 (Apr. 2008), pp. 427-441. ISSN: 0025-5610." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 153, + 541, + 191 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 153, + 541, + 191 + ], + "spans": [ + { + "bbox": [ + 70, + 153, + 541, + 191 + ], + "type": "text", + "content": "[NWC+11] Yuval Netzer, Tao Wang, Adam Coates, Alessandro Bissacco, Baolin Wu, Andrew Y Ng, et al. \"Reading digits in natural images with unsupervised feature learning\". In: NIPS workshop on deep learning and unsupervised feature learning. 2011." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 192, + 541, + 231 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 192, + 541, + 231 + ], + "spans": [ + { + "bbox": [ + 70, + 192, + 541, + 231 + ], + "type": "text", + "content": "[NZ08] Maria-Elena Nilsback and Andrew Zisserman. \"Automated flower classification over a large number of classes\". In: 2008 Sixth Indian Conference on Computer Vision, Graphics & Image Processing. 2008." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 232, + 541, + 258 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 232, + 541, + 258 + ], + "spans": [ + { + "bbox": [ + 70, + 232, + 541, + 258 + ], + "type": "text", + "content": "[OR00] James M Ortega and Werner C Rheinboldt. Iterative solution of nonlinear equations in several variables. SIAM, 2000." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 259, + 559, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 259, + 559, + 285 + ], + "spans": [ + { + "bbox": [ + 70, + 259, + 559, + 285 + ], + "type": "text", + "content": "[Pag18] David Page. CIFAR-10 Fast. GitHub Repository. Oct. 2018. URL: https://github.com/davidcpage/cifar10-fast." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 286, + 541, + 312 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 286, + 541, + 312 + ], + "spans": [ + { + "bbox": [ + 70, + 286, + 541, + 312 + ], + "type": "text", + "content": "[Pea96] Barak A Pearlmutter. \"An investigation of the gradient descent process in neural networks\". In: PhD thesis, Carnegie Mellon University. 1996." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 314, + 541, + 341 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 314, + 541, + 341 + ], + "spans": [ + { + "bbox": [ + 70, + 314, + 541, + 341 + ], + "type": "text", + "content": "[PGI+23] Sung Min Park, Kristian Georgiev, Andrew Ilyas, Guillaume Leclerc, and Aleksander Madry. \"TRAK: Attributing Model Behavior at Scale\". In: *Arxiv preprint arXiv:2303.14186*. 2023." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 342, + 541, + 368 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 342, + 541, + 368 + ], + "spans": [ + { + "bbox": [ + 70, + 342, + 541, + 368 + ], + "type": "text", + "content": "[PVZ+12] Omkar M Parkhi, Andrea Vedaldi, Andrew Zisserman, and CV Jawahar. \"Cats and dogs\". In: 2012 IEEE conference on computer vision and pattern recognition. IEEE. 2012, pp. 3498-3505." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 369, + 541, + 419 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 369, + 541, + 419 + ], + "spans": [ + { + "bbox": [ + 70, + 369, + 541, + 419 + ], + "type": "text", + "content": "[RDK+22] William A Gaviria Rojas, Sudnya Diamos, Keertan Ranjan Kini, David Kanter, Vijay Janapa Reddi, and Cody Coleman. \"The dollar street dataset: Images representing the geographic and socioeconomic diversity of the world\". In: Thirty-sixth Conference on Neural Information Processing Systems Datasets and Benchmarks Track. 2022." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 70, + 421, + 541, + 447 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 421, + 541, + 447 + ], + "spans": [ + { + "bbox": [ + 70, + 421, + 541, + 447 + ], + "type": "text", + "content": "[RFK+19] Aravind Rajeswaran, Chelsea Finn, Sham M Kakade, and Sergey Levine. \"Meta-learning with implicit gradients\". In: Advances in neural information processing systems 32 (2019)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 70, + 449, + 541, + 486 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 449, + 541, + 486 + ], + "spans": [ + { + "bbox": [ + 70, + 449, + 541, + 486 + ], + "type": "text", + "content": "[RKH+21] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. \"Learning transferable visual models from natural language supervision\". In: arXiv preprint arXiv:2103.00020. 2021." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 70, + 487, + 541, + 526 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 487, + 541, + 526 + ], + "spans": [ + { + "bbox": [ + 70, + 487, + 541, + 526 + ], + "type": "text", + "content": "[RLZ+24] Vikram V Ramaswamy, Sing Yu Lin, Dora Zhao, Aaron Adcock, Laurens van der Maaten, Deepti Ghadiyaram, and Olga Russakovsky. \"GeoDE: a geographically diverse evaluation dataset for object recognition\". In: Advances in Neural Information Processing Systems. 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 69, + 528, + 541, + 564 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 528, + 541, + 564 + ], + "spans": [ + { + "bbox": [ + 69, + 528, + 541, + 564 + ], + "type": "text", + "content": "[RRS+19] Benjamin Recht, Rebecca Roelofs, Ludwig Schmidt, and Vaishaal Shankar. 
\"Do ImageNet Classifiers Generalize to ImageNet?\" In: International Conference on Machine Learning (ICML). 2019." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 69, + 566, + 541, + 604 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 566, + 541, + 604 + ], + "spans": [ + { + "bbox": [ + 69, + 566, + 541, + 604 + ], + "type": "text", + "content": "[SGB+22] Damien Scieur, Gauthier Gidel, Quentin Bertrand, and Fabian Pedregosa. \"The curse of un-rolling: Rate of differentiating through optimization\". In: Advances in Neural Information Processing Systems 35 (2022), pp. 17133–17145." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 70, + 606, + 541, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 606, + 541, + 632 + ], + "spans": [ + { + "bbox": [ + 70, + 606, + 541, + 632 + ], + "type": "text", + "content": "[SN17] Leslie N. Smith Smith and Topin Nicholay. \"Super-Convergence: Very Fast Training of Neural Networks Using Large Learning Rates\". In: ArXiv preprint arXiv:1708.07120. 2017." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 69, + 634, + 541, + 683 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 634, + 541, + 683 + ], + "spans": [ + { + "bbox": [ + 69, + 634, + 541, + 683 + ], + "type": "text", + "content": "[SRR+22] Aarohi Srivastava, Abhinav Rastogi, Abhishek Rao, Abu Awal Md Shoeb, Abubakar Abid, Adam Fisch, Adam R Brown, Adam Santoro, Aditya Gupta, Adrià Garriga-Alonso, et al. “Beyond the imitation game: Quantifying and extrapolating the capabilities of language models”. In: arXiv preprint arXiv:2206.04615 (2022)." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 69, + 685, + 541, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 685, + 541, + 723 + ], + "spans": [ + { + "bbox": [ + 69, + 685, + 541, + 723 + ], + "type": "text", + "content": "[SSS+11] Johannes Stallkamp, Marc Schlipsing, Jan Salmen, and Christian Igel. \"The German traffic sign recognition benchmark: a multi-class classification competition\". In: The 2011 international joint conference on neural networks. 2011." + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "bbox": [ + 70, + 71, + 550, + 690 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 70, + 71, + 541, + 121 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 71, + 541, + 121 + ], + "spans": [ + { + "bbox": [ + 70, + 71, + 541, + 121 + ], + "type": "text", + "content": "[SSS+22] Mirac Suzgun, Nathan Scales, Nathanael Scharli, Sebastian Gehrmann, Yi Tay, Hyung Won Chung, Aakanksha Chowdhery, Quoc V Le, Ed H Chi, Denny Zhou, et al. \"Challenging big-bench tasks and whether chain-of-thought can solve them\". In: arXiv preprint arXiv:2210.09261 (2022)." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 124, + 541, + 160 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 124, + 541, + 160 + ], + "spans": [ + { + "bbox": [ + 70, + 124, + 541, + 160 + ], + "type": "text", + "content": "[TGZ+23] Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li, Carlos Guestrin, Percy Liang, and Tatsunori B. 
Hashimoto. Stanford Alpaca: An Instruction-following LLaMA model. https://github.com/tatsu-lab/stanford_alpaca.2023." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 163, + 541, + 201 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 163, + 541, + 201 + ], + "spans": [ + { + "bbox": [ + 70, + 163, + 541, + 201 + ], + "type": "text", + "content": "[TMH+24] Gemma Team, Thomas Mesnard, Cassidy Hardin, Robert Dadashi, Surya Bhupatiraju, Shreya Pathak, Laurent Sifre, Morgane Riviere, Mihir Sanjay Kale, Juliette Love, et al. \"Gemma: Open models based on gemini research and technology\". In: arXiv preprint arXiv:2403.08295 (2024)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 203, + 545, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 203, + 545, + 239 + ], + "spans": [ + { + "bbox": [ + 70, + 203, + 545, + 239 + ], + "type": "text", + "content": "[TSF+16] Bart Thomee, David A. Shamma, Gerald Friedland, Benjamin Elizalde, Karl Ni, Douglas Poland, Damian Borth, and Li-Jia Li. \"YFCC100M: The New Data in Multimedia Research\". In: Communications of the ACM (2016)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 243, + 541, + 292 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 243, + 541, + 292 + ], + "spans": [ + { + "bbox": [ + 70, + 243, + 541, + 292 + ], + "type": "text", + "content": "[VLW+18] Bastiaan S Veeling, Jasper Linmans, Jim Winkens, Taco Cohen, and Max Welling. \"Rotation equivariant CNNs for digital pathology\". In: Medical Image Computing and Computer Assisted Intervention-MICCAI 2018: 21st International Conference, Granada, Spain, September 16-20, 2018, Proceedings, Part II 11. 2018." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 295, + 541, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 295, + 541, + 308 + ], + "spans": [ + { + "bbox": [ + 70, + 295, + 541, + 308 + ], + "type": "text", + "content": "[Web24] Team Webdataset. webdataset. 2024. URL: https://www.github.com/webdataset/webdataset." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 311, + 541, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 311, + 541, + 335 + ], + "spans": [ + { + "bbox": [ + 70, + 311, + 541, + 335 + ], + "type": "text", + "content": "[Wer90] Paul J Werbos. \"Backpropagation through time: what it does and how to do it\". In: Proceedings of the IEEE 78.10 (1990), pp. 1550-1560." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 338, + 541, + 374 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 338, + 541, + 374 + ], + "spans": [ + { + "bbox": [ + 70, + 338, + 541, + 374 + ], + "type": "text", + "content": "[WGX+19] Haohan Wang, Songwei Ge, Eric P Xing, and Zachary C Lipton. \"Learning robust global representations by penalizing local predictive power\". In: Neural Information Processing Systems (NeurIPS) (2019)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 378, + 541, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 378, + 541, + 415 + ], + "spans": [ + { + "bbox": [ + 70, + 378, + 541, + 415 + ], + "type": "text", + "content": "[WWS+22] Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. \"Chain-of-thought prompting elicits reasoning in large language models\". In: Advances in neural information processing systems 35 (2022), pp. 24824-24837." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 418, + 550, + 454 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 418, + 550, + 454 + ], + "spans": [ + { + "bbox": [ + 70, + 418, + 550, + 454 + ], + "type": "text", + "content": "[XHE+10] Jianxiong Xiao, James Hays, Krista A Ehinger, Aude Oliva, and Antonio Torralba. \"Sun database: Large-scale scene recognition from abbey to zoo\". In: Computer Vision and Pattern Recognition (CVPR). 2010." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 457, + 541, + 495 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 457, + 541, + 495 + ], + "spans": [ + { + "bbox": [ + 70, + 457, + 541, + 495 + ], + "type": "text", + "content": "[XMG+24] Mengzhou Xia, Sadhika Malladi, Suchin Gururangan, Sanjeev Arora, and Danqi Chen. \"Less: Selecting influential data for targeted instruction tuning\". In: arXiv preprint arXiv:2402.04333 (2024)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 70, + 498, + 541, + 535 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 498, + 541, + 535 + ], + "spans": [ + { + "bbox": [ + 70, + 498, + 541, + 535 + ], + "type": "text", + "content": "[YLH+14] Peter Young, Alice Lai, Micah Hodosh, and Julia Hockenmaier. \"From image descriptions to visual denotations: New similarity metrics for semantic inference over event descriptions\". In: Transactions of the Association for Computational Linguistics. 2014." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 70, + 537, + 541, + 597 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 537, + 541, + 597 + ], + "spans": [ + { + "bbox": [ + 70, + 537, + 541, + 597 + ], + "type": "text", + "content": "[ZP00] Geoffrey Zweig and Mukund Padmanabhan. \"Exact alpha-beta computation in logarithmic space with application to MAP word graph construction\". 
In: Sixth International Conference on Spoken Language Processing, ICSLP 2000 / INTERSPEECH 2000, Beijing, China, October 16-20, 2000. ISCA, 2000, pp. 855-858. DOI: 10.21437/ICSLP.2000-404. URL: https://doi.org/10.21437/ICSLP.2000-404." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 70, + 601, + 541, + 650 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 601, + 541, + 650 + ], + "spans": [ + { + "bbox": [ + 70, + 601, + 541, + 650 + ], + "type": "text", + "content": "[ZPK+19] Xiaohua Zhai, Joan Puigcerver, Alexander Kolesnikov, Pierre Ruyssen, Carlos Riquelme, Mario Lucic, Josip Djolonga, Andre Susano Pinto, Maxim Neumann, Alexey Dosovitskiy, et al. \"A large-scale study of representation learning with the visual task adaptation benchmark\". In: arXiv preprint arXiv:1910.04867. 2019." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 70, + 653, + 541, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 653, + 541, + 690 + ], + "spans": [ + { + "bbox": [ + 70, + 653, + 541, + 690 + ], + "type": "text", + "content": "[ZSP+21] Miao Zhang, Steven W Su, Shirui Pan, Xiaojun Chang, Ehsan M Abbasnejad, and Reza Haffari. \"idarts: Differentiable architecture search with stochastic implicit gradients\". In: International Conference on Machine Learning. PMLR. 2021, pp. 12557-12566." 
+ } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 740, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 740, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 740, + 311, + 750 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 70, + 358, + 87 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 70, + 358, + 87 + ], + "spans": [ + { + "bbox": [ + 69, + 70, + 358, + 87 + ], + "type": "text", + "content": "A Calculating metagradients with REPLAY" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 68, + 95, + 541, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 95, + 541, + 120 + ], + "spans": [ + { + "bbox": [ + 68, + 95, + 541, + 120 + ], + "type": "text", + "content": "This appendix contains supplementary material for Section 2. We describe two algorithms in detail: stepwise AD, and our own algorithm REPLAY. Refer to Section 2 for the notation used in this appendix." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 135, + 232, + 149 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 135, + 232, + 149 + ], + "spans": [ + { + "bbox": [ + 69, + 135, + 232, + 149 + ], + "type": "text", + "content": "A.1 Warmup: Step-wise AD" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 155, + 541, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 155, + 541, + 192 + ], + "spans": [ + { + "bbox": [ + 68, + 155, + 541, + 192 + ], + "type": "text", + "content": "We fully describe step-wise AD in Algorithm 2. 
The algorithm requires storing all " + }, + { + "bbox": [ + 68, + 155, + 541, + 192 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 68, + 155, + 541, + 192 + ], + "type": "text", + "content": " optimizer states, but requires constant memory overhead for each AD call (as each AD call is over a single step), making it feasible to compute for small setups." + } + ] + } + ], + "index": 3 + }, + { + "type": "code", + "bbox": [ + 71, + 222, + 508, + 415 + ], + "blocks": [ + { + "bbox": [ + 77, + 208, + 270, + 220 + ], + "lines": [ + { + "bbox": [ + 77, + 208, + 270, + 220 + ], + "spans": [ + { + "bbox": [ + 77, + 208, + 270, + 220 + ], + "type": "text", + "content": "Algorithm 2: metagradients in " + }, + { + "bbox": [ + 77, + 208, + 270, + 220 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(T)" + }, + { + "bbox": [ + 77, + 208, + 270, + 220 + ], + "type": "text", + "content": " space." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 71, + 222, + 508, + 415 + ], + "lines": [ + { + "bbox": [ + 71, + 222, + 508, + 415 + ], + "spans": [ + { + "bbox": [ + 71, + 222, + 508, + 415 + ], + "type": "text", + "content": "1 // Store each optimizer state on disk \n2 " + }, + { + "bbox": [ + 71, + 222, + 508, + 415 + ], + "type": "inline_equation", + "content": "\\{s_i\\}_{i=0}^T \\leftarrow" + }, + { + "bbox": [ + 71, + 222, + 508, + 415 + ], + "type": "text", + "content": " Train model via " + }, + { + "bbox": [ + 71, + 222, + 508, + 415 + ], + "type": "inline_equation", + "content": "A(z)" + }, + { + "bbox": [ + 71, + 222, + 508, + 415 + ], + "type": "text", + "content": " \n3 \n4 // Variables; shorthand for " + }, + { + "bbox": [ + 71, + 222, + 508, + 415 + ], + "type": "inline_equation", + "content": "\\frac{\\partial f(z)}{\\partial z}" + }, + { + "bbox": [ + 71, + 222, + 508, + 415 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 71, + 222, + 508, + 415 + ], + "type": 
"inline_equation", + "content": "\\frac{\\partial f(z)}{\\partial s_T}" + }, + { + "bbox": [ + 71, + 222, + 508, + 415 + ], + "type": "text", + "content": " \n5 " + }, + { + "bbox": [ + 71, + 222, + 508, + 415 + ], + "type": "inline_equation", + "content": "\\bar{z} \\gets 0" + }, + { + "bbox": [ + 71, + 222, + 508, + 415 + ], + "type": "text", + "content": " \n6 " + }, + { + "bbox": [ + 71, + 222, + 508, + 415 + ], + "type": "inline_equation", + "content": "\\bar{s}_T \\leftarrow \\frac{\\partial g(s_T)}{\\partial s_T} \\quad //" + }, + { + "bbox": [ + 71, + 222, + 508, + 415 + ], + "type": "text", + "content": " One reverse-mode AD call \n7 \n8 // Reverse-mode differentiate step-by-step \n9 for " + }, + { + "bbox": [ + 71, + 222, + 508, + 415 + ], + "type": "inline_equation", + "content": "s_i \\gets s_{T-1}" + }, + { + "bbox": [ + 71, + 222, + 508, + 415 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 71, + 222, + 508, + 415 + ], + "type": "inline_equation", + "content": "s_0" + }, + { + "bbox": [ + 71, + 222, + 508, + 415 + ], + "type": "text", + "content": " do \n10 // One reverse-mode AD call. Left: " + }, + { + "bbox": [ + 71, + 222, + 508, + 415 + ], + "type": "inline_equation", + "content": "\\nabla_{s_i}f" + }, + { + "bbox": [ + 71, + 222, + 508, + 415 + ], + "type": "text", + "content": ". Right: contribution to " + }, + { + "bbox": [ + 71, + 222, + 508, + 415 + ], + "type": "inline_equation", + "content": "\\nabla_{z}f" + }, + { + "bbox": [ + 71, + 222, + 508, + 415 + ], + "type": "text", + "content": " at " + }, + { + "bbox": [ + 71, + 222, + 508, + 415 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 71, + 222, + 508, + 415 + ], + "type": "text", + "content": ". 
\n11 " + }, + { + "bbox": [ + 71, + 222, + 508, + 415 + ], + "type": "inline_equation", + "content": "\\bar{s}_i \\gets \\bar{s}_{i+1} \\cdot \\frac{\\partial h_i(s_i, z)}{\\partial s_i}, \\quad \\bar{z}_i \\gets \\bar{s}_{i+1} \\cdot \\frac{\\partial h_i(s_i, z)}{\\partial z}" + }, + { + "bbox": [ + 71, + 222, + 508, + 415 + ], + "type": "text", + "content": " \n12 \n13 " + }, + { + "bbox": [ + 71, + 222, + 508, + 415 + ], + "type": "inline_equation", + "content": "\\bar{z} \\gets \\bar{z} + \\bar{z}_i \\quad //" + }, + { + "bbox": [ + 71, + 222, + 508, + 415 + ], + "type": "text", + "content": " Accumulate metagradient \n14 \n15 Return " + }, + { + "bbox": [ + 71, + 222, + 508, + 415 + ], + "type": "inline_equation", + "content": "\\bar{z}" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "code_body" + } + ], + "index": 5, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 69, + 449, + 145, + 460 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 449, + 145, + 460 + ], + "spans": [ + { + "bbox": [ + 69, + 449, + 145, + 460 + ], + "type": "text", + "content": "A.2 REPLAY" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 68, + 469, + 541, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 469, + 541, + 529 + ], + "spans": [ + { + "bbox": [ + 68, + 469, + 541, + 529 + ], + "type": "text", + "content": "We now describe REPLAY, our method for calculating metagradients. 
For a free parameter " + }, + { + "bbox": [ + 68, + 469, + 541, + 529 + ], + "type": "inline_equation", + "content": "k \\in \\mathbb{N}" + }, + { + "bbox": [ + 68, + 469, + 541, + 529 + ], + "type": "text", + "content": ", REPLAY requires storing " + }, + { + "bbox": [ + 68, + 469, + 541, + 529 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(k\\log_k(T))" + }, + { + "bbox": [ + 68, + 469, + 541, + 529 + ], + "type": "text", + "content": " optimizer states and an additional " + }, + { + "bbox": [ + 68, + 469, + 541, + 529 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(\\log_k(T))" + }, + { + "bbox": [ + 68, + 469, + 541, + 529 + ], + "type": "text", + "content": " factor of computation. The free parameter " + }, + { + "bbox": [ + 68, + 469, + 541, + 529 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 68, + 469, + 541, + 529 + ], + "type": "text", + "content": " controls the trade-off between storage and required compute. We fully describe REPLAY in Algorithm 3. REPLAY modifies Algorithm 2 by retrieving the optimizer states in reverse order using a " + }, + { + "bbox": [ + 68, + 469, + 541, + 529 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 68, + 469, + 541, + 529 + ], + "type": "text", + "content": "-ary tree structure in lieu of a list of all the stored states." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 544, + 171, + 556 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 544, + 171, + 556 + ], + "spans": [ + { + "bbox": [ + 69, + 544, + 171, + 556 + ], + "type": "text", + "content": "A.2.1 Lazy " + }, + { + "bbox": [ + 69, + 544, + 171, + 556 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 69, + 544, + 171, + 556 + ], + "type": "text", + "content": "-ary tree" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 68, + 563, + 541, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 563, + 541, + 635 + ], + "spans": [ + { + "bbox": [ + 68, + 563, + 541, + 635 + ], + "type": "text", + "content": "We now describe the " + }, + { + "bbox": [ + 68, + 563, + 541, + 635 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 68, + 563, + 541, + 635 + ], + "type": "text", + "content": "-ary tree structure that underlies REPLAY; for a visual reference of this tree with " + }, + { + "bbox": [ + 68, + 563, + 541, + 635 + ], + "type": "inline_equation", + "content": "k = 2" + }, + { + "bbox": [ + 68, + 563, + 541, + 635 + ], + "type": "text", + "content": ", see Figure 3. 
For ease of analysis we parameterize the total number of states as " + }, + { + "bbox": [ + 68, + 563, + 541, + 635 + ], + "type": "inline_equation", + "content": "n = T + 1" + }, + { + "bbox": [ + 68, + 563, + 541, + 635 + ], + "type": "text", + "content": " (and therefore take " + }, + { + "bbox": [ + 68, + 563, + 541, + 635 + ], + "type": "inline_equation", + "content": "n - 1" + }, + { + "bbox": [ + 68, + 563, + 541, + 635 + ], + "type": "text", + "content": " total training steps) when describing this data structure, and assume WLOG that " + }, + { + "bbox": [ + 68, + 563, + 541, + 635 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 68, + 563, + 541, + 635 + ], + "type": "text", + "content": " is an integer power of " + }, + { + "bbox": [ + 68, + 563, + 541, + 635 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 68, + 563, + 541, + 635 + ], + "type": "text", + "content": ". At a high level, traversing this tree recursively replays retraining to recover all the optimizer states in reverse order, while deleting states that are no longer needed. We call this tree \"lazy\" because it retransmits only when required to obtain states that are not yet retrieved." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 68, + 635, + 541, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 635, + 541, + 708 + ], + "spans": [ + { + "bbox": [ + 68, + 635, + 541, + 708 + ], + "type": "text", + "content": "The tree is a complete " + }, + { + "bbox": [ + 68, + 635, + 541, + 708 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 68, + 635, + 541, + 708 + ], + "type": "text", + "content": "-ary tree with " + }, + { + "bbox": [ + 68, + 635, + 541, + 708 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 68, + 635, + 541, + 708 + ], + "type": "text", + "content": " leaves (and therefore " + }, + { + "bbox": [ + 68, + 635, + 541, + 708 + ], + "type": "inline_equation", + "content": "\\log_k(n)" + }, + { + "bbox": [ + 68, + 635, + 541, + 708 + ], + "type": "text", + "content": " depth) structured as follows. We start at the root, then recursively define the rest of the tree. Every node in the tree represents a single optimizer state. The root represents state " + }, + { + "bbox": [ + 68, + 635, + 541, + 708 + ], + "type": "inline_equation", + "content": "s_0" + }, + { + "bbox": [ + 68, + 635, + 541, + 708 + ], + "type": "text", + "content": ". 
To recursively define the remaining nodes: each non-leaf node " + }, + { + "bbox": [ + 68, + 635, + 541, + 708 + ], + "type": "inline_equation", + "content": "s_i" + }, + { + "bbox": [ + 68, + 635, + 541, + 708 + ], + "type": "text", + "content": " at depth " + }, + { + "bbox": [ + 68, + 635, + 541, + 708 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 68, + 635, + 541, + 708 + ], + "type": "text", + "content": " has " + }, + { + "bbox": [ + 68, + 635, + 541, + 708 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 68, + 635, + 541, + 708 + ], + "type": "text", + "content": " equally spaced (in terms of state number) children starting—from left to right—at state " + }, + { + "bbox": [ + 68, + 635, + 541, + 708 + ], + "type": "inline_equation", + "content": "s_i" + }, + { + "bbox": [ + 68, + 635, + 541, + 708 + ], + "type": "text", + "content": " and ending at " + }, + { + "bbox": [ + 68, + 635, + 541, + 708 + ], + "type": "inline_equation", + "content": "s_{i+n/k^{d+1}}" + }, + { + "bbox": [ + 68, + 635, + 541, + 708 + ], + "type": "text", + "content": ". This means that the leaves correspond—from left to right—to the states " + }, + { + "bbox": [ + 68, + 635, + 541, + 708 + ], + "type": "inline_equation", + "content": "s_0, s_1, \\ldots, s_{n-1}" + }, + { + "bbox": [ + 68, + 635, + 541, + 708 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "26" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 25 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 543, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 543, + 167 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 543, + 167 + ], + "type": "text", + "content": "We reduce the problem of iterating over the states in reverse to the problem of reverse in-order traversing this tree and yielding just the leaves—this is exactly the states in reverse order. A reverse in-order traversal for this " + }, + { + "bbox": [ + 67, + 72, + 543, + 167 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 67, + 72, + 543, + 167 + ], + "type": "text", + "content": "-ary tree requires repeatedly: recursively traversing child nodes from largest to smallest, then visiting the parent node. We design the specifics of this traversal to maximize space and compute efficiency. To access the children of a parent node at traversal time, we replay model training from the smallest child state (which is stored in the parent state) to the largest child state and store all the children. We perform this operation recursively each time we traverse a node. After traversing the node's left side (i.e., after ascending from this node), we delete all its child states." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 167, + 541, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 167, + 541, + 277 + ], + "spans": [ + { + "bbox": [ + 67, + 167, + 541, + 277 + ], + "type": "text", + "content": "Reverse in-order traversing this tree requires storing at most " + }, + { + "bbox": [ + 67, + 167, + 541, + 277 + ], + "type": "inline_equation", + "content": "k \\log_k(n)" + }, + { + "bbox": [ + 67, + 167, + 541, + 277 + ], + "type": "text", + "content": " optimizer states at a time, and in aggregate requires retraining the model " + }, + { + "bbox": [ + 67, + 167, + 541, + 277 + ], + "type": "inline_equation", + "content": "\\log_k(n)" + }, + { + "bbox": [ + 67, + 167, + 541, + 277 + ], + "type": "text", + "content": " times. The argument for each is straightforward. Storage: the traversal requires storing at most " + }, + { + "bbox": [ + 67, + 167, + 541, + 277 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 67, + 167, + 541, + 277 + ], + "type": "text", + "content": " states for each level that it descends (we store " + }, + { + "bbox": [ + 67, + 167, + 541, + 277 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 67, + 167, + 541, + 277 + ], + "type": "text", + "content": " states whenever we first traverse to a parent node) and we remove " + }, + { + "bbox": [ + 67, + 167, + 541, + 277 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 67, + 167, + 541, + 277 + ], + "type": "text", + "content": " states for each level that the traversal ascends (we remove " + }, + { + "bbox": [ + 67, + 167, + 541, + 277 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 67, + 167, + 541, + 277 + ], + "type": "text", + "content": " states after we are done with the left traversal of a parent). Compute: we replay training to reinstantiate the children of every parent node a single time. 
The " + }, + { + "bbox": [ + 67, + 167, + 541, + 277 + ], + "type": "inline_equation", + "content": "k^d" + }, + { + "bbox": [ + 67, + 167, + 541, + 277 + ], + "type": "text", + "content": " parent nodes at level " + }, + { + "bbox": [ + 67, + 167, + 541, + 277 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 67, + 167, + 541, + 277 + ], + "type": "text", + "content": " each require replaying " + }, + { + "bbox": [ + 67, + 167, + 541, + 277 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(n / k^d)" + }, + { + "bbox": [ + 67, + 167, + 541, + 277 + ], + "type": "text", + "content": " states to reinstantiate children. Therefore, in a traversal, each level requires " + }, + { + "bbox": [ + 67, + 167, + 541, + 277 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(n) (k^d \\cdot n / k^d)" + }, + { + "bbox": [ + 67, + 167, + 541, + 277 + ], + "type": "text", + "content": " optimizer steps. There are " + }, + { + "bbox": [ + 67, + 167, + 541, + 277 + ], + "type": "inline_equation", + "content": "\\log_k(n)" + }, + { + "bbox": [ + 67, + 167, + 541, + 277 + ], + "type": "text", + "content": " levels with parent nodes, which means a total of " + }, + { + "bbox": [ + 67, + 167, + 541, + 277 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(n \\log_k(n))" + }, + { + "bbox": [ + 67, + 167, + 541, + 277 + ], + "type": "text", + "content": " optimizer steps, or a multiplicative factor of " + }, + { + "bbox": [ + 67, + 167, + 541, + 277 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(\\log_k(n))" + }, + { + "bbox": [ + 67, + 167, + 541, + 277 + ], + "type": "text", + "content": " steps compared to model training." 
+ } + ] + } + ], + "index": 1 + }, + { + "type": "code", + "bbox": [ + 72, + 308, + 508, + 488 + ], + "blocks": [ + { + "bbox": [ + 77, + 292, + 343, + 305 + ], + "lines": [ + { + "bbox": [ + 77, + 292, + 343, + 305 + ], + "spans": [ + { + "bbox": [ + 77, + 292, + 343, + 305 + ], + "type": "text", + "content": "Algorithm 3: REPLAY. metagradients in " + }, + { + "bbox": [ + 77, + 292, + 343, + 305 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(k\\log_k(T))" + }, + { + "bbox": [ + 77, + 292, + 343, + 305 + ], + "type": "text", + "content": " space." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 72, + 308, + 508, + 488 + ], + "lines": [ + { + "bbox": [ + 72, + 308, + 508, + 488 + ], + "spans": [ + { + "bbox": [ + 72, + 308, + 508, + 488 + ], + "type": "text", + "content": "1 " + }, + { + "bbox": [ + 72, + 308, + 508, + 488 + ], + "type": "inline_equation", + "content": "T\\gets" + }, + { + "bbox": [ + 72, + 308, + 508, + 488 + ], + "type": "text", + "content": " Lazy " + }, + { + "bbox": [ + 72, + 308, + 508, + 488 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 72, + 308, + 508, + 488 + ], + "type": "text", + "content": " -ary tree for " + }, + { + "bbox": [ + 72, + 308, + 508, + 488 + ], + "type": "inline_equation", + "content": "\\mathcal{A}(z)" + }, + { + "bbox": [ + 72, + 308, + 508, + 488 + ], + "type": "text", + "content": " // Make lazy " + }, + { + "bbox": [ + 72, + 308, + 508, + 488 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 72, + 308, + 508, + 488 + ], + "type": "text", + "content": " -ary tree of Appendix A.2 \n2 \n3 // Variables; shorthand for " + }, + { + "bbox": [ + 72, + 308, + 508, + 488 + ], + "type": "inline_equation", + "content": "\\frac{\\partial f(z)}{\\partial z}" + }, + { + "bbox": [ + 72, + 308, + 508, + 488 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 72, + 308, + 508, + 488 + ], + "type": 
"inline_equation", + "content": "\\frac{\\partial f(z)}{\\partial s_T}" + }, + { + "bbox": [ + 72, + 308, + 508, + 488 + ], + "type": "text", + "content": " \n4 " + }, + { + "bbox": [ + 72, + 308, + 508, + 488 + ], + "type": "inline_equation", + "content": "\\bar{z}\\gets 0" + }, + { + "bbox": [ + 72, + 308, + 508, + 488 + ], + "type": "text", + "content": " \n5 " + }, + { + "bbox": [ + 72, + 308, + 508, + 488 + ], + "type": "inline_equation", + "content": "\\bar{s}_T\\gets \\frac{\\partial g(s_T)}{\\partial s_T}" + }, + { + "bbox": [ + 72, + 308, + 508, + 488 + ], + "type": "text", + "content": " // One reverse-mode AD call \n6 \n7 // Reverse-mode differentiate step-by-step; traverse " + }, + { + "bbox": [ + 72, + 308, + 508, + 488 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 72, + 308, + 508, + 488 + ], + "type": "text", + "content": " instead of stored states \n8 for " + }, + { + "bbox": [ + 72, + 308, + 508, + 488 + ], + "type": "inline_equation", + "content": "s_i\\gets s_{T - 1}" + }, + { + "bbox": [ + 72, + 308, + 508, + 488 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 72, + 308, + 508, + 488 + ], + "type": "inline_equation", + "content": "s_0\\in" + }, + { + "bbox": [ + 72, + 308, + 508, + 488 + ], + "type": "text", + "content": " reverse_inorder_traversal(T) do \n9 // One reverse-mode AD call. Left: " + }, + { + "bbox": [ + 72, + 308, + 508, + 488 + ], + "type": "inline_equation", + "content": "\\nabla_{s_i}f" + }, + { + "bbox": [ + 72, + 308, + 508, + 488 + ], + "type": "text", + "content": " . Right: contribution to " + }, + { + "bbox": [ + 72, + 308, + 508, + 488 + ], + "type": "inline_equation", + "content": "\\nabla_{z}f" + }, + { + "bbox": [ + 72, + 308, + 508, + 488 + ], + "type": "text", + "content": " at i. 
\n10 " + }, + { + "bbox": [ + 72, + 308, + 508, + 488 + ], + "type": "inline_equation", + "content": "\\bar{s}_i\\gets \\bar{s}_{i + 1}\\cdot \\frac{\\partial h_i(s_i,z)}{\\partial s_i},\\quad \\bar{z}_i\\gets \\bar{s}_{i + 1}\\cdot \\frac{\\partial h_i(s_i,z)}{\\partial z}" + }, + { + "bbox": [ + 72, + 308, + 508, + 488 + ], + "type": "text", + "content": " \n11 \n12 " + }, + { + "bbox": [ + 72, + 308, + 508, + 488 + ], + "type": "inline_equation", + "content": "\\bar{z}\\gets \\bar{z} +\\bar{z}_i" + }, + { + "bbox": [ + 72, + 308, + 508, + 488 + ], + "type": "text", + "content": " // Accumulate metagradient \n13 \n14 Return " + }, + { + "bbox": [ + 72, + 308, + 508, + 488 + ], + "type": "inline_equation", + "content": "\\bar{z}" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "code_body" + } + ], + "index": 3, + "sub_type": "code", + "guess_lang": "txt" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 740, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 740, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 740, + 311, + 750 + ], + "type": "text", + "content": "27" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 26 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 69, + 253, + 87 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 69, + 253, + 87 + ], + "spans": [ + { + "bbox": [ + 69, + 69, + 253, + 87 + ], + "type": "text", + "content": "B Smooth Model Training" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 95, + 191, + 111 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 95, + 191, + 111 + ], + "spans": [ + { + "bbox": [ + 69, + 95, + 191, + 111 + ], + "type": "text", + "content": "B.1 Omitted Figures" + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 75, + 125, + 531, + 239 + ], + "blocks": [ + { + "bbox": [ + 75, + 125, + 531, + 239 + ], + "lines": [ + { + "bbox": [ 
+ 75, + 125, + 531, + 239 + ], + "spans": [ + { + "bbox": [ + 75, + 125, + 531, + 239 + ], + "type": "image", + "image_path": "af3a8f87efacd4acaa3c56185ccfd5ffdb24bf0c973fc76594255668269e6e61.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 68, + 247, + 541, + 270 + ], + "lines": [ + { + "bbox": [ + 68, + 247, + 541, + 270 + ], + "spans": [ + { + "bbox": [ + 68, + 247, + 541, + 270 + ], + "type": "text", + "content": "Figure 11: The factors affecting metasmoothness of training a ResNet-9 on the CIFAR-10 dataset. See §3 for details." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 740, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 740, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 740, + 311, + 750 + ], + "type": "text", + "content": "28" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 27 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 97, + 87, + 297, + 223 + ], + "blocks": [ + { + "bbox": [ + 137, + 72, + 271, + 86 + ], + "lines": [ + { + "bbox": [ + 137, + 72, + 271, + 86 + ], + "spans": [ + { + "bbox": [ + 137, + 72, + 271, + 86 + ], + "type": "text", + "content": "Non-smooth (Example #1118)" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 97, + 87, + 297, + 223 + ], + "lines": [ + { + "bbox": [ + 97, + 87, + 297, + 223 + ], + "spans": [ + { + "bbox": [ + 97, + 87, + 297, + 223 + ], + "type": "image", + "image_path": "ed91df62b477bd91c24737b738d49397bea204557c82454c3f8740aacb9e13e6.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 310, + 87, + 509, + 222 + ], + "blocks": [ + { + "bbox": [ + 360, + 72, + 473, + 86 + ], + "lines": [ + { + "bbox": [ + 360, + 72, + 473, + 86 
+ ], + "spans": [ + { + "bbox": [ + 360, + 72, + 473, + 86 + ], + "type": "text", + "content": "Smooth (Example #1118)" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 310, + 87, + 509, + 222 + ], + "lines": [ + { + "bbox": [ + 310, + 87, + 509, + 222 + ], + "spans": [ + { + "bbox": [ + 310, + 87, + 509, + 222 + ], + "type": "image", + "image_path": "b1658df434e665ee2b41168eec4c27970ddad44acffd12d60b48a59d7a89737a.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 97, + 248, + 296, + 381 + ], + "blocks": [ + { + "bbox": [ + 137, + 232, + 271, + 246 + ], + "lines": [ + { + "bbox": [ + 137, + 232, + 271, + 246 + ], + "spans": [ + { + "bbox": [ + 137, + 232, + 271, + 246 + ], + "type": "text", + "content": "Non-smooth (Example #3349)" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 97, + 248, + 296, + 381 + ], + "lines": [ + { + "bbox": [ + 97, + 248, + 296, + 381 + ], + "spans": [ + { + "bbox": [ + 97, + 248, + 296, + 381 + ], + "type": "image", + "image_path": "49bd3d8ee76b74e6943123a4c519082b91d14235276199ab531f34a03a34cc78.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 311, + 248, + 509, + 381 + ], + "blocks": [ + { + "bbox": [ + 361, + 232, + 473, + 246 + ], + "lines": [ + { + "bbox": [ + 361, + 232, + 473, + 246 + ], + "spans": [ + { + "bbox": [ + 361, + 232, + 473, + 246 + ], + "type": "text", + "content": "Smooth (Example #3349)" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 311, + 248, + 509, + 381 + ], + "lines": [ + { + "bbox": [ + 311, + 248, + 509, + 381 + ], + "spans": [ + { + "bbox": [ + 311, + 248, + 509, + 381 + ], + "type": "image", + "image_path": "719d47ec6f96ce91103c9fde603e154cba7baa02ad50f8e48186c6c41b3a97ff.jpg" + } + ] + } + ], + 
"index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 97, + 408, + 296, + 542 + ], + "blocks": [ + { + "bbox": [ + 135, + 393, + 272, + 407 + ], + "lines": [ + { + "bbox": [ + 135, + 393, + 272, + 407 + ], + "spans": [ + { + "bbox": [ + 135, + 393, + 272, + 407 + ], + "type": "text", + "content": "Non-smooth (Example #10600)" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 97, + 408, + 296, + 542 + ], + "lines": [ + { + "bbox": [ + 97, + 408, + 296, + 542 + ], + "spans": [ + { + "bbox": [ + 97, + 408, + 296, + 542 + ], + "type": "image", + "image_path": "72ebdc565733f1e246ef1530655df2fd0a0556ed6198f739627b7b58781f7021.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 311, + 408, + 509, + 542 + ], + "blocks": [ + { + "bbox": [ + 358, + 393, + 475, + 407 + ], + "lines": [ + { + "bbox": [ + 358, + 393, + 475, + 407 + ], + "spans": [ + { + "bbox": [ + 358, + 393, + 475, + 407 + ], + "type": "text", + "content": "Smooth (Example #10600)" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 311, + 408, + 509, + 542 + ], + "lines": [ + { + "bbox": [ + 311, + 408, + 509, + 542 + ], + "spans": [ + { + "bbox": [ + 311, + 408, + 509, + 542 + ], + "type": "image", + "image_path": "7fbbbef73dee36330195f6d0fc527ad0a7c1c618fee58e5669d44bef1d5bb793.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 97, + 569, + 296, + 702 + ], + "blocks": [ + { + "bbox": [ + 135, + 554, + 272, + 567 + ], + "lines": [ + { + "bbox": [ + 135, + 554, + 272, + 567 + ], + "spans": [ + { + "bbox": [ + 135, + 554, + 272, + 567 + ], + "type": "text", + "content": "Non-smooth (Example #15578)" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 97, 
+ 569, + 296, + 702 + ], + "lines": [ + { + "bbox": [ + 97, + 569, + 296, + 702 + ], + "spans": [ + { + "bbox": [ + 97, + 569, + 296, + 702 + ], + "type": "image", + "image_path": "a5641668a6941e25c895bd575ef0e74375dbeaa1da5fcb13884916b4ecfa5360.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 189, + 713, + 419, + 727 + ], + "lines": [ + { + "bbox": [ + 189, + 713, + 419, + 727 + ], + "spans": [ + { + "bbox": [ + 189, + 713, + 419, + 727 + ], + "type": "text", + "content": "Figure 12: Additional loss landscape visualizations." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 311, + 569, + 509, + 702 + ], + "blocks": [ + { + "bbox": [ + 358, + 554, + 475, + 567 + ], + "lines": [ + { + "bbox": [ + 358, + 554, + 475, + 567 + ], + "spans": [ + { + "bbox": [ + 358, + 554, + 475, + 567 + ], + "type": "text", + "content": "Smooth (Example #15578)" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 311, + 569, + 509, + 702 + ], + "lines": [ + { + "bbox": [ + 311, + 569, + 509, + 702 + ], + "spans": [ + { + "bbox": [ + 311, + 569, + 509, + 702 + ], + "type": "image", + "image_path": "87b96baf0a2e4bfad4e1c9d8f8d653181ea5e5f71efa5c492a82e9d1324d2dd6.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "29" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 28 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 69, + 290, + 87 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 69, + 290, + 87 + ], + "spans": [ + { + "bbox": [ + 
69, + 69, + 290, + 87 + ], + "type": "text", + "content": "C Metagradients for DataComp" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 68, + 95, + 542, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 95, + 542, + 133 + ], + "spans": [ + { + "bbox": [ + 68, + 95, + 542, + 133 + ], + "type": "text", + "content": "This appendix contains pseudocode for the main algorithm used to do dataset selection for DataComp. It also contains additional implementation details on how metagradients were applied to CLIP, and how they were specifically applied to the DataComp setting." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 147, + 268, + 162 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 147, + 268, + 162 + ], + "spans": [ + { + "bbox": [ + 69, + 147, + 268, + 162 + ], + "type": "text", + "content": "C.1 Dataset Selection Using MGD" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 167, + 542, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 167, + 542, + 240 + ], + "spans": [ + { + "bbox": [ + 68, + 167, + 542, + 240 + ], + "type": "text", + "content": "When implementing Algorithm 1, there are several differences from the pseudocode below: firstly, rather than selecting " + }, + { + "bbox": [ + 68, + 167, + 542, + 240 + ], + "type": "inline_equation", + "content": "\\mathbf{m}" + }, + { + "bbox": [ + 68, + 167, + 542, + 240 + ], + "type": "text", + "content": " fully randomly every step, we randomly select a shard comprising fraction " + }, + { + "bbox": [ + 68, + 167, + 542, + 240 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 68, + 167, + 542, + 240 + ], + "type": "text", + "content": " of the data and take steps on all datapoints in the shard (see Section C.2). 
To mitigate overfitting, we also bake a \"minibatch fraction\" " + }, + { + "bbox": [ + 68, + 167, + 542, + 240 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 68, + 167, + 542, + 240 + ], + "type": "text", + "content": " into our model output function " + }, + { + "bbox": [ + 68, + 167, + 542, + 240 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 68, + 167, + 542, + 240 + ], + "type": "text", + "content": ". For example, if " + }, + { + "bbox": [ + 68, + 167, + 542, + 240 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 68, + 167, + 542, + 240 + ], + "type": "text", + "content": " calculates model loss on the ImageNet train set, each time " + }, + { + "bbox": [ + 68, + 167, + 542, + 240 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 68, + 167, + 542, + 240 + ], + "type": "text", + "content": " is called, we randomly sample fraction " + }, + { + "bbox": [ + 68, + 167, + 542, + 240 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 68, + 167, + 542, + 240 + ], + "type": "text", + "content": " of the ImageNet train set to evaluate on." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 68, + 254, + 541, + 359 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 254, + 541, + 359 + ], + "spans": [ + { + "bbox": [ + 68, + 254, + 541, + 359 + ], + "type": "text", + "content": "Adapting the CLIP loss function to our surrogate learning algorithm. Here, we explain how dataweights are incorporated into the CLIP loss function—the formulation given in Section 4.1 is actually slightly simplified and incorrect, as it does not account for cross terms in the CLIP contrastive loss. 
As a refresher, we first state the \"vanilla\" CLIP loss function, " + }, + { + "bbox": [ + 68, + 254, + 541, + 359 + ], + "type": "inline_equation", + "content": "\\ell" + }, + { + "bbox": [ + 68, + 254, + 541, + 359 + ], + "type": "text", + "content": ", as it is defined in [RKH+21]. Letting " + }, + { + "bbox": [ + 68, + 254, + 541, + 359 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 68, + 254, + 541, + 359 + ], + "type": "text", + "content": " be the batch size and " + }, + { + "bbox": [ + 68, + 254, + 541, + 359 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 68, + 254, + 541, + 359 + ], + "type": "text", + "content": " be the embedding dimension, and " + }, + { + "bbox": [ + 68, + 254, + 541, + 359 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 68, + 254, + 541, + 359 + ], + "type": "text", + "content": " be the training batch at timestep " + }, + { + "bbox": [ + 68, + 254, + 541, + 359 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 68, + 254, + 541, + 359 + ], + "type": "text", + "content": ". Recall that the CLIP model internally has two \"submodules\": and image embedder, and a text embedder. We then use these to obtain image embeddings " + }, + { + "bbox": [ + 68, + 254, + 541, + 359 + ], + "type": "inline_equation", + "content": "E_{I} \\in \\mathbb{R}^{b \\times d}" + }, + { + "bbox": [ + 68, + 254, + 541, + 359 + ], + "type": "text", + "content": " and text embeddings " + }, + { + "bbox": [ + 68, + 254, + 541, + 359 + ], + "type": "inline_equation", + "content": "E_{T} \\in \\mathbb{R}^{b \\times d}" + }, + { + "bbox": [ + 68, + 254, + 541, + 359 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 68, + 254, + 541, + 359 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 68, + 254, + 541, + 359 + ], + "type": "text", + "content": ". 
We then compute the image-wise scores, or logits, for this batch as " + }, + { + "bbox": [ + 68, + 254, + 541, + 359 + ], + "type": "inline_equation", + "content": "S = E_{I}E_{T}^{\\top}^{3}" + }, + { + "bbox": [ + 68, + 254, + 541, + 359 + ], + "type": "text", + "content": ". Then, we can define the CLIP loss (as a function of the logits) as" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 245, + 359, + 365, + 382 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 245, + 359, + 365, + 382 + ], + "spans": [ + { + "bbox": [ + 245, + 359, + 365, + 382 + ], + "type": "interline_equation", + "content": "L (S) = \\frac {1}{2} (L _ {I} (S) + L _ {T} (S)),", + "image_path": "2d84dbd682390cd2231744dd8853e9648b3b71fba726778d07a6907663c26341.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 68, + 386, + 436, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 386, + 436, + 399 + ], + "spans": [ + { + "bbox": [ + 68, + 386, + 436, + 399 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 68, + 386, + 436, + 399 + ], + "type": "inline_equation", + "content": "L_{I}" + }, + { + "bbox": [ + 68, + 386, + 436, + 399 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 68, + 386, + 436, + 399 + ], + "type": "inline_equation", + "content": "L_{T}" + }, + { + "bbox": [ + 68, + 386, + 436, + 399 + ], + "type": "text", + "content": " are row-wise and column-wise cross-entropy losses, respectively:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 145, + 407, + 463, + 441 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 407, + 463, + 441 + ], + "spans": [ + { + "bbox": [ + 145, + 407, + 463, + 441 + ], + "type": "interline_equation", + "content": "L _ {I} (S) = \\sum_ {i = 1} ^ {b} \\log \\left(\\frac {\\exp (S _ {i , i})}{\\sum_ {j = 1} ^ {b} \\exp (S _ {i , j})}\\right), \\quad L _ {T} (S) = \\sum_ {i = 1} ^ {b} \\log \\left(\\frac {\\exp 
(S _ {i , i})}{\\sum_ {j = 1} ^ {b} \\exp (S _ {j , i})}\\right).", + "image_path": "9a290638571b9b78e345f3a543e518242910b82563f2b50d1dbb4658ebbf5b4c.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 68, + 450, + 540, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 450, + 540, + 487 + ], + "spans": [ + { + "bbox": [ + 68, + 450, + 540, + 487 + ], + "type": "text", + "content": "We now wish to relax " + }, + { + "bbox": [ + 68, + 450, + 540, + 487 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 68, + 450, + 540, + 487 + ], + "type": "text", + "content": " into a new function " + }, + { + "bbox": [ + 68, + 450, + 540, + 487 + ], + "type": "inline_equation", + "content": "L'" + }, + { + "bbox": [ + 68, + 450, + 540, + 487 + ], + "type": "text", + "content": " that supports an additional input " + }, + { + "bbox": [ + 68, + 450, + 540, + 487 + ], + "type": "inline_equation", + "content": "\\mathbf{z} \\in \\mathbb{R}^n" + }, + { + "bbox": [ + 68, + 450, + 540, + 487 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 68, + 450, + 540, + 487 + ], + "type": "inline_equation", + "content": "\\frac{\\partial L'}{\\partial \\mathbf{z}}" + }, + { + "bbox": [ + 68, + 450, + 540, + 487 + ], + "type": "text", + "content": " resembles the metagradients with respect to dataweights. 
In order to do this, we imagine expanding passing the entire dataset " + }, + { + "bbox": [ + 68, + 450, + 540, + 487 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 68, + 450, + 540, + 487 + ], + "type": "text", + "content": " into our embedder to obtain " + }, + { + "bbox": [ + 68, + 450, + 540, + 487 + ], + "type": "inline_equation", + "content": "E_I'" + }, + { + "bbox": [ + 68, + 450, + 540, + 487 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 68, + 450, + 540, + 487 + ], + "type": "inline_equation", + "content": "E_{T'}'" + }, + { + "bbox": [ + 68, + 450, + 540, + 487 + ], + "type": "text", + "content": " and take our new logits " + }, + { + "bbox": [ + 68, + 450, + 540, + 487 + ], + "type": "inline_equation", + "content": "S' = E_I'E_T'^{\\top} \\in \\mathbb{R}^{n \\times n}" + }, + { + "bbox": [ + 68, + 450, + 540, + 487 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 68, + 487, + 541, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 487, + 541, + 534 + ], + "spans": [ + { + "bbox": [ + 68, + 487, + 541, + 534 + ], + "type": "text", + "content": "There are some additional key conditions our relaxation " + }, + { + "bbox": [ + 68, + 487, + 541, + 534 + ], + "type": "inline_equation", + "content": "L'" + }, + { + "bbox": [ + 68, + 487, + 541, + 534 + ], + "type": "text", + "content": " should satisfy. 
Particularly: when " + }, + { + "bbox": [ + 68, + 487, + 541, + 534 + ], + "type": "inline_equation", + "content": "\\mathbf{z} = \\mathbf{0}_n" + }, + { + "bbox": [ + 68, + 487, + 541, + 534 + ], + "type": "text", + "content": ", we should recover the normal CLIP loss " + }, + { + "bbox": [ + 68, + 487, + 541, + 534 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 68, + 487, + 541, + 534 + ], + "type": "text", + "content": ", and when " + }, + { + "bbox": [ + 68, + 487, + 541, + 534 + ], + "type": "inline_equation", + "content": "\\mathbf{z}" + }, + { + "bbox": [ + 68, + 487, + 541, + 534 + ], + "type": "text", + "content": " is all 0's except for a single entry " + }, + { + "bbox": [ + 68, + 487, + 541, + 534 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 68, + 487, + 541, + 534 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 68, + 487, + 541, + 534 + ], + "type": "inline_equation", + "content": "L'" + }, + { + "bbox": [ + 68, + 487, + 541, + 534 + ], + "type": "text", + "content": " should act as if " + }, + { + "bbox": [ + 68, + 487, + 541, + 534 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 68, + 487, + 541, + 534 + ], + "type": "text", + "content": " had been appended to the original batch " + }, + { + "bbox": [ + 68, + 487, + 541, + 534 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 68, + 487, + 541, + 534 + ], + "type": "text", + "content": ". 
In addition, " + }, + { + "bbox": [ + 68, + 487, + 541, + 534 + ], + "type": "inline_equation", + "content": "L'" + }, + { + "bbox": [ + 68, + 487, + 541, + 534 + ], + "type": "text", + "content": " should always have meaningful partials with respect to " + }, + { + "bbox": [ + 68, + 487, + 541, + 534 + ], + "type": "inline_equation", + "content": "\\mathbf{z}" + }, + { + "bbox": [ + 68, + 487, + 541, + 534 + ], + "type": "text", + "content": ", even when some values in " + }, + { + "bbox": [ + 68, + 487, + 541, + 534 + ], + "type": "inline_equation", + "content": "\\mathbf{z}" + }, + { + "bbox": [ + 68, + 487, + 541, + 534 + ], + "type": "text", + "content": " are 0." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 68, + 535, + 541, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 535, + 541, + 559 + ], + "spans": [ + { + "bbox": [ + 68, + 535, + 541, + 559 + ], + "type": "text", + "content": "Letting " + }, + { + "bbox": [ + 68, + 535, + 541, + 559 + ], + "type": "inline_equation", + "content": "\\mathbf{1}_{i = j}" + }, + { + "bbox": [ + 68, + 535, + 541, + 559 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 68, + 535, + 541, + 559 + ], + "type": "inline_equation", + "content": "\\mathbf{1}_{i\\neq j}" + }, + { + "bbox": [ + 68, + 535, + 541, + 559 + ], + "type": "text", + "content": " be indicator variables and letting " + }, + { + "bbox": [ + 68, + 535, + 541, + 559 + ], + "type": "inline_equation", + "content": "\\mathbf{1}_k\\in \\{0,1\\} ^n" + }, + { + "bbox": [ + 68, + 535, + 541, + 559 + ], + "type": "text", + "content": " be the indicator vector for the " + }, + { + "bbox": [ + 68, + 535, + 541, + 559 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 68, + 535, + 541, + 559 + ], + "type": "text", + "content": " -th batch, we find that the definition" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 233, + 559, + 375, + 572 + ], + "type": "interline_equation", + "angle": 
0, + "lines": [ + { + "bbox": [ + 233, + 559, + 375, + 572 + ], + "spans": [ + { + "bbox": [ + 233, + 559, + 375, + 572 + ], + "type": "interline_equation", + "content": "L ^ {\\prime} \\left(S ^ {\\prime}, \\mathbf {z}\\right) = L _ {I} ^ {\\prime} \\left(S ^ {\\prime}, \\mathbf {z}\\right) + L _ {T} ^ {\\prime} \\left(S ^ {\\prime}, \\mathbf {z}\\right),", + "image_path": "7192b26eb08c7aecbd682eeca7dfd8fe1edf185435ebe5ea3482c61644ac07e2.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 578, + 99, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 578, + 99, + 588 + ], + "spans": [ + { + "bbox": [ + 69, + 578, + 99, + 588 + ], + "type": "text", + "content": "where" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 146, + 586, + 460, + 619 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 146, + 586, + 460, + 619 + ], + "spans": [ + { + "bbox": [ + 146, + 586, + 460, + 619 + ], + "type": "interline_equation", + "content": "L _ {I} ^ {\\prime} \\left(S ^ {\\prime}, \\mathbf {z}\\right) = \\sum_ {i = 1} ^ {n} \\left(z _ {i} + \\left(\\mathbf {1} _ {k}\\right) _ {i}\\right) \\log \\left(\\frac {\\exp \\left(S _ {i , i} ^ {\\prime}\\right)}{\\sum_ {j = 1} ^ {n} \\exp \\left(S _ {i , j} ^ {\\prime}\\right) \\left(\\mathbf {1} _ {i = j} + \\mathbf {1} _ {i \\neq j} \\left(z _ {j} + \\left(\\mathbf {1} _ {k}\\right) _ {j}\\right)\\right)}\\right)", + "image_path": "6c10546608a77c03434a358377fe9f99d67f5f521f84b43c195a9db66ce3fe3a.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 69, + 624, + 89, + 634 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 624, + 89, + 634 + ], + "spans": [ + { + "bbox": [ + 69, + 624, + 89, + 634 + ], + "type": "text", + "content": "and" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 145, + 632, + 461, + 665 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 632, + 461, + 665 + ], + 
"spans": [ + { + "bbox": [ + 145, + 632, + 461, + 665 + ], + "type": "interline_equation", + "content": "L _ {T} ^ {\\prime} (S ^ {\\prime}, \\mathbf {z}) = \\sum_ {i = 1} ^ {b} \\left(z _ {i} + \\left(\\mathbf {1} _ {k}\\right) _ {i}\\right) \\log \\left(\\frac {\\exp \\left(S _ {i , i} ^ {\\prime}\\right)}{\\sum_ {j = 1} ^ {n} \\exp \\left(S _ {j , i} ^ {\\prime}\\right) \\left(\\mathbf {1} _ {i = j} + \\mathbf {1} _ {i \\neq j} \\left(z _ {j} + \\left(\\mathbf {1} _ {k}\\right) _ {j}\\right)\\right)}\\right)", + "image_path": "168b7e1a3985608aa32a7de01e5cada2b24b954a4e045cde1ad9fe397a71c91e.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 69, + 669, + 178, + 681 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 669, + 178, + 681 + ], + "spans": [ + { + "bbox": [ + 69, + 669, + 178, + 681 + ], + "type": "text", + "content": "satisfy these conditions." + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 68, + 689, + 541, + 710 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 689, + 541, + 710 + ], + "spans": [ + { + "bbox": [ + 68, + 689, + 541, + 710 + ], + "type": "text", + "content": "3The CLIP model scales these logits by a temperature parameter " + }, + { + "bbox": [ + 68, + 689, + 541, + 710 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 68, + 689, + 541, + 710 + ], + "type": "text", + "content": " before applying the softmax. While we omit " + }, + { + "bbox": [ + 68, + 689, + 541, + 710 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 68, + 689, + 541, + 710 + ], + "type": "text", + "content": " in our definitions, it can be easily incorporated. All our experiments use temperature scaling." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "30" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 29 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 542, + 110 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 542, + 110 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 542, + 110 + ], + "type": "text", + "content": "Finally, we let define the loss for the entire batch " + }, + { + "bbox": [ + 67, + 72, + 542, + 110 + ], + "type": "inline_equation", + "content": "\\ell'" + }, + { + "bbox": [ + 67, + 72, + 542, + 110 + ], + "type": "text", + "content": " as a function of " + }, + { + "bbox": [ + 67, + 72, + 542, + 110 + ], + "type": "inline_equation", + "content": "\\mathbf{z}" + }, + { + "bbox": [ + 67, + 72, + 542, + 110 + ], + "type": "text", + "content": " and model parameters " + }, + { + "bbox": [ + 67, + 72, + 542, + 110 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 67, + 72, + 542, + 110 + ], + "type": "text", + "content": " which outputs the loss calculated according to " + }, + { + "bbox": [ + 67, + 72, + 542, + 110 + ], + "type": "inline_equation", + "content": "L'" + }, + { + "bbox": [ + 67, + 72, + 542, + 110 + ], + "type": "text", + "content": " above. 
To summarize, letting " + }, + { + "bbox": [ + 67, + 72, + 542, + 110 + ], + "type": "inline_equation", + "content": "\\mathbf{x}^{(t)}" + }, + { + "bbox": [ + 67, + 72, + 542, + 110 + ], + "type": "text", + "content": " denote the " + }, + { + "bbox": [ + 67, + 72, + 542, + 110 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 67, + 72, + 542, + 110 + ], + "type": "text", + "content": "-th training batch, the loss function " + }, + { + "bbox": [ + 67, + 72, + 542, + 110 + ], + "type": "inline_equation", + "content": "\\ell_t" + }, + { + "bbox": [ + 67, + 72, + 542, + 110 + ], + "type": "text", + "content": " at step " + }, + { + "bbox": [ + 67, + 72, + 542, + 110 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 67, + 72, + 542, + 110 + ], + "type": "text", + "content": " of our surrogate learning algorithm " + }, + { + "bbox": [ + 67, + 72, + 542, + 110 + ], + "type": "inline_equation", + "content": "\\mathcal{A}'" + }, + { + "bbox": [ + 67, + 72, + 542, + 110 + ], + "type": "text", + "content": " for CLIP training is:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 239, + 117, + 371, + 149 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 239, + 117, + 371, + 149 + ], + "spans": [ + { + "bbox": [ + 239, + 117, + 371, + 149 + ], + "type": "interline_equation", + "content": "\\ell_ {t} ^ {\\prime} (\\theta) := \\left\\{ \\begin{array}{l l} \\ell (\\mathbf {x} ^ {(t)}; \\theta) & \\text {i f} t \\neq k \\\\ \\ell^ {\\prime} (\\mathbf {z}; \\theta) & \\text {i f} t = k. 
\\end{array} \\right.", + "image_path": "8f3d63cc5291ccdaf6cff091223516c32e3aa9bda6f62ef719868f5a4defa364.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 154, + 544, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 154, + 544, + 179 + ], + "spans": [ + { + "bbox": [ + 67, + 154, + 544, + 179 + ], + "type": "text", + "content": "We find that this empirically works well for obtaining meaningful metagradients with respect to dataweights in the CLIP setting, and yields to strong dataset selection results." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 193, + 313, + 209 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 193, + 313, + 209 + ], + "spans": [ + { + "bbox": [ + 67, + 193, + 313, + 209 + ], + "type": "text", + "content": "C.2 Scaling MGD for CLIP and DataComp" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 213, + 541, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 213, + 541, + 275 + ], + "spans": [ + { + "bbox": [ + 67, + 213, + 541, + 275 + ], + "type": "text", + "content": "MGD is highly scalable, allowing it to be applied to large-scale settings like training CLIP models. In particular, computing metagratings is only up to a constant factor more expensive than training a model normally. Here, we outline challenges we faced in scaling MGD in this setting, and how they were resolved. Specifically, we will explain how we efficiently calculated metagratings for CLIP models and efficiently tracked/shuffled our dataset selection from step-to-step despite its large storage footprint." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 288, + 541, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 288, + 541, + 373 + ], + "spans": [ + { + "bbox": [ + 67, + 288, + 541, + 373 + ], + "type": "text", + "content": "Computing metagradient. 
Due to the large batch size used in the CLIP contrastive loss, we implement manual gradient checkpointing to make the operations computationally feasible on our hardware. The most memory-intensive operation are model forward passes (and its gradients): obtaining the image and label embeddings given raw pixel data and tokens. So, we manually make gradient checkpoints before this operation, allowing us to run the embedder in minibatches to avoid memory issues. This setup also naturally lends itself to parallelization across multiple GPU's, which we make use of to further speed up our computations." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 386, + 541, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 386, + 541, + 506 + ], + "spans": [ + { + "bbox": [ + 67, + 386, + 541, + 506 + ], + "type": "text", + "content": "Loading, writing, and storing data. Due to the data-intensive nature of training large models like CLIP and our need to frequently produce new datasets at each optimization step, we found that using the web-dataset [Web24] format given by DataComp was restrictively slow. To circumvent this, we rewrote all data following the format of FFCV [LIE+22], allowing us to load and write data much faster. Specifically, we divided the entire candidate pool into 8 base shards. Once we trained a model, we choose one of the 8 shards, compute metagradient corresponding to all datapoints in the shard, take a gradient step on them, and rewrite the shard. This roughly corresponds to " + }, + { + "bbox": [ + 67, + 386, + 541, + 506 + ], + "type": "inline_equation", + "content": "p = \\frac{1}{8}" + }, + { + "bbox": [ + 67, + 386, + 541, + 506 + ], + "type": "text", + "content": " in Algorithm 1, which we empirically worked well for optimizing. 
In following steps, we always choose one of the 8 original shards to calculate metagradient for—this ensures that points removed from the dataset in some optimization step can return if they have a negative metagradient." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 507, + 541, + 603 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 507, + 541, + 603 + ], + "spans": [ + { + "bbox": [ + 67, + 507, + 541, + 603 + ], + "type": "text", + "content": "We also observed that always stepping on the sign causes the sizes of the shards to grow over time: stepping based on the sign of the metagradient does not decrease the weight on a positive-weight datapoint if its dataweight is already 0, so our steps are biased towards increasing the size of the shards. To combat this blowup, after some number of optimization steps, we choose a fixed shard size and enforce that subsequent steps must not change the size of the shards—the step size thereafter is controlled by hyperparameter " + }, + { + "bbox": [ + 67, + 507, + 541, + 603 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 67, + 507, + 541, + 603 + ], + "type": "text", + "content": " representing the fraction of datapoints in a shard which are incremented. We experimented both with randomly sampling which points are added or removed, and stepping on the datapoints with the top " + }, + { + "bbox": [ + 67, + 507, + 541, + 603 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 67, + 507, + 541, + 603 + ], + "type": "text", + "content": " and bottom " + }, + { + "bbox": [ + 67, + 507, + 541, + 603 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 67, + 507, + 541, + 603 + ], + "type": "text", + "content": " metagradient; the latter seems to give empirically better performance." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 603, + 539, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 603, + 539, + 662 + ], + "spans": [ + { + "bbox": [ + 67, + 603, + 539, + 662 + ], + "type": "text", + "content": "To maintain randomness during shuffling, we implement an 8-way dataloader which would shuffle all 8 shards individually. Then, to sample a batch of " + }, + { + "bbox": [ + 67, + 603, + 539, + 662 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 67, + 603, + 539, + 662 + ], + "type": "text", + "content": " datapoints, we would sample " + }, + { + "bbox": [ + 67, + 603, + 539, + 662 + ], + "type": "inline_equation", + "content": "b / 8" + }, + { + "bbox": [ + 67, + 603, + 539, + 662 + ], + "type": "text", + "content": " datapoints from each shard and concatenate them to fill our batch. This works better than simply sampling our entire batch from a single shard, as (especially in later optimization steps) shards may contain a high number of duplicate datapoints, which causes CLIP's contrastive loss function to misbehave if they appear in the same batch." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 662, + 541, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 662, + 541, + 723 + ], + "spans": [ + { + "bbox": [ + 67, + 662, + 541, + 723 + ], + "type": "text", + "content": "To minimize disk space used, old shards can be deleted once they become \"stale\". Specifically, if shard s is rewritten into shard " + }, + { + "bbox": [ + 67, + 662, + 541, + 723 + ], + "type": "inline_equation", + "content": "s'" + }, + { + "bbox": [ + 67, + 662, + 541, + 723 + ], + "type": "text", + "content": ", all future optimization steps will never read s again, and s can safely be deleted. 
Thus, when running MGD for a large number of steps and potentially rewriting each shard multiple times, the total disk space used by our algorithm is constant in the number of steps we take: it stores the 8 most recently written shards on disk at any given time, and any other shards are deleted to save space." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "31" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 30 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 71, + 362, + 86 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 71, + 362, + 86 + ], + "spans": [ + { + "bbox": [ + 69, + 71, + 362, + 86 + ], + "type": "text", + "content": "C.3 Details Pertaining to the DataComp Benchmark" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 91, + 541, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 91, + 541, + 163 + ], + "spans": [ + { + "bbox": [ + 67, + 91, + 541, + 163 + ], + "type": "text", + "content": "Setting. We provide a brief summary of the DataComp competition here, and we refer readers to the original paper [GIF+24]. DataComp is a framework to compare different training dataset selection techniques. Participants submit a training dataset (which, for our purposes, is a subset of a larger dataset), upon which a CLIP model is trained from scratch with a fixed learning algorithm, model architecture, and number of training steps. We focus on DataComp-small, which has a candidate pool of 12.8 million samples. The number of training steps in this case is also fixed at 12.8 million samples." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 68, + 163, + 541, + 187 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 163, + 541, + 187 + ], + "spans": [ + { + "bbox": [ + 68, + 163, + 541, + 187 + ], + "type": "text", + "content": "We try to match the optimization hyperparameters enforced by DataComp as closely as possible. As a refresher, our ADAM[KB15] update step can be written as" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 209, + 196, + 541, + 212 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 209, + 196, + 541, + 212 + ], + "spans": [ + { + "bbox": [ + 209, + 196, + 541, + 212 + ], + "type": "interline_equation", + "content": "\\theta_ {t + 1} = - \\alpha_ {t} \\cdot \\left(m _ {t} / \\left(\\sqrt {v _ {t} + \\varepsilon_ {\\mathrm {r o o t}}} + \\varepsilon\\right) + \\lambda \\theta_ {t}\\right) \\tag {12}", + "image_path": "42f5fec1da7593b8a9cc14368f981db0362526c501634b3dc3bde2e396ff808c.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 219, + 541, + 278 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 219, + 541, + 278 + ], + "spans": [ + { + "bbox": [ + 67, + 219, + 541, + 278 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 67, + 219, + 541, + 278 + ], + "type": "inline_equation", + "content": "m_{t}" + }, + { + "bbox": [ + 67, + 219, + 541, + 278 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 219, + 541, + 278 + ], + "type": "inline_equation", + "content": "v_{t}" + }, + { + "bbox": [ + 67, + 219, + 541, + 278 + ], + "type": "text", + "content": " are running estimates of the first and second moments of the gradients, respectively, " + }, + { + "bbox": [ + 67, + 219, + 541, + 278 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 67, + 219, + 541, + 278 + ], + "type": "text", + "content": " represents weight decay, " + }, + { + "bbox": [ + 67, + 219, + 541, + 278 
+ ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 67, + 219, + 541, + 278 + ], + "type": "text", + "content": " represents the learning rate, and " + }, + { + "bbox": [ + 67, + 219, + 541, + 278 + ], + "type": "inline_equation", + "content": "\\varepsilon" + }, + { + "bbox": [ + 67, + 219, + 541, + 278 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 219, + 541, + 278 + ], + "type": "inline_equation", + "content": "\\varepsilon_{\\mathrm{root}}" + }, + { + "bbox": [ + 67, + 219, + 541, + 278 + ], + "type": "text", + "content": " are hyperparameters to avoid blowup. Our training hyperparameters can be found in Table 1 and are identical to those mandated by DataComp-small, aside from a positive " + }, + { + "bbox": [ + 67, + 219, + 541, + 278 + ], + "type": "inline_equation", + "content": "\\varepsilon_{\\mathrm{root}}" + }, + { + "bbox": [ + 67, + 219, + 541, + 278 + ], + "type": "text", + "content": " added for numerical stability. The values of " + }, + { + "bbox": [ + 67, + 219, + 541, + 278 + ], + "type": "inline_equation", + "content": "\\varepsilon_{\\mathrm{root}}" + }, + { + "bbox": [ + 67, + 219, + 541, + 278 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 219, + 541, + 278 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 67, + 219, + 541, + 278 + ], + "type": "text", + "content": " (the step at which metagradients are calculated) were chosen to empirically maximize metasmoothness." + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 217, + 310, + 389, + 494 + ], + "blocks": [ + { + "bbox": [ + 162, + 289, + 447, + 303 + ], + "lines": [ + { + "bbox": [ + 162, + 289, + 447, + 303 + ], + "spans": [ + { + "bbox": [ + 162, + 289, + 447, + 303 + ], + "type": "text", + "content": "Table 1: Hyperparameters for the CLIP DataComp experiments." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 217, + 310, + 389, + 494 + ], + "lines": [ + { + "bbox": [ + 217, + 310, + 389, + 494 + ], + "spans": [ + { + "bbox": [ + 217, + 310, + 389, + 494 + ], + "type": "table", + "html": "
HyperparameterValue
DataComp Scalesmall
ModelViT-B/32
Train compute (MACs)9.5 × 1016
Pool size12.8M
# samples seen12.8M
Batch size4096
Training batches3125
k2800
Learning rate5 × 10-4
AdamW β10.9
AdamW β20.98
AdamW εroot1 × 10-17
Warmup500
", + "image_path": "1d53986854c1c4e6619d16a37b54e29fc0b88bdf3520d589c05111d30ec1c831.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 505, + 541, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 505, + 541, + 589 + ], + "spans": [ + { + "bbox": [ + 67, + 505, + 541, + 589 + ], + "type": "text", + "content": "Our experiments are also run on an incomplete subset of the entire DataComp candidate pool. DataComp did not store the raw image and text files when assembling their dataset; they only stored a list of URL's to download data from. Due to the nature of the internet, for various reasons, some of these URL's no longer point to the same data (or no longer point to any data at all). Thus, after ignoring these broken links, our candidate pool is only around " + }, + { + "bbox": [ + 67, + 505, + 541, + 589 + ], + "type": "inline_equation", + "content": "80\\%" + }, + { + "bbox": [ + 67, + 505, + 541, + 589 + ], + "type": "text", + "content": " of the size of the original DataComp candidate pool when it was collected in 2023. All our results are obtained by running our methods on this subset of the DataComp pool." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 604, + 541, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 604, + 541, + 687 + ], + "spans": [ + { + "bbox": [ + 67, + 604, + 541, + 687 + ], + "type": "text", + "content": "Evaluation tasks. In order to ensure that our method is truly improving trained models' performances on the entire target distribution and not overfitting to the target set, for each of the 38 evaluation tasks used by DataComp, we attempted to separately create a disjoint target and validation set (DataComp only creates test sets for each task). 
Thus, metagradients were computed on the target sets and model performance was evaluated on the validation set, before submitting with the official DataComp script and evaluating on the test sets. This ensures that our method's generalization ability is being evaluated, and we are not overfitting to our target set." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 68, + 688, + 540, + 711 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 688, + 540, + 711 + ], + "spans": [ + { + "bbox": [ + 68, + 688, + 540, + 711 + ], + "type": "text", + "content": "For various reasons, creating target splits was not possible for all 38 tasks; we summarize our setup in Table 2." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "32" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 31 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 70, + 105, + 541, + 430 + ], + "blocks": [ + { + "bbox": [ + 69, + 70, + 541, + 95 + ], + "lines": [ + { + "bbox": [ + 69, + 70, + 541, + 95 + ], + "spans": [ + { + "bbox": [ + 69, + 70, + 541, + 95 + ], + "type": "text", + "content": "Table 2: All DataComp evaluation tasks. The \"Target set\" column refers to whether metagradients were taken on the target set corresponding to this dataset." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 70, + 105, + 541, + 430 + ], + "lines": [ + { + "bbox": [ + 70, + 105, + 541, + 430 + ], + "spans": [ + { + "bbox": [ + 70, + 105, + 541, + 430 + ], + "type": "table", + "html": "
DatasetTaskTest sizeTrain sizeVal sizeMain metricTarget set
Caltech-101 [FFP04]Object recognition60852754306mean per class
CIFAR-10 [Kri09]Visual recognition10000450005000accuracy
CIFAR-100 [Kri09]Visual recognition10000450005000accuracy
CLEVR Counts [JHV+17; ZPK+19]Counting15000650005000accuracy
CLEVR Distance [JHV+17; ZPK+19]Distance prediction15000650005000accuracy
Country211 [RKH+21; TSF+16]Geolocation21100379804220accuracy
DTD [CMK+14]Texture classification18803384376accuracy
EuroSAT [HBD+19; ZPK+19]Satellite imagery recognition5400194402160accuracy
FGVC Aircraft [MRK+13]Aircraft recognition33336001666mean per class
Food-101 [BGV14]Food recognition25250707505000accuracy
GTSRB [SSS+11]Traffic sign recognition12630352893920accuracy
ImageNet 1k [DDS+09]Visual recognition5000012761675000accuracy
ImageNet Sketch [WGX+19]Visual recognition50889N/AN/Aaccuracy*
ImageNet V2 [RKS+19]Visual recognition10000N/AN/Aaccuracy*
ImageNet-A [HZB+19]Visual recognition7500N/AN/Aaccuracy*
ImageNet-O [HZB+19]Visual recognition2000N/AN/Aaccuracy*
ImageNet-R [HBM+20]Visual recognition30000N/AN/Aaccuracy*
KITTI distance [GLU12; ZPK+19]Distance prediction711N/AN/Aaccuracy
MNIST [LeC98]Digit recognition10000550005000accuracy
ObjectNet [BMA+19]Visual recognition18574N/AN/Aaccuracy*
Oxford Flowers-102 [NZ08]Flower recognition61491836204mean per class
Oxford-IIIT Pet [PVZ+12; ZPK+19]Pet classification36693312368mean per class
Pascal VOC 2007 [EVW+10]Object recognition14976140961566accuracy
PatchCamelyon [VLW+18; ZPK+19]Metastatic tissue cls.327682899125000accuracy
Rendered SST2 [ZPK+19]Sentiment classification18217013779accuracy
RESISC45 [CHL17; ZPK+19]Satellite imagery recognition6300226802520accuracy
Stanford Cars [KSD+13]Vehicle recognition80417329814accuracy
STL-10 [CNL11]Visual recognition80004500500accuracy
SUN-397 [XHE+10]Scene recognition108753N/AN/Aaccuracy
SVHN [NWC+11; ZPK+19]Digit recognition26032682575000accuracy
iWildCam [BAC+21; KSM+20]Animal recognition427911470845000macro F1 score
Camelyon17 [BGM+18; KSM+20]Metastatic tissue cls.850543659005000accuracy
FMoW [CFW+18; KSM+20]Satellite imagery recognition221081032615000worst-region acc.
Dollar Street [RDK+22]Object recognition3503138421537worst-income top-5 acc.
GeoDE [RLZ+24]Object recognition12438444884943worst-region acc.
Flickr30k [YLH+14]Image and text retrieval31014N/AN/AR@1§
MSCOCO [LMB+14]Image and text retrieval5000N/AN/AR@1§
WinoGAViL [BBY+22]Commonsense association3563N/AN/AJaccard score§
", + "image_path": "bbd757943fe4c1860ea2472611269a98d62befd2854367d7ed52cbad920bad35.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 80, + 682, + 423, + 693 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 682, + 423, + 693 + ], + "spans": [ + { + "bbox": [ + 80, + 682, + 423, + 693 + ], + "type": "text", + "content": "*No train or val set exists for this dataset, so we were unable to create disjoint target and val sets." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 81, + 693, + 307, + 700 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 693, + 307, + 700 + ], + "spans": [ + { + "bbox": [ + 81, + 693, + 307, + 700 + ], + "type": "text", + "content": "We were unable to use this dataset due to technical difficulties." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 81, + 702, + 539, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 702, + 539, + 712 + ], + "spans": [ + { + "bbox": [ + 81, + 702, + 539, + 712 + ], + "type": "text", + "content": "Both the train and val sets were used by DataComp to make their test set, so we were unable to create disjoint target and val sets." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 81, + 712, + 257, + 722 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 712, + 257, + 722 + ], + "spans": [ + { + "bbox": [ + 81, + 712, + 257, + 722 + ], + "type": "inline_equation", + "content": "{}^{S}" + }, + { + "bbox": [ + 81, + 712, + 257, + 722 + ], + "type": "text", + "content": " Retrieval tasks were not used for metagradients." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "33" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 32 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 69, + 218, + 87 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 69, + 218, + 87 + ], + "spans": [ + { + "bbox": [ + 69, + 69, + 218, + 87 + ], + "type": "text", + "content": "D Selecting IFT data" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 95, + 541, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 95, + 541, + 120 + ], + "spans": [ + { + "bbox": [ + 67, + 95, + 541, + 120 + ], + "type": "text", + "content": "In this section, we describe the details of the IFT setting of Xia et al. [XMG+24], as well as the details of our method." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 133, + 541, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 133, + 541, + 196 + ], + "spans": [ + { + "bbox": [ + 67, + 133, + 541, + 196 + ], + "type": "text", + "content": "Setting. The setting contains a fixed data pool: instruction fine-tuning data from a data pool consisting of four combined IFT datasets (cf. Table 4 and Xia et al. [XMG+24] for more information). The goal is to select the data that yields the best possible task performance for a LoRA fine-tuning run. We adapt a LoRA to a Gemma-2B model (the pretraining-only Gemma-2B model) using the LoRA configuration from Xia et al. [XMG+24]." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 209, + 541, + 259 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 209, + 541, + 259 + ], + "spans": [ + { + "bbox": [ + 67, + 209, + 541, + 259 + ], + "type": "text", + "content": "Data splits. See Table 3 for a description of the available data for each task, along with the task setup details. Xia et al. [XMG+24] constructed these extra samples by drawing from the ICL samples given in the tasks originally. Note that we drop TydiQA from the original work of Xia et al. [XMG+24] as there are not enough samples to select with (there is only one from each category, for a total of 7)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 272, + 541, + 368 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 272, + 541, + 368 + ], + "spans": [ + { + "bbox": [ + 67, + 272, + 541, + 368 + ], + "type": "text", + "content": "Method. We execute Algorithm 1 with " + }, + { + "bbox": [ + 67, + 272, + 541, + 368 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 67, + 272, + 541, + 368 + ], + "type": "text", + "content": " as 150 steps from the end of training and the Bernoulli parameter " + }, + { + "bbox": [ + 67, + 272, + 541, + 368 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 67, + 272, + 541, + 368 + ], + "type": "text", + "content": " controlling the step size as 0.2. At each step, we choose a \"minibatch\" with a size equal to half the target set and a quarter of the target set for BBH and MMLU, respectively (that is, we only select to optimize performance on a fraction of the target set at a time). We model select over iterates and hyperparameters by (a) choosing the top three steps in terms of validation loss for each run (b) selecting the best one in terms of full train set accuracy (including the part that we trained on). 
We perform this procedure—akin to Pareto optimization [JS08]—because the validation set is so small (as the overall set of samples is very small) that it is difficult to select models without overfitting otherwise." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 368, + 541, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 368, + 541, + 403 + ], + "spans": [ + { + "bbox": [ + 67, + 368, + 541, + 403 + ], + "type": "text", + "content": "We compare with two baselines: training on the full dataset (i.e., training on the entirety of all the data for a single epoch), and LESS (we use the data selected according to \"LESS-T\" [XMG+24], following the recommendation of 4 epochs)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 403, + 541, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 403, + 541, + 536 + ], + "spans": [ + { + "bbox": [ + 67, + 403, + 541, + 536 + ], + "type": "text", + "content": "For model training, we train with ADAM (" + }, + { + "bbox": [ + 67, + 403, + 541, + 536 + ], + "type": "inline_equation", + "content": "\\beta_{1} = 0.95" + }, + { + "bbox": [ + 67, + 403, + 541, + 536 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 67, + 403, + 541, + 536 + ], + "type": "inline_equation", + "content": "\\beta_{2} = 0.975" + }, + { + "bbox": [ + 67, + 403, + 541, + 536 + ], + "type": "text", + "content": ", decoupled weight decay as " + }, + { + "bbox": [ + 67, + 403, + 541, + 536 + ], + "type": "inline_equation", + "content": "10^{-5}" + }, + { + "bbox": [ + 67, + 403, + 541, + 536 + ], + "type": "text", + "content": ") and a one-cycle linear schedule starting at " + }, + { + "bbox": [ + 67, + 403, + 541, + 536 + ], + "type": "inline_equation", + "content": "10^{-6}" + }, + { + "bbox": [ + 67, + 403, + 541, + 536 + ], + "type": "text", + "content": " of the maximum learning rate, reaching the peak over " + }, + { + "bbox": [ + 67, + 403, + 541, + 536 + ], + 
"type": "inline_equation", + "content": "25\\%" + }, + { + "bbox": [ + 67, + 403, + 541, + 536 + ], + "type": "text", + "content": " of training, then ending at 0.1 of the maximum learning rate. We insert a positive " + }, + { + "bbox": [ + 67, + 403, + 541, + 536 + ], + "type": "inline_equation", + "content": "\\varepsilon_{\\mathrm{root}}" + }, + { + "bbox": [ + 67, + 403, + 541, + 536 + ], + "type": "text", + "content": " into the inverse square root term in the ADAM update to prevent metagradient (and to a lesser extent update) blowup (see Eq. 12). The model training is the same across selected data, except that we use " + }, + { + "bbox": [ + 67, + 403, + 541, + 536 + ], + "type": "inline_equation", + "content": "\\varepsilon_{\\mathrm{root}} = 10^{-7}" + }, + { + "bbox": [ + 67, + 403, + 541, + 536 + ], + "type": "text", + "content": " for MGD-selected data and " + }, + { + "bbox": [ + 67, + 403, + 541, + 536 + ], + "type": "inline_equation", + "content": "\\varepsilon_{\\mathrm{root}} = 10^{-9}" + }, + { + "bbox": [ + 67, + 403, + 541, + 536 + ], + "type": "text", + "content": " for the other runs (we select the optimal parameter for each class of method). We additionally hyperparameter select for the best learning rate across each baseline by minimizing validation set loss; LESS performs best with a smaller learning rate (0.00024 for BBH and 0.00012 for MMLU) than training on the full dataset or with MGD (0.0006 for both). We normalize the loss of each training sample by taking the mean across predicted tokens during training, and do not divide by the batch size (important for scaling the " + }, + { + "bbox": [ + 67, + 403, + 541, + 536 + ], + "type": "inline_equation", + "content": "\\varepsilon_{\\mathrm{root}}" + }, + { + "bbox": [ + 67, + 403, + 541, + 536 + ], + "type": "text", + "content": " term, but otherwise ADAM is invariant to the scale)." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 550, + 541, + 612 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 550, + 541, + 612 + ], + "spans": [ + { + "bbox": [ + 67, + 550, + 541, + 612 + ], + "type": "text", + "content": "Selecting smooth model training for MGD. For MGD runs, we jointly select learning rate and " + }, + { + "bbox": [ + 67, + 550, + 541, + 612 + ], + "type": "inline_equation", + "content": "\\varepsilon_{\\mathrm{root}}" + }, + { + "bbox": [ + 67, + 550, + 541, + 612 + ], + "type": "text", + "content": " using the smoothness metric of Section 3. We find that the choice of " + }, + { + "bbox": [ + 67, + 550, + 541, + 612 + ], + "type": "inline_equation", + "content": "\\varepsilon_{\\mathrm{root}}" + }, + { + "bbox": [ + 67, + 550, + 541, + 612 + ], + "type": "text", + "content": " term is important (just as the choice of " + }, + { + "bbox": [ + 67, + 550, + 541, + 612 + ], + "type": "inline_equation", + "content": "\\varepsilon" + }, + { + "bbox": [ + 67, + 550, + 541, + 612 + ], + "type": "text", + "content": " is important in standard ADAM training); choosing a much larger term results in non-smooth training. We also find that metagradients are sensitive to learning rate schedule; choosing a much larger or smaller maximum learning rate results in non-smooth training." + } + ] + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 94, + 643, + 517, + 695 + ], + "blocks": [ + { + "bbox": [ + 113, + 621, + 496, + 635 + ], + "lines": [ + { + "bbox": [ + 113, + 621, + 496, + 635 + ], + "spans": [ + { + "bbox": [ + 113, + 621, + 496, + 635 + ], + "type": "text", + "content": "Table 3: Overview of datasets used in IFT dataset selection (from Xia et al. [XMG+24])." 
+ } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 94, + 643, + 517, + 695 + ], + "lines": [ + { + "bbox": [ + 94, + 643, + 517, + 695 + ], + "spans": [ + { + "bbox": [ + 94, + 643, + 517, + 695 + ], + "type": "table", + "html": "
Dataset# Shot# Tasksn_targetn_valn_testAnswer TypeType of Task
MMLU5575722818,721Letter optionsKnowledge/Recall
BBH3232346920COT and answerReasoning
", + "image_path": "0ee01eb4fa01b3734087808afd61cf8ee91b915d35fec2c214b7e2bfd450099d.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "text", + "content": "34" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 33 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 70, + 91, + 582, + 167 + ], + "blocks": [ + { + "bbox": [ + 216, + 71, + 394, + 84 + ], + "lines": [ + { + "bbox": [ + 216, + 71, + 394, + 84 + ], + "spans": [ + { + "bbox": [ + 216, + 71, + 394, + 84 + ], + "type": "text", + "content": "Table 4: Details of IFT training datasets." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 70, + 91, + 582, + 167 + ], + "lines": [ + { + "bbox": [ + 70, + 91, + 582, + 167 + ], + "spans": [ + { + "bbox": [ + 70, + 91, + 582, + 167 + ], + "type": "table", + "html": "
Dataset# InstanceSourced fromPrompt Len.Completion Len.
FLAN V2100,000NLP datasets and human-written instructions355.731.2
CoT100,000NLP datasets and human-written CoTs26653.2
Dolly15,011Human-written from scratch118.191.3
Open Assistant 155,668Human-written from scratch34.8212.5
", + "image_path": "ff814a44df13818d805fb28d14ad88facd4bb1f5a2e442555905552236576ed4.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 186, + 122, + 197 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 186, + 122, + 197 + ], + "spans": [ + { + "bbox": [ + 69, + 186, + 122, + 197 + ], + "type": "text", + "content": "IFT results" + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 85, + 263, + 301, + 412 + ], + "blocks": [ + { + "bbox": [ + 85, + 263, + 301, + 412 + ], + "lines": [ + { + "bbox": [ + 85, + 263, + 301, + 412 + ], + "spans": [ + { + "bbox": [ + 85, + 263, + 301, + 412 + ], + "type": "image", + "image_path": "ab6a03663afe6a2c4a405104df1bd0065c08dd9c6f6cb7f26c3a6a34d3c3783e.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 307, + 261, + 523, + 411 + ], + "blocks": [ + { + "bbox": [ + 67, + 205, + 541, + 255 + ], + "lines": [ + { + "bbox": [ + 67, + 205, + 541, + 255 + ], + "spans": [ + { + "bbox": [ + 67, + 205, + 541, + 255 + ], + "type": "text", + "content": "Figure 13: MGD dataset selection improves the validation loss over metagradient steps, demonstrating our method's efficacy. However, the gap between loss on samples MGD directly optimizes on and the validation samples widens over the number of iterates, and there is overfitting depending on the number of steps taken." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 307, + 261, + 523, + 411 + ], + "lines": [ + { + "bbox": [ + 307, + 261, + 523, + 411 + ], + "spans": [ + { + "bbox": [ + 307, + 261, + 523, + 411 + ], + "type": "image", + "image_path": "36df9944129f621ff0e1ecdf764695eb6b36f0e8d3eaba57ccb6745917a7a037.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 740, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 740, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 740, + 312, + 750 + ], + "type": "text", + "content": "35" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 34 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 70, + 329, + 87 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 70, + 329, + 87 + ], + "spans": [ + { + "bbox": [ + 69, + 70, + 329, + 87 + ], + "type": "text", + "content": "E Accuracy-degrading data poisoning" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 95, + 334, + 111 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 95, + 334, + 111 + ], + "spans": [ + { + "bbox": [ + 69, + 95, + 334, + 111 + ], + "type": "text", + "content": "E.1 Background on Gradient Cancelling attack" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 68, + 116, + 541, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 116, + 541, + 140 + ], + "spans": [ + { + "bbox": [ + 68, + 116, + 541, + 140 + ], + "type": "text", + "content": "We briefly review the Gradient Cancelling attack [LKY23] used as a baseline in our experiments. We refer the reader to the original paper for details. Here we highlight the key ideas." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 140, + 541, + 166 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 140, + 541, + 166 + ], + "spans": [ + { + "bbox": [ + 68, + 140, + 541, + 166 + ], + "type": "text", + "content": "At a high level: Gradient Cancelling (GC) explicitly aims at making a specific malicious parameter configuration reachable through retraining on the poisoned dataset. The attack operates in two phases:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 80, + 174, + 541, + 291 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 81, + 174, + 541, + 223 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 174, + 541, + 223 + ], + "spans": [ + { + "bbox": [ + 81, + 174, + 541, + 223 + ], + "type": "text", + "content": "1. Parameter Generation: The attacker generates a target malicious model parameter independently, often using a direct parameter corruption method like Gradient-based Parameter Corruption (GradPC) [LKY23]. The end result of this phase is a target model parameter " + }, + { + "bbox": [ + 81, + 174, + 541, + 223 + ], + "type": "inline_equation", + "content": "\\theta_{p}" + }, + { + "bbox": [ + 81, + 174, + 541, + 223 + ], + "type": "text", + "content": " that achieves low accuracy on the test set, but is close to the original parameter " + }, + { + "bbox": [ + 81, + 174, + 541, + 223 + ], + "type": "inline_equation", + "content": "\\theta_0" + }, + { + "bbox": [ + 81, + 174, + 541, + 223 + ], + "type": "text", + "content": " derived from training on the clean dataset." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 80, + 229, + 541, + 291 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 229, + 541, + 291 + ], + "spans": [ + { + "bbox": [ + 80, + 229, + 541, + 291 + ], + "type": "text", + "content": "2. 
Poison Data Crafting: In the second phase, GC finds values of the poison data that induce a near-zero gradient at the target parameter " + }, + { + "bbox": [ + 80, + 229, + 541, + 291 + ], + "type": "inline_equation", + "content": "\\theta_{p}" + }, + { + "bbox": [ + 80, + 229, + 541, + 291 + ], + "type": "text", + "content": ". This is achieved by solving a gradient cancellation optimization problem: specifically, GC minimizes the total gradient of the loss function (with respect to the model parameters) evaluated over the combined (clean and poisoned) dataset, aiming to ensure that the gradient at the malicious parameter " + }, + { + "bbox": [ + 80, + 229, + 541, + 291 + ], + "type": "inline_equation", + "content": "\\theta_{p}" + }, + { + "bbox": [ + 80, + 229, + 541, + 291 + ], + "type": "text", + "content": " approaches zero." + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 69, + 305, + 265, + 321 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 305, + 265, + 321 + ], + "spans": [ + { + "bbox": [ + 69, + 305, + 265, + 321 + ], + "type": "text", + "content": "E.2 Metasmooth hyperparameters" + } + ] + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 189, + 371, + 421, + 650 + ], + "blocks": [ + { + "bbox": [ + 68, + 337, + 541, + 363 + ], + "lines": [ + { + "bbox": [ + 68, + 337, + 541, + 363 + ], + "spans": [ + { + "bbox": [ + 68, + 337, + 541, + 363 + ], + "type": "text", + "content": "Table 5: Hyperparameters used in the ResNet-9 [Jor24] CIFAR-10 poisoning experiments. The augmentations used are normalization, random horizontal flip, and random translate (2 pixels)" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 189, + 371, + 421, + 650 + ], + "lines": [ + { + "bbox": [ + 189, + 371, + 421, + 650 + ], + "spans": [ + { + "bbox": [ + 189, + 371, + 421, + 650 + ], + "type": "table", + "html": "
HyperparameterValue
Learning rate0.5
β10.85
Weight decay10-5
Exclude BatchNormTrue
OptimizerSGD
Batch size250
Epochs18
Starting learning rate fraction0.5
Relative min. learning rate10000
Scheduler max. iterations50000
Nesterov momentumTrue
BatchNorm ε10-5
BatchNorm momentum0.5
Final biasTrue
Width multiplier2.0
Final scale0.125
Initial scale2.0
Batchnorm locationBefore activation
Activation functionGELU
Pooling typeAverage
Test-time augmentationTrue
", + "image_path": "b60c76d8a268a432841f42e53cfdbdc3bf20196ce9a9c75b40fd38fb39467449.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 312, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 312, + 751 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 312, + 751 + ], + "type": "text", + "content": "36" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 35 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 69, + 202, + 87 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 69, + 202, + 87 + ], + "spans": [ + { + "bbox": [ + 69, + 69, + 202, + 87 + ], + "type": "text", + "content": "F LR optimization" + } + ] + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 138, + 128, + 471, + 203 + ], + "blocks": [ + { + "bbox": [ + 92, + 106, + 517, + 120 + ], + "lines": [ + { + "bbox": [ + 92, + 106, + 517, + 120 + ], + "spans": [ + { + "bbox": [ + 92, + 106, + 517, + 120 + ], + "type": "text", + "content": "Table 6: The grid search was run over all 528 combinations of the hyperparameter values below." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 138, + 128, + 471, + 203 + ], + "lines": [ + { + "bbox": [ + 138, + 128, + 471, + 203 + ], + "spans": [ + { + "bbox": [ + 138, + 128, + 471, + 203 + ], + "type": "table", + "html": "
ParameterValues
Peak learning rate[7.0, 7.5, 8.0, 8.5, 9.0, 9.5, 10.0, 10.5, 11.0, 11.5, 12.0]
Initial LR multiplier[0.05, 0.15, 0.25, 0.35, 0.45, 0.55]
Final LR multiplier[0.05, 0.15, 0.25, 0.35, 0.45, 0.55]
LR peak time[0.25, 0.5, 0.75]
", + "image_path": "526431798295effe5a381666eea095e355d33e12f099452c4e2cca80fa48e073.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 147, + 245, + 465, + 430 + ], + "blocks": [ + { + "bbox": [ + 274, + 226, + 323, + 239 + ], + "lines": [ + { + "bbox": [ + 274, + 226, + 323, + 239 + ], + "spans": [ + { + "bbox": [ + 274, + 226, + 323, + 239 + ], + "type": "text", + "content": "MGD step" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 147, + 245, + 465, + 430 + ], + "lines": [ + { + "bbox": [ + 147, + 245, + 465, + 430 + ], + "spans": [ + { + "bbox": [ + 147, + 245, + 465, + 430 + ], + "type": "image", + "image_path": "fec6b010bfcae93afb53b0c60c137838b889e707f0941f8e9d65d3df201207b2.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 199, + 441, + 411, + 454 + ], + "lines": [ + { + "bbox": [ + 199, + 441, + 411, + 454 + ], + "spans": [ + { + "bbox": [ + 199, + 441, + 411, + 454 + ], + "type": "text", + "content": "Figure 14: Graphs of our learned LR schedules." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "text", + "content": "37" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 36 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2503_13xxx/2503.13804/53558f89-5ff8-41c8-b6ca-fe406c0656ac_content_list.json b/data/2025/2503_13xxx/2503.13804/53558f89-5ff8-41c8-b6ca-fe406c0656ac_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..fb0050d0d8c1618cf3a225bf644f3fee8c56cf82 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13804/53558f89-5ff8-41c8-b6ca-fe406c0656ac_content_list.json @@ -0,0 +1,1429 @@ +[ + { + "type": "text", + "text": "Empowering GraphRAG with Knowledge Filtering and Integration", + "text_level": 1, + "bbox": [ + 147, + 89, + 850, + 112 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Kai Guo $^{1}$ , Harry Shomer $^{1}$ , Shenglai Zeng $^{1}$ , Haoyu Han $^{1}$ , Yu Wang $^{2}$ , Jiliang Tang $^{1}$", + "bbox": [ + 144, + 137, + 850, + 156 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ Michigan State University $^{2}$ University of Oregon", + "bbox": [ + 280, + 156, + 712, + 173 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{guokai1, shomerha, zengshe1, hanhaoy1, tangjili} @msu.edu,", + "bbox": [ + 245, + 173, + 751, + 189 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{yuwang} @uoregon.edu", + "bbox": [ + 396, + 190, + 600, + 206 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 260, + 260, + 339, + 275 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In recent years, 
large language models (LLMs) have revolutionized the field of natural language processing. However, they often suffer from knowledge gaps and hallucinations. Graph retrieval-augmented generation (GraphRAG) enhances LLM reasoning by integrating structured knowledge from external graphs. However, we identify two key challenges that plague GraphRAG: (1) Retrieving noisy and irrelevant information can degrade performance and (2) Excessive reliance on external knowledge suppresses the model's intrinsic reasoning. To address these issues, we propose GraphRAG-FI (Filtering & Integration), consisting of GraphRAG-Filtering and GraphRAG-Integration. GraphRAG-Filtering employs a two-stage filtering mechanism to refine retrieved information. GraphRAG-Integration employs a logits-based selection strategy to balance external knowledge from GraphRAG with the LLM's intrinsic reasoning, reducing over-reliance on retrievals. Experiments on knowledge graph QA tasks demonstrate that GraphRAG-FI significantly improves reasoning performance across multiple backbone models, establishing a more reliable and effective GraphRAG framework.", + "bbox": [ + 144, + 281, + 460, + 665 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 114, + 671, + 260, + 686 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Large language models (LLMs) have achieved remarkable success in NLP tasks, particularly in tasks that require complex reasoning (Havrilla et al.; Wu et al., 2023; Hao et al., 2023). However, despite their strengths, LLMs are prone to hallucinations, resulting in incorrect or poor reasoning (Ji et al., 2023; Huang et al., 2024; Sriramanan et al., 2025). GraphRAG techniques have emerged as a promising solution to this problem (Han et al., 2024; Zhang et al., 2025; He et al., 2025; Mavromatis and Karypis, 2024), by integrating relevant information from external graphs. 
Knowledge graphs, which store facts in the form of a graph, are commonly used for this problem. Specifically, relevant", + "bbox": [ + 112, + 696, + 490, + 921 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "facts (i.e., triples) or paths are extracted from the knowledge graph and used to enrich the context of the LLMs with structured and reliable information (Luo et al., 2024; Li et al., 2025; Ma et al., 2024). This approach has shown ability to improve the reasoning capabilities and reduce the presence of hallucinations in LLMs (Sun et al.; Li et al., 2025; Dong et al., 2024).", + "bbox": [ + 507, + 261, + 884, + 388 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "To better assess the efficacy of GraphRAG, in Section 3 we conduct a preliminary study comparing its performance with an LLM-only model (i.e., LLM without GraphRAG). This comparison reveals both the advantages and limitations of GraphRAG. While GraphRAG improved reasoning accuracy by correcting some LLM errors, it also introduces some notable weaknesses. For example, incorporating external knowledge will sometimes cause questions that were originally answered correctly by the LLM to be misclassified. This highlights the dangers of retrieving irrelevant information. Furthermore, excessive retrieval compounds this issue by introducing both noise and redundant information, thus further hindering the reasoning process.", + "bbox": [ + 507, + 390, + 884, + 646 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Meanwhile, we find that LLM-only and GraphRAG can complement one another. Specifically, GraphRAG can enhance reasoning for those questions LLMs lack knowledge of; while excessive reliance on external information may cause the model to overlook internally known correct answers. These findings highlight two key limitations of existing GraphRAG methods. First, GraphRAG is highly susceptible to retrieving irrelevant or misleading information. 
Second, GraphRAG struggles to balance external retrieval with the LLM's internal knowledge, often missing parts of the answer that the LLM-only model can provide using its own knowledge.", + "bbox": [ + 507, + 648, + 884, + 872 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Inspired by these findings, we propose a novel design that aims to address these issues. First, we aim to enhance the retrieval quality to better avoid", + "bbox": [ + 507, + 873, + 882, + 922 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2503.13804v1 [cs.AI] 18 Mar 2025", + "bbox": [ + 21, + 310, + 60, + 725 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "retrieving irrelevant information. Second, we integrate GraphRAG with an LLM's intrinsic reasoning ability, thus only using GraphRAG when external knowledge is necessary. In particular, to mitigate the issue of retrieving irrelevant information, we introduce a two-stage filtering process. Furthermore, to mitigate GraphRAG from overrelying on retrieved information while underutilizing the LLM's inherent reasoning ability, we introduce a logits-based selection mechanism that dynamically integrates LLMs' standalone answers with GraphRAG's outputs. This approach ensures that the final response effectively balances external knowledge with the model's internal reasoning. The main contributions of our work are summarized as follows:", + "bbox": [ + 112, + 84, + 492, + 341 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We identify two key challenges in GraphRAG: (1) It is susceptible to errors by retrieving irrelevant or misleading information. 
(2) It overemphasizes the externally retrieved knowledge, at the expense of the intrinsic reasoning capabilities of LLMs.", + "- We introduce a novel approach that enhances GraphRAG by incorporating a two-stage filtering mechanism to refine the retrieved knowledge and dynamically integrate this knowledge with a LLMs' standalone reasoning capabilities.", + "- Extensive experiments on knowledge graph QA demonstrate the effectiveness of our method across multiple backbone models." + ], + "bbox": [ + 136, + 351, + 489, + 612 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 Related work", + "text_level": 1, + "bbox": [ + 112, + 623, + 265, + 638 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "GraphRAG. GraphRAG aims to address hallucinations and outdated knowledge in LLMs by incorporating additional information retrieved from external knowledge bases (Sun et al.; Li et al., 2025; Dong et al., 2024). G-R retriever (He et al., 2025) identifies relevant nodes and edges for a given query based on cosine similarity, and then constructs a subgraph to aid in the generation process. Similarly, RoG (Luo et al., 2024) introduces a planning-retrieval-reasoning framework, where it retrieves reasoning paths guided by a planning module and performs reasoning using these paths. On the other hand, GNN-RAG (Mavromatis and Karypis, 2024) leverages Graph Neural Networks (GNNs) (Kipf and Welling, 2016) to process the intricate graph structures within knowledge graphs, enabling effective retrieval. They also use", + "bbox": [ + 112, + 646, + 489, + 921 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "retrieval augmentation techniques to enhance diversity. 
However, the effectiveness of these methods is heavily dependent on the quality of the retrieved information, and their performance significantly declines when the retrieved graph data is either noisy or unrelated to the query (He et al., 2025).", + "bbox": [ + 507, + 84, + 884, + 180 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Filter Methods. Filtering attempts to only keep those pieces of retrieved information that are relevant to the given query (Gao et al., 2025). ChunkRAG (Singh et al., 2024) tries to improve RAG systems by assessing and filtering retrieved data at the chunk level, with each \"chunk\" representing a concise and coherent segment of a document. This method first applies semantic chunking to partition documents into meaningful sections. It then leverages LLM-based relevance scoring to evaluate how well each chunk aligns with the user query. Zeng et al. (2024b) thoroughly investigate LLM representation behaviors in relation to RAG, uncovering distinct patterns between positive and negative samples in the representation space. This distinction enables representation-based methods to achieve significantly better performance for certain tasks. Building on these insights, they introduce Rep-PCA, which employs representation classifiers for knowledge filtering. RoK (Wang et al., 2024) refines the reasoning paths within the subgraph by computing the average PageRank score for each path. Similarly, He et al. (2024) use PageRank to identify the most relevant entities.", + "bbox": [ + 507, + 181, + 884, + 567 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3 Preliminary studies", + "text_level": 1, + "bbox": [ + 507, + 583, + 714, + 600 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To evaluate the effectiveness of GraphRAG, we compare the performance with and without retrieved external knowledge. 
Furthermore, we analyze the attention scores of the LLM to assess its ability to discern both the relevance and importance of the retrieved information. Lastly, we evaluate the performance of internal knowledge filtering.", + "bbox": [ + 507, + 611, + 882, + 724 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3.1 Experimental settings", + "text_level": 1, + "bbox": [ + 507, + 737, + 727, + 753 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this section, we aim to study the importance of retrieving external information when using GraphRAG for knowledge graph QA. To do so, we report the QA performance when using: LLM with GraphRAG and LLM w/o GraphRAG (i.e., LLM-only). For GraphRAG, we use RoG (Luo et al., 2024) and GNN-RAG (Mavromatis and Karypis, 2024). For the LLM-only experiments, we use the fine-tuned LLaMA 2-7B model, which is the same LLM used by RoG. The experiments are con", + "bbox": [ + 507, + 760, + 884, + 921 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/496e28bd577aa6e70383f3ab6939ca74c0779a5e7b7c2d509d916d5c878e76e6.jpg", + "image_caption": [ + "Figure 1: Category A includes cases where both GraphRAG and the LLM-only model are correct. Category B covers instances where GraphRAG outperforms the LLM-only model, while Category C includes cases where the LLM-only model performs better than GraphRAG. Category D represents cases where both models fail." + ], + "image_footnote": [], + "bbox": [ + 132, + 80, + 468, + 218 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "duced on two common datasets the WebQSP (Yih et al., 2016) and CWQ (Talmor and Berant, 2018) datasets. 
In this study, we mainly use the F1 score to evaluate the performance.", + "bbox": [ + 112, + 351, + 487, + 417 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/318b8766a631ff1fd8611c77f5b5c3cc04ca1a589f8e03899978a762189be102.jpg", + "image_caption": [ + "Figure 2: The relationship between path number and average F1" + ], + "image_footnote": [], + "bbox": [ + 115, + 426, + 487, + 577 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2 The Impact of GraphRAG", + "text_level": 1, + "bbox": [ + 112, + 643, + 369, + 658 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To understanding the effectiveness of GraphRAG, we compare prediction outcomes between LLM with GraphRAG and LLM w/o GraphRAG (i.e., LLM-only). We categorize the results into four groups based on F1 scores, as shown in the Figure 1. Category A includes cases where both GraphRAG and the LLM-only model provide correct answers. Category B consists of instances where GraphRAG produces a more accurate answer than the LLM-only model. Category C includes cases where the LLM-only model outperforms GraphRAG. Finally, Category D represents instances where both GraphRAG and the LLM-only model fail to generate the correct answer. Figure 1 illustrates the key observations from our experiments. While GraphRAG enhances certain predictions, it also", + "bbox": [ + 112, + 663, + 489, + 921 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "introduces notable challenges that require further investigation.", + "bbox": [ + 507, + 84, + 880, + 116 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Positive Impact of GraphRAG GraphRAG can enhance the LLM's reasoning capabilities by correcting errors that the standalone model would typically commit. Notably, in the category B, $45.64\\%$ of previously incorrect responses were successfully rectified with the integration of GraphRAG. 
This highlights the advantage of leveraging structured knowledge graphs to boost LLM performance.", + "bbox": [ + 507, + 127, + 882, + 255 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Limited Impact of GraphRAG Category A contains those answers where both GraphRAG and LLM-only are correct. This show that GraphRAG can sometimes preserve the performance of a LLM when the LLM already possesses the correct knowledge. Conversely, category D, representing $9.03\\%$ of cases, corresponds to those cases where GraphRAG fails to enhance the model's accuracy. For this category, neither the standalone LLM nor GraphRAG are able to provide the correct answer. This pattern implies that GraphRAG does not always access or incorporate sufficiently informative or relevant knowledge.", + "bbox": [ + 507, + 266, + 884, + 475 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Negative Impact of GraphRAG A notable drawback of GraphRAG is that will occasionally degrade the performance of a standalone LLM. That is, it will sometimes lead to wrong predictions for queries that the standalone LLM originally got right. These instances are represented by category C and accounts for $16.89\\%$ of samples when evaluating via the F1 score. In these cases, GraphRAG misleads the model rather than improving it. This suggests that some of the retrieved information may be incorrect, noisy, or irrelevant, ultimately leading to poorer predictions. Therefore, in some cases, LLMs without GraphRAG outperform those with GraphRAG, because existing works have shown that LLMs tend to over-rely on external information (Ren et al., 2023; Tan et al., 2024; Wang et al., 2023; Ni et al., 2024; Zeng et al., 2024a). 
When retrieval is insufficient or the quality of retrieved knowledge is low, this reliance can degrade generation quality.", + "bbox": [ + 507, + 485, + 882, + 807 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.3 The Impact of the Number of Retrieved Paths", + "text_level": 1, + "bbox": [ + 507, + 819, + 867, + 850 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Due to the structure of knowledge graphs, nodes with high degrees and numerous relational edges have a greater likelihood of yielding a large number of retrieved paths. In this subsection, we study", + "bbox": [ + 507, + 857, + 882, + 921 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "the impact of the number of retrieved paths on performance. Figure 2 illustrates the relationship between the number of retrieved paths and the model's performance. Interestingly, as indicated by the smoothed line (blue), incorporating a moderate amount of retrieved information enhances performance. However, increasing the number of retrieved paths ultimately leads to a decline in performance. This trend (green line) suggests that retrieving too much information will introduce noise, making it harder for the model to use the correct and relevant knowledge for the task. This phenomenon thus highlights an important insight - more information does not necessarily indicate better performance. Instead, an overabundance of retrieved data can overwhelm the model with irrelevant details. 
This observation underscores the necessity for effective filtering mechanisms that can prioritize high-quality, relevant knowledge while discarding extraneous or misleading information.", + "bbox": [ + 112, + 84, + 492, + 407 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/5394ce95b061ee2998994297ff15b21d1f49085458e088ff35602ddff5e6c4a4.jpg", + "image_caption": [ + "3.4 Attention Reflects the Importance of Retrieved Information", + "Figure 3: Attention Scores for Retrieved Information With/Without Ground Truth" + ], + "image_footnote": [], + "bbox": [ + 124, + 468, + 478, + 604 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In this subsection, we analyze the ability of the LLM to distinguish the importance of retrieved external knowledge. The attention scores of a LLM can provide a natural indicator of the relevance and significance of the retrieved knowledge (Yang et al., 2024; Ben-Artzy and Schwartz, 2024). The attention scores, derived from the model's internal mechanisms, effectively capture which pieces of information are most influential in reaching the final decision. Inspired by recent work (Chuang et al., 2023; Halawi et al., 2023), which suggests that attention scores in the middle layers are more effective. We examine the attention scores of the (middle + 2)-th layer in the LLM for each retrieved path. We obtain the attention scores for all retrieved paths and categorize them into two groups: (1) paths that", + "bbox": [ + 112, + 663, + 489, + 921 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "contain the ground truth and (2) paths that do not. We then compute the average attention score for each group and present the results in Figure 3. 
As demonstrated in Figure 3, there is a clear alignment between the attention scores and the ground truth labels, suggesting that these scores can be used to assess the relevance of retrieved information.", + "bbox": [ + 507, + 84, + 884, + 197 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "This observation inspires a key insight: The attention scores highlight the most significant retrieved information, suggesting their potential use in filtering out noisy or irrelevant knowledge. Since retrieved information with lower attention scores contribute minimally to the final output, they can be pruned to streamline retrieval and enhance overall performance.", + "bbox": [ + 507, + 198, + 884, + 326 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.5 Internal Knowledge Filtering", + "text_level": 1, + "bbox": [ + 507, + 338, + 786, + 354 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Large language models (LLMs) generate responses that may contain both correct and incorrect information. To assess the reliability of these responses, we analyze the associated logits, which represent the model's confidence in its predictions. Typically, higher confidence correlates with correctness (Ma et al., 2025; Virk et al., 2024). Leveraging this property, we implement \"Internal Knowledge Filtering\", which uses the logits to help refine the answer selection. The logits of answer can be directly obtained from the LLM's output. Formally, let $A_{L}$ denote the sets of answer candidates from the LLM model. Furthermore, let it's corresponding logits after softmax function be given by $\\ell_{L}(a)$ . 
The filtering step is given by the following:", + "bbox": [ + 507, + 359, + 885, + 601 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nA _ {L} ^ {\\text {f i l t e r e d}} = \\left\\{a \\in A _ {L} \\mid \\ell_ {L} (a) \\geq \\tau_ {L} \\right\\}, \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 563, + 612, + 882, + 632 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\tau_{L} = 1$ . This allows us to filter out the responses that the LLM has low-confidence in. The experimental results are shown in Table 1. We can clearly see that that leveraging logits to filter out low-confidence responses has a large positive effect on performance. In this way, we can reconsider intrinsic knowledge and apply this approach to GraphRAG to better balance internal and external knowledge base on logits.", + "bbox": [ + 507, + 644, + 885, + 789 + ], + "page_idx": 3 + }, + { + "type": "table", + "img_path": "images/8df0d0d841560df2b2c8fffc2d15d88d4b71b35a9f03a2f9f18a2b3fa59e808c.jpg", + "table_caption": [ + "Table 1: Impact of logits on LLM performance" + ], + "table_footnote": [], + "table_body": "
MethodsWebQSPCWQ
HitF1HitF1
LLM66.1549.9740.2734.17
LLM with Logits84.1776.7461.8358.19
", + "bbox": [ + 526, + 826, + 870, + 904 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.6 Discussions", + "text_level": 1, + "bbox": [ + 114, + 84, + 250, + 98 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In this subsection, we summarize the key findings and discussions from our preliminary study. The performance issues observed in GraphRAG primarily arise from two key factors. (1) Noisy or Irrelevant Retrieval: Some retrieved paths contain irrelevant or misleading information. This negatively impacts the model's ability to properly answer the query. Furthermore, this noise can introduce conflicting or unnecessary information that hinders the decision-making process rather than improving it. (2) Lack of Consideration for LLM's Own Knowledge: GraphRAG does not always take into account the inherent reasoning ability of the LLM itself. In some cases, the retrieved information overrides the LLM's correct predictions, leading to performance degradation rather than enhancement. A more adaptive approach is needed to balance external knowledge retrieval with the model's internal knowledge.", + "bbox": [ + 112, + 105, + 489, + 413 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4 Method", + "text_level": 1, + "bbox": [ + 112, + 425, + 218, + 439 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Based on our analysis, we propose a new framework to address the identified challenges, guided by two key insights: (1) Filtering retrieved information: Given the tendency of GraphRAG to retrieve irrelevant or incorrect retrieved information, it is essential to refine the retrieved knowledge. (2) Properly leveraging the LLMs standalone capabilities: The LLM itself can often correctly answer some questions. 
It's thus necessary to effectively integrate and use the inherent reasoning ability of LLMs along with GraphRAG.", + "bbox": [ + 112, + 451, + 489, + 627 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "An overview of our framework GraphRAG-FI is given in Figure 4. It consists of two core components: GraphRAG-Filtering and GraphRAG-Integration. GraphRAG-Filtering first refines the retrieved information by removing irrelevant or misleading knowledge. GraphRAG-Integration module balances the retrieved knowledge with the LLM's inherent reasoning ability, thereby mitigating the overuse of retrieved information that can negatively impact performance. In the following subsections, we will introduce each component of our framework in detail.", + "bbox": [ + 112, + 630, + 489, + 821 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1 GraphRAG-Filtering", + "text_level": 1, + "bbox": [ + 112, + 834, + 327, + 850 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Let $P = \\{p_1, p_2, \\ldots, p_N\\}$ denote the set of $N$ retrieved paths or triplets, where each path $p_i$ is assigned an attention score $a_i$ . Then we design filtering via the following two stages.", + "bbox": [ + 112, + 856, + 487, + 921 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Stage 1: Coarse Filtering using Attention: In the first stage, we perform a coarse filtering by retaining only those paths whose attention scores exceeds a threshold $\\tau$ . This is given formally by:", + "bbox": [ + 507, + 84, + 882, + 149 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nP _ {\\text {c o a r s e}} = \\left\\{p _ {i} \\in P \\mid a _ {i} \\geq \\tau \\right\\}. 
\\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 586, + 161, + 882, + 179 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Stage 2: Fine Filtering via LLMs: After the initial coarse filtering, which significantly reduces the number of candidate paths, we perform a more precise evaluation with a LLM on the remaining subset. This two-stage filtering approach not only enhances the quality of the retrieved paths but also greatly reduces the overall cost by limiting the use of the LLM to only those paths deemed promising in the first stage. Let $f(p)$ represent the evaluation score provided by the LLM for a path $p$ , and let $\\tau'$ be the corresponding threshold. The final set of filtered paths is then given by:", + "bbox": [ + 507, + 191, + 884, + 385 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nP _ {\\text {f i n a l}} = \\left\\{p \\in P _ {\\text {c o a r s e}} \\mid f (p) \\geq \\tau^ {\\prime} \\right\\}, \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 566, + 397, + 882, + 416 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $P_{\\mathrm{coarse}}$ is the set of paths that passed the coarse filtering stage, $\\tau^{\\prime}$ is not predefined but is determined by the LLM itself.", + "bbox": [ + 507, + 428, + 882, + 475 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Prompt Construction: After the two filtering stages, we incorporate the selected paths and query into the prompt to further guide the model's reasoning. 
The prompt contains the following two types of retrieved paths:", + "bbox": [ + 507, + 485, + 882, + 565 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- High Priority Paths: These are the final filtered paths given by $P_{\\mathrm{final}}$ , which are considered the most reliable.", + "- Additional Paths: We also consider the remaining paths included by the coarse filter but removed via the fine filter, $P_{\\mathrm{coarse}} - P_{\\mathrm{final}}$ . We conjecture that while they may not be as important as those paths in $P_{\\mathrm{final}}$ , they can still offer some useful supplementary context." + ], + "bbox": [ + 531, + 577, + 884, + 734 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The new prompt is then constructed by first inserting a header for the high-priority paths, followed by each path on a separate line. The same process is repeated for the additional paths. By structuring the prompt in this way, we are able to clearly delineate the paths by their priority. This ensures that the most critical information $(P_{\\mathrm{final}})$ is emphasized and processed first, while still incorporating the supplementary context from the additional paths. An example prompt is given in Appendix A.2.", + "bbox": [ + 507, + 744, + 884, + 921 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/175f4b8c514ad2758bb069413e6f2e98fab5a73ec12eebfcea3ab375c3e494a4.jpg", + "image_caption": [ + "Figure 4: An overview of the GraphRAG-FI framework." + ], + "image_footnote": [], + "bbox": [ + 115, + 80, + 884, + 305 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2 Integration with LLMs' Internal Knowledge", + "text_level": 1, + "bbox": [ + 112, + 354, + 416, + 387 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "As noted in Section 3.2, in addition to ensuring we only retrieve high-quality information, we also want to retain internal knowledge of the LLMs. 
As such, we want to also integrate the capabilities of just the LLM into our framework. However, a challenge is knowing when to defer to which method. When do we trust the answers given by GraphRAG and when the standalone LLM? Furthermore, how do we fuse the answers given by both methods?", + "bbox": [ + 112, + 414, + 489, + 558 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To achieve this goal, we need a method to determine which answers produced by both LLM-only and GraphRAG are actually relevant. In Section 3.5, we found that the LLM's logits can provide a useful tool to refine the potential answers. That is, focusing only on those answers that are given a higher confidence is helpful. This naturally provides us with an easy way to focus on just the high-quality information. For both GraphRAG and the LLM-only model, we filter the answers based on their logits, ensuring that only high-confidence responses are retained. After this logits-based filtering, the refined answers from both sources are combined to produce the final answer, thereby enhancing robustness and accuracy.", + "bbox": [ + 112, + 571, + 489, + 813 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Formally, let $A_G$ and $A_L$ denote the sets of answer candidates from GraphRAG and the LLM-only model, respectively. We further use $a$ to indicate a single candidate answer in either set. Furthermore, let their corresponding logits after the softmax function be given by $\\ell_G(a)$ and $\\ell_L(a)$ . 
The", + "bbox": [ + 112, + 824, + 489, + 921 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "filtering step is given by the following:", + "bbox": [ + 509, + 354, + 800, + 370 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nA _ {G} ^ {\\text {f i l t e r e d}} = \\left\\{a \\in A _ {G} \\mid \\ell_ {G} (a) \\geq \\tau_ {G} \\right\\}, \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 561, + 381, + 882, + 400 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nA _ {L} ^ {\\text {f i l t e r e d}} = \\left\\{a \\in A _ {L} \\mid \\ell_ {L} (a) \\geq \\tau_ {L} \\right\\}, \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 561, + 401, + 882, + 422 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $\\tau_{G}$ and $\\tau_{L}$ are predefined thresholds, $\\tau_{L}$ is set to 1. Subsequently, the final answer is determined by combining the filtered sets:", + "bbox": [ + 507, + 434, + 882, + 482 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nA _ {\\text {f i n a l}} = \\operatorname {C o m b i n e} \\left(A _ {G} ^ {\\text {f i l t e r e d}}, A _ {L} ^ {\\text {f i l t e r e d}}\\right), \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 554, + 493, + 882, + 520 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $\\operatorname{Combine}(\\cdot)$ denotes the function that integrates the filtered answers into the final reliable output.", + "bbox": [ + 507, + 530, + 882, + 579 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5 Experiment", + "text_level": 1, + "bbox": [ + 507, + 590, + 648, + 607 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In our experiments, we seek to address the following research questions: RQ1: How effective is the proposed method when applied to state-of-the-art GraphRAG retrievers in the knowledge graph QA task? RQ2: How does the proposed method compare to other filtering approaches? 
RQ3: How does the performance change when more noisy information is introduced? and RQ4: What is the impact of the two modules on performance?", + "bbox": [ + 505, + 615, + 884, + 760 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.1 Experiment Settings", + "text_level": 1, + "bbox": [ + 507, + 771, + 717, + 788 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Datasets. To assess the effectiveness of our method, we evaluate it on two widely recognized KGQA benchmark datasets: WebQSP (Yih et al., 2016) and CWQ (Talmor and Berant, 2018). WebQSP contains 4,737 natural language questions that require reasoning over paths of up to two hops. In contrast, CWQ includes 34,699 more complex questions that necessitate multi-hop reasoning over", + "bbox": [ + 507, + 791, + 884, + 921 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/8fbeb86a47c75ef462e1cde623194a4293e71edd7e7770812d08d3233aab0bf8.jpg", + "table_caption": [ + "Table 2: Performance comparison with different baselines on the two KGQA datasets." + ], + "table_footnote": [], + "table_body": "
TypeMethodsWebQSPCWQ
HitF1HitF1
LLMsFlan-T5-xl(Chung et al., 2024)31.0-14.7-
Alpaca-7B(Taori et al., 2023)51.8-27.4-
LLaMA2-Chat-7B(Touvron et al., 2023)64.4-34.6-
ChatGPT66.8-39.9-
ChatGPT+CoT75.6-48.9-
LLMs+KGsROG86.7370.7561.9154.95
ROG + Similarity85.5069.3861.6254.38
ROG + PageRank85.4469.6061.3454.41
ROG + GraphRAG-Filtering87.4073.4163.8657.25
ROG + GraphRAG-FI89.2573.8664.8255.12
GNN-RAG90.1173.2569.1060.55
GNN-RAG + Similarity89.6872.1768.5060.26
GNN-RAG + PageRank89.1871.9266.7558.73
GNN-RAG + GraphRAG-Filtering91.2874.7469.7060.96
GNN-RAG + GraphRAG-FI91.8975.9871.1260.34
SubgraphRAG76.9064.6553.8750.43
SubgraphRAG + Similarity72.7259.9852.0548.27
SubgraphRAG + PageRank61.7950.6546.7543.23
SubgraphRAG + GraphRAG-Filtering81.0168.4058.8254.71
SubgraphRAG + GraphRAG-FI81.0868.2858.9652.52
", + "bbox": [ + 159, + 107, + 836, + 479 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "up to four hops. Both datasets are built upon Freebase, which consists of around 88 million entities, 20 thousand relations, and 126 million triples. Further details on the datasets are provided in Appendix A.1.", + "bbox": [ + 112, + 502, + 489, + 583 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Retriever Backbones. Our framework adopts three existing retrieval methods as its backbone: path-based retrieval (ROG (Luo et al., 2024)), GNN (Mavromatis and Karypis, 2024)), and subgraph-based retrieval (SubgraphRAG (Li et al., 2025)). Path-based retrieval extracts relevant paths using heuristics or shortest-path algorithms, while GNN-based retrieval leverages a Graph Neural Network to learn and retrieve informative paths. In contrast, subgraph-based retrieval retrieves relevant subgraphs and encodes them as triples, enabling fine-grained relational reasoning. Therefore, both path-based and GNN-based methods generate paths as input for the LLM. Lastly, subgraph-based methods give triples (i.e., edges) as input to the LLM that take the form of $(h,r,t)$ . By considering these three methods, we are able to test our framework on a diverse set of retrieval methods.", + "bbox": [ + 112, + 590, + 489, + 879 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Filter Baselines. The most commonly used filtering methods for RAG are similarity-based ap", + "bbox": [ + 112, + 889, + 489, + 921 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "proaches used in (Gao et al., 2025). Similarity-based methods evaluate the relevance of retrieved information by measuring feature similarity. For retrieval over graphs, PageRank-based filtering is widely adopted (Wang et al., 2024). PageRank-based filtering leverages the graph structure to rank nodes based on their connectivity and importance. 
These methods provide a baseline filtering mechanism for refining the retrieved results.", + "bbox": [ + 507, + 502, + 884, + 646 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Implementation and Evaluation Metrics. We use LLaMA2-Chat-7B from ROG as the LLM backbone, which is instruction-finetuned on the training split of WebQSP and CWQ, as well as Freebase, for three epochs. For the similarity-based filter, we utilize SentenceTransformer ('all-MiniLM-L6-v2') to generate representations for retrieval. We evaluate our retrieval methods using both Hit Rate (Hit) and F1-score (F1). Hit Rate measures the proportion of relevant items successfully retrieved, reflecting retrieval effectiveness. F1-score balances precision and recall, providing a comprehensive assessment of retrieval quality. These metrics ensure a robust evaluation of retrieval performance. We adjust the thresholds $\\tau$ and $\\tau_{G}$ within the ranges [top 40, top 50] and [0.4, 0.5], respectively.", + "bbox": [ + 507, + 663, + 884, + 922 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.2 Main Results", + "text_level": 1, + "bbox": [ + 112, + 84, + 265, + 98 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this section, we evaluate the performance of our method with various retrievers and compare it against baseline filter models.", + "bbox": [ + 112, + 105, + 487, + 151 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "RQ1: KGQA Performance Comparison. In this subsection, we apply our method to different retrievers, including the path-based retriever, GNN-based retriever, and subgraph-based retriever. The results presented in Table 2 demonstrate that our method consistently improves all retrievers, achieving an average improvement of $3.81\\%$ in Hit and $2.35\\%$ in F1 over ROG, $2.46\\%$ in Hit and $1.7\\%$ in F1 over GNN-RAG, and significant gains of $7.47\\%$ in Hit and $4.88\\%$ in F1 over SubgraphRAG across two datasets. 
These results demonstrate that our approach is effective across different retrieval paradigms, reinforcing its adaptability to various retrieval strategies in QA tasks.", + "bbox": [ + 112, + 154, + 487, + 376 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "RQ2: Comparison with other filter methods. We compare our method against other filtering baselines, with the results presented in Table 2. Our approach consistently outperforms competing methods across both datasets and retriever types. Specifically, for ROG, our method can achieve an average improvement of $4.78\\%$ in Hit and $3.95\\%$ in F1 compared to similarity-based filtering on both datasets. Furthermore, compared to the PageRank-based filtering method, our approach yields an average increase of $5.03\\%$ in Hit and $3.70\\%$ in F1 across both datasets. These results highlight the superiority of our method in enhancing retrieval effectiveness and overall performance.", + "bbox": [ + 112, + 379, + 489, + 602 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/126d7898f0d8ce0776605a29f6c763a81d49da74e920a4306526b091d6c2782d.jpg", + "table_caption": [ + "Table 3: Performance when adding more noise" + ], + "table_footnote": [], + "table_body": "
MethodsWebQSPCWQ
HitF1HitF1
ROG-original86.7370.7561.9154.95
ROG*85.8768.8160.4953.72
ROG* + GraphRAG-Filtering86.6173.0161.9155.67
", + "bbox": [ + 131, + 640, + 473, + 715 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.3 Robustness to Noise", + "text_level": 1, + "bbox": [ + 112, + 740, + 317, + 753 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this subsection, we evaluate robustness of different methods to noise. To evaluate the noise resistance of the backbone model and our filter method, we use GPT to generate 30 additional noise paths that contain both irrelevant and incorrect information. This information is then incorporated into the retrieved context. We then analyze the impact of this noise on performance. The experimental results presented in Table 3, ROG* represents the cases where noise is introduced. As the noise level", + "bbox": [ + 112, + 760, + 489, + 920 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "increases, the Hit score decreases by $2.29\\%$ , and the F1 score drops by $2.23\\%$ on the CWQ dataset, highlighting the model's sensitivity to noise. However, when applying our method, we observe a $2.23\\%$ improvement in Hit and a $3.63\\%$ improvement in F1 over $\\mathrm{ROG^{*}}$ on CWQ. These results demonstrate the effectiveness of our approach in mitigating the negative impact of noisy retrieval.", + "bbox": [ + 507, + 84, + 884, + 212 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.4 Ablation Study", + "text_level": 1, + "bbox": [ + 507, + 223, + 673, + 239 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We conduct an ablation study to analyze the effectiveness of the filtering module and integrating module in GraphRAG-FI. From the results in Table 4, we can see that GraphRAG-Filtering is useful for the ROG retriever, as it improves both the F1 and Hit scores. For example, GraphRAG-Filtering increases the F1 score by $4.19\\%$ and the Hit score by $3.15\\%$ on CWQ dataset. 
We also see a boost in performance for GraphRAG-Integration, with a $1.60\\%$ and $2.62\\%$ increase in F1 and Hit score, respectively, on WebQSP. These results demonstrate the effectiveness of our two components.", + "bbox": [ + 507, + 244, + 884, + 437 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/b180c3b6429f6a2dae130d14350518b88d86c32cc66cbe1abb5b4d31d624d598.jpg", + "table_caption": [ + "Table 4: Ablation study." + ], + "table_footnote": [], + "table_body": "
MethodsWebQSPCWQ
HitF1HitF1
ROG-original86.7370.7561.9154.95
ROG + GraphRAG-Filtering87.4073.4163.8657.25
ROG + GraphRAG-Integration89.0071.8864.2555.19
ROG + GraphRAG-FI89.2573.8664.8255.12
", + "bbox": [ + 514, + 473, + 880, + 565 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6 Conclusion", + "text_level": 1, + "bbox": [ + 507, + 590, + 640, + 605 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this work, we propose GraphRAG-FI (Filtering & Integration), an enhanced GraphRAG framework that addresses key challenges in graph retrieval-augmented generation. By incorporating GraphRAG-Filtering, which utilizes a two-stage filtering mechanism to refine retrieved information, and GraphRAG-Integration, which employs a logits-based selection strategy to balance retrieval and intrinsic reasoning, our approach mitigates the impact of noisy retrievals and excessive dependence on external knowledge. Experimental results on knowledge graph QA tasks demonstrate that GraphRAG-FI significantly improves reasoning accuracy across multiple backbone models, establishing a more reliable and effective GraphRAG framework.", + "bbox": [ + 507, + 615, + 884, + 872 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Limitations", + "text_level": 1, + "bbox": [ + 114, + 84, + 220, + 99 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In this work, we identify two key challenges in GraphRAG: (1) it is prone to errors due to the retrieval of irrelevant or misleading information, and (2) it places excessive emphasis on externally retrieved knowledge, which can diminish the intrinsic reasoning capabilities of LLMs. Future research will first explore a broader range of large language models to evaluate their effectiveness within GraphRAG. Additionally, further investigation into diverse filtering methods could enhance the refinement of retrieved information and reduce noise. 
More sophisticated fusion strategies may also be explored to dynamically balance external knowledge with the intrinsic reasoning of LLMs, enabling more effective information integration.", + "bbox": [ + 112, + 108, + 492, + 351 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 114, + 376, + 213, + 390 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Amit Ben-Artzy and Roy Schwartz. 2024. Attend first, consolidate later: On the importance of attention in different llm layers. arXiv preprint arXiv:2409.03621.", + "Yung-Sung Chuang, Yujia Xie, Hongyin Luo, Yoon Kim, James Glass, and Pengcheng He. 2023. Dola: Decoding by contrasting layers improves factuality in large language models. arXiv preprint arXiv:2309.03883.", + "Hyung Won Chung, Le Hou, Shayne Longpre, Barret Zoph, Yi Tay, William Fedus, Yunxuan Li, Xuezhi Wang, Mostafa Dehghani, Siddhartha Brahma, et al. 2024. Scaling instruction-finetuned language models. Journal of Machine Learning Research, 25(70):1-53.", + "Jialin Dong, Bahare Fatemi, Bryan Perozzi, Lin F Yang, and Anton Tsitsulin. 2024. Don't forget to connect! improving rag with graph-based reranking. arXiv preprint arXiv:2405.18414.", + "Zengyi Gao, Yukun Cao, Hairu Wang, Ao Ke, Yuan Feng, Xike Xie, and S Kevin Zhou. 2025. Frag: A flexible modular framework for retrieval-augmented generation based on knowledge graphs. arXiv preprint arXiv:2501.09957.", + "Danny Halawi, Jean-Stanislas Denain, and Jacob Steinhardt. 2023. Overthinking the truth: Understanding how language models process false demonstrations. arXiv preprint arXiv:2307.09476.", + "Haoyu Han, Yu Wang, Harry Shomer, Kai Guo, Jiayuan Ding, Yongjia Lei, Mahantesh Halappanavar, Ryan A Rossi, Subhabrata Mukherjee, Xianfeng Tang, et al. 2024. Retrieval-augmented generation with graphs (graphrag). 
arXiv preprint arXiv:2501.00309.", + "Shibo Hao, Yi Gu, Haodi Ma, Joshua Hong, Zhen Wang, Daisy Wang, and Zhiting Hu. 2023. Reasoning with language model is planning with world" + ], + "bbox": [ + 115, + 398, + 489, + 920 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "model. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 8154-8173.", + "Alexander Havrilla, Sharath Chandra Rarparthy, Christoforos Nalmpantis, Jane Dwivedi-Yu, Maksym Zhuravinskyi, Eric Hambro, and Roberta Raileanu. Gore: When, where, and how to improve llm reasoning via global and local refinements. In *Forty-first International Conference on Machine Learning*.", + "Xiaoxin He, Yijun Tian, Yifei Sun, Nitesh Chawla, Thomas Laurent, Yann LeCun, Xavier Bresson, and Bryan Hooi. 2025. G-retriever: Retrieval-augmented generation for textual graph understanding and question answering. Advances in Neural Information Processing Systems, 37:132876-132907.", + "Xiaoxin He, Yijun Tian, Yifei Sun, Nitesh V Chawla, Thomas Laurent, Yann LeCun, Xavier Bresson, and Bryan Hooi. 2024. G-retriever: Retrieval-augmented generation for textual graph understanding and question answering. arXiv preprint arXiv:2402.07630.", + "Lei Huang, Weijiang Yu, Weitao Ma, Weihong Zhong, Zhangyin Feng, Haotian Wang, Qianglong Chen, Weihua Peng, Xiaocheng Feng, Bing Qin, et al. 2024. A survey on hallucination in large language models: Principles, taxonomy, challenges, and open questions. ACM Transactions on Information Systems.", + "Ziwei Ji, Tiezheng Yu, Yan Xu, Nayeon Lee, Etsuko Ishii, and Pascale Fung. 2023. Towards mitigating llm hallucination via self reflection. In *Findings of the Association for Computational Linguistics: EMNLP* 2023, pages 1827-1843.", + "Thomas N Kipf and Max Welling. 2016. Semi-supervised classification with graph convolutional networks. arXiv preprint arXiv:1609.02907.", + "Mufei Li, Siqi Miao, and Pan Li. 2025. 
Simple is effective: The roles of graphs and large language models in knowledge-graph-based retrieval-augmented generation. In International Conference on Learning Representations.", + "Linhao Luo, Yuan-Fang Li, Gholamreza Haffari, and Shirui Pan. 2024. Reasoning on graphs: Faithful and interpretable large language model reasoning. In International Conference on Learning Representations.", + "Huan Ma, Jingdong Chen, Guangyu Wang, and Changqing Zhang. 2025. Estimating llm uncertainty with logits. arXiv preprint arXiv:2502.00290.", + "Shengjie Ma, Chengjin Xu, Xuhui Jiang, Muzhi Li, Huaren Qu, and Jian Guo. 2024. Think-on-graph 2.0: Deep and interpretable large language model reasoning with knowledge graph-guided retrieval. arXiv e-prints, pages arXiv-2407.", + "Costas Mavromatis and George Karypis. 2024. Gnrag: Graph neural retrieval for large language model reasoning. arXiv preprint arXiv:2405.20139." + ], + "bbox": [ + 510, + 85, + 884, + 920 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Shiyu Ni, Keping Bi, Jiafeng Guo, and Xueqi Cheng. 2024. When do llms need retrieval augmentation? mitigating llms' overconfidence helps retrieval augmentation. arXiv preprint arXiv:2402.11457.", + "Ruiyang Ren, Yuhao Wang, Yingqi Qu, Wayne Xin Zhao, Jing Liu, Hao Tian, Hua Wu, Ji-Rong Wen, and Haifeng Wang. 2023. Investigating the factual knowledge boundary of large language models with retrieval augmentation. arXiv preprint arXiv:2307.11019.", + "Ishneet Sukhvinder Singh, Ritvik Aggarwal, Ibrahim Allahverdiyev, Muhammad Taha, Aslihan Akalin, Kevin Zhu, and Sean O'Brien. 2024. Chunkrag: Novel lmm-chunk filtering method for rag systems. arXiv preprint arXiv:2410.19572.", + "Gaurang Sriramanan, Siddhant Bharti, Vinu Sankar Sadasivan, Shoumik Saha, Priyatham Kattakinda, and Soheil Feizi. 2025. Llm-check: Investigating detection of hallucinations in large language models. 
Advances in Neural Information Processing Systems, 37:34188-34216.", + "Jiashuo Sun, Chengjin Xu, Lumingyuan Tang, Saizhuo Wang, Chen Lin, Yeyun Gong, Lionel Ni, Heung-Yeung Shum, and Jian Guo. Think-on-graph: Deep and responsible reasoning of large language model on knowledge graph. In The Twelfth International Conference on Learning Representations.", + "Alon Talmor and Jonathan Berant. 2018. The web as a knowledge-base for answering complex questions. arXiv preprint arXiv:1803.06643.", + "Hexiang Tan, Fei Sun, Wanli Yang, Yuanzhuo Wang, Qi Cao, and Xueqi Cheng. 2024. Blinded by generated contexts: How language models merge generated and retrieved contexts for open-domain qa? arXiv preprint arXiv:2401.11911.", + "Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li, Carlos Guestrin, Percy Liang, and Tatsunori B Hashimoto. 2023. Stanford alpaca: An instruction-following llama model.", + "Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. 2023. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288.", + "Yuvraj Virk, Premkumar Devanbu, and Toufique Ahmed. 2024. Enhancing trust in llm-generated code summaries with calibrated confidence scores. arXiv preprint arXiv:2404.19318.", + "Yile Wang, Peng Li, Maosong Sun, and Yang Liu. 2023. Self-knowledge guided retrieval augmentation for large language models. arXiv preprint arXiv:2310.05002." + ], + "bbox": [ + 115, + 85, + 489, + 919 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Yuqi Wang, Boran Jiang, Yi Luo, Dawei He, Peng Cheng, and Liangcai Gao. 2024. Reasoning on efficient knowledge paths: Knowledge graph guides large language model for domain question answering. arXiv preprint arXiv:2404.10384.", + "Xiaoqian Wu, Yong-Lu Li, Jianhua Sun, and Cewu Lu. 2023. 
Symbol-llm: leverage language models for symbolic system in visual human activity reasoning. Advances in Neural Information Processing Systems, 36:29680-29691.", + "Lijie Yang, Zhihao Zhang, Zhuofu Chen,zikun Li, and Zhihao Jia. 2024. Tidaldecode: Fast and accurate llm decoding with position persistent sparse attention. arXiv preprint arXiv:2410.05076.", + "Wen-tau Yih, Matthew Richardson, Christopher Meek, Ming-Wei Chang, and Jina Suh. 2016. The value of semantic parse labeling for knowledge base question answering. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pages 201-206.", + "Shenglai Zeng, Jiankun Zhang, Pengfei He, Yue Xing, Yiding Liu, Han Xu, Jie Ren, Shuaiqiang Wang, Dawei Yin, Yi Chang, et al. 2024a. The good and the bad: Exploring privacy issues in retrieval-augmented generation (rag). arXiv preprint arXiv:2402.16893.", + "Shenglai Zeng, Jiankun Zhang, Bingheng Li, Yuping Lin, Tianqi Zheng, Dante Everaert, Hanqing Lu, Hui Liu, Yue Xing, Monica Xiao Cheng, et al. 2024b. Towards knowledge checking in retrieval-augmented generation: A representation perspective. arXiv preprint arXiv:2411.14572.", + "Qinggang Zhang, Shengyuan Chen, Yuanchen Bei, Zheng Yuan, Huachi Zhou, Zijin Hong, Junnan Dong, Hao Chen, Yi Chang, and Xiao Huang. 2025. A survey of graph retrieval-augmented generation for customized large language models. arXiv preprint arXiv:2501.13958." + ], + "bbox": [ + 510, + 85, + 880, + 631 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/167a42a5baf1a000a36b149db67dd2f9386cc0c431471e8215a23ca01506e714.jpg", + "table_caption": [ + "Table 5: Statistics of datasets." + ], + "table_footnote": [], + "table_body": "
Datasets#Train#TestMax #hop
WebQSP2,8261,6282
CWQ27,6393,5314
", + "bbox": [ + 141, + 107, + 460, + 160 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "A Appendix", + "text_level": 1, + "bbox": [ + 114, + 183, + 236, + 200 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "A.1 Datasets", + "text_level": 1, + "bbox": [ + 114, + 209, + 231, + 222 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "We utilize two benchmark KGQA datasets, WebQSP (Yih et al., 2016) and CWQ (Talmor and Berant, 2018), as proposed in previous studies. Following ROG, we maintain the same training and testing splits. The dataset statistics are provided in Table 5.", + "bbox": [ + 112, + 229, + 487, + 324 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "A.2 Prompt Example", + "text_level": 1, + "bbox": [ + 114, + 336, + 297, + 351 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Prompts", + "text_level": 1, + "bbox": [ + 142, + 525, + 213, + 539 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Based on the reasoning paths, please answer the given question. 
Please keep the answer as simple as possible and return all the possible answers as a list.", + "bbox": [ + 141, + 551, + 460, + 614 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Reasoning Paths:", + "text_level": 1, + "bbox": [ + 142, + 615, + 278, + 630 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "High Priority Paths:", + "bbox": [ + 147, + 632, + 295, + 646 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Northern Colorado Bears football $\\rightarrow$ education.educational_institution.sports_teams $\\rightarrow$ University of Northern Colorado", + "bbox": [ + 142, + 648, + 460, + 694 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Additional Paths:", + "bbox": [ + 147, + 696, + 277, + 709 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Northern Colorado Bears football $\\rightarrow$ education.educational_institution.sports_teams $\\rightarrow$ University of Northern Colorado", + "bbox": [ + 142, + 712, + 460, + 758 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Greeley $\\rightarrow$ location.location.containedby", + "bbox": [ + 142, + 760, + 458, + 775 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "→ United States of America", + "bbox": [ + 142, + 777, + 352, + 789 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Greeley $\\rightarrow$ location.location.containedby $\\rightarrow$ Greeley Masonic Temple", + "bbox": [ + 142, + 793, + 458, + 822 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Question: What educational institution has a football sports team named Northern Colorado Bears is in Greeley, Colorado?", + "bbox": [ + 142, + 825, + 460, + 871 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Figure 5: An Example of Our Prompt", + "bbox": [ + 171, + 902, + 428, + 917 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/data/2025/2503_13xxx/2503.13804/53558f89-5ff8-41c8-b6ca-fe406c0656ac_model.json 
b/data/2025/2503_13xxx/2503.13804/53558f89-5ff8-41c8-b6ca-fe406c0656ac_model.json new file mode 100644 index 0000000000000000000000000000000000000000..62734dbda2e5d3775f16cd436ebca84d89dbef6f --- /dev/null +++ b/data/2025/2503_13xxx/2503.13804/53558f89-5ff8-41c8-b6ca-fe406c0656ac_model.json @@ -0,0 +1,1905 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.311, + 0.061, + 0.726 + ], + "angle": 270, + "content": "arXiv:2503.13804v1 [cs.AI] 18 Mar 2025" + }, + { + "type": "title", + "bbox": [ + 0.149, + 0.09, + 0.851, + 0.113 + ], + "angle": 0, + "content": "Empowering GraphRAG with Knowledge Filtering and Integration" + }, + { + "type": "text", + "bbox": [ + 0.145, + 0.139, + 0.851, + 0.158 + ], + "angle": 0, + "content": "Kai Guo\\(^{1}\\), Harry Shomer\\(^{1}\\), Shenglai Zeng\\(^{1}\\), Haoyu Han\\(^{1}\\), Yu Wang\\(^{2}\\), Jiliang Tang\\(^{1}\\)" + }, + { + "type": "text", + "bbox": [ + 0.282, + 0.157, + 0.714, + 0.174 + ], + "angle": 0, + "content": "\\(^{1}\\)Michigan State University \\(^{2}\\) University of Oregon" + }, + { + "type": "text", + "bbox": [ + 0.247, + 0.174, + 0.752, + 0.19 + ], + "angle": 0, + "content": "{guokai1, shomerha, zengshe1, hanhaoy1, tangjili} @msu.edu," + }, + { + "type": "text", + "bbox": [ + 0.398, + 0.191, + 0.601, + 0.207 + ], + "angle": 0, + "content": "{yuwang} @uoregon.edu" + }, + { + "type": "title", + "bbox": [ + 0.261, + 0.261, + 0.341, + 0.277 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.145, + 0.282, + 0.461, + 0.666 + ], + "angle": 0, + "content": "In recent years, large language models (LLMs) have revolutionized the field of natural language processing. However, they often suffer from knowledge gaps and hallucinations. Graph retrieval-augmented generation (GraphRAG) enhances LLM reasoning by integrating structured knowledge from external graphs. 
However, we identify two key challenges that plague GraphRAG: (1) Retrieving noisy and irrelevant information can degrade performance and (2) Excessive reliance on external knowledge suppresses the model's intrinsic reasoning. To address these issues, we propose GraphRAG-FI (Filtering & Integration), consisting of GraphRAG-Filtering and GraphRAG-Integration. GraphRAG-Filtering employs a two-stage filtering mechanism to refine retrieved information. GraphRAG-Integration employs a logits-based selection strategy to balance external knowledge from GraphRAG with the LLM's intrinsic reasoning, reducing over-reliance on retrievals. Experiments on knowledge graph QA tasks demonstrate that GraphRAG-FI significantly improves reasoning performance across multiple backbone models, establishing a more reliable and effective GraphRAG framework." + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.672, + 0.262, + 0.687 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.697, + 0.491, + 0.922 + ], + "angle": 0, + "content": "Large language models (LLMs) have achieved remarkable success in NLP tasks, particularly in tasks that require complex reasoning (Havrilla et al.; Wu et al., 2023; Hao et al., 2023). However, despite their strengths, LLMs are prone to hallucinations, resulting in incorrect or poor reasoning (Ji et al., 2023; Huang et al., 2024; Sriramanan et al., 2025). GraphRAG techniques have emerged as a promising solution to this problem (Han et al., 2024; Zhang et al., 2025; He et al., 2025; Mavromatis and Karypis, 2024), by integrating relevant information from external graphs. Knowledge graphs, which store facts in the form of a graph, are commonly used for this problem. 
Specifically, relevant" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.262, + 0.885, + 0.39 + ], + "angle": 0, + "content": "facts (i.e., triples) or paths are extracted from the knowledge graph and used to enrich the context of the LLMs with structured and reliable information (Luo et al., 2024; Li et al., 2025; Ma et al., 2024). This approach has shown ability to improve the reasoning capabilities and reduce the presence of hallucinations in LLMs (Sun et al.; Li et al., 2025; Dong et al., 2024)." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.391, + 0.885, + 0.648 + ], + "angle": 0, + "content": "To better assess the efficacy of GraphRAG, in Section 3 we conduct a preliminary study comparing its performance with an LLM-only model (i.e., LLM without GraphRAG). This comparison reveals both the advantages and limitations of GraphRAG. While GraphRAG improved reasoning accuracy by correcting some LLM errors, it also introduces some notable weaknesses. For example, incorporating external knowledge will sometimes cause questions that were originally answered correctly by the LLM to be misclassified. This highlights the dangers of retrieving irrelevant information. Furthermore, excessive retrieval compounds this issue by introducing both noise and redundant information, thus further hindering the reasoning process." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.649, + 0.885, + 0.873 + ], + "angle": 0, + "content": "Meanwhile, we find that LLM-only and GraphRAG can complement one another. Specifically, GraphRAG can enhance reasoning for those questions LLMs lack knowledge of; while excessive reliance on external information may cause the model to overlook internally known correct answers. These findings highlight two key limitations of existing GraphRAG methods. First, GraphRAG is highly susceptible to retrieving irrelevant or misleading information. 
Second, GraphRAG struggles to balance external retrieval with the LLM's internal knowledge, often missing parts of the answer that the LLM-only model can provide using its own knowledge." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.874, + 0.884, + 0.923 + ], + "angle": 0, + "content": "Inspired by these findings, we propose a novel design that aims to address these issues. First, we aim to enhance the retrieval quality to better avoid" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.113, + 0.085, + 0.493, + 0.342 + ], + "angle": 0, + "content": "retrieving irrelevant information. Second, we integrate GraphRAG with an LLM's intrinsic reasoning ability, thus only using GraphRAG when external knowledge is necessary. In particular, to mitigate the issue of retrieving irrelevant information, we introduce a two-stage filtering process. Furthermore, to mitigate GraphRAG from overrelying on retrieved information while underutilizing the LLM's inherent reasoning ability, we introduce a logits-based selection mechanism that dynamically integrates LLMs' standalone answers with GraphRAG's outputs. This approach ensures that the final response effectively balances external knowledge with the model's internal reasoning. The main contributions of our work are summarized as follows:" + }, + { + "type": "text", + "bbox": [ + 0.137, + 0.353, + 0.49, + 0.449 + ], + "angle": 0, + "content": "- We identify two key challenges in GraphRAG: (1) It is susceptible to errors by retrieving irrelevant or misleading information. (2) It overemphasizes the externally retrieved knowledge, at the expense of the intrinsic reasoning capabilities of LLMs." + }, + { + "type": "text", + "bbox": [ + 0.137, + 0.459, + 0.49, + 0.556 + ], + "angle": 0, + "content": "- We introduce a novel approach that enhances GraphRAG by incorporating a two-stage filtering mechanism to refine the retrieved knowledge and dynamically integrate this knowledge with a LLMs' standalone reasoning capabilities." 
+ }, + { + "type": "text", + "bbox": [ + 0.137, + 0.566, + 0.488, + 0.613 + ], + "angle": 0, + "content": "- Extensive experiments on knowledge graph QA demonstrate the effectiveness of our method across multiple backbone models." + }, + { + "type": "list", + "bbox": [ + 0.137, + 0.353, + 0.49, + 0.613 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.624, + 0.266, + 0.639 + ], + "angle": 0, + "content": "2 Related work" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.648, + 0.49, + 0.922 + ], + "angle": 0, + "content": "GraphRAG. GraphRAG aims to address hallucinations and outdated knowledge in LLMs by incorporating additional information retrieved from external knowledge bases (Sun et al.; Li et al., 2025; Dong et al., 2024). G-R retriever (He et al., 2025) identifies relevant nodes and edges for a given query based on cosine similarity, and then constructs a subgraph to aid in the generation process. Similarly, RoG (Luo et al., 2024) introduces a planning-retrieval-reasoning framework, where it retrieves reasoning paths guided by a planning module and performs reasoning using these paths. On the other hand, GNN-RAG (Mavromatis and Karypis, 2024) leverages Graph Neural Networks (GNNs) (Kipf and Welling, 2016) to process the intricate graph structures within knowledge graphs, enabling effective retrieval. They also use" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.085, + 0.885, + 0.181 + ], + "angle": 0, + "content": "retrieval augmentation techniques to enhance diversity. However, the effectiveness of these methods is heavily dependent on the quality of the retrieved information, and their performance significantly declines when the retrieved graph data is either noisy or unrelated to the query (He et al., 2025)." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.183, + 0.885, + 0.568 + ], + "angle": 0, + "content": "Filter Methods. 
Filtering attempts to only keep those pieces of retrieved information that are relevant to the given query (Gao et al., 2025). ChunkRAG (Singh et al., 2024) tries to improve RAG systems by assessing and filtering retrieved data at the chunk level, with each \"chunk\" representing a concise and coherent segment of a document. This method first applies semantic chunking to partition documents into meaningful sections. It then leverages LLM-based relevance scoring to evaluate how well each chunk aligns with the user query. Zeng et al. (2024b) thoroughly investigate LLM representation behaviors in relation to RAG, uncovering distinct patterns between positive and negative samples in the representation space. This distinction enables representation-based methods to achieve significantly better performance for certain tasks. Building on these insights, they introduce Rep-PCA, which employs representation classifiers for knowledge filtering. RoK (Wang et al., 2024) refines the reasoning paths within the subgraph by computing the average PageRank score for each path. Similarly, He et al. (2024) use PageRank to identify the most relevant entities." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.584, + 0.715, + 0.601 + ], + "angle": 0, + "content": "3 Preliminary studies" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.612, + 0.884, + 0.725 + ], + "angle": 0, + "content": "To evaluate the effectiveness of GraphRAG, we compare the performance with and without retrieved external knowledge. Furthermore, we analyze the attention scores of the LLM to assess its ability to discern both the relevance and importance of the retrieved information. Lastly, we evaluate the performance of internal knowledge filtering." 
+ }, + { + "type": "title", + "bbox": [ + 0.509, + 0.738, + 0.729, + 0.754 + ], + "angle": 0, + "content": "3.1 Experimental settings" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.761, + 0.885, + 0.922 + ], + "angle": 0, + "content": "In this section, we aim to study the importance of retrieving external information when using GraphRAG for knowledge graph QA. To do so, we report the QA performance when using: LLM with GraphRAG and LLM w/o GraphRAG (i.e., LLM-only). For GraphRAG, we use RoG (Luo et al., 2024) and GNN-RAG (Mavromatis and Karypis, 2024). For the LLM-only experiments, we use the fine-tuned LLaMA 2-7B model, which is the same LLM used by RoG. The experiments are con" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.134, + 0.082, + 0.47, + 0.219 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.113, + 0.229, + 0.49, + 0.33 + ], + "angle": 0, + "content": "Figure 1: Category A includes cases where both GraphRAG and the LLM-only model are correct. Category B covers instances where GraphRAG outperforms the LLM-only model, while Category C includes cases where the LLM-only model performs better than GraphRAG. Category D represents cases where both models fail." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.353, + 0.489, + 0.418 + ], + "angle": 0, + "content": "duced on two common datasets the WebQSP (Yih et al., 2016) and CWQ (Talmor and Berant, 2018) datasets. In this study, we mainly use the F1 score to evaluate the performance." 
+ }, + { + "type": "image", + "bbox": [ + 0.116, + 0.428, + 0.489, + 0.579 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.113, + 0.589, + 0.487, + 0.618 + ], + "angle": 0, + "content": "Figure 2: The relationship between path number and average F1" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.644, + 0.37, + 0.659 + ], + "angle": 0, + "content": "3.2 The Impact of GraphRAG" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.664, + 0.49, + 0.922 + ], + "angle": 0, + "content": "To understanding the effectiveness of GraphRAG, we compare prediction outcomes between LLM with GraphRAG and LLM w/o GraphRAG (i.e., LLM-only). We categorize the results into four groups based on F1 scores, as shown in the Figure 1. Category A includes cases where both GraphRAG and the LLM-only model provide correct answers. Category B consists of instances where GraphRAG produces a more accurate answer than the LLM-only model. Category C includes cases where the LLM-only model outperforms GraphRAG. Finally, Category D represents instances where both GraphRAG and the LLM-only model fail to generate the correct answer. Figure 1 illustrates the key observations from our experiments. While GraphRAG enhances certain predictions, it also" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.085, + 0.882, + 0.117 + ], + "angle": 0, + "content": "introduces notable challenges that require further investigation." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.128, + 0.884, + 0.256 + ], + "angle": 0, + "content": "Positive Impact of GraphRAG GraphRAG can enhance the LLM's reasoning capabilities by correcting errors that the standalone model would typically commit. Notably, in the category B, \\(45.64\\%\\) of previously incorrect responses were successfully rectified with the integration of GraphRAG. This highlights the advantage of leveraging structured knowledge graphs to boost LLM performance." 
+ }, + { + "type": "text", + "bbox": [ + 0.508, + 0.267, + 0.885, + 0.476 + ], + "angle": 0, + "content": "Limited Impact of GraphRAG Category A contains those answers where both GraphRAG and LLM-only are correct. This show that GraphRAG can sometimes preserve the performance of a LLM when the LLM already possesses the correct knowledge. Conversely, category D, representing \\(9.03\\%\\) of cases, corresponds to those cases where GraphRAG fails to enhance the model's accuracy. For this category, neither the standalone LLM nor GraphRAG are able to provide the correct answer. This pattern implies that GraphRAG does not always access or incorporate sufficiently informative or relevant knowledge." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.486, + 0.884, + 0.808 + ], + "angle": 0, + "content": "Negative Impact of GraphRAG A notable drawback of GraphRAG is that will occasionally degrade the performance of a standalone LLM. That is, it will sometimes lead to wrong predictions for queries that the standalone LLM originally got right. These instances are represented by category C and accounts for \\(16.89\\%\\) of samples when evaluating via the F1 score. In these cases, GraphRAG misleads the model rather than improving it. This suggests that some of the retrieved information may be incorrect, noisy, or irrelevant, ultimately leading to poorer predictions. Therefore, in some cases, LLMs without GraphRAG outperform those with GraphRAG, because existing works have shown that LLMs tend to over-rely on external information (Ren et al., 2023; Tan et al., 2024; Wang et al., 2023; Ni et al., 2024; Zeng et al., 2024a). When retrieval is insufficient or the quality of retrieved knowledge is low, this reliance can degrade generation quality." 
+ }, + { + "type": "title", + "bbox": [ + 0.509, + 0.82, + 0.868, + 0.851 + ], + "angle": 0, + "content": "3.3 The Impact of the Number of Retrieved Paths" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.858, + 0.884, + 0.922 + ], + "angle": 0, + "content": "Due to the structure of knowledge graphs, nodes with high degrees and numerous relational edges have a greater likelihood of yielding a large number of retrieved paths. In this subsection, we study" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.113, + 0.085, + 0.493, + 0.409 + ], + "angle": 0, + "content": "the impact of the number of retrieved paths on performance. Figure 2 illustrates the relationship between the number of retrieved paths and the model's performance. Interestingly, as indicated by the smoothed line (blue), incorporating a moderate amount of retrieved information enhances performance. However, increasing the number of retrieved paths ultimately leads to a decline in performance. This trend (green line) suggests that retrieving too much information will introduce noise, making it harder for the model to use the correct and relevant knowledge for the task. This phenomenon thus highlights an important insight - more information does not necessarily indicate better performance. Instead, an overabundance of retrieved data can overwhelm the model with irrelevant details. This observation underscores the necessity for effective filtering mechanisms that can prioritize high-quality, relevant knowledge while discarding extraneous or misleading information." 
+ }, + { + "type": "image_caption", + "bbox": [ + 0.115, + 0.42, + 0.453, + 0.451 + ], + "angle": 0, + "content": "3.4 Attention Reflects the Importance of Retrieved Information" + }, + { + "type": "image", + "bbox": [ + 0.125, + 0.469, + 0.48, + 0.605 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.114, + 0.615, + 0.489, + 0.645 + ], + "angle": 0, + "content": "Figure 3: Attention Scores for Retrieved Information With/Without Ground Truth" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.664, + 0.49, + 0.922 + ], + "angle": 0, + "content": "In this subsection, we analyze the ability of the LLM to distinguish the importance of retrieved external knowledge. The attention scores of a LLM can provide a natural indicator of the relevance and significance of the retrieved knowledge (Yang et al., 2024; Ben-Artzy and Schwartz, 2024). The attention scores, derived from the model's internal mechanisms, effectively capture which pieces of information are most influential in reaching the final decision. Inspired by recent work (Chuang et al., 2023; Halawi et al., 2023), which suggests that attention scores in the middle layers are more effective. We examine the attention scores of the (middle + 2)-th layer in the LLM for each retrieved path. We obtain the attention scores for all retrieved paths and categorize them into two groups: (1) paths that" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.085, + 0.885, + 0.198 + ], + "angle": 0, + "content": "contain the ground truth and (2) paths that do not. We then compute the average attention score for each group and present the results in Figure 3. As demonstrated in Figure 3, there is a clear alignment between the attention scores and the ground truth labels, suggesting that these scores can be used to assess the relevance of retrieved information." 
+ }, + { + "type": "text", + "bbox": [ + 0.508, + 0.199, + 0.885, + 0.327 + ], + "angle": 0, + "content": "This observation inspires a key insight: The attention scores highlight the most significant retrieved information, suggesting their potential use in filtering out noisy or irrelevant knowledge. Since retrieved information with lower attention scores contribute minimally to the final output, they can be pruned to streamline retrieval and enhance overall performance." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.339, + 0.787, + 0.355 + ], + "angle": 0, + "content": "3.5 Internal Knowledge Filtering" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.36, + 0.886, + 0.602 + ], + "angle": 0, + "content": "Large language models (LLMs) generate responses that may contain both correct and incorrect information. To assess the reliability of these responses, we analyze the associated logits, which represent the model's confidence in its predictions. Typically, higher confidence correlates with correctness (Ma et al., 2025; Virk et al., 2024). Leveraging this property, we implement \"Internal Knowledge Filtering\", which uses the logits to help refine the answer selection. The logits of answer can be directly obtained from the LLM's output. Formally, let \\( A_{L} \\) denote the sets of answer candidates from the LLM model. Furthermore, let it's corresponding logits after softmax function be given by \\( \\ell_{L}(a) \\). The filtering step is given by the following:" + }, + { + "type": "equation", + "bbox": [ + 0.564, + 0.613, + 0.884, + 0.633 + ], + "angle": 0, + "content": "\\[\nA _ {L} ^ {\\text {f i l t e r e d}} = \\left\\{a \\in A _ {L} \\mid \\ell_ {L} (a) \\geq \\tau_ {L} \\right\\}, \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.645, + 0.886, + 0.79 + ], + "angle": 0, + "content": "where \\(\\tau_{L} = 1\\). This allows us to filter out the responses that the LLM has low-confidence in. 
The experimental results are shown in Table 1. We can clearly see that that leveraging logits to filter out low-confidence responses has a large positive effect on performance. In this way, we can reconsider intrinsic knowledge and apply this approach to GraphRAG to better balance internal and external knowledge base on logits." + }, + { + "type": "table_caption", + "bbox": [ + 0.536, + 0.802, + 0.856, + 0.817 + ], + "angle": 0, + "content": "Table 1: Impact of logits on LLM performance" + }, + { + "type": "table", + "bbox": [ + 0.527, + 0.827, + 0.872, + 0.905 + ], + "angle": 0, + "content": "
MethodsWebQSPCWQ
HitF1HitF1
LLM66.1549.9740.2734.17
LLM with Logits84.1776.7461.8358.19
" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.115, + 0.085, + 0.251, + 0.099 + ], + "angle": 0, + "content": "3.6 Discussions" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.106, + 0.49, + 0.414 + ], + "angle": 0, + "content": "In this subsection, we summarize the key findings and discussions from our preliminary study. The performance issues observed in GraphRAG primarily arise from two key factors. (1) Noisy or Irrelevant Retrieval: Some retrieved paths contain irrelevant or misleading information. This negatively impacts the model's ability to properly answer the query. Furthermore, this noise can introduce conflicting or unnecessary information that hinders the decision-making process rather than improving it. (2) Lack of Consideration for LLM's Own Knowledge: GraphRAG does not always take into account the inherent reasoning ability of the LLM itself. In some cases, the retrieved information overrides the LLM's correct predictions, leading to performance degradation rather than enhancement. A more adaptive approach is needed to balance external knowledge retrieval with the model's internal knowledge." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.426, + 0.219, + 0.44 + ], + "angle": 0, + "content": "4 Method" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.453, + 0.49, + 0.629 + ], + "angle": 0, + "content": "Based on our analysis, we propose a new framework to address the identified challenges, guided by two key insights: (1) Filtering retrieved information: Given the tendency of GraphRAG to retrieve irrelevant or incorrect retrieved information, it is essential to refine the retrieved knowledge. (2) Properly leveraging the LLMs standalone capabilities: The LLM itself can often correctly answer some questions. It's thus necessary to effectively integrate and use the inherent reasoning ability of LLMs along with GraphRAG." 
+ }, + { + "type": "text", + "bbox": [ + 0.113, + 0.631, + 0.49, + 0.822 + ], + "angle": 0, + "content": "An overview of our framework GraphRAG-FI is given in Figure 4. It consists of two core components: GraphRAG-Filtering and GraphRAG-Integration. GraphRAG-Filtering first refines the retrieved information by removing irrelevant or misleading knowledge. GraphRAG-Integration module balances the retrieved knowledge with the LLM's inherent reasoning ability, thereby mitigating the overuse of retrieved information that can negatively impact performance. In the following subsections, we will introduce each component of our framework in detail." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.835, + 0.329, + 0.851 + ], + "angle": 0, + "content": "4.1 GraphRAG-Filtering" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.857, + 0.489, + 0.922 + ], + "angle": 0, + "content": "Let \\( P = \\{p_1, p_2, \\ldots, p_N\\} \\) denote the set of \\( N \\) retrieved paths or triplets, where each path \\( p_i \\) is assigned an attention score \\( a_i \\). Then we design filtering via the following two stages." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.085, + 0.883, + 0.15 + ], + "angle": 0, + "content": "Stage 1: Coarse Filtering using Attention: In the first stage, we perform a coarse filtering by retaining only those paths whose attention scores exceeds a threshold \\(\\tau\\). This is given formally by:" + }, + { + "type": "equation", + "bbox": [ + 0.588, + 0.162, + 0.884, + 0.18 + ], + "angle": 0, + "content": "\\[\nP _ {\\text {c o a r s e}} = \\left\\{p _ {i} \\in P \\mid a _ {i} \\geq \\tau \\right\\}. \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.192, + 0.885, + 0.386 + ], + "angle": 0, + "content": "Stage 2: Fine Filtering via LLMs: After the initial coarse filtering, which significantly reduces the number of candidate paths, we perform a more precise evaluation with a LLM on the remaining subset. 
This two-stage filtering approach not only enhances the quality of the retrieved paths but also greatly reduces the overall cost by limiting the use of the LLM to only those paths deemed promising in the first stage. Let \\( f(p) \\) represent the evaluation score provided by the LLM for a path \\( p \\), and let \\( \\tau' \\) be the corresponding threshold. The final set of filtered paths is then given by:" + }, + { + "type": "equation", + "bbox": [ + 0.568, + 0.398, + 0.884, + 0.417 + ], + "angle": 0, + "content": "\\[\nP _ {\\text {f i n a l}} = \\left\\{p \\in P _ {\\text {c o a r s e}} \\mid f (p) \\geq \\tau^ {\\prime} \\right\\}, \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.429, + 0.884, + 0.476 + ], + "angle": 0, + "content": "where \\(P_{\\mathrm{coarse}}\\) is the set of paths that passed the coarse filtering stage, \\(\\tau^{\\prime}\\) is not predefined but is determined by the LLM itself." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.486, + 0.884, + 0.567 + ], + "angle": 0, + "content": "Prompt Construction: After the two filtering stages, we incorporate the selected paths and query into the prompt to further guide the model's reasoning. The prompt contains the following two types of retrieved paths:" + }, + { + "type": "text", + "bbox": [ + 0.532, + 0.578, + 0.884, + 0.625 + ], + "angle": 0, + "content": "- High Priority Paths: These are the final filtered paths given by \\( P_{\\mathrm{final}} \\), which are considered the most reliable." + }, + { + "type": "text", + "bbox": [ + 0.532, + 0.637, + 0.885, + 0.735 + ], + "angle": 0, + "content": "- Additional Paths: We also consider the remaining paths included by the coarse filter but removed via the fine filter, \\( P_{\\mathrm{coarse}} - P_{\\mathrm{final}} \\). We conjecture that while they may not be as important as those paths in \\( P_{\\mathrm{final}} \\), they can still offer some useful supplementary context." 
+ }, + { + "type": "list", + "bbox": [ + 0.532, + 0.578, + 0.885, + 0.735 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.745, + 0.885, + 0.922 + ], + "angle": 0, + "content": "The new prompt is then constructed by first inserting a header for the high-priority paths, followed by each path on a separate line. The same process is repeated for the additional paths. By structuring the prompt in this way, we are able to clearly delineate the paths by their priority. This ensures that the most critical information \\((P_{\\mathrm{final}})\\) is emphasized and processed first, while still incorporating the supplementary context from the additional paths. An example prompt is given in Appendix A.2." + } + ], + [ + { + "type": "image", + "bbox": [ + 0.117, + 0.081, + 0.885, + 0.306 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.306, + 0.315, + 0.692, + 0.33 + ], + "angle": 0, + "content": "Figure 4: An overview of the GraphRAG-FI framework." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.355, + 0.418, + 0.388 + ], + "angle": 0, + "content": "4.2 Integration with LLMs' Internal Knowledge" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.415, + 0.49, + 0.56 + ], + "angle": 0, + "content": "As noted in Section 3.2, in addition to ensuring we only retrieve high-quality information, we also want to retain internal knowledge of the LLMs. As such, we want to also integrate the capabilities of just the LLM into our framework. However, a challenge is knowing when to defer to which method. When do we trust the answers given by GraphRAG and when the standalone LLM? Furthermore, how do we fuse the answers given by both methods?" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.572, + 0.49, + 0.814 + ], + "angle": 0, + "content": "To achieve this goal, we need a method to determine which answers produced by both LLM-only and GraphRAG are actually relevant. 
In Section 3.5, we found that the LLM's logits can provide a useful tool to refine the potential answers. That is, focusing only on those answers that are given a higher confidence is helpful. This naturally provides us with an easy way to focus on just the high-quality information. For both GraphRAG and the LLM-only model, we filter the answers based on their logits, ensuring that only high-confidence responses are retained. After this logits-based filtering, the refined answers from both sources are combined to produce the final answer, thereby enhancing robustness and accuracy." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.825, + 0.49, + 0.922 + ], + "angle": 0, + "content": "Formally, let \\( A_G \\) and \\( A_L \\) denote the sets of answer candidates from GraphRAG and the LLM-only model, respectively. We further use \\( a \\) to indicate a single candidate answer in either set. Furthermore, let their corresponding logits after the softmax function be given by \\( \\ell_G(a) \\) and \\( \\ell_L(a) \\). The" + }, + { + "type": "text", + "bbox": [ + 0.51, + 0.355, + 0.8, + 0.372 + ], + "angle": 0, + "content": "filtering step is given by the following:" + }, + { + "type": "equation", + "bbox": [ + 0.563, + 0.382, + 0.884, + 0.401 + ], + "angle": 0, + "content": "\\[\nA _ {G} ^ {\\text {f i l t e r e d}} = \\left\\{a \\in A _ {G} \\mid \\ell_ {G} (a) \\geq \\tau_ {G} \\right\\}, \\tag {4}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.563, + 0.403, + 0.884, + 0.423 + ], + "angle": 0, + "content": "\\[\nA _ {L} ^ {\\text {f i l t e r e d}} = \\left\\{a \\in A _ {L} \\mid \\ell_ {L} (a) \\geq \\tau_ {L} \\right\\}, \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.435, + 0.884, + 0.483 + ], + "angle": 0, + "content": "where \\(\\tau_{G}\\) and \\(\\tau_{L}\\) are predefined thresholds, \\(\\tau_{L}\\) is set to 1. 
Subsequently, the final answer is determined by combining the filtered sets:" + }, + { + "type": "equation", + "bbox": [ + 0.555, + 0.494, + 0.884, + 0.521 + ], + "angle": 0, + "content": "\\[\nA _ {\\text {f i n a l}} = \\operatorname {C o m b i n e} \\left(A _ {G} ^ {\\text {f i l t e r e d}}, A _ {L} ^ {\\text {f i l t e r e d}}\\right), \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.531, + 0.884, + 0.58 + ], + "angle": 0, + "content": "where \\(\\operatorname{Combine}(\\cdot)\\) denotes the function that integrates the filtered answers into the final reliable output." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.591, + 0.649, + 0.608 + ], + "angle": 0, + "content": "5 Experiment" + }, + { + "type": "text", + "bbox": [ + 0.507, + 0.617, + 0.885, + 0.761 + ], + "angle": 0, + "content": "In our experiments, we seek to address the following research questions: RQ1: How effective is the proposed method when applied to state-of-the-art GraphRAG retrievers in the knowledge graph QA task? RQ2: How does the proposed method compare to other filtering approaches? RQ3: How does the performance change when more noisy information is introduced? and RQ4: What is the impact of the two modules on performance?" + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.772, + 0.719, + 0.789 + ], + "angle": 0, + "content": "5.1 Experiment Settings" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.793, + 0.885, + 0.922 + ], + "angle": 0, + "content": "Datasets. To assess the effectiveness of our method, we evaluate it on two widely recognized KGQA benchmark datasets: WebQSP (Yih et al., 2016) and CWQ (Talmor and Berant, 2018). WebQSP contains 4,737 natural language questions that require reasoning over paths of up to two hops. 
In contrast, CWQ includes 34,699 more complex questions that necessitate multi-hop reasoning over" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.207, + 0.083, + 0.788, + 0.098 + ], + "angle": 0, + "content": "Table 2: Performance comparison with different baselines on the two KGQA datasets." + }, + { + "type": "table", + "bbox": [ + 0.16, + 0.108, + 0.837, + 0.48 + ], + "angle": 0, + "content": "
TypeMethodsWebQSPCWQ
HitF1HitF1
LLMsFlan-T5-xl(Chung et al., 2024)31.0-14.7-
Alpaca-7B(Taori et al., 2023)51.8-27.4-
LLaMA2-Chat-7B(Touvron et al., 2023)64.4-34.6-
ChatGPT66.8-39.9-
ChatGPT+CoT75.6-48.9-
LLMs+KGsROG86.7370.7561.9154.95
ROG + Similarity85.5069.3861.6254.38
ROG + PageRank85.4469.6061.3454.41
ROG + GraphRAG-Filtering87.4073.4163.8657.25
ROG + GraphRAG-FI89.2573.8664.8255.12
GNN-RAG90.1173.2569.1060.55
GNN-RAG + Similarity89.6872.1768.5060.26
GNN-RAG + PageRank89.1871.9266.7558.73
GNN-RAG + GraphRAG-Filtering91.2874.7469.7060.96
GNN-RAG + GraphRAG-FI91.8975.9871.1260.34
SubgraphRAG76.9064.6553.8750.43
SubgraphRAG + Similarity72.7259.9852.0548.27
SubgraphRAG + PageRank61.7950.6546.7543.23
SubgraphRAG + GraphRAG-Filtering81.0168.4058.8254.71
SubgraphRAG + GraphRAG-FI81.0868.2858.9652.52
" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.504, + 0.49, + 0.584 + ], + "angle": 0, + "content": "up to four hops. Both datasets are built upon Freebase, which consists of around 88 million entities, 20 thousand relations, and 126 million triples. Further details on the datasets are provided in Appendix A.1." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.592, + 0.49, + 0.881 + ], + "angle": 0, + "content": "Retriever Backbones. Our framework adopts three existing retrieval methods as its backbone: path-based retrieval (ROG (Luo et al., 2024)), GNN (Mavromatis and Karypis, 2024)), and subgraph-based retrieval (SubgraphRAG (Li et al., 2025)). Path-based retrieval extracts relevant paths using heuristics or shortest-path algorithms, while GNN-based retrieval leverages a Graph Neural Network to learn and retrieve informative paths. In contrast, subgraph-based retrieval retrieves relevant subgraphs and encodes them as triples, enabling fine-grained relational reasoning. Therefore, both path-based and GNN-based methods generate paths as input for the LLM. Lastly, subgraph-based methods give triples (i.e., edges) as input to the LLM that take the form of \\((h,r,t)\\). By considering these three methods, we are able to test our framework on a diverse set of retrieval methods." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.89, + 0.49, + 0.922 + ], + "angle": 0, + "content": "Filter Baselines. The most commonly used filtering methods for RAG are similarity-based ap" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.504, + 0.885, + 0.648 + ], + "angle": 0, + "content": "proaches used in (Gao et al., 2025). Similarity-based methods evaluate the relevance of retrieved information by measuring feature similarity. For retrieval over graphs, PageRank-based filtering is widely adopted (Wang et al., 2024). PageRank-based filtering leverages the graph structure to rank nodes based on their connectivity and importance. 
These methods provide a baseline filtering mechanism for refining the retrieved results." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.664, + 0.885, + 0.923 + ], + "angle": 0, + "content": "Implementation and Evaluation Metrics. We use LLaMA2-Chat-7B from ROG as the LLM backbone, which is instruction-finetuned on the training split of WebQSP and CWQ, as well as Freebase, for three epochs. For the similarity-based filter, we utilize SentenceTransformer ('all-MiniLM-L6-v2') to generate representations for retrieval. We evaluate our retrieval methods using both Hit Rate (Hit) and F1-score (F1). Hit Rate measures the proportion of relevant items successfully retrieved, reflecting retrieval effectiveness. F1-score balances precision and recall, providing a comprehensive assessment of retrieval quality. These metrics ensure a robust evaluation of retrieval performance. We adjust the thresholds \\(\\tau\\) and \\(\\tau_{G}\\) within the ranges [top 40, top 50] and [0.4, 0.5], respectively." + } + ], + [ + { + "type": "title", + "bbox": [ + 0.114, + 0.085, + 0.267, + 0.099 + ], + "angle": 0, + "content": "5.2 Main Results" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.106, + 0.489, + 0.152 + ], + "angle": 0, + "content": "In this section, we evaluate the performance of our method with various retrievers and compare it against baseline filter models." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.155, + 0.489, + 0.378 + ], + "angle": 0, + "content": "RQ1: KGQA Performance Comparison. In this subsection, we apply our method to different retrievers, including the path-based retriever, GNN-based retriever, and subgraph-based retriever. 
The results presented in Table 2 demonstrate that our method consistently improves all retrievers, achieving an average improvement of \\(3.81\\%\\) in Hit and \\(2.35\\%\\) in F1 over ROG, \\(2.46\\%\\) in Hit and \\(1.7\\%\\) in F1 over GNN-RAG, and significant gains of \\(7.47\\%\\) in Hit and \\(4.88\\%\\) in F1 over SubgraphRAG across two datasets. These results demonstrate that our approach is effective across different retrieval paradigms, reinforcing its adaptability to various retrieval strategies in QA tasks." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.38, + 0.49, + 0.604 + ], + "angle": 0, + "content": "RQ2: Comparison with other filter methods. We compare our method against other filtering baselines, with the results presented in Table 2. Our approach consistently outperforms competing methods across both datasets and retriever types. Specifically, for ROG, our method can achieve an average improvement of \\(4.78\\%\\) in Hit and \\(3.95\\%\\) in F1 compared to similarity-based filtering on both datasets. Furthermore, compared to the PageRank-based filtering method, our approach yields an average increase of \\(5.03\\%\\) in Hit and \\(3.70\\%\\) in F1 across both datasets. These results highlight the superiority of our method in enhancing retrieval effectiveness and overall performance." + }, + { + "type": "table_caption", + "bbox": [ + 0.141, + 0.617, + 0.46, + 0.631 + ], + "angle": 0, + "content": "Table 3: Performance when adding more noise" + }, + { + "type": "table", + "bbox": [ + 0.132, + 0.641, + 0.475, + 0.716 + ], + "angle": 0, + "content": "
MethodsWebQSPCWQ
HitF1HitF1
ROG-original86.7370.7561.9154.95
ROG*85.8768.8160.4953.72
ROG* + GraphRAG-Filtering86.6173.0161.9155.67
" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.741, + 0.318, + 0.755 + ], + "angle": 0, + "content": "5.3 Robustness to Noise" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.761, + 0.49, + 0.921 + ], + "angle": 0, + "content": "In this subsection, we evaluate robustness of different methods to noise. To evaluate the noise resistance of the backbone model and our filter method, we use GPT to generate 30 additional noise paths that contain both irrelevant and incorrect information. This information is then incorporated into the retrieved context. We then analyze the impact of this noise on performance. The experimental results presented in Table 3, ROG* represents the cases where noise is introduced. As the noise level" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.085, + 0.885, + 0.214 + ], + "angle": 0, + "content": "increases, the Hit score decreases by \\(2.29\\%\\), and the F1 score drops by \\(2.23\\%\\) on the CWQ dataset, highlighting the model's sensitivity to noise. However, when applying our method, we observe a \\(2.23\\%\\) improvement in Hit and a \\(3.63\\%\\) improvement in F1 over \\(\\mathrm{ROG^{*}}\\) on CWQ. These results demonstrate the effectiveness of our approach in mitigating the negative impact of noisy retrieval." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.224, + 0.675, + 0.24 + ], + "angle": 0, + "content": "5.4 Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.245, + 0.885, + 0.438 + ], + "angle": 0, + "content": "We conduct an ablation study to analyze the effectiveness of the filtering module and integrating module in GraphRAG-FI. From the results in Table 4, we can see that GraphRAG-Filtering is useful for the ROG retriever, as it improves both the F1 and Hit scores. For example, GraphRAG-Filtering increases the F1 score by \\(4.19\\%\\) and the Hit score by \\(3.15\\%\\) on CWQ dataset. 
We also see a boost in performance for GraphRAG-Integration, with a \\(1.60\\%\\) and \\(2.62\\%\\) increase in F1 and Hit score, respectively, on WebQSP. These results demonstrate the effectiveness of our two components." + }, + { + "type": "table_caption", + "bbox": [ + 0.612, + 0.45, + 0.779, + 0.464 + ], + "angle": 0, + "content": "Table 4: Ablation study." + }, + { + "type": "table", + "bbox": [ + 0.515, + 0.474, + 0.882, + 0.566 + ], + "angle": 0, + "content": "
MethodsWebQSPCWQ
HitF1HitF1
ROG-original86.7370.7561.9154.95
ROG + GraphRAG-Filtering87.4073.4163.8657.25
ROG + GraphRAG-Integration89.0071.8864.2555.19
ROG + GraphRAG-FI89.2573.8664.8255.12
" + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.591, + 0.642, + 0.606 + ], + "angle": 0, + "content": "6 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.616, + 0.885, + 0.873 + ], + "angle": 0, + "content": "In this work, we propose GraphRAG-FI (Filtering & Integration), an enhanced GraphRAG framework that addresses key challenges in graph retrieval-augmented generation. By incorporating GraphRAG-Filtering, which utilizes a two-stage filtering mechanism to refine retrieved information, and GraphRAG-Integration, which employs a logits-based selection strategy to balance retrieval and intrinsic reasoning, our approach mitigates the impact of noisy retrievals and excessive dependence on external knowledge. Experimental results on knowledge graph QA tasks demonstrate that GraphRAG-FI significantly improves reasoning accuracy across multiple backbone models, establishing a more reliable and effective GraphRAG framework." + } + ], + [ + { + "type": "title", + "bbox": [ + 0.115, + 0.085, + 0.221, + 0.1 + ], + "angle": 0, + "content": "Limitations" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.109, + 0.493, + 0.352 + ], + "angle": 0, + "content": "In this work, we identify two key challenges in GraphRAG: (1) it is prone to errors due to the retrieval of irrelevant or misleading information, and (2) it places excessive emphasis on externally retrieved knowledge, which can diminish the intrinsic reasoning capabilities of LLMs. Future research will first explore a broader range of large language models to evaluate their effectiveness within GraphRAG. Additionally, further investigation into diverse filtering methods could enhance the refinement of retrieved information and reduce noise. More sophisticated fusion strategies may also be explored to dynamically balance external knowledge with the intrinsic reasoning of LLMs, enabling more effective information integration." 
+ }, + { + "type": "title", + "bbox": [ + 0.115, + 0.377, + 0.214, + 0.391 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.399, + 0.49, + 0.452 + ], + "angle": 0, + "content": "Amit Ben-Artzy and Roy Schwartz. 2024. Attend first, consolidate later: On the importance of attention in different llm layers. arXiv preprint arXiv:2409.03621." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.46, + 0.49, + 0.526 + ], + "angle": 0, + "content": "Yung-Sung Chuang, Yujia Xie, Hongyin Luo, Yoon Kim, James Glass, and Pengcheng He. 2023. Dola: Decoding by contrasting layers improves factuality in large language models. arXiv preprint arXiv:2309.03883." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.535, + 0.49, + 0.601 + ], + "angle": 0, + "content": "Hyung Won Chung, Le Hou, Shayne Longpre, Barret Zoph, Yi Tay, William Fedus, Yunxuan Li, Xuezhi Wang, Mostafa Dehghani, Siddhartha Brahma, et al. 2024. Scaling instruction-finetuned language models. Journal of Machine Learning Research, 25(70):1-53." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.609, + 0.489, + 0.662 + ], + "angle": 0, + "content": "Jialin Dong, Bahare Fatemi, Bryan Perozzi, Lin F Yang, and Anton Tsitsulin. 2024. Don't forget to connect! improving rag with graph-based reranking. arXiv preprint arXiv:2405.18414." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.67, + 0.489, + 0.737 + ], + "angle": 0, + "content": "Zengyi Gao, Yukun Cao, Hairu Wang, Ao Ke, Yuan Feng, Xike Xie, and S Kevin Zhou. 2025. Frag: A flexible modular framework for retrieval-augmented generation based on knowledge graphs. arXiv preprint arXiv:2501.09957." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.745, + 0.49, + 0.798 + ], + "angle": 0, + "content": "Danny Halawi, Jean-Stanislas Denain, and Jacob Steinhardt. 2023. Overthinking the truth: Understanding how language models process false demonstrations. arXiv preprint arXiv:2307.09476." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.806, + 0.49, + 0.873 + ], + "angle": 0, + "content": "Haoyu Han, Yu Wang, Harry Shomer, Kai Guo, Jiayuan Ding, Yongjia Lei, Mahantesh Halappanavar, Ryan A Rossi, Subhabrata Mukherjee, Xianfeng Tang, et al. 2024. Retrieval-augmented generation with graphs (graphrag). arXiv preprint arXiv:2501.00309." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.881, + 0.49, + 0.921 + ], + "angle": 0, + "content": "Shibo Hao, Yi Gu, Haodi Ma, Joshua Hong, Zhen Wang, Daisy Wang, and Zhiting Hu. 2023. Reasoning with language model is planning with world" + }, + { + "type": "list", + "bbox": [ + 0.117, + 0.399, + 0.49, + 0.921 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.529, + 0.086, + 0.885, + 0.127 + ], + "angle": 0, + "content": "model. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 8154-8173." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.137, + 0.885, + 0.216 + ], + "angle": 0, + "content": "Alexander Havrilla, Sharath Chandra Rarparthy, Christoforos Nalmpantis, Jane Dwivedi-Yu, Maksym Zhuravinskyi, Eric Hambro, and Roberta Raileanu. Gore: When, where, and how to improve llm reasoning via global and local refinements. In *Forty-first International Conference on Machine Learning*." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.227, + 0.885, + 0.306 + ], + "angle": 0, + "content": "Xiaoxin He, Yijun Tian, Yifei Sun, Nitesh Chawla, Thomas Laurent, Yann LeCun, Xavier Bresson, and Bryan Hooi. 2025. G-retriever: Retrieval-augmented generation for textual graph understanding and question answering. Advances in Neural Information Processing Systems, 37:132876-132907." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.317, + 0.885, + 0.383 + ], + "angle": 0, + "content": "Xiaoxin He, Yijun Tian, Yifei Sun, Nitesh V Chawla, Thomas Laurent, Yann LeCun, Xavier Bresson, and Bryan Hooi. 2024. 
G-retriever: Retrieval-augmented generation for textual graph understanding and question answering. arXiv preprint arXiv:2402.07630." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.394, + 0.885, + 0.473 + ], + "angle": 0, + "content": "Lei Huang, Weijiang Yu, Weitao Ma, Weihong Zhong, Zhangyin Feng, Haotian Wang, Qianglong Chen, Weihua Peng, Xiaocheng Feng, Bing Qin, et al. 2024. A survey on hallucination in large language models: Principles, taxonomy, challenges, and open questions. ACM Transactions on Information Systems." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.484, + 0.885, + 0.551 + ], + "angle": 0, + "content": "Ziwei Ji, Tiezheng Yu, Yan Xu, Nayeon Lee, Etsuko Ishii, and Pascale Fung. 2023. Towards mitigating llm hallucination via self reflection. In *Findings of the Association for Computational Linguistics: EMNLP* 2023, pages 1827-1843." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.561, + 0.885, + 0.601 + ], + "angle": 0, + "content": "Thomas N Kipf and Max Welling. 2016. Semi-supervised classification with graph convolutional networks. arXiv preprint arXiv:1609.02907." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.611, + 0.885, + 0.678 + ], + "angle": 0, + "content": "Mufei Li, Siqi Miao, and Pan Li. 2025. Simple is effective: The roles of graphs and large language models in knowledge-graph-based retrieval-augmented generation. In International Conference on Learning Representations." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.688, + 0.885, + 0.743 + ], + "angle": 0, + "content": "Linhao Luo, Yuan-Fang Li, Gholamreza Haffari, and Shirui Pan. 2024. Reasoning on graphs: Faithful and interpretable large language model reasoning. In International Conference on Learning Representations." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.753, + 0.885, + 0.793 + ], + "angle": 0, + "content": "Huan Ma, Jingdong Chen, Guangyu Wang, and Changqing Zhang. 2025. Estimating llm uncertainty with logits. 
arXiv preprint arXiv:2502.00290." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.804, + 0.885, + 0.871 + ], + "angle": 0, + "content": "Shengjie Ma, Chengjin Xu, Xuhui Jiang, Muzhi Li, Huaren Qu, and Jian Guo. 2024. Think-on-graph 2.0: Deep and interpretable large language model reasoning with knowledge graph-guided retrieval. arXiv e-prints, pages arXiv-2407." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.881, + 0.885, + 0.921 + ], + "angle": 0, + "content": "Costas Mavromatis and George Karypis. 2024. Gnrag: Graph neural retrieval for large language model reasoning. arXiv preprint arXiv:2405.20139." + }, + { + "type": "list", + "bbox": [ + 0.512, + 0.086, + 0.885, + 0.921 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.086, + 0.49, + 0.14 + ], + "angle": 0, + "content": "Shiyu Ni, Keping Bi, Jiafeng Guo, and Xueqi Cheng. 2024. When do llms need retrieval augmentation? mitigating llms' overconfidence helps retrieval augmentation. arXiv preprint arXiv:2402.11457." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.153, + 0.488, + 0.231 + ], + "angle": 0, + "content": "Ruiyang Ren, Yuhao Wang, Yingqi Qu, Wayne Xin Zhao, Jing Liu, Hao Tian, Hua Wu, Ji-Rong Wen, and Haifeng Wang. 2023. Investigating the factual knowledge boundary of large language models with retrieval augmentation. arXiv preprint arXiv:2307.11019." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.245, + 0.488, + 0.311 + ], + "angle": 0, + "content": "Ishneet Sukhvinder Singh, Ritvik Aggarwal, Ibrahim Allahverdiyev, Muhammad Taha, Aslihan Akalin, Kevin Zhu, and Sean O'Brien. 2024. Chunkrag: Novel lmm-chunk filtering method for rag systems. arXiv preprint arXiv:2410.19572." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.324, + 0.488, + 0.402 + ], + "angle": 0, + "content": "Gaurang Sriramanan, Siddhant Bharti, Vinu Sankar Sadasivan, Shoumik Saha, Priyatham Kattakinda, and Soheil Feizi. 2025. 
Llm-check: Investigating detection of hallucinations in large language models. Advances in Neural Information Processing Systems, 37:34188-34216." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.417, + 0.488, + 0.496 + ], + "angle": 0, + "content": "Jiashuo Sun, Chengjin Xu, Lumingyuan Tang, Saizhuo Wang, Chen Lin, Yeyun Gong, Lionel Ni, Heung-Yeung Shum, and Jian Guo. Think-on-graph: Deep and responsible reasoning of large language model on knowledge graph. In The Twelfth International Conference on Learning Representations." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.51, + 0.488, + 0.55 + ], + "angle": 0, + "content": "Alon Talmor and Jonathan Berant. 2018. The web as a knowledge-base for answering complex questions. arXiv preprint arXiv:1803.06643." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.563, + 0.488, + 0.629 + ], + "angle": 0, + "content": "Hexiang Tan, Fei Sun, Wanli Yang, Yuanzhuo Wang, Qi Cao, and Xueqi Cheng. 2024. Blinded by generated contexts: How language models merge generated and retrieved contexts for open-domain qa? arXiv preprint arXiv:2401.11911." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.643, + 0.488, + 0.696 + ], + "angle": 0, + "content": "Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li, Carlos Guestrin, Percy Liang, and Tatsunori B Hashimoto. 2023. Stanford alpaca: An instruction-following llama model." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.709, + 0.488, + 0.787 + ], + "angle": 0, + "content": "Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. 2023. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.801, + 0.488, + 0.854 + ], + "angle": 0, + "content": "Yuvraj Virk, Premkumar Devanbu, and Toufique Ahmed. 2024. 
Enhancing trust in llm-generated code summaries with calibrated confidence scores. arXiv preprint arXiv:2404.19318." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.868, + 0.488, + 0.92 + ], + "angle": 0, + "content": "Yile Wang, Peng Li, Maosong Sun, and Yang Liu. 2023. Self-knowledge guided retrieval augmentation for large language models. arXiv preprint arXiv:2310.05002." + }, + { + "type": "list", + "bbox": [ + 0.117, + 0.086, + 0.49, + 0.92 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.513, + 0.086, + 0.882, + 0.152 + ], + "angle": 0, + "content": "Yuqi Wang, Boran Jiang, Yi Luo, Dawei He, Peng Cheng, and Liangcai Gao. 2024. Reasoning on efficient knowledge paths: Knowledge graph guides large language model for domain question answering. arXiv preprint arXiv:2404.10384." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.162, + 0.882, + 0.227 + ], + "angle": 0, + "content": "Xiaoqian Wu, Yong-Lu Li, Jianhua Sun, and Cewu Lu. 2023. Symbol-llm: leverage language models for symbolic system in visual human activity reasoning. Advances in Neural Information Processing Systems, 36:29680-29691." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.238, + 0.882, + 0.291 + ], + "angle": 0, + "content": "Lijie Yang, Zhihao Zhang, Zhuofu Chen,zikun Li, and Zhihao Jia. 2024. Tidaldecode: Fast and accurate llm decoding with position persistent sparse attention. arXiv preprint arXiv:2410.05076." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.3, + 0.882, + 0.38 + ], + "angle": 0, + "content": "Wen-tau Yih, Matthew Richardson, Christopher Meek, Ming-Wei Chang, and Jina Suh. 2016. The value of semantic parse labeling for knowledge base question answering. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pages 201-206." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.389, + 0.882, + 0.456 + ], + "angle": 0, + "content": "Shenglai Zeng, Jiankun Zhang, Pengfei He, Yue Xing, Yiding Liu, Han Xu, Jie Ren, Shuaiqiang Wang, Dawei Yin, Yi Chang, et al. 2024a. The good and the bad: Exploring privacy issues in retrieval-augmented generation (rag). arXiv preprint arXiv:2402.16893." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.465, + 0.882, + 0.544 + ], + "angle": 0, + "content": "Shenglai Zeng, Jiankun Zhang, Bingheng Li, Yuping Lin, Tianqi Zheng, Dante Everaert, Hanqing Lu, Hui Liu, Yue Xing, Monica Xiao Cheng, et al. 2024b. Towards knowledge checking in retrieval-augmented generation: A representation perspective. arXiv preprint arXiv:2411.14572." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.554, + 0.882, + 0.632 + ], + "angle": 0, + "content": "Qinggang Zhang, Shengyuan Chen, Yuanchen Bei, Zheng Yuan, Huachi Zhou, Zijin Hong, Junnan Dong, Hao Chen, Yi Chang, and Xiao Huang. 2025. A survey of graph retrieval-augmented generation for customized large language models. arXiv preprint arXiv:2501.13958." + }, + { + "type": "list", + "bbox": [ + 0.512, + 0.086, + 0.882, + 0.632 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.199, + 0.083, + 0.402, + 0.096 + ], + "angle": 0, + "content": "Table 5: Statistics of datasets." + }, + { + "type": "table", + "bbox": [ + 0.142, + 0.108, + 0.461, + 0.161 + ], + "angle": 0, + "content": "
Datasets#Train#TestMax #hop
WebQSP2,8261,6282
CWQ27,6393,5314
" + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.184, + 0.237, + 0.201 + ], + "angle": 0, + "content": "A Appendix" + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.21, + 0.232, + 0.223 + ], + "angle": 0, + "content": "A.1 Datasets" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.23, + 0.489, + 0.325 + ], + "angle": 0, + "content": "We utilize two benchmark KGQA datasets, WebQSP (Yih et al., 2016) and CWQ (Talmor and Berant, 2018), as proposed in previous studies. Following ROG, we maintain the same training and testing splits. The dataset statistics are provided in Table 5." + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.337, + 0.299, + 0.353 + ], + "angle": 0, + "content": "A.2 Prompt Example" + }, + { + "type": "title", + "bbox": [ + 0.143, + 0.526, + 0.214, + 0.54 + ], + "angle": 0, + "content": "Prompts" + }, + { + "type": "text", + "bbox": [ + 0.142, + 0.552, + 0.462, + 0.615 + ], + "angle": 0, + "content": "Based on the reasoning paths, please answer the given question. Please keep the answer as simple as possible and return all the possible answers as a list." 
+ }, + { + "type": "title", + "bbox": [ + 0.144, + 0.617, + 0.28, + 0.631 + ], + "angle": 0, + "content": "Reasoning Paths:" + }, + { + "type": "text", + "bbox": [ + 0.149, + 0.633, + 0.297, + 0.647 + ], + "angle": 0, + "content": "High Priority Paths:" + }, + { + "type": "text", + "bbox": [ + 0.143, + 0.649, + 0.462, + 0.695 + ], + "angle": 0, + "content": "Northern Colorado Bears football \\(\\rightarrow\\) education.educational_institution.sports_teams \\(\\rightarrow\\) University of Northern Colorado" + }, + { + "type": "text", + "bbox": [ + 0.148, + 0.697, + 0.278, + 0.71 + ], + "angle": 0, + "content": "Additional Paths:" + }, + { + "type": "text", + "bbox": [ + 0.143, + 0.713, + 0.462, + 0.759 + ], + "angle": 0, + "content": "Northern Colorado Bears football \\(\\rightarrow\\) education.educational_institution.sports_teams \\(\\rightarrow\\) University of Northern Colorado" + }, + { + "type": "text", + "bbox": [ + 0.143, + 0.761, + 0.46, + 0.776 + ], + "angle": 0, + "content": "Greeley \\(\\rightarrow\\) location.location.containedby" + }, + { + "type": "text", + "bbox": [ + 0.143, + 0.778, + 0.354, + 0.79 + ], + "angle": 0, + "content": "→ United States of America" + }, + { + "type": "text", + "bbox": [ + 0.143, + 0.794, + 0.459, + 0.824 + ], + "angle": 0, + "content": "Greeley \\(\\rightarrow\\) location.location.containedby \\(\\rightarrow\\) Greeley Masonic Temple" + }, + { + "type": "text", + "bbox": [ + 0.143, + 0.826, + 0.462, + 0.872 + ], + "angle": 0, + "content": "Question: What educational institution has a football sports team named Northern Colorado Bears is in Greeley, Colorado?" 
+ }, + { + "type": "image_caption", + "bbox": [ + 0.172, + 0.903, + 0.43, + 0.918 + ], + "angle": 0, + "content": "Figure 5: An Example of Our Prompt" + } + ] +] \ No newline at end of file diff --git a/data/2025/2503_13xxx/2503.13804/53558f89-5ff8-41c8-b6ca-fe406c0656ac_origin.pdf b/data/2025/2503_13xxx/2503.13804/53558f89-5ff8-41c8-b6ca-fe406c0656ac_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..eef8a30c26af2ac6d2913abe5a78d4dc3635a749 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13804/53558f89-5ff8-41c8-b6ca-fe406c0656ac_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e6a289ee05271c424d04f44ff111d8e8e95f09431188d37a37b5dd09118cbd8f +size 811893 diff --git a/data/2025/2503_13xxx/2503.13804/full.md b/data/2025/2503_13xxx/2503.13804/full.md new file mode 100644 index 0000000000000000000000000000000000000000..8427e291926ee5d8291746b18df0ae4ca03ad3fe --- /dev/null +++ b/data/2025/2503_13xxx/2503.13804/full.md @@ -0,0 +1,297 @@ +# Empowering GraphRAG with Knowledge Filtering and Integration + +Kai Guo $^{1}$ , Harry Shomer $^{1}$ , Shenglai Zeng $^{1}$ , Haoyu Han $^{1}$ , Yu Wang $^{2}$ , Jiliang Tang $^{1}$ + +$^{1}$ Michigan State University $^{2}$ University of Oregon + +{guokai1, shomerha, zengshe1, hanhaoy1, tangjili} @msu.edu, + +{yuwang} @uoregon.edu + +# Abstract + +In recent years, large language models (LLMs) have revolutionized the field of natural language processing. However, they often suffer from knowledge gaps and hallucinations. Graph retrieval-augmented generation (GraphRAG) enhances LLM reasoning by integrating structured knowledge from external graphs. However, we identify two key challenges that plague GraphRAG: (1) Retrieving noisy and irrelevant information can degrade performance and (2) Excessive reliance on external knowledge suppresses the model's intrinsic reasoning. 
To address these issues, we propose GraphRAG-FI (Filtering & Integration), consisting of GraphRAG-Filtering and GraphRAG-Integration. GraphRAG-Filtering employs a two-stage filtering mechanism to refine retrieved information. GraphRAG-Integration employs a logits-based selection strategy to balance external knowledge from GraphRAG with the LLM's intrinsic reasoning, reducing over-reliance on retrievals. Experiments on knowledge graph QA tasks demonstrate that GraphRAG-FI significantly improves reasoning performance across multiple backbone models, establishing a more reliable and effective GraphRAG framework. + +# 1 Introduction + +Large language models (LLMs) have achieved remarkable success in NLP tasks, particularly in tasks that require complex reasoning (Havrilla et al.; Wu et al., 2023; Hao et al., 2023). However, despite their strengths, LLMs are prone to hallucinations, resulting in incorrect or poor reasoning (Ji et al., 2023; Huang et al., 2024; Sriramanan et al., 2025). GraphRAG techniques have emerged as a promising solution to this problem (Han et al., 2024; Zhang et al., 2025; He et al., 2025; Mavromatis and Karypis, 2024), by integrating relevant information from external graphs. Knowledge graphs, which store facts in the form of a graph, are commonly used for this problem. Specifically, relevant + +facts (i.e., triples) or paths are extracted from the knowledge graph and used to enrich the context of the LLMs with structured and reliable information (Luo et al., 2024; Li et al., 2025; Ma et al., 2024). This approach has shown ability to improve the reasoning capabilities and reduce the presence of hallucinations in LLMs (Sun et al.; Li et al., 2025; Dong et al., 2024). + +To better assess the efficacy of GraphRAG, in Section 3 we conduct a preliminary study comparing its performance with an LLM-only model (i.e., LLM without GraphRAG). This comparison reveals both the advantages and limitations of GraphRAG. 
While GraphRAG improves reasoning accuracy by correcting some LLM errors, it also introduces some notable weaknesses.
The main contributions of our work are summarized as follows: + +- We identify two key challenges in GraphRAG: (1) It is susceptible to errors by retrieving irrelevant or misleading information. (2) It overemphasizes the externally retrieved knowledge, at the expense of the intrinsic reasoning capabilities of LLMs. +- We introduce a novel approach that enhances GraphRAG by incorporating a two-stage filtering mechanism to refine the retrieved knowledge and dynamically integrate this knowledge with a LLMs' standalone reasoning capabilities. +- Extensive experiments on knowledge graph QA demonstrate the effectiveness of our method across multiple backbone models. + +# 2 Related work + +GraphRAG. GraphRAG aims to address hallucinations and outdated knowledge in LLMs by incorporating additional information retrieved from external knowledge bases (Sun et al.; Li et al., 2025; Dong et al., 2024). G-R retriever (He et al., 2025) identifies relevant nodes and edges for a given query based on cosine similarity, and then constructs a subgraph to aid in the generation process. Similarly, RoG (Luo et al., 2024) introduces a planning-retrieval-reasoning framework, where it retrieves reasoning paths guided by a planning module and performs reasoning using these paths. On the other hand, GNN-RAG (Mavromatis and Karypis, 2024) leverages Graph Neural Networks (GNNs) (Kipf and Welling, 2016) to process the intricate graph structures within knowledge graphs, enabling effective retrieval. They also use + +retrieval augmentation techniques to enhance diversity. However, the effectiveness of these methods is heavily dependent on the quality of the retrieved information, and their performance significantly declines when the retrieved graph data is either noisy or unrelated to the query (He et al., 2025). + +Filter Methods. Filtering attempts to only keep those pieces of retrieved information that are relevant to the given query (Gao et al., 2025). 
ChunkRAG (Singh et al., 2024) tries to improve RAG systems by assessing and filtering retrieved data at the chunk level, with each "chunk" representing a concise and coherent segment of a document. This method first applies semantic chunking to partition documents into meaningful sections. It then leverages LLM-based relevance scoring to evaluate how well each chunk aligns with the user query. Zeng et al. (2024b) thoroughly investigate LLM representation behaviors in relation to RAG, uncovering distinct patterns between positive and negative samples in the representation space. This distinction enables representation-based methods to achieve significantly better performance for certain tasks. Building on these insights, they introduce Rep-PCA, which employs representation classifiers for knowledge filtering. RoK (Wang et al., 2024) refines the reasoning paths within the subgraph by computing the average PageRank score for each path. Similarly, He et al. (2024) use PageRank to identify the most relevant entities. + +# 3 Preliminary studies + +To evaluate the effectiveness of GraphRAG, we compare the performance with and without retrieved external knowledge. Furthermore, we analyze the attention scores of the LLM to assess its ability to discern both the relevance and importance of the retrieved information. Lastly, we evaluate the performance of internal knowledge filtering. + +# 3.1 Experimental settings + +In this section, we aim to study the importance of retrieving external information when using GraphRAG for knowledge graph QA. To do so, we report the QA performance when using: LLM with GraphRAG and LLM w/o GraphRAG (i.e., LLM-only). For GraphRAG, we use RoG (Luo et al., 2024) and GNN-RAG (Mavromatis and Karypis, 2024). For the LLM-only experiments, we use the fine-tuned LLaMA 2-7B model, which is the same LLM used by RoG. 
The experiments are con + +![](images/496e28bd577aa6e70383f3ab6939ca74c0779a5e7b7c2d509d916d5c878e76e6.jpg) +Figure 1: Category A includes cases where both GraphRAG and the LLM-only model are correct. Category B covers instances where GraphRAG outperforms the LLM-only model, while Category C includes cases where the LLM-only model performs better than GraphRAG. Category D represents cases where both models fail. + +duced on two common datasets the WebQSP (Yih et al., 2016) and CWQ (Talmor and Berant, 2018) datasets. In this study, we mainly use the F1 score to evaluate the performance. + +![](images/318b8766a631ff1fd8611c77f5b5c3cc04ca1a589f8e03899978a762189be102.jpg) +Figure 2: The relationship between path number and average F1 + +# 3.2 The Impact of GraphRAG + +To understanding the effectiveness of GraphRAG, we compare prediction outcomes between LLM with GraphRAG and LLM w/o GraphRAG (i.e., LLM-only). We categorize the results into four groups based on F1 scores, as shown in the Figure 1. Category A includes cases where both GraphRAG and the LLM-only model provide correct answers. Category B consists of instances where GraphRAG produces a more accurate answer than the LLM-only model. Category C includes cases where the LLM-only model outperforms GraphRAG. Finally, Category D represents instances where both GraphRAG and the LLM-only model fail to generate the correct answer. Figure 1 illustrates the key observations from our experiments. While GraphRAG enhances certain predictions, it also + +introduces notable challenges that require further investigation. + +Positive Impact of GraphRAG GraphRAG can enhance the LLM's reasoning capabilities by correcting errors that the standalone model would typically commit. Notably, in the category B, $45.64\%$ of previously incorrect responses were successfully rectified with the integration of GraphRAG. This highlights the advantage of leveraging structured knowledge graphs to boost LLM performance. 
+ +Limited Impact of GraphRAG Category A contains those answers where both GraphRAG and LLM-only are correct. This show that GraphRAG can sometimes preserve the performance of a LLM when the LLM already possesses the correct knowledge. Conversely, category D, representing $9.03\%$ of cases, corresponds to those cases where GraphRAG fails to enhance the model's accuracy. For this category, neither the standalone LLM nor GraphRAG are able to provide the correct answer. This pattern implies that GraphRAG does not always access or incorporate sufficiently informative or relevant knowledge. + +Negative Impact of GraphRAG A notable drawback of GraphRAG is that will occasionally degrade the performance of a standalone LLM. That is, it will sometimes lead to wrong predictions for queries that the standalone LLM originally got right. These instances are represented by category C and accounts for $16.89\%$ of samples when evaluating via the F1 score. In these cases, GraphRAG misleads the model rather than improving it. This suggests that some of the retrieved information may be incorrect, noisy, or irrelevant, ultimately leading to poorer predictions. Therefore, in some cases, LLMs without GraphRAG outperform those with GraphRAG, because existing works have shown that LLMs tend to over-rely on external information (Ren et al., 2023; Tan et al., 2024; Wang et al., 2023; Ni et al., 2024; Zeng et al., 2024a). When retrieval is insufficient or the quality of retrieved knowledge is low, this reliance can degrade generation quality. + +# 3.3 The Impact of the Number of Retrieved Paths + +Due to the structure of knowledge graphs, nodes with high degrees and numerous relational edges have a greater likelihood of yielding a large number of retrieved paths. In this subsection, we study + +the impact of the number of retrieved paths on performance. Figure 2 illustrates the relationship between the number of retrieved paths and the model's performance. 
Interestingly, as indicated by the smoothed line (blue), incorporating a moderate amount of retrieved information enhances performance. However, increasing the number of retrieved paths ultimately leads to a decline in performance. This trend (green line) suggests that retrieving too much information will introduce noise, making it harder for the model to use the correct and relevant knowledge for the task. This phenomenon thus highlights an important insight - more information does not necessarily indicate better performance. Instead, an overabundance of retrieved data can overwhelm the model with irrelevant details. This observation underscores the necessity for effective filtering mechanisms that can prioritize high-quality, relevant knowledge while discarding extraneous or misleading information. + +![](images/5394ce95b061ee2998994297ff15b21d1f49085458e088ff35602ddff5e6c4a4.jpg) +3.4 Attention Reflects the Importance of Retrieved Information +Figure 3: Attention Scores for Retrieved Information With/Without Ground Truth + +In this subsection, we analyze the ability of the LLM to distinguish the importance of retrieved external knowledge. The attention scores of a LLM can provide a natural indicator of the relevance and significance of the retrieved knowledge (Yang et al., 2024; Ben-Artzy and Schwartz, 2024). The attention scores, derived from the model's internal mechanisms, effectively capture which pieces of information are most influential in reaching the final decision. Inspired by recent work (Chuang et al., 2023; Halawi et al., 2023), which suggests that attention scores in the middle layers are more effective. We examine the attention scores of the (middle + 2)-th layer in the LLM for each retrieved path. We obtain the attention scores for all retrieved paths and categorize them into two groups: (1) paths that + +contain the ground truth and (2) paths that do not. 
We then compute the average attention score for each group and present the results in Figure 3. As demonstrated in Figure 3, there is a clear alignment between the attention scores and the ground truth labels, suggesting that these scores can be used to assess the relevance of retrieved information. + +This observation inspires a key insight: The attention scores highlight the most significant retrieved information, suggesting their potential use in filtering out noisy or irrelevant knowledge. Since retrieved information with lower attention scores contributes minimally to the final output, it can be pruned to streamline retrieval and enhance overall performance. + +# 3.5 Internal Knowledge Filtering + +Large language models (LLMs) generate responses that may contain both correct and incorrect information. To assess the reliability of these responses, we analyze the associated logits, which represent the model's confidence in its predictions. Typically, higher confidence correlates with correctness (Ma et al., 2025; Virk et al., 2024). Leveraging this property, we implement "Internal Knowledge Filtering", which uses the logits to help refine the answer selection. The logits of an answer can be directly obtained from the LLM's output. Formally, let $A_{L}$ denote the set of answer candidates from the LLM model. Furthermore, let its corresponding logits after the softmax function be given by $\ell_{L}(a)$ . The filtering step is given by the following: + +$$ +A _ {L} ^ {\text {f i l t e r e d}} = \left\{a \in A _ {L} \mid \ell_ {L} (a) \geq \tau_ {L} \right\}, \tag {1} +$$ + +where $\tau_{L} = 1$ . This allows us to filter out the responses that the LLM has low-confidence in. The experimental results are shown in Table 1. We can clearly see that leveraging logits to filter out low-confidence responses has a large positive effect on performance. 
In this way, we can reconsider intrinsic knowledge and apply this approach to GraphRAG to better balance internal and external knowledge base on logits. + +Table 1: Impact of logits on LLM performance + +
MethodsWebQSPCWQ
HitF1HitF1
LLM66.1549.9740.2734.17
LLM with Logits84.1776.7461.8358.19
+ +# 3.6 Discussions + +In this subsection, we summarize the key findings and discussions from our preliminary study. The performance issues observed in GraphRAG primarily arise from two key factors. (1) Noisy or Irrelevant Retrieval: Some retrieved paths contain irrelevant or misleading information. This negatively impacts the model's ability to properly answer the query. Furthermore, this noise can introduce conflicting or unnecessary information that hinders the decision-making process rather than improving it. (2) Lack of Consideration for LLM's Own Knowledge: GraphRAG does not always take into account the inherent reasoning ability of the LLM itself. In some cases, the retrieved information overrides the LLM's correct predictions, leading to performance degradation rather than enhancement. A more adaptive approach is needed to balance external knowledge retrieval with the model's internal knowledge. + +# 4 Method + +Based on our analysis, we propose a new framework to address the identified challenges, guided by two key insights: (1) Filtering retrieved information: Given the tendency of GraphRAG to retrieve irrelevant or incorrect retrieved information, it is essential to refine the retrieved knowledge. (2) Properly leveraging the LLMs standalone capabilities: The LLM itself can often correctly answer some questions. It's thus necessary to effectively integrate and use the inherent reasoning ability of LLMs along with GraphRAG. + +An overview of our framework GraphRAG-FI is given in Figure 4. It consists of two core components: GraphRAG-Filtering and GraphRAG-Integration. GraphRAG-Filtering first refines the retrieved information by removing irrelevant or misleading knowledge. GraphRAG-Integration module balances the retrieved knowledge with the LLM's inherent reasoning ability, thereby mitigating the overuse of retrieved information that can negatively impact performance. 
In the following subsections, we will introduce each component of our framework in detail. + +# 4.1 GraphRAG-Filtering + +Let $P = \{p_1, p_2, \ldots, p_N\}$ denote the set of $N$ retrieved paths or triplets, where each path $p_i$ is assigned an attention score $a_i$ . Then we design filtering via the following two stages. + +Stage 1: Coarse Filtering using Attention: In the first stage, we perform a coarse filtering by retaining only those paths whose attention scores exceed a threshold $\tau$ . This is given formally by: + +$$ +P _ {\text {c o a r s e}} = \left\{p _ {i} \in P \mid a _ {i} \geq \tau \right\}. \tag {2} +$$ + +Stage 2: Fine Filtering via LLMs: After the initial coarse filtering, which significantly reduces the number of candidate paths, we perform a more precise evaluation with an LLM on the remaining subset. This two-stage filtering approach not only enhances the quality of the retrieved paths but also greatly reduces the overall cost by limiting the use of the LLM to only those paths deemed promising in the first stage. Let $f(p)$ represent the evaluation score provided by the LLM for a path $p$ , and let $\tau'$ be the corresponding threshold. The final set of filtered paths is then given by: + +$$ +P _ {\text {f i n a l}} = \left\{p \in P _ {\text {c o a r s e}} \mid f (p) \geq \tau^ {\prime} \right\}, \tag {3} +$$ + +where $P_{\mathrm{coarse}}$ is the set of paths that passed the coarse filtering stage, and $\tau^{\prime}$ is not predefined but is determined by the LLM itself. + +Prompt Construction: After the two filtering stages, we incorporate the selected paths and query into the prompt to further guide the model's reasoning. The prompt contains the following two types of retrieved paths: + +- High Priority Paths: These are the final filtered paths given by $P_{\mathrm{final}}$ , which are considered the most reliable. 
+- Additional Paths: We also consider the remaining paths included by the coarse filter but removed via the fine filter, $P_{\mathrm{coarse}} - P_{\mathrm{final}}$ . We conjecture that while they may not be as important as those paths in $P_{\mathrm{final}}$ , they can still offer some useful supplementary context. + +The new prompt is then constructed by first inserting a header for the high-priority paths, followed by each path on a separate line. The same process is repeated for the additional paths. By structuring the prompt in this way, we are able to clearly delineate the paths by their priority. This ensures that the most critical information $(P_{\mathrm{final}})$ is emphasized and processed first, while still incorporating the supplementary context from the additional paths. An example prompt is given in Appendix A.2. + +![](images/175f4b8c514ad2758bb069413e6f2e98fab5a73ec12eebfcea3ab375c3e494a4.jpg) +Figure 4: An overview of the GraphRAG-FI framework. + +# 4.2 Integration with LLMs' Internal Knowledge + +As noted in Section 3.2, in addition to ensuring we only retrieve high-quality information, we also want to retain internal knowledge of the LLMs. As such, we want to also integrate the capabilities of just the LLM into our framework. However, a challenge is knowing when to defer to which method. When do we trust the answers given by GraphRAG and when the standalone LLM? Furthermore, how do we fuse the answers given by both methods? + +To achieve this goal, we need a method to determine which answers produced by both LLM-only and GraphRAG are actually relevant. In Section 3.5, we found that the LLM's logits can provide a useful tool to refine the potential answers. That is, focusing only on those answers that are given a higher confidence is helpful. This naturally provides us with an easy way to focus on just the high-quality information. 
For both GraphRAG and the LLM-only model, we filter the answers based on their logits, ensuring that only high-confidence responses are retained. After this logits-based filtering, the refined answers from both sources are combined to produce the final answer, thereby enhancing robustness and accuracy. + +Formally, let $A_G$ and $A_L$ denote the sets of answer candidates from GraphRAG and the LLM-only model, respectively. We further use $a$ to indicate a single candidate answer in either set. Furthermore, let their corresponding logits after the softmax function be given by $\ell_G(a)$ and $\ell_L(a)$ . The + +filtering step is given by the following: + +$$ +A _ {G} ^ {\text {f i l t e r e d}} = \left\{a \in A _ {G} \mid \ell_ {G} (a) \geq \tau_ {G} \right\}, \tag {4} +$$ + +$$ +A _ {L} ^ {\text {f i l t e r e d}} = \left\{a \in A _ {L} \mid \ell_ {L} (a) \geq \tau_ {L} \right\}, \tag {5} +$$ + +where $\tau_{G}$ and $\tau_{L}$ are predefined thresholds, $\tau_{L}$ is set to 1. Subsequently, the final answer is determined by combining the filtered sets: + +$$ +A _ {\text {f i n a l}} = \operatorname {C o m b i n e} \left(A _ {G} ^ {\text {f i l t e r e d}}, A _ {L} ^ {\text {f i l t e r e d}}\right), \tag {6} +$$ + +where $\operatorname{Combine}(\cdot)$ denotes the function that integrates the filtered answers into the final reliable output. + +# 5 Experiment + +In our experiments, we seek to address the following research questions: RQ1: How effective is the proposed method when applied to state-of-the-art GraphRAG retrievers in the knowledge graph QA task? RQ2: How does the proposed method compare to other filtering approaches? RQ3: How does the performance change when more noisy information is introduced? and RQ4: What is the impact of the two modules on performance? + +# 5.1 Experiment Settings + +Datasets. 
To assess the effectiveness of our method, we evaluate it on two widely recognized KGQA benchmark datasets: WebQSP (Yih et al., 2016) and CWQ (Talmor and Berant, 2018). WebQSP contains 4,737 natural language questions that require reasoning over paths of up to two hops. In contrast, CWQ includes 34,699 more complex questions that necessitate multi-hop reasoning over + +Table 2: Performance comparison with different baselines on the two KGQA datasets. + +
TypeMethodsWebQSPCWQ
HitF1HitF1
LLMsFlan-T5-xl(Chung et al., 2024)31.0-14.7-
Alpaca-7B(Taori et al., 2023)51.8-27.4-
LLaMA2-Chat-7B(Touvron et al., 2023)64.4-34.6-
ChatGPT66.8-39.9-
ChatGPT+CoT75.6-48.9-
LLMs+KGsROG86.7370.7561.9154.95
ROG + Similarity85.5069.3861.6254.38
ROG + PageRank85.4469.6061.3454.41
ROG + GraphRAG-Filtering87.4073.4163.8657.25
ROG + GraphRAG-FI89.2573.8664.8255.12
GNN-RAG90.1173.2569.1060.55
GNN-RAG + Similarity89.6872.1768.5060.26
GNN-RAG + PageRank89.1871.9266.7558.73
GNN-RAG + GraphRAG-Filtering91.2874.7469.7060.96
GNN-RAG + GraphRAG-FI91.8975.9871.1260.34
SubgraphRAG76.9064.6553.8750.43
SubgraphRAG + Similarity72.7259.9852.0548.27
SubgraphRAG + PageRank61.7950.6546.7543.23
SubgraphRAG + GraphRAG-Filtering81.0168.4058.8254.71
SubgraphRAG + GraphRAG-FI81.0868.2858.9652.52
+ +up to four hops. Both datasets are built upon Freebase, which consists of around 88 million entities, 20 thousand relations, and 126 million triples. Further details on the datasets are provided in Appendix A.1. + +Retriever Backbones. Our framework adopts three existing retrieval methods as its backbone: path-based retrieval (ROG (Luo et al., 2024)), GNN (Mavromatis and Karypis, 2024)), and subgraph-based retrieval (SubgraphRAG (Li et al., 2025)). Path-based retrieval extracts relevant paths using heuristics or shortest-path algorithms, while GNN-based retrieval leverages a Graph Neural Network to learn and retrieve informative paths. In contrast, subgraph-based retrieval retrieves relevant subgraphs and encodes them as triples, enabling fine-grained relational reasoning. Therefore, both path-based and GNN-based methods generate paths as input for the LLM. Lastly, subgraph-based methods give triples (i.e., edges) as input to the LLM that take the form of $(h,r,t)$ . By considering these three methods, we are able to test our framework on a diverse set of retrieval methods. + +Filter Baselines. The most commonly used filtering methods for RAG are similarity-based ap + +proaches used in (Gao et al., 2025). Similarity-based methods evaluate the relevance of retrieved information by measuring feature similarity. For retrieval over graphs, PageRank-based filtering is widely adopted (Wang et al., 2024). PageRank-based filtering leverages the graph structure to rank nodes based on their connectivity and importance. These methods provide a baseline filtering mechanism for refining the retrieved results. + +Implementation and Evaluation Metrics. We use LLaMA2-Chat-7B from ROG as the LLM backbone, which is instruction-finetuned on the training split of WebQSP and CWQ, as well as Freebase, for three epochs. For the similarity-based filter, we utilize SentenceTransformer ('all-MiniLM-L6-v2') to generate representations for retrieval. 
We evaluate our retrieval methods using both Hit Rate (Hit) and F1-score (F1). Hit Rate measures the proportion of relevant items successfully retrieved, reflecting retrieval effectiveness. F1-score balances precision and recall, providing a comprehensive assessment of retrieval quality. These metrics ensure a robust evaluation of retrieval performance. We adjust the thresholds $\tau$ and $\tau_{G}$ within the ranges [top 40, top 50] and [0.4, 0.5], respectively. + +# 5.2 Main Results + +In this section, we evaluate the performance of our method with various retrievers and compare it against baseline filter models. + +RQ1: KGQA Performance Comparison. In this subsection, we apply our method to different retrievers, including the path-based retriever, GNN-based retriever, and subgraph-based retriever. The results presented in Table 2 demonstrate that our method consistently improves all retrievers, achieving an average improvement of $3.81\%$ in Hit and $2.35\%$ in F1 over ROG, $2.46\%$ in Hit and $1.7\%$ in F1 over GNN-RAG, and significant gains of $7.47\%$ in Hit and $4.88\%$ in F1 over SubgraphRAG across two datasets. These results demonstrate that our approach is effective across different retrieval paradigms, reinforcing its adaptability to various retrieval strategies in QA tasks. + +RQ2: Comparison with other filter methods. We compare our method against other filtering baselines, with the results presented in Table 2. Our approach consistently outperforms competing methods across both datasets and retriever types. Specifically, for ROG, our method can achieve an average improvement of $4.78\%$ in Hit and $3.95\%$ in F1 compared to similarity-based filtering on both datasets. Furthermore, compared to the PageRank-based filtering method, our approach yields an average increase of $5.03\%$ in Hit and $3.70\%$ in F1 across both datasets. These results highlight the superiority of our method in enhancing retrieval effectiveness and overall performance. 
+ +Table 3: Performance when adding more noise + +
MethodsWebQSPCWQ
HitF1HitF1
ROG-original86.7370.7561.9154.95
ROG*85.8768.8160.4953.72
ROG* + GraphRAG-Filtering86.6173.0161.9155.67
+ +# 5.3 Robustness to Noise + +In this subsection, we evaluate robustness of different methods to noise. To evaluate the noise resistance of the backbone model and our filter method, we use GPT to generate 30 additional noise paths that contain both irrelevant and incorrect information. This information is then incorporated into the retrieved context. We then analyze the impact of this noise on performance. The experimental results presented in Table 3, ROG* represents the cases where noise is introduced. As the noise level + +increases, the Hit score decreases by $2.29\%$ , and the F1 score drops by $2.23\%$ on the CWQ dataset, highlighting the model's sensitivity to noise. However, when applying our method, we observe a $2.23\%$ improvement in Hit and a $3.63\%$ improvement in F1 over $\mathrm{ROG^{*}}$ on CWQ. These results demonstrate the effectiveness of our approach in mitigating the negative impact of noisy retrieval. + +# 5.4 Ablation Study + +We conduct an ablation study to analyze the effectiveness of the filtering module and integrating module in GraphRAG-FI. From the results in Table 4, we can see that GraphRAG-Filtering is useful for the ROG retriever, as it improves both the F1 and Hit scores. For example, GraphRAG-Filtering increases the F1 score by $4.19\%$ and the Hit score by $3.15\%$ on CWQ dataset. We also see a boost in performance for GraphRAG-Integration, with a $1.60\%$ and $2.62\%$ increase in F1 and Hit score, respectively, on WebQSP. These results demonstrate the effectiveness of our two components. + +Table 4: Ablation study. + +
MethodsWebQSPCWQ
HitF1HitF1
ROG-original86.7370.7561.9154.95
ROG + GraphRAG-Filtering87.4073.4163.8657.25
ROG + GraphRAG-Integration89.0071.8864.2555.19
ROG + GraphRAG-FI89.2573.8664.8255.12
+ +# 6 Conclusion + +In this work, we propose GraphRAG-FI (Filtering & Integration), an enhanced GraphRAG framework that addresses key challenges in graph retrieval-augmented generation. By incorporating GraphRAG-Filtering, which utilizes a two-stage filtering mechanism to refine retrieved information, and GraphRAG-Integration, which employs a logits-based selection strategy to balance retrieval and intrinsic reasoning, our approach mitigates the impact of noisy retrievals and excessive dependence on external knowledge. Experimental results on knowledge graph QA tasks demonstrate that GraphRAG-FI significantly improves reasoning accuracy across multiple backbone models, establishing a more reliable and effective GraphRAG framework. + +# Limitations + +In this work, we identify two key challenges in GraphRAG: (1) it is prone to errors due to the retrieval of irrelevant or misleading information, and (2) it places excessive emphasis on externally retrieved knowledge, which can diminish the intrinsic reasoning capabilities of LLMs. Future research will first explore a broader range of large language models to evaluate their effectiveness within GraphRAG. Additionally, further investigation into diverse filtering methods could enhance the refinement of retrieved information and reduce noise. More sophisticated fusion strategies may also be explored to dynamically balance external knowledge with the intrinsic reasoning of LLMs, enabling more effective information integration. + +# References + +Amit Ben-Artzy and Roy Schwartz. 2024. Attend first, consolidate later: On the importance of attention in different llm layers. arXiv preprint arXiv:2409.03621. +Yung-Sung Chuang, Yujia Xie, Hongyin Luo, Yoon Kim, James Glass, and Pengcheng He. 2023. Dola: Decoding by contrasting layers improves factuality in large language models. arXiv preprint arXiv:2309.03883. 
+Hyung Won Chung, Le Hou, Shayne Longpre, Barret Zoph, Yi Tay, William Fedus, Yunxuan Li, Xuezhi Wang, Mostafa Dehghani, Siddhartha Brahma, et al. 2024. Scaling instruction-finetuned language models. Journal of Machine Learning Research, 25(70):1-53. +Jialin Dong, Bahare Fatemi, Bryan Perozzi, Lin F Yang, and Anton Tsitsulin. 2024. Don't forget to connect! improving rag with graph-based reranking. arXiv preprint arXiv:2405.18414. +Zengyi Gao, Yukun Cao, Hairu Wang, Ao Ke, Yuan Feng, Xike Xie, and S Kevin Zhou. 2025. Frag: A flexible modular framework for retrieval-augmented generation based on knowledge graphs. arXiv preprint arXiv:2501.09957. +Danny Halawi, Jean-Stanislas Denain, and Jacob Steinhardt. 2023. Overthinking the truth: Understanding how language models process false demonstrations. arXiv preprint arXiv:2307.09476. +Haoyu Han, Yu Wang, Harry Shomer, Kai Guo, Jiayuan Ding, Yongjia Lei, Mahantesh Halappanavar, Ryan A Rossi, Subhabrata Mukherjee, Xianfeng Tang, et al. 2024. Retrieval-augmented generation with graphs (graphrag). arXiv preprint arXiv:2501.00309. +Shibo Hao, Yi Gu, Haodi Ma, Joshua Hong, Zhen Wang, Daisy Wang, and Zhiting Hu. 2023. Reasoning with language model is planning with world + +model. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 8154-8173. +Alexander Havrilla, Sharath Chandra Rarparthy, Christoforos Nalmpantis, Jane Dwivedi-Yu, Maksym Zhuravinskyi, Eric Hambro, and Roberta Raileanu. Gore: When, where, and how to improve llm reasoning via global and local refinements. In *Forty-first International Conference on Machine Learning*. +Xiaoxin He, Yijun Tian, Yifei Sun, Nitesh Chawla, Thomas Laurent, Yann LeCun, Xavier Bresson, and Bryan Hooi. 2025. G-retriever: Retrieval-augmented generation for textual graph understanding and question answering. Advances in Neural Information Processing Systems, 37:132876-132907. 
+Xiaoxin He, Yijun Tian, Yifei Sun, Nitesh V Chawla, Thomas Laurent, Yann LeCun, Xavier Bresson, and Bryan Hooi. 2024. G-retriever: Retrieval-augmented generation for textual graph understanding and question answering. arXiv preprint arXiv:2402.07630. +Lei Huang, Weijiang Yu, Weitao Ma, Weihong Zhong, Zhangyin Feng, Haotian Wang, Qianglong Chen, Weihua Peng, Xiaocheng Feng, Bing Qin, et al. 2024. A survey on hallucination in large language models: Principles, taxonomy, challenges, and open questions. ACM Transactions on Information Systems. +Ziwei Ji, Tiezheng Yu, Yan Xu, Nayeon Lee, Etsuko Ishii, and Pascale Fung. 2023. Towards mitigating llm hallucination via self reflection. In *Findings of the Association for Computational Linguistics: EMNLP* 2023, pages 1827-1843. +Thomas N Kipf and Max Welling. 2016. Semi-supervised classification with graph convolutional networks. arXiv preprint arXiv:1609.02907. +Mufei Li, Siqi Miao, and Pan Li. 2025. Simple is effective: The roles of graphs and large language models in knowledge-graph-based retrieval-augmented generation. In International Conference on Learning Representations. +Linhao Luo, Yuan-Fang Li, Gholamreza Haffari, and Shirui Pan. 2024. Reasoning on graphs: Faithful and interpretable large language model reasoning. In International Conference on Learning Representations. +Huan Ma, Jingdong Chen, Guangyu Wang, and Changqing Zhang. 2025. Estimating llm uncertainty with logits. arXiv preprint arXiv:2502.00290. +Shengjie Ma, Chengjin Xu, Xuhui Jiang, Muzhi Li, Huaren Qu, and Jian Guo. 2024. Think-on-graph 2.0: Deep and interpretable large language model reasoning with knowledge graph-guided retrieval. arXiv e-prints, pages arXiv-2407. +Costas Mavromatis and George Karypis. 2024. Gnrag: Graph neural retrieval for large language model reasoning. arXiv preprint arXiv:2405.20139. + +Shiyu Ni, Keping Bi, Jiafeng Guo, and Xueqi Cheng. 2024. When do llms need retrieval augmentation? 
mitigating llms' overconfidence helps retrieval augmentation. arXiv preprint arXiv:2402.11457. +Ruiyang Ren, Yuhao Wang, Yingqi Qu, Wayne Xin Zhao, Jing Liu, Hao Tian, Hua Wu, Ji-Rong Wen, and Haifeng Wang. 2023. Investigating the factual knowledge boundary of large language models with retrieval augmentation. arXiv preprint arXiv:2307.11019. +Ishneet Sukhvinder Singh, Ritvik Aggarwal, Ibrahim Allahverdiyev, Muhammad Taha, Aslihan Akalin, Kevin Zhu, and Sean O'Brien. 2024. Chunkrag: Novel lmm-chunk filtering method for rag systems. arXiv preprint arXiv:2410.19572. +Gaurang Sriramanan, Siddhant Bharti, Vinu Sankar Sadasivan, Shoumik Saha, Priyatham Kattakinda, and Soheil Feizi. 2025. Llm-check: Investigating detection of hallucinations in large language models. Advances in Neural Information Processing Systems, 37:34188-34216. +Jiashuo Sun, Chengjin Xu, Lumingyuan Tang, Saizhuo Wang, Chen Lin, Yeyun Gong, Lionel Ni, Heung-Yeung Shum, and Jian Guo. Think-on-graph: Deep and responsible reasoning of large language model on knowledge graph. In The Twelfth International Conference on Learning Representations. +Alon Talmor and Jonathan Berant. 2018. The web as a knowledge-base for answering complex questions. arXiv preprint arXiv:1803.06643. +Hexiang Tan, Fei Sun, Wanli Yang, Yuanzhuo Wang, Qi Cao, and Xueqi Cheng. 2024. Blinded by generated contexts: How language models merge generated and retrieved contexts for open-domain qa? arXiv preprint arXiv:2401.11911. +Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li, Carlos Guestrin, Percy Liang, and Tatsunori B Hashimoto. 2023. Stanford alpaca: An instruction-following llama model. +Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. 2023. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288. +Yuvraj Virk, Premkumar Devanbu, and Toufique Ahmed. 2024. 
Enhancing trust in llm-generated code summaries with calibrated confidence scores. arXiv preprint arXiv:2404.19318. +Yile Wang, Peng Li, Maosong Sun, and Yang Liu. 2023. Self-knowledge guided retrieval augmentation for large language models. arXiv preprint arXiv:2310.05002. + +Yuqi Wang, Boran Jiang, Yi Luo, Dawei He, Peng Cheng, and Liangcai Gao. 2024. Reasoning on efficient knowledge paths: Knowledge graph guides large language model for domain question answering. arXiv preprint arXiv:2404.10384. +Xiaoqian Wu, Yong-Lu Li, Jianhua Sun, and Cewu Lu. 2023. Symbol-llm: leverage language models for symbolic system in visual human activity reasoning. Advances in Neural Information Processing Systems, 36:29680-29691. +Lijie Yang, Zhihao Zhang, Zhuofu Chen,zikun Li, and Zhihao Jia. 2024. Tidaldecode: Fast and accurate llm decoding with position persistent sparse attention. arXiv preprint arXiv:2410.05076. +Wen-tau Yih, Matthew Richardson, Christopher Meek, Ming-Wei Chang, and Jina Suh. 2016. The value of semantic parse labeling for knowledge base question answering. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pages 201-206. +Shenglai Zeng, Jiankun Zhang, Pengfei He, Yue Xing, Yiding Liu, Han Xu, Jie Ren, Shuaiqiang Wang, Dawei Yin, Yi Chang, et al. 2024a. The good and the bad: Exploring privacy issues in retrieval-augmented generation (rag). arXiv preprint arXiv:2402.16893. +Shenglai Zeng, Jiankun Zhang, Bingheng Li, Yuping Lin, Tianqi Zheng, Dante Everaert, Hanqing Lu, Hui Liu, Yue Xing, Monica Xiao Cheng, et al. 2024b. Towards knowledge checking in retrieval-augmented generation: A representation perspective. arXiv preprint arXiv:2411.14572. +Qinggang Zhang, Shengyuan Chen, Yuanchen Bei, Zheng Yuan, Huachi Zhou, Zijin Hong, Junnan Dong, Hao Chen, Yi Chang, and Xiao Huang. 2025. A survey of graph retrieval-augmented generation for customized large language models. arXiv preprint arXiv:2501.13958. 
+ +Table 5: Statistics of datasets. + +
Datasets#Train#TestMax #hop
WebQSP2,8261,6282
CWQ27,6393,5314
+ +# A Appendix + +# A.1 Datasets + +We utilize two benchmark KGQA datasets, WebQSP (Yih et al., 2016) and CWQ (Talmor and Berant, 2018), as proposed in previous studies. Following ROG, we maintain the same training and testing splits. The dataset statistics are provided in Table 5. + +# A.2 Prompt Example + +# Prompts + +Based on the reasoning paths, please answer the given question. Please keep the answer as simple as possible and return all the possible answers as a list. + +# Reasoning Paths: + +High Priority Paths: + +Northern Colorado Bears football $\rightarrow$ education.educational_institution.sports_teams $\rightarrow$ University of Northern Colorado + +Additional Paths: + +Northern Colorado Bears football $\rightarrow$ education.educational_institution.sports_teams $\rightarrow$ University of Northern Colorado + +Greeley $\rightarrow$ location.location.containedby + +→ United States of America + +Greeley $\rightarrow$ location.location.containedby $\rightarrow$ Greeley Masonic Temple + +Question: What educational institution has a football sports team named Northern Colorado Bears is in Greeley, Colorado? 
+ +Figure 5: An Example of Our Prompt \ No newline at end of file diff --git a/data/2025/2503_13xxx/2503.13804/images/126d7898f0d8ce0776605a29f6c763a81d49da74e920a4306526b091d6c2782d.jpg b/data/2025/2503_13xxx/2503.13804/images/126d7898f0d8ce0776605a29f6c763a81d49da74e920a4306526b091d6c2782d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..92e9c9c993aed925f04da0c3a819720af7e1667a --- /dev/null +++ b/data/2025/2503_13xxx/2503.13804/images/126d7898f0d8ce0776605a29f6c763a81d49da74e920a4306526b091d6c2782d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:818ec1593bcb094299dd1184b37cc06954181c2379b86d857b6cfdb000291270 +size 19393 diff --git a/data/2025/2503_13xxx/2503.13804/images/167a42a5baf1a000a36b149db67dd2f9386cc0c431471e8215a23ca01506e714.jpg b/data/2025/2503_13xxx/2503.13804/images/167a42a5baf1a000a36b149db67dd2f9386cc0c431471e8215a23ca01506e714.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ab4f24adc9221cb4df5b747276e37e8602976a94 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13804/images/167a42a5baf1a000a36b149db67dd2f9386cc0c431471e8215a23ca01506e714.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0eeadc29bedfeb09375e18fb994ec70c102fbd3b98c982c3c09e84795ebec15c +size 15966 diff --git a/data/2025/2503_13xxx/2503.13804/images/175f4b8c514ad2758bb069413e6f2e98fab5a73ec12eebfcea3ab375c3e494a4.jpg b/data/2025/2503_13xxx/2503.13804/images/175f4b8c514ad2758bb069413e6f2e98fab5a73ec12eebfcea3ab375c3e494a4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..182b3c6beb5aca625a3903b6477af986966cabaf --- /dev/null +++ b/data/2025/2503_13xxx/2503.13804/images/175f4b8c514ad2758bb069413e6f2e98fab5a73ec12eebfcea3ab375c3e494a4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e816590785e35714fb1e69889e5be66774c8cd153aefadb2421121d9dc09956d +size 60007 diff --git 
a/data/2025/2503_13xxx/2503.13804/images/318b8766a631ff1fd8611c77f5b5c3cc04ca1a589f8e03899978a762189be102.jpg b/data/2025/2503_13xxx/2503.13804/images/318b8766a631ff1fd8611c77f5b5c3cc04ca1a589f8e03899978a762189be102.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a907142b8702340d63c01f759390750a1b11acd6 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13804/images/318b8766a631ff1fd8611c77f5b5c3cc04ca1a589f8e03899978a762189be102.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:33404f375497cf3e0af5e3f06f1a6ef92d3e780a10759fcd3ceae35520e6c8d5 +size 36017 diff --git a/data/2025/2503_13xxx/2503.13804/images/4087547e5200291794f586a6dafcdf49e71d0fb7af07f8a49e96ec278af46f8f.jpg b/data/2025/2503_13xxx/2503.13804/images/4087547e5200291794f586a6dafcdf49e71d0fb7af07f8a49e96ec278af46f8f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..010ae6ab829b5d4106f7ce6500d00ea3a34f687b --- /dev/null +++ b/data/2025/2503_13xxx/2503.13804/images/4087547e5200291794f586a6dafcdf49e71d0fb7af07f8a49e96ec278af46f8f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f6f1e6f78c16069adfa01eab6d19f1b566fa299e153f84466fae324e5b80ec40 +size 5374 diff --git a/data/2025/2503_13xxx/2503.13804/images/496e28bd577aa6e70383f3ab6939ca74c0779a5e7b7c2d509d916d5c878e76e6.jpg b/data/2025/2503_13xxx/2503.13804/images/496e28bd577aa6e70383f3ab6939ca74c0779a5e7b7c2d509d916d5c878e76e6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..702c7f52113cfe2aa8f084db910bf99406fce552 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13804/images/496e28bd577aa6e70383f3ab6939ca74c0779a5e7b7c2d509d916d5c878e76e6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b8564b7938ff6d9e79002baf10e2d046279e36aadc4db4db35e2de88ebb3f06e +size 20748 diff --git a/data/2025/2503_13xxx/2503.13804/images/5394ce95b061ee2998994297ff15b21d1f49085458e088ff35602ddff5e6c4a4.jpg 
b/data/2025/2503_13xxx/2503.13804/images/5394ce95b061ee2998994297ff15b21d1f49085458e088ff35602ddff5e6c4a4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6a8c72b837994c9613b5ffe404d6f30e9146a6e2 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13804/images/5394ce95b061ee2998994297ff15b21d1f49085458e088ff35602ddff5e6c4a4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:41a984e9887defd875ea4f44d60f4c14e98b00666f469b24b73d187e1d3baf0e +size 18960 diff --git a/data/2025/2503_13xxx/2503.13804/images/5ea5ec16d3f4ddc7189d90c2255697e615ca505193be5d33d1ffce274dce5bf4.jpg b/data/2025/2503_13xxx/2503.13804/images/5ea5ec16d3f4ddc7189d90c2255697e615ca505193be5d33d1ffce274dce5bf4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1e275f9ffdb42b5b8000dd0f01234c4a0b95cbf2 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13804/images/5ea5ec16d3f4ddc7189d90c2255697e615ca505193be5d33d1ffce274dce5bf4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f4892cd7efc1995049b9e5e080b53c20e3692cd3ce976ae7966aa67c3e3c3e1e +size 5121 diff --git a/data/2025/2503_13xxx/2503.13804/images/7ea290b7ab9020ea4a898db2b330171fb36b307cdf12ffee4dc2238a8e246f09.jpg b/data/2025/2503_13xxx/2503.13804/images/7ea290b7ab9020ea4a898db2b330171fb36b307cdf12ffee4dc2238a8e246f09.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4a48d6412341bd72e5d6335657f3697b90092d0f --- /dev/null +++ b/data/2025/2503_13xxx/2503.13804/images/7ea290b7ab9020ea4a898db2b330171fb36b307cdf12ffee4dc2238a8e246f09.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6c187628315f6c1511bd25f4dfaa24f9b752454cb5e3544577395b04fef5a3f6 +size 4186 diff --git a/data/2025/2503_13xxx/2503.13804/images/8df0d0d841560df2b2c8fffc2d15d88d4b71b35a9f03a2f9f18a2b3fa59e808c.jpg b/data/2025/2503_13xxx/2503.13804/images/8df0d0d841560df2b2c8fffc2d15d88d4b71b35a9f03a2f9f18a2b3fa59e808c.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..1add4d3252b03b869639fe433596d42e9020a96d --- /dev/null +++ b/data/2025/2503_13xxx/2503.13804/images/8df0d0d841560df2b2c8fffc2d15d88d4b71b35a9f03a2f9f18a2b3fa59e808c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b12660d7439ecda4ffdb39c1763a1ec9e73736e50d88e996247bbca7702770f +size 17134 diff --git a/data/2025/2503_13xxx/2503.13804/images/8ee88875f5eee9241b66f29772211d4531307597a46a52acd6ff71bb0ad82e24.jpg b/data/2025/2503_13xxx/2503.13804/images/8ee88875f5eee9241b66f29772211d4531307597a46a52acd6ff71bb0ad82e24.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6f312d81b5c5cd4f59cd31d6a29e3ada0e16d5c5 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13804/images/8ee88875f5eee9241b66f29772211d4531307597a46a52acd6ff71bb0ad82e24.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:91654c71aaafdf2cbb5593bbaead370f4ef2d088b1de1a79e524418d2c6bc149 +size 5083 diff --git a/data/2025/2503_13xxx/2503.13804/images/8fbeb86a47c75ef462e1cde623194a4293e71edd7e7770812d08d3233aab0bf8.jpg b/data/2025/2503_13xxx/2503.13804/images/8fbeb86a47c75ef462e1cde623194a4293e71edd7e7770812d08d3233aab0bf8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e05fdddf4ee9cef5bb31460744f0cf566a4edc1a --- /dev/null +++ b/data/2025/2503_13xxx/2503.13804/images/8fbeb86a47c75ef462e1cde623194a4293e71edd7e7770812d08d3233aab0bf8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c07bcad3b0496d6c4a2fb804b9f57754dde9d2d39cb4e9ed3c6234c068edf28d +size 156797 diff --git a/data/2025/2503_13xxx/2503.13804/images/97d7bcded28c6bd95bc57d2b198b2b35020c21b98714b72d413982e1a9bdfe07.jpg b/data/2025/2503_13xxx/2503.13804/images/97d7bcded28c6bd95bc57d2b198b2b35020c21b98714b72d413982e1a9bdfe07.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3e0937dbfb7c341155a04b90f292753bec11155f --- /dev/null +++ 
b/data/2025/2503_13xxx/2503.13804/images/97d7bcded28c6bd95bc57d2b198b2b35020c21b98714b72d413982e1a9bdfe07.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e96bcf9ad3156f55cf626e5bd9b97c4dc1fc7b37239c9a39dee67e5441699be5 +size 5130 diff --git a/data/2025/2503_13xxx/2503.13804/images/b180c3b6429f6a2dae130d14350518b88d86c32cc66cbe1abb5b4d31d624d598.jpg b/data/2025/2503_13xxx/2503.13804/images/b180c3b6429f6a2dae130d14350518b88d86c32cc66cbe1abb5b4d31d624d598.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e8704c7c6e579270c159c886bfe7edbcc573eb86 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13804/images/b180c3b6429f6a2dae130d14350518b88d86c32cc66cbe1abb5b4d31d624d598.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a140ec02833e443f53227f4f81116e8f6286c273bdc17b64ec4c26af181d85d0 +size 26774 diff --git a/data/2025/2503_13xxx/2503.13804/images/e59d82f255eb6ed4169554c4abd0f849e5d3c2a8617557aedb004677bc7c494e.jpg b/data/2025/2503_13xxx/2503.13804/images/e59d82f255eb6ed4169554c4abd0f849e5d3c2a8617557aedb004677bc7c494e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..77b26e104357c3b84ef80ad643663402ae7aa282 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13804/images/e59d82f255eb6ed4169554c4abd0f849e5d3c2a8617557aedb004677bc7c494e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:69735b07b9c78cc1615f50270fc06bd8169e42ef2e84b19b7ac1b1ac323147ba +size 6030 diff --git a/data/2025/2503_13xxx/2503.13804/layout.json b/data/2025/2503_13xxx/2503.13804/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..1f86eae57cd194c1b22f55a1634bb85535aeb793 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13804/layout.json @@ -0,0 +1,7065 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 88, + 75, + 506, + 95 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 75, + 506, + 95 + ], + "spans": [ + { + "bbox": [ + 
88, + 75, + 506, + 95 + ], + "type": "text", + "content": "Empowering GraphRAG with Knowledge Filtering and Integration" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 86, + 116, + 506, + 132 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 116, + 506, + 132 + ], + "spans": [ + { + "bbox": [ + 86, + 116, + 506, + 132 + ], + "type": "text", + "content": "Kai Guo" + }, + { + "bbox": [ + 86, + 116, + 506, + 132 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 86, + 116, + 506, + 132 + ], + "type": "text", + "content": ", Harry Shomer" + }, + { + "bbox": [ + 86, + 116, + 506, + 132 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 86, + 116, + 506, + 132 + ], + "type": "text", + "content": ", Shenglai Zeng" + }, + { + "bbox": [ + 86, + 116, + 506, + 132 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 86, + 116, + 506, + 132 + ], + "type": "text", + "content": ", Haoyu Han" + }, + { + "bbox": [ + 86, + 116, + 506, + 132 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 86, + 116, + 506, + 132 + ], + "type": "text", + "content": ", Yu Wang" + }, + { + "bbox": [ + 86, + 116, + 506, + 132 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 86, + 116, + 506, + 132 + ], + "type": "text", + "content": ", Jiliang Tang" + }, + { + "bbox": [ + 86, + 116, + 506, + 132 + ], + "type": "inline_equation", + "content": "^{1}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 167, + 132, + 424, + 146 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 132, + 424, + 146 + ], + "spans": [ + { + "bbox": [ + 167, + 132, + 424, + 146 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 167, + 132, + 424, + 146 + ], + "type": "text", + "content": "Michigan State University " + }, + { + "bbox": [ + 167, + 132, + 424, + 146 + ], + "type": "inline_equation", + "content": "^{2}" + 
}, + { + "bbox": [ + 167, + 132, + 424, + 146 + ], + "type": "text", + "content": " University of Oregon" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 146, + 146, + 447, + 159 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 146, + 146, + 447, + 159 + ], + "spans": [ + { + "bbox": [ + 146, + 146, + 447, + 159 + ], + "type": "text", + "content": "{guokai1, shomerha, zengshe1, hanhaoy1, tangjili} @msu.edu," + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 236, + 160, + 357, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 236, + 160, + 357, + 174 + ], + "spans": [ + { + "bbox": [ + 236, + 160, + 357, + 174 + ], + "type": "text", + "content": "{yuwang} @uoregon.edu" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 155, + 219, + 202, + 232 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 155, + 219, + 202, + 232 + ], + "spans": [ + { + "bbox": [ + 155, + 219, + 202, + 232 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 86, + 237, + 274, + 560 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 237, + 274, + 560 + ], + "spans": [ + { + "bbox": [ + 86, + 237, + 274, + 560 + ], + "type": "text", + "content": "In recent years, large language models (LLMs) have revolutionized the field of natural language processing. However, they often suffer from knowledge gaps and hallucinations. Graph retrieval-augmented generation (GraphRAG) enhances LLM reasoning by integrating structured knowledge from external graphs. However, we identify two key challenges that plague GraphRAG: (1) Retrieving noisy and irrelevant information can degrade performance and (2) Excessive reliance on external knowledge suppresses the model's intrinsic reasoning. To address these issues, we propose GraphRAG-FI (Filtering & Integration), consisting of GraphRAG-Filtering and GraphRAG-Integration. 
GraphRAG-Filtering employs a two-stage filtering mechanism to refine retrieved information. GraphRAG-Integration employs a logits-based selection strategy to balance external knowledge from GraphRAG with the LLM's intrinsic reasoning, reducing over-reliance on retrievals. Experiments on knowledge graph QA tasks demonstrate that GraphRAG-FI significantly improves reasoning performance across multiple backbone models, establishing a more reliable and effective GraphRAG framework." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 68, + 565, + 155, + 577 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 565, + 155, + 577 + ], + "spans": [ + { + "bbox": [ + 68, + 565, + 155, + 577 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 586, + 292, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 586, + 292, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 586, + 292, + 775 + ], + "type": "text", + "content": "Large language models (LLMs) have achieved remarkable success in NLP tasks, particularly in tasks that require complex reasoning (Havrilla et al.; Wu et al., 2023; Hao et al., 2023). However, despite their strengths, LLMs are prone to hallucinations, resulting in incorrect or poor reasoning (Ji et al., 2023; Huang et al., 2024; Sriramanan et al., 2025). GraphRAG techniques have emerged as a promising solution to this problem (Han et al., 2024; Zhang et al., 2025; He et al., 2025; Mavromatis and Karypis, 2024), by integrating relevant information from external graphs. Knowledge graphs, which store facts in the form of a graph, are commonly used for this problem. 
Specifically, relevant" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 220, + 526, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 220, + 526, + 327 + ], + "spans": [ + { + "bbox": [ + 302, + 220, + 526, + 327 + ], + "type": "text", + "content": "facts (i.e., triples) or paths are extracted from the knowledge graph and used to enrich the context of the LLMs with structured and reliable information (Luo et al., 2024; Li et al., 2025; Ma et al., 2024). This approach has shown ability to improve the reasoning capabilities and reduce the presence of hallucinations in LLMs (Sun et al.; Li et al., 2025; Dong et al., 2024)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 328, + 526, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 328, + 526, + 544 + ], + "spans": [ + { + "bbox": [ + 302, + 328, + 526, + 544 + ], + "type": "text", + "content": "To better assess the efficacy of GraphRAG, in Section 3 we conduct a preliminary study comparing its performance with an LLM-only model (i.e., LLM without GraphRAG). This comparison reveals both the advantages and limitations of GraphRAG. While GraphRAG improved reasoning accuracy by correcting some LLM errors, it also introduces some notable weaknesses. For example, incorporating external knowledge will sometimes cause questions that were originally answered correctly by the LLM to be misclassified. This highlights the dangers of retrieving irrelevant information. Furthermore, excessive retrieval compounds this issue by introducing both noise and redundant information, thus further hindering the reasoning process." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 545, + 526, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 545, + 526, + 734 + ], + "spans": [ + { + "bbox": [ + 302, + 545, + 526, + 734 + ], + "type": "text", + "content": "Meanwhile, we find that LLM-only and GraphRAG can complement one another. Specifically, GraphRAG can enhance reasoning for those questions LLMs lack knowledge of; while excessive reliance on external information may cause the model to overlook internally known correct answers. These findings highlight two key limitations of existing GraphRAG methods. First, GraphRAG is highly susceptible to retrieving irrelevant or misleading information. Second, GraphRAG struggles to balance external retrieval with the LLM's internal knowledge, often missing parts of the answer that the LLM-only model can provide using its own knowledge." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 735, + 525, + 776 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 735, + 525, + 776 + ], + "spans": [ + { + "bbox": [ + 302, + 735, + 525, + 776 + ], + "type": "text", + "content": "Inspired by these findings, we propose a novel design that aims to address these issues. 
First, we aim to enhance the retrieval quality to better avoid" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 13, + 261, + 36, + 610 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 13, + 261, + 36, + 610 + ], + "spans": [ + { + "bbox": [ + 13, + 261, + 36, + 610 + ], + "type": "text", + "content": "arXiv:2503.13804v1 [cs.AI] 18 Mar 2025" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 293, + 287 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 293, + 287 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 293, + 287 + ], + "type": "text", + "content": "retrieving irrelevant information. Second, we integrate GraphRAG with an LLM's intrinsic reasoning ability, thus only using GraphRAG when external knowledge is necessary. In particular, to mitigate the issue of retrieving irrelevant information, we introduce a two-stage filtering process. Furthermore, to mitigate GraphRAG from overrelying on retrieved information while underutilizing the LLM's inherent reasoning ability, we introduce a logits-based selection mechanism that dynamically integrates LLMs' standalone answers with GraphRAG's outputs. This approach ensures that the final response effectively balances external knowledge with the model's internal reasoning. The main contributions of our work are summarized as follows:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 81, + 296, + 291, + 515 + ], + "type": "list", + "angle": 0, + "index": 4, + "blocks": [ + { + "bbox": [ + 81, + 296, + 291, + 377 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 296, + 291, + 377 + ], + "spans": [ + { + "bbox": [ + 81, + 296, + 291, + 377 + ], + "type": "text", + "content": "- We identify two key challenges in GraphRAG: (1) It is susceptible to errors by retrieving irrelevant or misleading information. 
(2) It overemphasizes the externally retrieved knowledge, at the expense of the intrinsic reasoning capabilities of LLMs." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 81, + 386, + 291, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 386, + 291, + 467 + ], + "spans": [ + { + "bbox": [ + 81, + 386, + 291, + 467 + ], + "type": "text", + "content": "- We introduce a novel approach that enhances GraphRAG by incorporating a two-stage filtering mechanism to refine the retrieved knowledge and dynamically integrate this knowledge with a LLMs' standalone reasoning capabilities." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 81, + 476, + 290, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 476, + 290, + 515 + ], + "spans": [ + { + "bbox": [ + 81, + 476, + 290, + 515 + ], + "type": "text", + "content": "- Extensive experiments on knowledge graph QA demonstrate the effectiveness of our method across multiple backbone models." + } + ] + } + ], + "index": 3 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 67, + 524, + 158, + 537 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 524, + 158, + 537 + ], + "spans": [ + { + "bbox": [ + 67, + 524, + 158, + 537 + ], + "type": "text", + "content": "2 Related work" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 544, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 544, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 544, + 291, + 775 + ], + "type": "text", + "content": "GraphRAG. GraphRAG aims to address hallucinations and outdated knowledge in LLMs by incorporating additional information retrieved from external knowledge bases (Sun et al.; Li et al., 2025; Dong et al., 2024). G-R retriever (He et al., 2025) identifies relevant nodes and edges for a given query based on cosine similarity, and then constructs a subgraph to aid in the generation process. 
Similarly, RoG (Luo et al., 2024) introduces a planning-retrieval-reasoning framework, where it retrieves reasoning paths guided by a planning module and performs reasoning using these paths. On the other hand, GNN-RAG (Mavromatis and Karypis, 2024) leverages Graph Neural Networks (GNNs) (Kipf and Welling, 2016) to process the intricate graph structures within knowledge graphs, enabling effective retrieval. They also use" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 302, + 71, + 526, + 152 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 71, + 526, + 152 + ], + "spans": [ + { + "bbox": [ + 302, + 71, + 526, + 152 + ], + "type": "text", + "content": "retrieval augmentation techniques to enhance diversity. However, the effectiveness of these methods is heavily dependent on the quality of the retrieved information, and their performance significantly declines when the retrieved graph data is either noisy or unrelated to the query (He et al., 2025)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 153, + 526, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 153, + 526, + 477 + ], + "spans": [ + { + "bbox": [ + 302, + 153, + 526, + 477 + ], + "type": "text", + "content": "Filter Methods. Filtering attempts to only keep those pieces of retrieved information that are relevant to the given query (Gao et al., 2025). ChunkRAG (Singh et al., 2024) tries to improve RAG systems by assessing and filtering retrieved data at the chunk level, with each \"chunk\" representing a concise and coherent segment of a document. This method first applies semantic chunking to partition documents into meaningful sections. It then leverages LLM-based relevance scoring to evaluate how well each chunk aligns with the user query. Zeng et al. (2024b) thoroughly investigate LLM representation behaviors in relation to RAG, uncovering distinct patterns between positive and negative samples in the representation space. 
This distinction enables representation-based methods to achieve significantly better performance for certain tasks. Building on these insights, they introduce Rep-PCA, which employs representation classifiers for knowledge filtering. RoK (Wang et al., 2024) refines the reasoning paths within the subgraph by computing the average PageRank score for each path. Similarly, He et al. (2024) use PageRank to identify the most relevant entities." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 491, + 425, + 505 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 491, + 425, + 505 + ], + "spans": [ + { + "bbox": [ + 302, + 491, + 425, + 505 + ], + "type": "text", + "content": "3 Preliminary studies" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 514, + 525, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 514, + 525, + 609 + ], + "spans": [ + { + "bbox": [ + 302, + 514, + 525, + 609 + ], + "type": "text", + "content": "To evaluate the effectiveness of GraphRAG, we compare the performance with and without retrieved external knowledge. Furthermore, we analyze the attention scores of the LLM to assess its ability to discern both the relevance and importance of the retrieved information. Lastly, we evaluate the performance of internal knowledge filtering." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 620, + 433, + 634 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 620, + 433, + 634 + ], + "spans": [ + { + "bbox": [ + 302, + 620, + 433, + 634 + ], + "type": "text", + "content": "3.1 Experimental settings" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 640, + 526, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 640, + 526, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 640, + 526, + 775 + ], + "type": "text", + "content": "In this section, we aim to study the importance of retrieving external information when using GraphRAG for knowledge graph QA. To do so, we report the QA performance when using: LLM with GraphRAG and LLM w/o GraphRAG (i.e., LLM-only). For GraphRAG, we use RoG (Luo et al., 2024) and GNN-RAG (Mavromatis and Karypis, 2024). For the LLM-only experiments, we use the fine-tuned LLaMA 2-7B model, which is the same LLM used by RoG. The experiments are con" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 79, + 68, + 279, + 184 + ], + "blocks": [ + { + "bbox": [ + 79, + 68, + 279, + 184 + ], + "lines": [ + { + "bbox": [ + 79, + 68, + 279, + 184 + ], + "spans": [ + { + "bbox": [ + 79, + 68, + 279, + 184 + ], + "type": "image", + "image_path": "496e28bd577aa6e70383f3ab6939ca74c0779a5e7b7c2d509d916d5c878e76e6.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 192, + 291, + 277 + ], + "lines": [ + { + "bbox": [ + 67, + 192, + 291, + 277 + ], + "spans": [ + { + "bbox": [ + 67, + 192, + 291, + 277 + ], + "type": "text", + "content": "Figure 1: Category A includes cases where both GraphRAG and the LLM-only model are correct. 
Category B covers instances where GraphRAG outperforms the LLM-only model, while Category C includes cases where the LLM-only model performs better than GraphRAG. Category D represents cases where both models fail." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 296, + 290, + 351 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 296, + 290, + 351 + ], + "spans": [ + { + "bbox": [ + 67, + 296, + 290, + 351 + ], + "type": "text", + "content": "duced on two common datasets the WebQSP (Yih et al., 2016) and CWQ (Talmor and Berant, 2018) datasets. In this study, we mainly use the F1 score to evaluate the performance." + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 69, + 359, + 290, + 486 + ], + "blocks": [ + { + "bbox": [ + 69, + 359, + 290, + 486 + ], + "lines": [ + { + "bbox": [ + 69, + 359, + 290, + 486 + ], + "spans": [ + { + "bbox": [ + 69, + 359, + 290, + 486 + ], + "type": "image", + "image_path": "318b8766a631ff1fd8611c77f5b5c3cc04ca1a589f8e03899978a762189be102.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 495, + 289, + 519 + ], + "lines": [ + { + "bbox": [ + 67, + 495, + 289, + 519 + ], + "spans": [ + { + "bbox": [ + 67, + 495, + 289, + 519 + ], + "type": "text", + "content": "Figure 2: The relationship between path number and average F1" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 541, + 220, + 554 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 541, + 220, + 554 + ], + "spans": [ + { + "bbox": [ + 67, + 541, + 220, + 554 + ], + "type": "text", + "content": "3.2 The Impact of GraphRAG" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 558, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 558, + 291, + 775 + ], + "spans": [ + { + 
"bbox": [ + 67, + 558, + 291, + 775 + ], + "type": "text", + "content": "To understanding the effectiveness of GraphRAG, we compare prediction outcomes between LLM with GraphRAG and LLM w/o GraphRAG (i.e., LLM-only). We categorize the results into four groups based on F1 scores, as shown in the Figure 1. Category A includes cases where both GraphRAG and the LLM-only model provide correct answers. Category B consists of instances where GraphRAG produces a more accurate answer than the LLM-only model. Category C includes cases where the LLM-only model outperforms GraphRAG. Finally, Category D represents instances where both GraphRAG and the LLM-only model fail to generate the correct answer. Figure 1 illustrates the key observations from our experiments. While GraphRAG enhances certain predictions, it also" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 302, + 71, + 524, + 98 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 71, + 524, + 98 + ], + "spans": [ + { + "bbox": [ + 302, + 71, + 524, + 98 + ], + "type": "text", + "content": "introduces notable challenges that require further investigation." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 107, + 525, + 215 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 107, + 525, + 215 + ], + "spans": [ + { + "bbox": [ + 302, + 107, + 525, + 215 + ], + "type": "text", + "content": "Positive Impact of GraphRAG GraphRAG can enhance the LLM's reasoning capabilities by correcting errors that the standalone model would typically commit. Notably, in the category B, " + }, + { + "bbox": [ + 302, + 107, + 525, + 215 + ], + "type": "inline_equation", + "content": "45.64\\%" + }, + { + "bbox": [ + 302, + 107, + 525, + 215 + ], + "type": "text", + "content": " of previously incorrect responses were successfully rectified with the integration of GraphRAG. This highlights the advantage of leveraging structured knowledge graphs to boost LLM performance." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 224, + 526, + 400 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 224, + 526, + 400 + ], + "spans": [ + { + "bbox": [ + 302, + 224, + 526, + 400 + ], + "type": "text", + "content": "Limited Impact of GraphRAG Category A contains those answers where both GraphRAG and LLM-only are correct. This show that GraphRAG can sometimes preserve the performance of a LLM when the LLM already possesses the correct knowledge. Conversely, category D, representing " + }, + { + "bbox": [ + 302, + 224, + 526, + 400 + ], + "type": "inline_equation", + "content": "9.03\\%" + }, + { + "bbox": [ + 302, + 224, + 526, + 400 + ], + "type": "text", + "content": " of cases, corresponds to those cases where GraphRAG fails to enhance the model's accuracy. For this category, neither the standalone LLM nor GraphRAG are able to provide the correct answer. This pattern implies that GraphRAG does not always access or incorporate sufficiently informative or relevant knowledge." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 408, + 525, + 679 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 408, + 525, + 679 + ], + "spans": [ + { + "bbox": [ + 302, + 408, + 525, + 679 + ], + "type": "text", + "content": "Negative Impact of GraphRAG A notable drawback of GraphRAG is that will occasionally degrade the performance of a standalone LLM. That is, it will sometimes lead to wrong predictions for queries that the standalone LLM originally got right. These instances are represented by category C and accounts for " + }, + { + "bbox": [ + 302, + 408, + 525, + 679 + ], + "type": "inline_equation", + "content": "16.89\\%" + }, + { + "bbox": [ + 302, + 408, + 525, + 679 + ], + "type": "text", + "content": " of samples when evaluating via the F1 score. In these cases, GraphRAG misleads the model rather than improving it. 
This suggests that some of the retrieved information may be incorrect, noisy, or irrelevant, ultimately leading to poorer predictions. Therefore, in some cases, LLMs without GraphRAG outperform those with GraphRAG, because existing works have shown that LLMs tend to over-rely on external information (Ren et al., 2023; Tan et al., 2024; Wang et al., 2023; Ni et al., 2024; Zeng et al., 2024a). When retrieval is insufficient or the quality of retrieved knowledge is low, this reliance can degrade generation quality." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 689, + 516, + 715 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 689, + 516, + 715 + ], + "spans": [ + { + "bbox": [ + 302, + 689, + 516, + 715 + ], + "type": "text", + "content": "3.3 The Impact of the Number of Retrieved Paths" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 721, + 525, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 721, + 525, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 721, + 525, + 775 + ], + "type": "text", + "content": "Due to the structure of knowledge graphs, nodes with high degrees and numerous relational edges have a greater likelihood of yielding a large number of retrieved paths. In this subsection, we study" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 293, + 343 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 293, + 343 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 293, + 343 + ], + "type": "text", + "content": "the impact of the number of retrieved paths on performance. Figure 2 illustrates the relationship between the number of retrieved paths and the model's performance. Interestingly, as indicated by the smoothed line (blue), incorporating a moderate amount of retrieved information enhances performance. 
However, increasing the number of retrieved paths ultimately leads to a decline in performance. This trend (green line) suggests that retrieving too much information will introduce noise, making it harder for the model to use the correct and relevant knowledge for the task. This phenomenon thus highlights an important insight - more information does not necessarily indicate better performance. Instead, an overabundance of retrieved data can overwhelm the model with irrelevant details. This observation underscores the necessity for effective filtering mechanisms that can prioritize high-quality, relevant knowledge while discarding extraneous or misleading information." + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 74, + 394, + 285, + 508 + ], + "blocks": [ + { + "bbox": [ + 68, + 353, + 269, + 379 + ], + "lines": [ + { + "bbox": [ + 68, + 353, + 269, + 379 + ], + "spans": [ + { + "bbox": [ + 68, + 353, + 269, + 379 + ], + "type": "text", + "content": "3.4 Attention Reflects the Importance of Retrieved Information" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 74, + 394, + 285, + 508 + ], + "lines": [ + { + "bbox": [ + 74, + 394, + 285, + 508 + ], + "spans": [ + { + "bbox": [ + 74, + 394, + 285, + 508 + ], + "type": "image", + "image_path": "5394ce95b061ee2998994297ff15b21d1f49085458e088ff35602ddff5e6c4a4.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 517, + 290, + 542 + ], + "lines": [ + { + "bbox": [ + 67, + 517, + 290, + 542 + ], + "spans": [ + { + "bbox": [ + 67, + 517, + 290, + 542 + ], + "type": "text", + "content": "Figure 3: Attention Scores for Retrieved Information With/Without Ground Truth" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 558, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 558, + 291, + 775 + ], + 
"spans": [ + { + "bbox": [ + 67, + 558, + 291, + 775 + ], + "type": "text", + "content": "In this subsection, we analyze the ability of the LLM to distinguish the importance of retrieved external knowledge. The attention scores of a LLM can provide a natural indicator of the relevance and significance of the retrieved knowledge (Yang et al., 2024; Ben-Artzy and Schwartz, 2024). The attention scores, derived from the model's internal mechanisms, effectively capture which pieces of information are most influential in reaching the final decision. Inspired by recent work (Chuang et al., 2023; Halawi et al., 2023), which suggests that attention scores in the middle layers are more effective. We examine the attention scores of the (middle + 2)-th layer in the LLM for each retrieved path. We obtain the attention scores for all retrieved paths and categorize them into two groups: (1) paths that" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 302, + 71, + 526, + 166 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 71, + 526, + 166 + ], + "spans": [ + { + "bbox": [ + 302, + 71, + 526, + 166 + ], + "type": "text", + "content": "contain the ground truth and (2) paths that do not. We then compute the average attention score for each group and present the results in Figure 3. As demonstrated in Figure 3, there is a clear alignment between the attention scores and the ground truth labels, suggesting that these scores can be used to assess the relevance of retrieved information." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 302, + 167, + 526, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 167, + 526, + 275 + ], + "spans": [ + { + "bbox": [ + 302, + 167, + 526, + 275 + ], + "type": "text", + "content": "This observation inspires a key insight: The attention scores highlight the most significant retrieved information, suggesting their potential use in filtering out noisy or irrelevant knowledge. 
Since retrieved information with lower attention scores contribute minimally to the final output, they can be pruned to streamline retrieval and enhance overall performance." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 302, + 285, + 468, + 298 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 285, + 468, + 298 + ], + "spans": [ + { + "bbox": [ + 302, + 285, + 468, + 298 + ], + "type": "text", + "content": "3.5 Internal Knowledge Filtering" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 302, + 527, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 302, + 527, + 506 + ], + "spans": [ + { + "bbox": [ + 302, + 302, + 527, + 506 + ], + "type": "text", + "content": "Large language models (LLMs) generate responses that may contain both correct and incorrect information. To assess the reliability of these responses, we analyze the associated logits, which represent the model's confidence in its predictions. Typically, higher confidence correlates with correctness (Ma et al., 2025; Virk et al., 2024). Leveraging this property, we implement \"Internal Knowledge Filtering\", which uses the logits to help refine the answer selection. The logits of answer can be directly obtained from the LLM's output. Formally, let " + }, + { + "bbox": [ + 302, + 302, + 527, + 506 + ], + "type": "inline_equation", + "content": "A_{L}" + }, + { + "bbox": [ + 302, + 302, + 527, + 506 + ], + "type": "text", + "content": " denote the sets of answer candidates from the LLM model. Furthermore, let it's corresponding logits after softmax function be given by " + }, + { + "bbox": [ + 302, + 302, + 527, + 506 + ], + "type": "inline_equation", + "content": "\\ell_{L}(a)" + }, + { + "bbox": [ + 302, + 302, + 527, + 506 + ], + "type": "text", + "content": ". 
The filtering step is given by the following:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 335, + 515, + 525, + 532 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 335, + 515, + 525, + 532 + ], + "spans": [ + { + "bbox": [ + 335, + 515, + 525, + 532 + ], + "type": "interline_equation", + "content": "A _ {L} ^ {\\text {f i l t e r e d}} = \\left\\{a \\in A _ {L} \\mid \\ell_ {L} (a) \\geq \\tau_ {L} \\right\\}, \\tag {1}", + "image_path": "97d7bcded28c6bd95bc57d2b198b2b35020c21b98714b72d413982e1a9bdfe07.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 542, + 527, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 542, + 527, + 664 + ], + "spans": [ + { + "bbox": [ + 302, + 542, + 527, + 664 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 302, + 542, + 527, + 664 + ], + "type": "inline_equation", + "content": "\\tau_{L} = 1" + }, + { + "bbox": [ + 302, + 542, + 527, + 664 + ], + "type": "text", + "content": ". This allows us to filter out the responses that the LLM has low-confidence in. The experimental results are shown in Table 1. We can clearly see that that leveraging logits to filter out low-confidence responses has a large positive effect on performance. In this way, we can reconsider intrinsic knowledge and apply this approach to GraphRAG to better balance internal and external knowledge base on logits." 
+ } + ] + } + ], + "index": 10 + }, + { + "type": "table", + "bbox": [ + 313, + 695, + 518, + 761 + ], + "blocks": [ + { + "bbox": [ + 318, + 674, + 509, + 687 + ], + "lines": [ + { + "bbox": [ + 318, + 674, + 509, + 687 + ], + "spans": [ + { + "bbox": [ + 318, + 674, + 509, + 687 + ], + "type": "text", + "content": "Table 1: Impact of logits on LLM performance" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 313, + 695, + 518, + 761 + ], + "lines": [ + { + "bbox": [ + 313, + 695, + 518, + 761 + ], + "spans": [ + { + "bbox": [ + 313, + 695, + 518, + 761 + ], + "type": "table", + "html": "
MethodsWebQSPCWQ
HitF1HitF1
LLM66.1549.9740.2734.17
LLM with Logits84.1776.7461.8358.19
", + "image_path": "8df0d0d841560df2b2c8fffc2d15d88d4b71b35a9f03a2f9f18a2b3fa59e808c.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_body" + } + ], + "index": 12 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 71, + 149, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 71, + 149, + 83 + ], + "spans": [ + { + "bbox": [ + 68, + 71, + 149, + 83 + ], + "type": "text", + "content": "3.6 Discussions" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 89, + 291, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 89, + 291, + 348 + ], + "spans": [ + { + "bbox": [ + 67, + 89, + 291, + 348 + ], + "type": "text", + "content": "In this subsection, we summarize the key findings and discussions from our preliminary study. The performance issues observed in GraphRAG primarily arise from two key factors. (1) Noisy or Irrelevant Retrieval: Some retrieved paths contain irrelevant or misleading information. This negatively impacts the model's ability to properly answer the query. Furthermore, this noise can introduce conflicting or unnecessary information that hinders the decision-making process rather than improving it. (2) Lack of Consideration for LLM's Own Knowledge: GraphRAG does not always take into account the inherent reasoning ability of the LLM itself. In some cases, the retrieved information overrides the LLM's correct predictions, leading to performance degradation rather than enhancement. A more adaptive approach is needed to balance external knowledge retrieval with the model's internal knowledge." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 358, + 130, + 370 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 358, + 130, + 370 + ], + "spans": [ + { + "bbox": [ + 67, + 358, + 130, + 370 + ], + "type": "text", + "content": "4 Method" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 380, + 291, + 528 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 380, + 291, + 528 + ], + "spans": [ + { + "bbox": [ + 67, + 380, + 291, + 528 + ], + "type": "text", + "content": "Based on our analysis, we propose a new framework to address the identified challenges, guided by two key insights: (1) Filtering retrieved information: Given the tendency of GraphRAG to retrieve irrelevant or incorrect retrieved information, it is essential to refine the retrieved knowledge. (2) Properly leveraging the LLMs standalone capabilities: The LLM itself can often correctly answer some questions. It's thus necessary to effectively integrate and use the inherent reasoning ability of LLMs along with GraphRAG." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 530, + 291, + 691 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 530, + 291, + 691 + ], + "spans": [ + { + "bbox": [ + 67, + 530, + 291, + 691 + ], + "type": "text", + "content": "An overview of our framework GraphRAG-FI is given in Figure 4. It consists of two core components: GraphRAG-Filtering and GraphRAG-Integration. GraphRAG-Filtering first refines the retrieved information by removing irrelevant or misleading knowledge. GraphRAG-Integration module balances the retrieved knowledge with the LLM's inherent reasoning ability, thereby mitigating the overuse of retrieved information that can negatively impact performance. In the following subsections, we will introduce each component of our framework in detail." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 702, + 195, + 715 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 702, + 195, + 715 + ], + "spans": [ + { + "bbox": [ + 67, + 702, + 195, + 715 + ], + "type": "text", + "content": "4.1 GraphRAG-Filtering" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 720, + 290, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 720, + 290, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 720, + 290, + 775 + ], + "type": "text", + "content": "Let " + }, + { + "bbox": [ + 67, + 720, + 290, + 775 + ], + "type": "inline_equation", + "content": "P = \\{p_1, p_2, \\ldots, p_N\\}" + }, + { + "bbox": [ + 67, + 720, + 290, + 775 + ], + "type": "text", + "content": " denote the set of " + }, + { + "bbox": [ + 67, + 720, + 290, + 775 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 67, + 720, + 290, + 775 + ], + "type": "text", + "content": " retrieved paths or triplets, where each path " + }, + { + "bbox": [ + 67, + 720, + 290, + 775 + ], + "type": "inline_equation", + "content": "p_i" + }, + { + "bbox": [ + 67, + 720, + 290, + 775 + ], + "type": "text", + "content": " is assigned an attention score " + }, + { + "bbox": [ + 67, + 720, + 290, + 775 + ], + "type": "inline_equation", + "content": "a_i" + }, + { + "bbox": [ + 67, + 720, + 290, + 775 + ], + "type": "text", + "content": ". Then we design filtering via the following two stages." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 302, + 71, + 525, + 126 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 71, + 525, + 126 + ], + "spans": [ + { + "bbox": [ + 302, + 71, + 525, + 126 + ], + "type": "text", + "content": "Stage 1: Coarse Filtering using Attention: In the first stage, we perform a coarse filtering by retaining only those paths whose attention scores exceeds a threshold " + }, + { + "bbox": [ + 302, + 71, + 525, + 126 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 302, + 71, + 525, + 126 + ], + "type": "text", + "content": ". This is given formally by:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 349, + 136, + 525, + 151 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 349, + 136, + 525, + 151 + ], + "spans": [ + { + "bbox": [ + 349, + 136, + 525, + 151 + ], + "type": "interline_equation", + "content": "P _ {\\text {c o a r s e}} = \\left\\{p _ {i} \\in P \\mid a _ {i} \\geq \\tau \\right\\}. \\tag {2}", + "image_path": "7ea290b7ab9020ea4a898db2b330171fb36b307cdf12ffee4dc2238a8e246f09.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 161, + 526, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 161, + 526, + 324 + ], + "spans": [ + { + "bbox": [ + 302, + 161, + 526, + 324 + ], + "type": "text", + "content": "Stage 2: Fine Filtering via LLMs: After the initial coarse filtering, which significantly reduces the number of candidate paths, we perform a more precise evaluation with a LLM on the remaining subset. This two-stage filtering approach not only enhances the quality of the retrieved paths but also greatly reduces the overall cost by limiting the use of the LLM to only those paths deemed promising in the first stage. 
Let " + }, + { + "bbox": [ + 302, + 161, + 526, + 324 + ], + "type": "inline_equation", + "content": "f(p)" + }, + { + "bbox": [ + 302, + 161, + 526, + 324 + ], + "type": "text", + "content": " represent the evaluation score provided by the LLM for a path " + }, + { + "bbox": [ + 302, + 161, + 526, + 324 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 302, + 161, + 526, + 324 + ], + "type": "text", + "content": ", and let " + }, + { + "bbox": [ + 302, + 161, + 526, + 324 + ], + "type": "inline_equation", + "content": "\\tau'" + }, + { + "bbox": [ + 302, + 161, + 526, + 324 + ], + "type": "text", + "content": " be the corresponding threshold. The final set of filtered paths is then given by:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 337, + 334, + 525, + 350 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 337, + 334, + 525, + 350 + ], + "spans": [ + { + "bbox": [ + 337, + 334, + 525, + 350 + ], + "type": "interline_equation", + "content": "P _ {\\text {f i n a l}} = \\left\\{p \\in P _ {\\text {c o a r s e}} \\mid f (p) \\geq \\tau^ {\\prime} \\right\\}, \\tag {3}", + "image_path": "8ee88875f5eee9241b66f29772211d4531307597a46a52acd6ff71bb0ad82e24.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 360, + 525, + 400 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 360, + 525, + 400 + ], + "spans": [ + { + "bbox": [ + 302, + 360, + 525, + 400 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 302, + 360, + 525, + 400 + ], + "type": "inline_equation", + "content": "P_{\\mathrm{coarse}}" + }, + { + "bbox": [ + 302, + 360, + 525, + 400 + ], + "type": "text", + "content": " is the set of paths that passed the coarse filtering stage, " + }, + { + "bbox": [ + 302, + 360, + 525, + 400 + ], + "type": "inline_equation", + "content": "\\tau^{\\prime}" + }, + { + "bbox": [ + 302, + 360, + 525, + 400 + ], + "type": "text", + "content": " is not 
predefined but is determined by the LLM itself." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 408, + 525, + 476 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 408, + 525, + 476 + ], + "spans": [ + { + "bbox": [ + 302, + 408, + 525, + 476 + ], + "type": "text", + "content": "Prompt Construction: After the two filtering stages, we incorporate the selected paths and query into the prompt to further guide the model's reasoning. The prompt contains the following two types of retrieved paths:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 316, + 486, + 526, + 618 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 316, + 486, + 525, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 486, + 525, + 525 + ], + "spans": [ + { + "bbox": [ + 316, + 486, + 525, + 525 + ], + "type": "text", + "content": "- High Priority Paths: These are the final filtered paths given by " + }, + { + "bbox": [ + 316, + 486, + 525, + 525 + ], + "type": "inline_equation", + "content": "P_{\\mathrm{final}}" + }, + { + "bbox": [ + 316, + 486, + 525, + 525 + ], + "type": "text", + "content": ", which are considered the most reliable." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 316, + 535, + 526, + 618 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 535, + 526, + 618 + ], + "spans": [ + { + "bbox": [ + 316, + 535, + 526, + 618 + ], + "type": "text", + "content": "- Additional Paths: We also consider the remaining paths included by the coarse filter but removed via the fine filter, " + }, + { + "bbox": [ + 316, + 535, + 526, + 618 + ], + "type": "inline_equation", + "content": "P_{\\mathrm{coarse}} - P_{\\mathrm{final}}" + }, + { + "bbox": [ + 316, + 535, + 526, + 618 + ], + "type": "text", + "content": ". 
We conjecture that while they may not be as important as those paths in " + }, + { + "bbox": [ + 316, + 535, + 526, + 618 + ], + "type": "inline_equation", + "content": "P_{\\mathrm{final}}" + }, + { + "bbox": [ + 316, + 535, + 526, + 618 + ], + "type": "text", + "content": ", they can still offer some useful supplementary context." + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 302, + 626, + 526, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 626, + 526, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 626, + 526, + 775 + ], + "type": "text", + "content": "The new prompt is then constructed by first inserting a header for the high-priority paths, followed by each path on a separate line. The same process is repeated for the additional paths. By structuring the prompt in this way, we are able to clearly delineate the paths by their priority. This ensures that the most critical information " + }, + { + "bbox": [ + 302, + 626, + 526, + 775 + ], + "type": "inline_equation", + "content": "(P_{\\mathrm{final}})" + }, + { + "bbox": [ + 302, + 626, + 526, + 775 + ], + "type": "text", + "content": " is emphasized and processed first, while still incorporating the supplementary context from the additional paths. An example prompt is given in Appendix A.2." 
+ } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 69, + 68, + 526, + 257 + ], + "blocks": [ + { + "bbox": [ + 69, + 68, + 526, + 257 + ], + "lines": [ + { + "bbox": [ + 69, + 68, + 526, + 257 + ], + "spans": [ + { + "bbox": [ + 69, + 68, + 526, + 257 + ], + "type": "image", + "image_path": "175f4b8c514ad2758bb069413e6f2e98fab5a73ec12eebfcea3ab375c3e494a4.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 182, + 264, + 411, + 277 + ], + "lines": [ + { + "bbox": [ + 182, + 264, + 411, + 277 + ], + "spans": [ + { + "bbox": [ + 182, + 264, + 411, + 277 + ], + "type": "text", + "content": "Figure 4: An overview of the GraphRAG-FI framework." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 298, + 248, + 326 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 298, + 248, + 326 + ], + "spans": [ + { + "bbox": [ + 67, + 298, + 248, + 326 + ], + "type": "text", + "content": "4.2 Integration with LLMs' Internal Knowledge" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 349, + 291, + 470 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 349, + 291, + 470 + ], + "spans": [ + { + "bbox": [ + 67, + 349, + 291, + 470 + ], + "type": "text", + "content": "As noted in Section 3.2, in addition to ensuring we only retrieve high-quality information, we also want to retain internal knowledge of the LLMs. As such, we want to also integrate the capabilities of just the LLM into our framework. However, a challenge is knowing when to defer to which method. When do we trust the answers given by GraphRAG and when the standalone LLM? Furthermore, how do we fuse the answers given by both methods?" 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 481, + 291, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 481, + 291, + 684 + ], + "spans": [ + { + "bbox": [ + 67, + 481, + 291, + 684 + ], + "type": "text", + "content": "To achieve this goal, we need a method to determine which answers produced by both LLM-only and GraphRAG are actually relevant. In Section 3.5, we found that the LLM's logits can provide a useful tool to refine the potential answers. That is, focusing only on those answers that are given a higher confidence is helpful. This naturally provides us with an easy way to focus on just the high-quality information. For both GraphRAG and the LLM-only model, we filter the answers based on their logits, ensuring that only high-confidence responses are retained. After this logits-based filtering, the refined answers from both sources are combined to produce the final answer, thereby enhancing robustness and accuracy." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 693, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 693, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 693, + 291, + 775 + ], + "type": "text", + "content": "Formally, let " + }, + { + "bbox": [ + 67, + 693, + 291, + 775 + ], + "type": "inline_equation", + "content": "A_G" + }, + { + "bbox": [ + 67, + 693, + 291, + 775 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 693, + 291, + 775 + ], + "type": "inline_equation", + "content": "A_L" + }, + { + "bbox": [ + 67, + 693, + 291, + 775 + ], + "type": "text", + "content": " denote the sets of answer candidates from GraphRAG and the LLM-only model, respectively. We further use " + }, + { + "bbox": [ + 67, + 693, + 291, + 775 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 67, + 693, + 291, + 775 + ], + "type": "text", + "content": " to indicate a single candidate answer in either set. 
Furthermore, let their corresponding logits after the softmax function be given by " + }, + { + "bbox": [ + 67, + 693, + 291, + 775 + ], + "type": "inline_equation", + "content": "\\ell_G(a)" + }, + { + "bbox": [ + 67, + 693, + 291, + 775 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 693, + 291, + 775 + ], + "type": "inline_equation", + "content": "\\ell_L(a)" + }, + { + "bbox": [ + 67, + 693, + 291, + 775 + ], + "type": "text", + "content": ". The" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 303, + 298, + 476, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 298, + 476, + 312 + ], + "spans": [ + { + "bbox": [ + 303, + 298, + 476, + 312 + ], + "type": "text", + "content": "filtering step is given by the following:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 334, + 321, + 525, + 337 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 334, + 321, + 525, + 337 + ], + "spans": [ + { + "bbox": [ + 334, + 321, + 525, + 337 + ], + "type": "interline_equation", + "content": "A _ {G} ^ {\\text {f i l t e r e d}} = \\left\\{a \\in A _ {G} \\mid \\ell_ {G} (a) \\geq \\tau_ {G} \\right\\}, \\tag {4}", + "image_path": "4087547e5200291794f586a6dafcdf49e71d0fb7af07f8a49e96ec278af46f8f.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 334, + 338, + 525, + 355 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 334, + 338, + 525, + 355 + ], + "spans": [ + { + "bbox": [ + 334, + 338, + 525, + 355 + ], + "type": "interline_equation", + "content": "A _ {L} ^ {\\text {f i l t e r e d}} = \\left\\{a \\in A _ {L} \\mid \\ell_ {L} (a) \\geq \\tau_ {L} \\right\\}, \\tag {5}", + "image_path": "5ea5ec16d3f4ddc7189d90c2255697e615ca505193be5d33d1ffce274dce5bf4.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 365, + 525, + 406 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 365, + 525, + 406 + ], + "spans": [ 
+ { + "bbox": [ + 302, + 365, + 525, + 406 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 302, + 365, + 525, + 406 + ], + "type": "inline_equation", + "content": "\\tau_{G}" + }, + { + "bbox": [ + 302, + 365, + 525, + 406 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 302, + 365, + 525, + 406 + ], + "type": "inline_equation", + "content": "\\tau_{L}" + }, + { + "bbox": [ + 302, + 365, + 525, + 406 + ], + "type": "text", + "content": " are predefined thresholds, " + }, + { + "bbox": [ + 302, + 365, + 525, + 406 + ], + "type": "inline_equation", + "content": "\\tau_{L}" + }, + { + "bbox": [ + 302, + 365, + 525, + 406 + ], + "type": "text", + "content": " is set to 1. Subsequently, the final answer is determined by combining the filtered sets:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 330, + 415, + 525, + 438 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 330, + 415, + 525, + 438 + ], + "spans": [ + { + "bbox": [ + 330, + 415, + 525, + 438 + ], + "type": "interline_equation", + "content": "A _ {\\text {f i n a l}} = \\operatorname {C o m b i n e} \\left(A _ {G} ^ {\\text {f i l t e r e d}}, A _ {L} ^ {\\text {f i l t e r e d}}\\right), \\tag {6}", + "image_path": "e59d82f255eb6ed4169554c4abd0f849e5d3c2a8617557aedb004677bc7c494e.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 446, + 525, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 446, + 525, + 487 + ], + "spans": [ + { + "bbox": [ + 302, + 446, + 525, + 487 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 302, + 446, + 525, + 487 + ], + "type": "inline_equation", + "content": "\\operatorname{Combine}(\\cdot)" + }, + { + "bbox": [ + 302, + 446, + 525, + 487 + ], + "type": "text", + "content": " denotes the function that integrates the filtered answers into the final reliable output." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 497, + 386, + 511 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 497, + 386, + 511 + ], + "spans": [ + { + "bbox": [ + 302, + 497, + 386, + 511 + ], + "type": "text", + "content": "5 Experiment" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 301, + 518, + 526, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 518, + 526, + 640 + ], + "spans": [ + { + "bbox": [ + 301, + 518, + 526, + 640 + ], + "type": "text", + "content": "In our experiments, we seek to address the following research questions: RQ1: How effective is the proposed method when applied to state-of-the-art GraphRAG retrievers in the knowledge graph QA task? RQ2: How does the proposed method compare to other filtering approaches? RQ3: How does the performance change when more noisy information is introduced? and RQ4: What is the impact of the two modules on performance?" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 302, + 649, + 427, + 663 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 649, + 427, + 663 + ], + "spans": [ + { + "bbox": [ + 302, + 649, + 427, + 663 + ], + "type": "text", + "content": "5.1 Experiment Settings" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 302, + 666, + 526, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 666, + 526, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 666, + 526, + 775 + ], + "type": "text", + "content": "Datasets. To assess the effectiveness of our method, we evaluate it on two widely recognized KGQA benchmark datasets: WebQSP (Yih et al., 2016) and CWQ (Talmor and Berant, 2018). WebQSP contains 4,737 natural language questions that require reasoning over paths of up to two hops. 
In contrast, CWQ includes 34,699 more complex questions that necessitate multi-hop reasoning over" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 95, + 90, + 498, + 403 + ], + "blocks": [ + { + "bbox": [ + 123, + 69, + 468, + 82 + ], + "lines": [ + { + "bbox": [ + 123, + 69, + 468, + 82 + ], + "spans": [ + { + "bbox": [ + 123, + 69, + 468, + 82 + ], + "type": "text", + "content": "Table 2: Performance comparison with different baselines on the two KGQA datasets." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 95, + 90, + 498, + 403 + ], + "lines": [ + { + "bbox": [ + 95, + 90, + 498, + 403 + ], + "spans": [ + { + "bbox": [ + 95, + 90, + 498, + 403 + ], + "type": "table", + "html": "
TypeMethodsWebQSPCWQ
HitF1HitF1
LLMsFlan-T5-xl(Chung et al., 2024)31.0-14.7-
Alpaca-7B(Taori et al., 2023)51.8-27.4-
LLaMA2-Chat-7B(Touvron et al., 2023)64.4-34.6-
ChatGPT66.8-39.9-
ChatGPT+CoT75.6-48.9-
LLMs+KGsROG86.7370.7561.9154.95
ROG + Similarity85.5069.3861.6254.38
ROG + PageRank85.4469.6061.3454.41
ROG + GraphRAG-Filtering87.4073.4163.8657.25
ROG + GraphRAG-FI89.2573.8664.8255.12
GNN-RAG90.1173.2569.1060.55
GNN-RAG + Similarity89.6872.1768.5060.26
GNN-RAG + PageRank89.1871.9266.7558.73
GNN-RAG + GraphRAG-Filtering91.2874.7469.7060.96
GNN-RAG + GraphRAG-FI91.8975.9871.1260.34
SubgraphRAG76.9064.6553.8750.43
SubgraphRAG + Similarity72.7259.9852.0548.27
SubgraphRAG + PageRank61.7950.6546.7543.23
SubgraphRAG + GraphRAG-Filtering81.0168.4058.8254.71
SubgraphRAG + GraphRAG-FI81.0868.2858.9652.52
", + "image_path": "8fbeb86a47c75ef462e1cde623194a4293e71edd7e7770812d08d3233aab0bf8.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 423, + 291, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 423, + 291, + 491 + ], + "spans": [ + { + "bbox": [ + 67, + 423, + 291, + 491 + ], + "type": "text", + "content": "up to four hops. Both datasets are built upon Freebase, which consists of around 88 million entities, 20 thousand relations, and 126 million triples. Further details on the datasets are provided in Appendix A.1." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 497, + 291, + 740 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 497, + 291, + 740 + ], + "spans": [ + { + "bbox": [ + 67, + 497, + 291, + 740 + ], + "type": "text", + "content": "Retriever Backbones. Our framework adopts three existing retrieval methods as its backbone: path-based retrieval (ROG (Luo et al., 2024)), GNN (Mavromatis and Karypis, 2024)), and subgraph-based retrieval (SubgraphRAG (Li et al., 2025)). Path-based retrieval extracts relevant paths using heuristics or shortest-path algorithms, while GNN-based retrieval leverages a Graph Neural Network to learn and retrieve informative paths. In contrast, subgraph-based retrieval retrieves relevant subgraphs and encodes them as triples, enabling fine-grained relational reasoning. Therefore, both path-based and GNN-based methods generate paths as input for the LLM. Lastly, subgraph-based methods give triples (i.e., edges) as input to the LLM that take the form of " + }, + { + "bbox": [ + 67, + 497, + 291, + 740 + ], + "type": "inline_equation", + "content": "(h,r,t)" + }, + { + "bbox": [ + 67, + 497, + 291, + 740 + ], + "type": "text", + "content": ". By considering these three methods, we are able to test our framework on a diverse set of retrieval methods." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 748, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 748, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 748, + 291, + 775 + ], + "type": "text", + "content": "Filter Baselines. The most commonly used filtering methods for RAG are similarity-based ap" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 302, + 423, + 526, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 423, + 526, + 544 + ], + "spans": [ + { + "bbox": [ + 302, + 423, + 526, + 544 + ], + "type": "text", + "content": "proaches used in (Gao et al., 2025). Similarity-based methods evaluate the relevance of retrieved information by measuring feature similarity. For retrieval over graphs, PageRank-based filtering is widely adopted (Wang et al., 2024). PageRank-based filtering leverages the graph structure to rank nodes based on their connectivity and importance. These methods provide a baseline filtering mechanism for refining the retrieved results." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 302, + 558, + 526, + 776 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 558, + 526, + 776 + ], + "spans": [ + { + "bbox": [ + 302, + 558, + 526, + 776 + ], + "type": "text", + "content": "Implementation and Evaluation Metrics. We use LLaMA2-Chat-7B from ROG as the LLM backbone, which is instruction-finetuned on the training split of WebQSP and CWQ, as well as Freebase, for three epochs. For the similarity-based filter, we utilize SentenceTransformer ('all-MiniLM-L6-v2') to generate representations for retrieval. We evaluate our retrieval methods using both Hit Rate (Hit) and F1-score (F1). Hit Rate measures the proportion of relevant items successfully retrieved, reflecting retrieval effectiveness. F1-score balances precision and recall, providing a comprehensive assessment of retrieval quality. 
These metrics ensure a robust evaluation of retrieval performance. We adjust the thresholds " + }, + { + "bbox": [ + 302, + 558, + 526, + 776 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 302, + 558, + 526, + 776 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 302, + 558, + 526, + 776 + ], + "type": "inline_equation", + "content": "\\tau_{G}" + }, + { + "bbox": [ + 302, + 558, + 526, + 776 + ], + "type": "text", + "content": " within the ranges [top 40, top 50] and [0.4, 0.5], respectively." + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 158, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 158, + 83 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 158, + 83 + ], + "type": "text", + "content": "5.2 Main Results" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 89, + 290, + 127 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 89, + 290, + 127 + ], + "spans": [ + { + "bbox": [ + 67, + 89, + 290, + 127 + ], + "type": "text", + "content": "In this section, we evaluate the performance of our method with various retrievers and compare it against baseline filter models." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 130, + 290, + 317 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 130, + 290, + 317 + ], + "spans": [ + { + "bbox": [ + 67, + 130, + 290, + 317 + ], + "type": "text", + "content": "RQ1: KGQA Performance Comparison. In this subsection, we apply our method to different retrievers, including the path-based retriever, GNN-based retriever, and subgraph-based retriever. 
The results presented in Table 2 demonstrate that our method consistently improves all retrievers, achieving an average improvement of " + }, + { + "bbox": [ + 67, + 130, + 290, + 317 + ], + "type": "inline_equation", + "content": "3.81\\%" + }, + { + "bbox": [ + 67, + 130, + 290, + 317 + ], + "type": "text", + "content": " in Hit and " + }, + { + "bbox": [ + 67, + 130, + 290, + 317 + ], + "type": "inline_equation", + "content": "2.35\\%" + }, + { + "bbox": [ + 67, + 130, + 290, + 317 + ], + "type": "text", + "content": " in F1 over ROG, " + }, + { + "bbox": [ + 67, + 130, + 290, + 317 + ], + "type": "inline_equation", + "content": "2.46\\%" + }, + { + "bbox": [ + 67, + 130, + 290, + 317 + ], + "type": "text", + "content": " in Hit and " + }, + { + "bbox": [ + 67, + 130, + 290, + 317 + ], + "type": "inline_equation", + "content": "1.7\\%" + }, + { + "bbox": [ + 67, + 130, + 290, + 317 + ], + "type": "text", + "content": " in F1 over GNN-RAG, and significant gains of " + }, + { + "bbox": [ + 67, + 130, + 290, + 317 + ], + "type": "inline_equation", + "content": "7.47\\%" + }, + { + "bbox": [ + 67, + 130, + 290, + 317 + ], + "type": "text", + "content": " in Hit and " + }, + { + "bbox": [ + 67, + 130, + 290, + 317 + ], + "type": "inline_equation", + "content": "4.88\\%" + }, + { + "bbox": [ + 67, + 130, + 290, + 317 + ], + "type": "text", + "content": " in F1 over SubgraphRAG across two datasets. These results demonstrate that our approach is effective across different retrieval paradigms, reinforcing its adaptability to various retrieval strategies in QA tasks." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 319, + 291, + 507 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 319, + 291, + 507 + ], + "spans": [ + { + "bbox": [ + 67, + 319, + 291, + 507 + ], + "type": "text", + "content": "RQ2: Comparison with other filter methods. We compare our method against other filtering baselines, with the results presented in Table 2. 
Our approach consistently outperforms competing methods across both datasets and retriever types. Specifically, for ROG, our method can achieve an average improvement of " + }, + { + "bbox": [ + 67, + 319, + 291, + 507 + ], + "type": "inline_equation", + "content": "4.78\\%" + }, + { + "bbox": [ + 67, + 319, + 291, + 507 + ], + "type": "text", + "content": " in Hit and " + }, + { + "bbox": [ + 67, + 319, + 291, + 507 + ], + "type": "inline_equation", + "content": "3.95\\%" + }, + { + "bbox": [ + 67, + 319, + 291, + 507 + ], + "type": "text", + "content": " in F1 compared to similarity-based filtering on both datasets. Furthermore, compared to the PageRank-based filtering method, our approach yields an average increase of " + }, + { + "bbox": [ + 67, + 319, + 291, + 507 + ], + "type": "inline_equation", + "content": "5.03\\%" + }, + { + "bbox": [ + 67, + 319, + 291, + 507 + ], + "type": "text", + "content": " in Hit and " + }, + { + "bbox": [ + 67, + 319, + 291, + 507 + ], + "type": "inline_equation", + "content": "3.70\\%" + }, + { + "bbox": [ + 67, + 319, + 291, + 507 + ], + "type": "text", + "content": " in F1 across both datasets. These results highlight the superiority of our method in enhancing retrieval effectiveness and overall performance." + } + ] + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 78, + 539, + 282, + 602 + ], + "blocks": [ + { + "bbox": [ + 83, + 518, + 273, + 530 + ], + "lines": [ + { + "bbox": [ + 83, + 518, + 273, + 530 + ], + "spans": [ + { + "bbox": [ + 83, + 518, + 273, + 530 + ], + "type": "text", + "content": "Table 3: Performance when adding more noise" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 78, + 539, + 282, + 602 + ], + "lines": [ + { + "bbox": [ + 78, + 539, + 282, + 602 + ], + "spans": [ + { + "bbox": [ + 78, + 539, + 282, + 602 + ], + "type": "table", + "html": "
MethodsWebQSPCWQ
HitF1HitF1
ROG-original86.7370.7561.9154.95
ROG*85.8768.8160.4953.72
ROG* + GraphRAG-Filtering86.6173.0161.9155.67
", + "image_path": "126d7898f0d8ce0776605a29f6c763a81d49da74e920a4306526b091d6c2782d.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 623, + 189, + 634 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 623, + 189, + 634 + ], + "spans": [ + { + "bbox": [ + 67, + 623, + 189, + 634 + ], + "type": "text", + "content": "5.3 Robustness to Noise" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 640, + 291, + 774 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 640, + 291, + 774 + ], + "spans": [ + { + "bbox": [ + 67, + 640, + 291, + 774 + ], + "type": "text", + "content": "In this subsection, we evaluate robustness of different methods to noise. To evaluate the noise resistance of the backbone model and our filter method, we use GPT to generate 30 additional noise paths that contain both irrelevant and incorrect information. This information is then incorporated into the retrieved context. We then analyze the impact of this noise on performance. The experimental results presented in Table 3, ROG* represents the cases where noise is introduced. As the noise level" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 71, + 526, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 71, + 526, + 179 + ], + "spans": [ + { + "bbox": [ + 302, + 71, + 526, + 179 + ], + "type": "text", + "content": "increases, the Hit score decreases by " + }, + { + "bbox": [ + 302, + 71, + 526, + 179 + ], + "type": "inline_equation", + "content": "2.29\\%" + }, + { + "bbox": [ + 302, + 71, + 526, + 179 + ], + "type": "text", + "content": ", and the F1 score drops by " + }, + { + "bbox": [ + 302, + 71, + 526, + 179 + ], + "type": "inline_equation", + "content": "2.23\\%" + }, + { + "bbox": [ + 302, + 71, + 526, + 179 + ], + "type": "text", + "content": " on the CWQ dataset, highlighting the model's sensitivity to noise. 
However, when applying our method, we observe a " + }, + { + "bbox": [ + 302, + 71, + 526, + 179 + ], + "type": "inline_equation", + "content": "2.23\\%" + }, + { + "bbox": [ + 302, + 71, + 526, + 179 + ], + "type": "text", + "content": " improvement in Hit and a " + }, + { + "bbox": [ + 302, + 71, + 526, + 179 + ], + "type": "inline_equation", + "content": "3.63\\%" + }, + { + "bbox": [ + 302, + 71, + 526, + 179 + ], + "type": "text", + "content": " improvement in F1 over " + }, + { + "bbox": [ + 302, + 71, + 526, + 179 + ], + "type": "inline_equation", + "content": "\\mathrm{ROG^{*}}" + }, + { + "bbox": [ + 302, + 71, + 526, + 179 + ], + "type": "text", + "content": " on CWQ. These results demonstrate the effectiveness of our approach in mitigating the negative impact of noisy retrieval." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 188, + 401, + 201 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 188, + 401, + 201 + ], + "spans": [ + { + "bbox": [ + 302, + 188, + 401, + 201 + ], + "type": "text", + "content": "5.4 Ablation Study" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 206, + 526, + 368 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 206, + 526, + 368 + ], + "spans": [ + { + "bbox": [ + 302, + 206, + 526, + 368 + ], + "type": "text", + "content": "We conduct an ablation study to analyze the effectiveness of the filtering module and integrating module in GraphRAG-FI. From the results in Table 4, we can see that GraphRAG-Filtering is useful for the ROG retriever, as it improves both the F1 and Hit scores. 
For example, GraphRAG-Filtering increases the F1 score by " + }, + { + "bbox": [ + 302, + 206, + 526, + 368 + ], + "type": "inline_equation", + "content": "4.19\\%" + }, + { + "bbox": [ + 302, + 206, + 526, + 368 + ], + "type": "text", + "content": " and the Hit score by " + }, + { + "bbox": [ + 302, + 206, + 526, + 368 + ], + "type": "inline_equation", + "content": "3.15\\%" + }, + { + "bbox": [ + 302, + 206, + 526, + 368 + ], + "type": "text", + "content": " on CWQ dataset. We also see a boost in performance for GraphRAG-Integration, with a " + }, + { + "bbox": [ + 302, + 206, + 526, + 368 + ], + "type": "inline_equation", + "content": "1.60\\%" + }, + { + "bbox": [ + 302, + 206, + 526, + 368 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 302, + 206, + 526, + 368 + ], + "type": "inline_equation", + "content": "2.62\\%" + }, + { + "bbox": [ + 302, + 206, + 526, + 368 + ], + "type": "text", + "content": " increase in F1 and Hit score, respectively, on WebQSP. These results demonstrate the effectiveness of our two components." + } + ] + } + ], + "index": 10 + }, + { + "type": "table", + "bbox": [ + 306, + 398, + 524, + 476 + ], + "blocks": [ + { + "bbox": [ + 364, + 378, + 463, + 390 + ], + "lines": [ + { + "bbox": [ + 364, + 378, + 463, + 390 + ], + "spans": [ + { + "bbox": [ + 364, + 378, + 463, + 390 + ], + "type": "text", + "content": "Table 4: Ablation study." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 306, + 398, + 524, + 476 + ], + "lines": [ + { + "bbox": [ + 306, + 398, + 524, + 476 + ], + "spans": [ + { + "bbox": [ + 306, + 398, + 524, + 476 + ], + "type": "table", + "html": "
MethodsWebQSPCWQ
HitF1HitF1
ROG-original86.7370.7561.9154.95
ROG + GraphRAG-Filtering87.4073.4163.8657.25
ROG + GraphRAG-Integration89.0071.8864.2555.19
ROG + GraphRAG-FI89.2573.8664.8255.12
", + "image_path": "b180c3b6429f6a2dae130d14350518b88d86c32cc66cbe1abb5b4d31d624d598.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_body" + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 497, + 381, + 509 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 497, + 381, + 509 + ], + "spans": [ + { + "bbox": [ + 302, + 497, + 381, + 509 + ], + "type": "text", + "content": "6 Conclusion" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 302, + 518, + 526, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 518, + 526, + 734 + ], + "spans": [ + { + "bbox": [ + 302, + 518, + 526, + 734 + ], + "type": "text", + "content": "In this work, we propose GraphRAG-FI (Filtering & Integration), an enhanced GraphRAG framework that addresses key challenges in graph retrieval-augmented generation. By incorporating GraphRAG-Filtering, which utilizes a two-stage filtering mechanism to refine retrieved information, and GraphRAG-Integration, which employs a logits-based selection strategy to balance retrieval and intrinsic reasoning, our approach mitigates the impact of noisy retrievals and excessive dependence on external knowledge. Experimental results on knowledge graph QA tasks demonstrate that GraphRAG-FI significantly improves reasoning accuracy across multiple backbone models, establishing a more reliable and effective GraphRAG framework." 
+ } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 71, + 131, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 71, + 131, + 84 + ], + "spans": [ + { + "bbox": [ + 68, + 71, + 131, + 84 + ], + "type": "text", + "content": "Limitations" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 91, + 293, + 296 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 91, + 293, + 296 + ], + "spans": [ + { + "bbox": [ + 67, + 91, + 293, + 296 + ], + "type": "text", + "content": "In this work, we identify two key challenges in GraphRAG: (1) it is prone to errors due to the retrieval of irrelevant or misleading information, and (2) it places excessive emphasis on externally retrieved knowledge, which can diminish the intrinsic reasoning capabilities of LLMs. Future research will first explore a broader range of large language models to evaluate their effectiveness within GraphRAG. Additionally, further investigation into diverse filtering methods could enhance the refinement of retrieved information and reduce noise. More sophisticated fusion strategies may also be explored to dynamically balance external knowledge with the intrinsic reasoning of LLMs, enabling more effective information integration." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 68, + 317, + 127, + 328 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 317, + 127, + 328 + ], + "spans": [ + { + "bbox": [ + 68, + 317, + 127, + 328 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 335, + 291, + 774 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 69, + 335, + 291, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 335, + 291, + 380 + ], + "spans": [ + { + "bbox": [ + 69, + 335, + 291, + 380 + ], + "type": "text", + "content": "Amit Ben-Artzy and Roy Schwartz. 2024. Attend first, consolidate later: On the importance of attention in different llm layers. arXiv preprint arXiv:2409.03621." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 386, + 291, + 442 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 386, + 291, + 442 + ], + "spans": [ + { + "bbox": [ + 69, + 386, + 291, + 442 + ], + "type": "text", + "content": "Yung-Sung Chuang, Yujia Xie, Hongyin Luo, Yoon Kim, James Glass, and Pengcheng He. 2023. Dola: Decoding by contrasting layers improves factuality in large language models. arXiv preprint arXiv:2309.03883." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 449, + 291, + 505 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 449, + 291, + 505 + ], + "spans": [ + { + "bbox": [ + 69, + 449, + 291, + 505 + ], + "type": "text", + "content": "Hyung Won Chung, Le Hou, Shayne Longpre, Barret Zoph, Yi Tay, William Fedus, Yunxuan Li, Xuezhi Wang, Mostafa Dehghani, Siddhartha Brahma, et al. 2024. Scaling instruction-finetuned language models. Journal of Machine Learning Research, 25(70):1-53." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 512, + 290, + 556 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 512, + 290, + 556 + ], + "spans": [ + { + "bbox": [ + 69, + 512, + 290, + 556 + ], + "type": "text", + "content": "Jialin Dong, Bahare Fatemi, Bryan Perozzi, Lin F Yang, and Anton Tsitsulin. 2024. Don't forget to connect! improving rag with graph-based reranking. arXiv preprint arXiv:2405.18414." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 563, + 290, + 619 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 563, + 290, + 619 + ], + "spans": [ + { + "bbox": [ + 69, + 563, + 290, + 619 + ], + "type": "text", + "content": "Zengyi Gao, Yukun Cao, Hairu Wang, Ao Ke, Yuan Feng, Xike Xie, and S Kevin Zhou. 2025. Frag: A flexible modular framework for retrieval-augmented generation based on knowledge graphs. arXiv preprint arXiv:2501.09957." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 626, + 291, + 671 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 626, + 291, + 671 + ], + "spans": [ + { + "bbox": [ + 69, + 626, + 291, + 671 + ], + "type": "text", + "content": "Danny Halawi, Jean-Stanislas Denain, and Jacob Steinhardt. 2023. Overthinking the truth: Understanding how language models process false demonstrations. arXiv preprint arXiv:2307.09476." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 677, + 291, + 734 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 677, + 291, + 734 + ], + "spans": [ + { + "bbox": [ + 69, + 677, + 291, + 734 + ], + "type": "text", + "content": "Haoyu Han, Yu Wang, Harry Shomer, Kai Guo, Jiayuan Ding, Yongjia Lei, Mahantesh Halappanavar, Ryan A Rossi, Subhabrata Mukherjee, Xianfeng Tang, et al. 2024. Retrieval-augmented generation with graphs (graphrag). arXiv preprint arXiv:2501.00309." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 740, + 291, + 774 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 740, + 291, + 774 + ], + "spans": [ + { + "bbox": [ + 69, + 740, + 291, + 774 + ], + "type": "text", + "content": "Shibo Hao, Yi Gu, Haodi Ma, Joshua Hong, Zhen Wang, Daisy Wang, and Zhiting Hu. 2023. Reasoning with language model is planning with world" + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 304, + 72, + 526, + 774 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 314, + 72, + 526, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 72, + 526, + 106 + ], + "spans": [ + { + "bbox": [ + 314, + 72, + 526, + 106 + ], + "type": "text", + "content": "model. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 8154-8173." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 115, + 526, + 181 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 115, + 526, + 181 + ], + "spans": [ + { + "bbox": [ + 304, + 115, + 526, + 181 + ], + "type": "text", + "content": "Alexander Havrilla, Sharath Chandra Rarparthy, Christoforos Nalmpantis, Jane Dwivedi-Yu, Maksym Zhuravinskyi, Eric Hambro, and Roberta Raileanu. Gore: When, where, and how to improve llm reasoning via global and local refinements. In *Forty-first International Conference on Machine Learning*." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 190, + 526, + 257 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 190, + 526, + 257 + ], + "spans": [ + { + "bbox": [ + 304, + 190, + 526, + 257 + ], + "type": "text", + "content": "Xiaoxin He, Yijun Tian, Yifei Sun, Nitesh Chawla, Thomas Laurent, Yann LeCun, Xavier Bresson, and Bryan Hooi. 2025. G-retriever: Retrieval-augmented generation for textual graph understanding and question answering. 
Advances in Neural Information Processing Systems, 37:132876-132907." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 266, + 526, + 322 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 266, + 526, + 322 + ], + "spans": [ + { + "bbox": [ + 304, + 266, + 526, + 322 + ], + "type": "text", + "content": "Xiaoxin He, Yijun Tian, Yifei Sun, Nitesh V Chawla, Thomas Laurent, Yann LeCun, Xavier Bresson, and Bryan Hooi. 2024. G-retriever: Retrieval-augmented generation for textual graph understanding and question answering. arXiv preprint arXiv:2402.07630." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 331, + 526, + 397 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 331, + 526, + 397 + ], + "spans": [ + { + "bbox": [ + 304, + 331, + 526, + 397 + ], + "type": "text", + "content": "Lei Huang, Weijiang Yu, Weitao Ma, Weihong Zhong, Zhangyin Feng, Haotian Wang, Qianglong Chen, Weihua Peng, Xiaocheng Feng, Bing Qin, et al. 2024. A survey on hallucination in large language models: Principles, taxonomy, challenges, and open questions. ACM Transactions on Information Systems." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 407, + 526, + 463 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 407, + 526, + 463 + ], + "spans": [ + { + "bbox": [ + 304, + 407, + 526, + 463 + ], + "type": "text", + "content": "Ziwei Ji, Tiezheng Yu, Yan Xu, Nayeon Lee, Etsuko Ishii, and Pascale Fung. 2023. Towards mitigating llm hallucination via self reflection. In *Findings of the Association for Computational Linguistics: EMNLP* 2023, pages 1827-1843." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 471, + 526, + 505 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 471, + 526, + 505 + ], + "spans": [ + { + "bbox": [ + 304, + 471, + 526, + 505 + ], + "type": "text", + "content": "Thomas N Kipf and Max Welling. 2016. 
Semi-supervised classification with graph convolutional networks. arXiv preprint arXiv:1609.02907." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 513, + 526, + 570 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 513, + 526, + 570 + ], + "spans": [ + { + "bbox": [ + 304, + 513, + 526, + 570 + ], + "type": "text", + "content": "Mufei Li, Siqi Miao, and Pan Li. 2025. Simple is effective: The roles of graphs and large language models in knowledge-graph-based retrieval-augmented generation. In International Conference on Learning Representations." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 578, + 526, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 578, + 526, + 624 + ], + "spans": [ + { + "bbox": [ + 304, + 578, + 526, + 624 + ], + "type": "text", + "content": "Linhao Luo, Yuan-Fang Li, Gholamreza Haffari, and Shirui Pan. 2024. Reasoning on graphs: Faithful and interpretable large language model reasoning. In International Conference on Learning Representations." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 633, + 526, + 666 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 633, + 526, + 666 + ], + "spans": [ + { + "bbox": [ + 304, + 633, + 526, + 666 + ], + "type": "text", + "content": "Huan Ma, Jingdong Chen, Guangyu Wang, and Changqing Zhang. 2025. Estimating llm uncertainty with logits. arXiv preprint arXiv:2502.00290." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 304, + 676, + 526, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 676, + 526, + 732 + ], + "spans": [ + { + "bbox": [ + 304, + 676, + 526, + 732 + ], + "type": "text", + "content": "Shengjie Ma, Chengjin Xu, Xuhui Jiang, Muzhi Li, Huaren Qu, and Jian Guo. 2024. Think-on-graph 2.0: Deep and interpretable large language model reasoning with knowledge graph-guided retrieval. arXiv e-prints, pages arXiv-2407." 
+ } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 304, + 740, + 526, + 774 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 740, + 526, + 774 + ], + "spans": [ + { + "bbox": [ + 304, + 740, + 526, + 774 + ], + "type": "text", + "content": "Costas Mavromatis and George Karypis. 2024. Gnrag: Graph neural retrieval for large language model reasoning. arXiv preprint arXiv:2405.20139." + } + ] + } + ], + "index": 23 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 291, + 773 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 69, + 72, + 291, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 72, + 291, + 117 + ], + "spans": [ + { + "bbox": [ + 69, + 72, + 291, + 117 + ], + "type": "text", + "content": "Shiyu Ni, Keping Bi, Jiafeng Guo, and Xueqi Cheng. 2024. When do llms need retrieval augmentation? mitigating llms' overconfidence helps retrieval augmentation. arXiv preprint arXiv:2402.11457." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 128, + 290, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 128, + 290, + 194 + ], + "spans": [ + { + "bbox": [ + 69, + 128, + 290, + 194 + ], + "type": "text", + "content": "Ruiyang Ren, Yuhao Wang, Yingqi Qu, Wayne Xin Zhao, Jing Liu, Hao Tian, Hua Wu, Ji-Rong Wen, and Haifeng Wang. 2023. Investigating the factual knowledge boundary of large language models with retrieval augmentation. arXiv preprint arXiv:2307.11019." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 206, + 290, + 261 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 206, + 290, + 261 + ], + "spans": [ + { + "bbox": [ + 69, + 206, + 290, + 261 + ], + "type": "text", + "content": "Ishneet Sukhvinder Singh, Ritvik Aggarwal, Ibrahim Allahverdiyev, Muhammad Taha, Aslihan Akalin, Kevin Zhu, and Sean O'Brien. 2024. Chunkrag: Novel lmm-chunk filtering method for rag systems. arXiv preprint arXiv:2410.19572." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 272, + 290, + 338 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 272, + 290, + 338 + ], + "spans": [ + { + "bbox": [ + 69, + 272, + 290, + 338 + ], + "type": "text", + "content": "Gaurang Sriramanan, Siddhant Bharti, Vinu Sankar Sadasivan, Shoumik Saha, Priyatham Kattakinda, and Soheil Feizi. 2025. Llm-check: Investigating detection of hallucinations in large language models. Advances in Neural Information Processing Systems, 37:34188-34216." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 350, + 290, + 417 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 350, + 290, + 417 + ], + "spans": [ + { + "bbox": [ + 69, + 350, + 290, + 417 + ], + "type": "text", + "content": "Jiashuo Sun, Chengjin Xu, Lumingyuan Tang, Saizhuo Wang, Chen Lin, Yeyun Gong, Lionel Ni, Heung-Yeung Shum, and Jian Guo. Think-on-graph: Deep and responsible reasoning of large language model on knowledge graph. In The Twelfth International Conference on Learning Representations." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 428, + 290, + 462 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 428, + 290, + 462 + ], + "spans": [ + { + "bbox": [ + 69, + 428, + 290, + 462 + ], + "type": "text", + "content": "Alon Talmor and Jonathan Berant. 2018. The web as a knowledge-base for answering complex questions. arXiv preprint arXiv:1803.06643." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 473, + 290, + 528 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 473, + 290, + 528 + ], + "spans": [ + { + "bbox": [ + 69, + 473, + 290, + 528 + ], + "type": "text", + "content": "Hexiang Tan, Fei Sun, Wanli Yang, Yuanzhuo Wang, Qi Cao, and Xueqi Cheng. 2024. Blinded by generated contexts: How language models merge generated and retrieved contexts for open-domain qa? arXiv preprint arXiv:2401.11911." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 540, + 290, + 585 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 540, + 290, + 585 + ], + "spans": [ + { + "bbox": [ + 69, + 540, + 290, + 585 + ], + "type": "text", + "content": "Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li, Carlos Guestrin, Percy Liang, and Tatsunori B Hashimoto. 2023. Stanford alpaca: An instruction-following llama model." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 596, + 290, + 661 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 596, + 290, + 661 + ], + "spans": [ + { + "bbox": [ + 69, + 596, + 290, + 661 + ], + "type": "text", + "content": "Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. 2023. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 673, + 290, + 718 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 673, + 290, + 718 + ], + "spans": [ + { + "bbox": [ + 69, + 673, + 290, + 718 + ], + "type": "text", + "content": "Yuvraj Virk, Premkumar Devanbu, and Toufique Ahmed. 2024. Enhancing trust in llm-generated code summaries with calibrated confidence scores. arXiv preprint arXiv:2404.19318." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 729, + 290, + 773 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 729, + 290, + 773 + ], + "spans": [ + { + "bbox": [ + 69, + 729, + 290, + 773 + ], + "type": "text", + "content": "Yile Wang, Peng Li, Maosong Sun, and Yang Liu. 2023. Self-knowledge guided retrieval augmentation for large language models. arXiv preprint arXiv:2310.05002." + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 304, + 72, + 524, + 531 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 305, + 72, + 524, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 72, + 524, + 127 + ], + "spans": [ + { + "bbox": [ + 305, + 72, + 524, + 127 + ], + "type": "text", + "content": "Yuqi Wang, Boran Jiang, Yi Luo, Dawei He, Peng Cheng, and Liangcai Gao. 2024. Reasoning on efficient knowledge paths: Knowledge graph guides large language model for domain question answering. arXiv preprint arXiv:2404.10384." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 136, + 524, + 190 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 136, + 524, + 190 + ], + "spans": [ + { + "bbox": [ + 304, + 136, + 524, + 190 + ], + "type": "text", + "content": "Xiaoqian Wu, Yong-Lu Li, Jianhua Sun, and Cewu Lu. 2023. Symbol-llm: leverage language models for symbolic system in visual human activity reasoning. Advances in Neural Information Processing Systems, 36:29680-29691." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 200, + 524, + 244 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 200, + 524, + 244 + ], + "spans": [ + { + "bbox": [ + 304, + 200, + 524, + 244 + ], + "type": "text", + "content": "Lijie Yang, Zhihao Zhang, Zhuofu Chen,zikun Li, and Zhihao Jia. 2024. Tidaldecode: Fast and accurate llm decoding with position persistent sparse attention. 
arXiv preprint arXiv:2410.05076." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 252, + 524, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 252, + 524, + 319 + ], + "spans": [ + { + "bbox": [ + 304, + 252, + 524, + 319 + ], + "type": "text", + "content": "Wen-tau Yih, Matthew Richardson, Christopher Meek, Ming-Wei Chang, and Jina Suh. 2016. The value of semantic parse labeling for knowledge base question answering. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pages 201-206." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 327, + 524, + 383 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 327, + 524, + 383 + ], + "spans": [ + { + "bbox": [ + 304, + 327, + 524, + 383 + ], + "type": "text", + "content": "Shenglai Zeng, Jiankun Zhang, Pengfei He, Yue Xing, Yiding Liu, Han Xu, Jie Ren, Shuaiqiang Wang, Dawei Yin, Yi Chang, et al. 2024a. The good and the bad: Exploring privacy issues in retrieval-augmented generation (rag). arXiv preprint arXiv:2402.16893." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 391, + 524, + 457 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 391, + 524, + 457 + ], + "spans": [ + { + "bbox": [ + 304, + 391, + 524, + 457 + ], + "type": "text", + "content": "Shenglai Zeng, Jiankun Zhang, Bingheng Li, Yuping Lin, Tianqi Zheng, Dante Everaert, Hanqing Lu, Hui Liu, Yue Xing, Monica Xiao Cheng, et al. 2024b. Towards knowledge checking in retrieval-augmented generation: A representation perspective. arXiv preprint arXiv:2411.14572." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 465, + 524, + 531 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 465, + 524, + 531 + ], + "spans": [ + { + "bbox": [ + 304, + 465, + 524, + 531 + ], + "type": "text", + "content": "Qinggang Zhang, Shengyuan Chen, Yuanchen Bei, Zheng Yuan, Huachi Zhou, Zijin Hong, Junnan Dong, Hao Chen, Yi Chang, and Xiao Huang. 2025. A survey of graph retrieval-augmented generation for customized large language models. arXiv preprint arXiv:2501.13958." + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 84, + 90, + 274, + 135 + ], + "blocks": [ + { + "bbox": [ + 118, + 69, + 239, + 80 + ], + "lines": [ + { + "bbox": [ + 118, + 69, + 239, + 80 + ], + "spans": [ + { + "bbox": [ + 118, + 69, + 239, + 80 + ], + "type": "text", + "content": "Table 5: Statistics of datasets." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 84, + 90, + 274, + 135 + ], + "lines": [ + { + "bbox": [ + 84, + 90, + 274, + 135 + ], + "spans": [ + { + "bbox": [ + 84, + 90, + 274, + 135 + ], + "type": "table", + "html": "
Datasets#Train#TestMax #hop
WebQSP2,8261,6282
CWQ27,6393,5314
", + "image_path": "167a42a5baf1a000a36b149db67dd2f9386cc0c431471e8215a23ca01506e714.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 68, + 154, + 141, + 169 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 154, + 141, + 169 + ], + "spans": [ + { + "bbox": [ + 68, + 154, + 141, + 169 + ], + "type": "text", + "content": "A Appendix" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 176, + 138, + 187 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 176, + 138, + 187 + ], + "spans": [ + { + "bbox": [ + 68, + 176, + 138, + 187 + ], + "type": "text", + "content": "A.1 Datasets" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 193, + 290, + 273 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 193, + 290, + 273 + ], + "spans": [ + { + "bbox": [ + 67, + 193, + 290, + 273 + ], + "type": "text", + "content": "We utilize two benchmark KGQA datasets, WebQSP (Yih et al., 2016) and CWQ (Talmor and Berant, 2018), as proposed in previous studies. Following ROG, we maintain the same training and testing splits. The dataset statistics are provided in Table 5." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 68, + 283, + 177, + 296 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 283, + 177, + 296 + ], + "spans": [ + { + "bbox": [ + 68, + 283, + 177, + 296 + ], + "type": "text", + "content": "A.2 Prompt Example" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 85, + 442, + 127, + 454 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 442, + 127, + 454 + ], + "spans": [ + { + "bbox": [ + 85, + 442, + 127, + 454 + ], + "type": "text", + "content": "Prompts" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 84, + 464, + 274, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 464, + 274, + 517 + ], + "spans": [ + { + "bbox": [ + 84, + 464, + 274, + 517 + ], + "type": "text", + "content": "Based on the reasoning paths, please answer the given question. Please keep the answer as simple as possible and return all the possible answers as a list." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 85, + 518, + 166, + 530 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 518, + 166, + 530 + ], + "spans": [ + { + "bbox": [ + 85, + 518, + 166, + 530 + ], + "type": "text", + "content": "Reasoning Paths:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 88, + 532, + 176, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 532, + 176, + 544 + ], + "spans": [ + { + "bbox": [ + 88, + 532, + 176, + 544 + ], + "type": "text", + "content": "High Priority Paths:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 85, + 545, + 274, + 584 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 545, + 274, + 584 + ], + "spans": [ + { + "bbox": [ + 85, + 545, + 274, + 584 + ], + "type": "text", + "content": "Northern Colorado Bears football " + }, + { + "bbox": [ + 85, + 545, + 274, + 584 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 85, + 
545, + 274, + 584 + ], + "type": "text", + "content": " education.educational_institution.sports_teams " + }, + { + "bbox": [ + 85, + 545, + 274, + 584 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 85, + 545, + 274, + 584 + ], + "type": "text", + "content": " University of Northern Colorado" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 88, + 586, + 165, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 586, + 165, + 597 + ], + "spans": [ + { + "bbox": [ + 88, + 586, + 165, + 597 + ], + "type": "text", + "content": "Additional Paths:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 85, + 599, + 274, + 638 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 599, + 274, + 638 + ], + "spans": [ + { + "bbox": [ + 85, + 599, + 274, + 638 + ], + "type": "text", + "content": "Northern Colorado Bears football " + }, + { + "bbox": [ + 85, + 599, + 274, + 638 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 85, + 599, + 274, + 638 + ], + "type": "text", + "content": " education.educational_institution.sports_teams " + }, + { + "bbox": [ + 85, + 599, + 274, + 638 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 85, + 599, + 274, + 638 + ], + "type": "text", + "content": " University of Northern Colorado" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 85, + 640, + 273, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 640, + 273, + 652 + ], + "spans": [ + { + "bbox": [ + 85, + 640, + 273, + 652 + ], + "type": "text", + "content": "Greeley " + }, + { + "bbox": [ + 85, + 640, + 273, + 652 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 85, + 640, + 273, + 652 + ], + "type": "text", + "content": " location.location.containedby" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 85, + 654, + 210, + 664 + ], + "type": "text", + 
"angle": 0, + "lines": [ + { + "bbox": [ + 85, + 654, + 210, + 664 + ], + "spans": [ + { + "bbox": [ + 85, + 654, + 210, + 664 + ], + "type": "text", + "content": "→ United States of America" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 85, + 667, + 273, + 692 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 667, + 273, + 692 + ], + "spans": [ + { + "bbox": [ + 85, + 667, + 273, + 692 + ], + "type": "text", + "content": "Greeley " + }, + { + "bbox": [ + 85, + 667, + 273, + 692 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 85, + 667, + 273, + 692 + ], + "type": "text", + "content": " location.location.containedby " + }, + { + "bbox": [ + 85, + 667, + 273, + 692 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 85, + 667, + 273, + 692 + ], + "type": "text", + "content": " Greeley Masonic Temple" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 85, + 694, + 274, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 694, + 274, + 733 + ], + "spans": [ + { + "bbox": [ + 85, + 694, + 274, + 733 + ], + "type": "text", + "content": "Question: What educational institution has a football sports team named Northern Colorado Bears is in Greeley, Colorado?" 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 102, + 759, + 255, + 772 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 759, + 255, + 772 + ], + "spans": [ + { + "bbox": [ + 102, + 759, + 255, + 772 + ], + "type": "text", + "content": "Figure 5: An Example of Our Prompt" + } + ] + } + ], + "index": 17, + "type": "text" + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2503_13xxx/2503.13861/24ebc1ba-af7b-4d3e-a5d9-ba11158e223d_content_list.json b/data/2025/2503_13xxx/2503.13861/24ebc1ba-af7b-4d3e-a5d9-ba11158e223d_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..35b2f8583709f405ad3c734bf377a8f8c27d0dd6 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13861/24ebc1ba-af7b-4d3e-a5d9-ba11158e223d_content_list.json @@ -0,0 +1,1608 @@ +[ + { + "type": "text", + "text": "RAD: Retrieval-Augmented Decision-Making of Meta-Actions with Vision-Language Models in Autonomous Driving", + "text_level": 1, + "bbox": [ + 161, + 161, + 835, + 200 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yujin Wang", + "bbox": [ + 201, + 227, + 347, + 244 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Junfeng Jiao", + "bbox": [ + 223, + 244, + 332, + 260 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Tongji University", + "bbox": [ + 201, + 260, + 347, + 277 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Quanfeng Liu", + "bbox": [ + 347, + 228, + 467, + 244 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Hongqing Chu1,*", + "bbox": [ + 371, + 244, + 507, + 260 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{2}$ Yale University", + "bbox": [ + 386, + 261, + 515, + 277 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Zhengxin Jiang", + "bbox": [ + 509, + 228, + 638, + 244 + ], + "page_idx": 0 + }, + { + "type": "text", + 
"text": "Bingzhao Gao", + "bbox": [ + 549, + 244, + 670, + 260 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "nyi Wang2", + "bbox": [ + 709, + 228, + 791, + 244 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Hong Chen", + "bbox": [ + 714, + 244, + 811, + 261 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "3University of Texas at Austin", + "bbox": [ + 557, + 261, + 791, + 277 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "chuhongqing@tongji.edu.cn", + "bbox": [ + 384, + 280, + 608, + 293 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 248, + 324, + 324, + 338 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Accurately understanding and deciding high-level meta- actions is essential for ensuring reliable and safe autonomous driving systems. While vision-language models (VLMs) have shown significant potential in various autonomous driving tasks, they often suffer from limitations such as inadequate spatial perception and hallucination, reducing their effectiveness in complex autonomous driving scenarios. To address these challenges, we propose a retrieval-augmented decision-making (RAD) framework, a novel architecture designed to enhance VLMs' capabilities to reliably generate meta-actions in autonomous driving scenes. RAD leverages a retrieval-augmented generation (RAG) pipeline to dynamically improve decision accuracy through a three-stage process consisting of the embedding flow, retrieving flow, and generating flow. Additionally, we fine-tune VLMs on a specifically curated dataset derived from the NuScenes dataset to enhance their spatial perception and bird's-eye view image comprehension capabilities. 
Extensive experimental evaluations on the curated NuScenes-based dataset demonstrate that RAD outperforms baseline methods across key evaluation metrics, including match accuracy, and F1 score, and self-defined overall score, highlighting its effectiveness in improving meta-action decision-making for autonomous driving tasks.", + "bbox": [ + 89, + 355, + 482, + 687 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 90, + 715, + 221, + 730 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In recent years, the race towards fully autonomous vehicles has spurred extensive research into robust decision-making approaches, a fundamental task in autonomous driving systems [26, 41, 49]. Ensuring safe and efficient motion planning requires continuous interpretation of dynamic environments, real-time reasoning under uncertainty, and efficient integration of vast amounts of multimodal data [28].", + "bbox": [ + 89, + 740, + 482, + 837 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Traditional autonomous driving systems adopt a modular development strategy, in which perception, prediction,", + "bbox": [ + 89, + 838, + 482, + 866 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "planning, and control are developed and optimized independently before being integrated into the vehicle system [15, 47]. However, as the information flow propagates across these modules, errors and delays can accumulate, potentially leading to suboptimal or even unreasonable driving decisions. To further mitigate these errors and improve computational efficiency, end-to-end autonomous driving has emerged as a prominent research direction [7, 8].", + "bbox": [ + 510, + 326, + 904, + 436 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "End-to-end refers to a model that directly receives input from sensor data (e.g., cameras, LiDAR) and directly outputs vehicle planning decisions. 
In recent studies [11, 18, 22], end-to-end autonomous driving algorithms have demonstrated their superiority in both simulation environments and real-world road tests. Moreover, the emergence of foundation models provides a promising solution to enhance motion planning performance, improve generalization across diverse scenarios, and increase interpretability in end-to-end autonomous driving [13, 16, 29, 38]. Trained on huge amounts of human knowledge, these models exhibit advanced comprehension and reasoning capabilities, highlighting the immense potential of artificial intelligence in complex decision-making tasks. Integrating such foundation models into autonomous driving systems could facilitate the development of human-like driving behaviors, advancing the field toward safer and more adaptable autonomous vehicles.", + "bbox": [ + 510, + 437, + 905, + 684 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Autonomous driving tasks require models with robust visual perception capabilities, making vision-language models (VLMs) particularly well-suited for this domain. VLMs trained on large-scale data often demonstrate strong reasoning capabilities, enabling them to infer the evolution of complex driving scenarios. Current research [19, 31, 33, 34, 36] has focused on fine-tuning pre-trained VLMs using visual question-answer (VQA) pairs composed of scene images and corresponding driving actions. 
This approach enables VLMs to generate feasible trajectories, enhancing their applicability in real-world autonomous driving tasks.", + "bbox": [ + 510, + 686, + 905, + 838 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "However, fine-tuning or even full-scale fine-tuning of VLMs using large-scale datasets requires substantial com", + "bbox": [ + 512, + 838, + 905, + 866 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 494, + 888, + 502, + 898 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/2fcfd978558698a24c8389607cd68771a9521155361203e02c91eaff8017791f.jpg", + "image_caption": [ + "Figure 1. The overview of our RAD method. The framework consists of four working flows, namely embedding flow, retrieving flow, fine-tuning flow and generating flow. The embedding flow encodes front-view images and BEV images into a vector database. Given a query scene, the retrieving flow retrieves the most similar scene from the database. The fine-tuning flow involves fine-tuning VLMs to enhance spatial perception and BEV image comprehension. The generating flow guides VLMs in generating contextually appropriate meta-actions according to the query scene, the retrieved scene, its ground truth meta-action, and proper prompts." + ], + "image_footnote": [], + "bbox": [ + 137, + 130, + 860, + 382 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "putational resources. Additionally, deploying VLMs with an extremely large number of parameters on vehicle-end hardware poses significant constraints. To address these challenges, retrieval-augmented generation (RAG) has emerged as a promising approach to enhance the decision-making capabilities of VLMs by incorporating external knowledge bases [14, 42]. The core idea of RAG is to augment generative models with a retrieval module that dynamically retrieves relevant textual information during the generation process. 
In vision-language tasks, RAG can effectively mitigate limitations caused by knowledge scarcity. By integrating external knowledge bases, models can not only extract information from images but also retrieve supplementary knowledge, thereby improving the robustness and accuracy of the generated outputs. Although the direct application of RAG to the decision-making process in autonomous driving remains limited, an increasing number of studies have explored its potential in specific tasks such as scene understanding and regulation retrieval [4, 20, 46].", + "bbox": [ + 87, + 483, + 485, + 747 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this work, we propose a retrieval-augmented decision-making (RAD) framework, introducing a novel approach to assist VLMs in generating meta-actions using RAG for the first time, as depicted in Figure 1. The main research contributions of this work are outlined as follows:", + "bbox": [ + 89, + 751, + 484, + 820 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "- Pre-Training VLMs for Spatial Perception Tasks: We construct obstacle perception tasks based on the NuScenes dataset [3], incorporating VQA pairs designed", + "bbox": [ + 90, + 824, + 484, + 866 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "to capture obstacle categories, positions, and other spatial information. This pre-training process enables VLMs to explicitly learn key geometric features such as the locations and sizes of obstacles, leading to improved performance in spatial perception tasks.", + "bbox": [ + 524, + 483, + 905, + 552 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Establishing an External Knowledge Base with NuScenes Ground Truth Data: We select a subset of scenes containing navigation information, historical trajectory data, and future meta-action ground truth. Furthermore, we generate bird's-eye view (BEV) images corresponding to the scene images. 
The surround-view images from these scenes are then encoded into vector representations using BLIP-2 [25], alongside the BEV images, to form the knowledge base.", + "- Developing a Retrieval and Generation Pipeline for Meta-Action Decision-Making using Fine-Tuned VLMs and RAG: We employ cosine similarity to retrieve the most similar scene from the external knowledge base including the front-view image of the current scene. The corresponding six surround-view images, speed information, navigation data, and ground truth trajectory are then used as auxiliary inputs, guiding the VLM in generating a trustworthy planning trajectory for the current scene." + ], + "bbox": [ + 512, + 552, + 907, + 802 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The remainder of this paper is organized as follows: In Section 2, the detailed literature review is conducted. In Section 3, four working flows of the proposed RAD framework are introduced. In Section 4, comparative experiments", + "bbox": [ + 512, + 810, + 907, + 866 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 492, + 888, + 504, + 898 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "and ablation studies are designed. Section 5 summarizes the work and discusses future research directions.", + "bbox": [ + 90, + 126, + 482, + 153 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2. Related Works", + "text_level": 1, + "bbox": [ + 90, + 168, + 240, + 184 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.1. 
Multimodal Large Language Models in Autonomous Driving", + "text_level": 1, + "bbox": [ + 90, + 193, + 482, + 223 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Utilizing multimodal large language models (MLLMs) in autonomous driving enhances decision-making by leveraging their extensive knowledge and reasoning capabilities through multisource information such as vision, language, and rules, significantly improving scene understanding, strategy generation, and interpretability. DriveMLM [39] employed MLLMs to generate high-level behavioral decisions (e.g., lane-changing, deceleration, acceleration, etc.), which were then integrated with traditional motion planning modules, balancing flexibility and interpretability. \"Drive as you speak\" [10] enriched large language models (LLMs) with comprehensive environmental data from different vehicle modules, leading to safer decisions. \"Driving with LLMs\" [6] introduced a LLM that generated 10,000 driving scenarios for agent training. \"Drive like a human\" [13] demonstrated LLMs' capabilities of understanding and interacting with environments in closed-loop systems, effectively navigating long-tail autonomous driving scenarios. DriveVLM [37] adopted a multistage reasoning chain that combined scene description, dynamic analysis, and hierarchical planning. Additionally, DriveVLM-Dual incorporated traditional 3D perception algorithms to ensure both cognitive depth and real-time control. Pix2Planning [30] formulated planning as an autoregressive sequence prediction problem, using a vision-language Transformer to generate trajectory points. VLP [31] incorporated linguistic descriptions into the training process and aligned them with visual features, significantly improving cross-city and cross-scenario generalization. To enhance interpretability, some studies [44, 45] introduced \"future trajectory images\", which were processed by multimodal models to generate natural language explanations. 
Senna [23] further refined the decision-making process by separating high-level meta-actions from low-level trajectory predictions. In this framework, VLMs first produced directional or speed-level decisions before end-to-end models executed precise paths, thereby achieving a hierarchical strategy that was similar to human driving behaviors.", + "bbox": [ + 92, + 227, + 485, + 753 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "However, these methods are prone to hallucination, a limitation arising from the reliance of MLLMs on learned associations between visual inputs and language-based reasoning. As a result, they may misinterpret ambiguous or occluded objects, leading to incorrect high-level decision-making. This issue becomes particularly critical in long-tail scenarios, where the model encounters rare or underrepresented driving conditions not well-covered in the training", + "bbox": [ + 90, + 756, + 482, + 866 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "data. Such misinterpretations can ultimately compromise the reliability and safety of the autonomous driving system.", + "bbox": [ + 512, + 126, + 905, + 154 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.2. Retrieval-Augmented Generation in Vision-Language Models", + "text_level": 1, + "bbox": [ + 512, + 166, + 905, + 195 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In vision-language tasks, RAG mitigates knowledge limitations by leveraging external knowledge bases, enabling models to extract insights from images while supplementing them with retrieved contextual data. This dual approach significantly helps mitigate model hallucination and improve planning accuracy. Jiang et al. [24] introduced a RAG-based framework for VLMs, demonstrating its effectiveness in complex tasks requiring extensive background knowledge. 
Their study underscored the limitations of conventional end-to-end VLMs when faced with knowledge deficiencies, whereas RAG facilitated richer contextual integration, enhancing both reasoning and generation. Building on this, Shao et al. [35] further investigated RAG's role in VQA tasks, showing that combining retrieval mechanisms with pre-trained VLMs significantly strengthened model performance in complex reasoning scenarios. Additionally, Ram et al. [32] examined RAG's impact on pre-training and fine-tuning, illustrating that incorporating large-scale external data sources during pre-training improved downstream performance by enhancing cross-modal reasoning, particularly in retrieval-based tasks. Meanwhile, Zheng et al. [50] emphasized RAG's broader advantages, particularly in improving generative flexibility and adaptability in multimodal tasks. Their findings highlighted RAG's effectiveness in handling scenarios lacking sufficient annotations or domain-specific knowledge, reinforcing its potential in bridging knowledge gaps for more informed and context-aware model outputs. Hussien et al. [20] illustrated how RAG-augmented VLMs enhanced cross-modal retrieval, particularly by strengthening associations between images and textual data. For performance optimization, Yuan et al. [46] introduced a dynamic knowledge retrieval mechanism, emphasizing real-time adjustments in retrieval and generation processes based on task-specific requirements. This adaptive approach allowed RAG to selectively retrieve the most relevant background knowledge, improving performance across various multimodal applications. Cai et al. [4] developed a traffic regulation retrieval agent based on RAG, enabling automatic retrieval of relevant traffic rules and guidelines based on the ego vehicle's status. Moreover, Cui et al. 
[12] incorporated a RAG-based memory module that continuously learned takeover preferences through human feedback to enhance motion planning.", + "bbox": [ + 512, + 200, + 907, + 796 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Despite its strong potential, research on directly utilizing RAG to guide VLMs in meta-action decision-making remains limited. To address this gap, we propose the RAD framework, which, for the first time, integrates RAG with pre-training for spatial perception capabilities, enabling", + "bbox": [ + 512, + 797, + 907, + 868 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 492, + 888, + 504, + 898 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "more effective decision-making of meta-actions.", + "bbox": [ + 90, + 126, + 410, + 139 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3. Methodology", + "text_level": 1, + "bbox": [ + 90, + 152, + 225, + 168 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "As shown in Figure 1, the proposed RAD framework comprises four work flows: embedding flow, retrieving flow, fine-tuning flow and generating flow. Among these, the fine-tuning flow operates independently, as its primary objective is to enhance the spatial perception capabilities of VLMs through separate fine-tuning. In the embedding flow, BEV images are generated to correspond with front-view scene images from the NuScenes dataset. These image pairs are encoded into a vector space using a frozen BLIP-2 model and the separate embeddings are then concatenated and stored in a vector database. In the retrieving flow, a new front-view image and its corresponding BEV image serve as a query. These images are encoded into the vector space using the same frozen BLIP-2 model. Cosine similarity is then computed between the query images and those stored in the database, enabling the retrieval of the most similar scene from the database. 
Furthermore, based on the relative positional relationships between consecutive scenes in the NuScenes dataset, the ground truth meta-actions executed in each scene can be extracted. Finally, in the generating flow, the query scene, retrieved scene, its ground truth meta-action, and proper prompts serve as inputs to the VLMs. These inputs guide the model to make decisions and generate meta-actions, ensuring more accurate and context-aware autonomous driving behaviors.", + "bbox": [ + 89, + 175, + 482, + 521 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "All the extracted meta-actions are shown as follows:", + "bbox": [ + 109, + 521, + 455, + 533 + ], + "page_idx": 3 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(1) Speed up (rapidly) (2) Slow down (rapidly)", + "(3) Turn left/right (4) Drive along the curve", + "(5) Turn around (6) Change lane to the left/right", + "(7) Reverse (8) Shift slightly to the left/right", + "(9) Stop (10) Go straight constantly/slowly" + ], + "bbox": [ + 105, + 545, + 473, + 612 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1. The Fine-Tuning Flow", + "text_level": 1, + "bbox": [ + 90, + 638, + 297, + 653 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Making precise meta-action decisions in autonomous driving requires an accurate understanding of the environment. If a model lacks sufficient spatial perception capabilities, it may fail to construct a reliable environmental representation, potentially leading to obstacle avoidance failures in meta-action decision-making. VLMs typically rely on monocular or surround-view camera inputs and estimate depth information from single-frame images. However, in long-trail scenarios, monocular vision exhibits significant depth estimation errors [5]. 
Experimental results on the NuScenes dataset indicate that existing VLMs generally lack robust spatial perception, which severely impacts the safety of decision-making and motion control [43].", + "bbox": [ + 89, + 659, + 482, + 838 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To address the aforementioned challenges, VLMs should first undergo fine-tuning to enhance their spatial perception", + "bbox": [ + 89, + 838, + 482, + 866 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/87a3dad6ccd70533953de17fe61bb786242837c776ed4147096cbd1507c1b284.jpg", + "image_caption": [ + "Figure 2. The process of generating a dataset for spatial perception enhancement based on the NuScenes dataset" + ], + "image_footnote": [], + "bbox": [ + 521, + 124, + 902, + 234 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "capabilities. The structure of VLMs typically consists of a vision encoder and an LLM. In this work, we focus on fine-tuning only the LLM component to enhance its spatial perception. We utilize the NuScenes dataset to generate a specified dataset for spatial perception enhancement, following the process illustrated in Figure 2.", + "bbox": [ + 510, + 294, + 905, + 378 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "During the image filtering process, it is necessary to ensure the uniqueness of the VQA pairs by cross-referencing the annotated data from the origin NuScenes dataset. 
The generated dataset for fine-tuning includes over 100,000 training samples, covering key spatial perception tasks such as object class recognition, object distance estimation and object size estimation.", + "bbox": [ + 510, + 378, + 905, + 474 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "For spatial perception enhancement fine-tuning, the loss function for a single sample is defined as follows:", + "bbox": [ + 512, + 475, + 904, + 502 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} J = \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\left[ \\lambda_ {1, i} \\left(- \\sum_ {c = 1} ^ {n} y _ {c, i} \\log \\left(p _ {c, i}\\right)\\right) \\right. \\\\ \\left. + \\lambda_ {2, i} \\left(- \\frac {1}{3} \\sum_ {j = 1} ^ {3} \\left(z _ {j, i} - z _ {j, i} ^ {*}\\right) ^ {2}\\right) + \\lambda_ {3, i} \\left(x _ {i} - x _ {i} ^ {*}\\right) ^ {2} \\right] \\tag {1} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 527, + 513, + 905, + 606 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where, $N$ is the batch size during fine-tuning; $\\lambda_{1,i}$ is the loss identifier for object class recognition in the $i$ -th sample (if there is a corresponding class, $\\lambda_{1,i}$ will be set to 1; and otherwise, $\\lambda_{1,i}$ will be set to 0); $\\lambda_{2,i}$ is the loss identifier for object size estimation; $\\lambda_{3,i}$ is the loss identifier for object distance estimation; $n$ is the total number of classes in the classification task; $y_{c,i}$ is the label for the $i$ -th sample belonging to class $c$ , represented by one-hot encoding; $p_{c,i}$ is the probability of the $i$ -th sample being classified as class $c$ by the model; $z_{j,i}$ is the output size of the $i$ -th sample in the $j$ -th dimension from the model; $z_{j,i}^{*}$ is the ground truth size of the $i$ -th sample in the $j$ -th dimension; $x_{i}$ is the model's output for the distance from the $i$ -th sample to the reference 
object; and $x_{i}^{*}$ is the ground truth distance from the $i$ -th sample to the reference object.", + "bbox": [ + 510, + 617, + 905, + 824 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In this work, we fine-tune a series of VLMs, primarily from the Qwen family [1, 2, 9], using low-rank adaptation (LoRA) [17, 51]. The overall training is conducted for", + "bbox": [ + 510, + 825, + 904, + 866 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 492, + 888, + 502, + 898 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/3b2c0252547851bd74c7c00b8d60c2fc30f5ecd67aa4851a41887284e269116f.jpg", + "image_caption": [ + "Figure 3. The fine-tuning VQA paradigm for BEV image understanding" + ], + "image_footnote": [], + "bbox": [ + 97, + 129, + 394, + 281 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "System: This image illustrates the BEV view of a driving scene, showing the area of 60 meters ahead, 30 meters behind, 30 meters left and right of the ego vehicle. The units of longitudinal and lateral coordinates are meters. The ego vehicle is located at the center $[0,0]$ , represented by a blue rectangle. The red rectangles represent the objects of vehicle type, including cars, trucks, etc. If there is an arrow on the red rectangle, it means that it will move in the direction of the arrow. The green dots represent pedestrians, and the green arrows also indicate the moving direction. Black dots are static obstacles, including roadblocks, traffic lights, etc.", + "bbox": [ + 405, + 133, + 885, + 277 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Question 1: What kind of object (pedestrian, vehicle, or static obstacle) is located within the coordinate [7.6,8.9] in this image?", + "Answer 1: There is a vehicle located within the coordinate [7.6,8.9].", + "Question 2: What is the central position coordinate of the left-front static obstacle in this image? 
The result retains one decimal place after the decimal point.", + "Answer 2: The central position coordinate of the left-front static obstacle is [24.5,17.2].", + "Question 3: What is the distance from the left-front static obstacle to the left pedestrian in this image? The result retains one decimal place after the decimal point.", + "Answer 3: The distance from the left-front static obstacle to the left pedestrian is $16.3\\mathrm{m}$" + ], + "bbox": [ + 102, + 291, + 875, + 420 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "three epochs. Additionally, following the BEVFormer [27], we generate BEV images from the existing surround-view images in the NuScenes dataset. Intuitively, incorporating BEV images helps the model better understand the relative spatial relationships of objects in driving scenes. Therefore, it is also necessary to train VLMs to recognize and interpret BEV images effectively. The fine-tuning paradigm, as illustrated in Figure 3, follows a similar approach to the VQA pair construction method based on ground truth information to develop a robust ability to understand BEV images.", + "bbox": [ + 89, + 472, + 482, + 609 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.2. The Embedding Flow", + "text_level": 1, + "bbox": [ + 90, + 631, + 292, + 645 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In the embedding flow, we encode front-view images from the NuScenes dataset along with the pre-generated BEV images into a unified vector space. Since this embedding operation does not involve cross-modal content, the frozen BLIP-2 model weights can be directly utilized, ensuring computational efficiency and consistency. To maintain the one-to-one correspondence between front-view images and BEV images, their embedding vectors are concatenated within this flow. 
The resulting concatenated vectors are then uniformly stored in an indexed vector database.", + "bbox": [ + 89, + 655, + 482, + 793 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3. The Retrieving Flow", + "text_level": 1, + "bbox": [ + 90, + 814, + 284, + 829 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The core of the retrieving flow lies in the computation of cosine similarity. Given two image embeddings $\\mathbf{v}_i$ and $\\mathbf{v}_j$ ,", + "bbox": [ + 89, + 838, + 484, + 866 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "cosine similarity is defined as:", + "bbox": [ + 512, + 472, + 717, + 485 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\text {s i m i l a r i t y} _ {i, j} = \\frac {\\mathbf {v} _ {i} \\cdot \\mathbf {v} _ {j}}{\\| \\mathbf {v} _ {i} \\| \\| \\mathbf {v} _ {j} \\|} \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 616, + 491, + 905, + 520 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where, $\\| *\\|$ represents the Euclidean norm.", + "bbox": [ + 512, + 526, + 803, + 539 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The main framework of the retrieving flow is illustrated in Figure 4. For a new scene, we first generate its BEV images from the surround-view images. The front-view image and BEV image of the new scene jointly trigger a query scene. The embeddings for the new front-view image and BEV image are then extracted using the frozen BLIP-2 model. Since the vector database stores concatenated embedding vectors, the embeddings for the front-view image and BEV image are retrieved through length decomposition. The cosine similarity between the new front-view image embeddings and those stored in the database is computed and denoted as $similarity_{fv}$ . Similarly, the cosine similarity between the new BEV image embeddings and those stored in the database is computed and denoted as $similarity_{bev}$ . 
To flexibly adjust the retrieval preference toward either the front-view image or the BEV image, a hyperparameter $\\omega$ is introduced. In this work, $\\omega$ is set to 0.5 as a balanced weight for retrieval. The overall similarity could be calculated as follows:", + "bbox": [ + 512, + 541, + 905, + 802 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\text {s i m i l a r i t y} = (1 - \\omega) \\cdot \\text {s i m i l a r i t y} _ {f v} + \\omega \\cdot \\text {s i m i l a r i t y} _ {\\text {b e v}} \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 512, + 810, + 905, + 838 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The scene with the highest overall similarity is then retrieved from the vector database. Using its index, we can", + "bbox": [ + 512, + 839, + 905, + 866 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 492, + 888, + 504, + 898 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "obtain the corresponding front-view image, BEV image, and pre-extracted ground truth meta-action.", + "bbox": [ + 90, + 124, + 482, + 153 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/f7ade894530b154e1b79cdbf72256b8bcb4124794ba45c0c288a410898e29806.jpg", + "image_caption": [ + "Figure 4. The main framework of the retrieving flow" + ], + "image_footnote": [], + "bbox": [ + 117, + 171, + 468, + 420 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.4. The Generating Flow", + "text_level": 1, + "bbox": [ + 90, + 475, + 290, + 489 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In the generating flow, we primarily employ prompt engineering to guide VLMs in reasoning based on the retrieved scene and its corresponding meta-action, enabling them to make accurate meta-action decisions for the new scene. 
The prompts should be divided into two key components:", + "bbox": [ + 89, + 495, + 482, + 565 + ], + "page_idx": 5 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- System Prompt: Guide VLMs to make meta-action decisions based on the provided images.", + "- RAG-Specific Prompt: Instruct VLMs to understand the retrieved scene images and corresponding meta-actions." + ], + "bbox": [ + 90, + 568, + 482, + 624 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "For this process, we primarily use the Qwen series of VLMs, as they support multiple image inputs, making prompt design more flexible and effective. With structured and well-designed prompts, the VLMs analyze the front-view image and BEV image of the current scene, ultimately generating a single meta-action as the final output.", + "bbox": [ + 89, + 627, + 482, + 712 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 90, + 725, + 223, + 740 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1. Dataset Preparation", + "text_level": 1, + "bbox": [ + 90, + 749, + 282, + 764 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We divide the 34,000 scenes from the NuScenes dataset into three subsets: 10,000 scenes are allocated for fine-tuning VLMs, focusing on enhancing spatial perception and BEV image understanding; 20,000 scenes are embedded in the vector database as prior information; and the remaining 4,000 scenes serve as the test set, used to evaluate the framework's effectiveness and the model's overall performance.", + "bbox": [ + 89, + 769, + 482, + 866 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. Evaluation Metrics", + "text_level": 1, + "bbox": [ + 512, + 124, + 695, + 137 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To assess the performance, we employ traditional classification metrics such as accuracy, precision, recall and F1 score. 
Additionally, we introduce a customized partial match score to account for semantically similar but not entirely identical cases. Finally, we utilize a weighted method to compute a comprehensive performance score.", + "bbox": [ + 512, + 145, + 905, + 227 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We firstly adopt ExactMatchAccuracy to evaluate whether the model provides a fully correct meta-action for a given scene, which is formally defined as follows:", + "bbox": [ + 512, + 228, + 905, + 269 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nExactMatchAccuracy = \\frac{N_{match}}{N_{total}} \\tag{4}\n$$\n", + "text_format": "latex", + "bbox": [ + 588, + 278, + 905, + 307 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where, $N_{match}$ is the number of scenes where the generated meta-actions exactly match the ground truth; and $N_{total}$ is the total number of scenes.", + "bbox": [ + 512, + 313, + 905, + 354 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "For each meta-action, Precision, Recall, and $F1$ can be used as evaluation metrics, which are defined as follows:", + "bbox": [ + 512, + 355, + 905, + 382 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nPrecision_{i} = \\frac{TP_{i}}{TP_{i} + FP_{i}} \\tag{5}\n$$\n", + "text_format": "latex", + "bbox": [ + 616, + 390, + 905, + 420 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nRecall_{i} = \\frac{TP_{i}}{TP_{i} + FN_{i}} \\tag{6}\n$$\n", + "text_format": "latex", + "bbox": [ + 628, + 426, + 905, + 456 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nF1_{i} = \\frac{2 \\times Precision_{i} \\times Recall_{i}}{Precision_{i} + Recall_{i}} \\tag{7}\n$$\n", + "text_format": "latex", + "bbox": [ + 594, + 460, + 904, + 489 + ], + 
"page_idx": 5 + }, + { + "type": "text", + "text": "where, $TP_{i}$ is the true positives, and the number of scenes where the generated meta-actions are $i$ and the ground truth are also $i$ ; $FP_{i}$ is the false positives, and the number of scenes where the generated meta-actions are $i$ but the ground truth are not $i$ ; $FN_{i}$ is the false negatives, and the number of scenes where the generated meta-actions are not $i$ but the ground truth are $i$ .", + "bbox": [ + 512, + 492, + 905, + 588 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To evaluate the overall performance across different meta-actions in the test set, Macro $-F1$ and Weighted $-F1$ scores are introduced. Macro $-F1$ is the unweighted average of $F1$ scores across all meta-actions, while Weighted $-F1$ is the weighted average of $F1$ scores, which are defined as:", + "bbox": [ + 512, + 589, + 905, + 671 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nM a c r o - F 1 = \\frac {1}{K} \\sum_ {i = 1} ^ {K} F 1 _ {i} \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 613, + 680, + 904, + 718 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nW e i g h t e d - F 1 = \\frac {1}{N _ {\\text {t o t a l}}} \\sum_ {i = 1} ^ {K} n _ {i} F 1 _ {i} \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 581, + 727, + 904, + 765 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where, $K$ represents the total number of meta-actions, which is set to 15; and $n_i$ represents the number of scenes where the ground truth meta-action is $i$ .", + "bbox": [ + 512, + 769, + 905, + 810 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To account for the semantic similarity between certain meta-actions, we introduce a PartialMatchScore. 
Specifically, meta-actions involving leftward maneuvers—such as turn left, change lane to the left and shift", + "bbox": [ + 512, + 810, + 905, + 866 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 492, + 888, + 502, + 898 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/7fa8a5818551c36b52d6e493c15ef043dab065c73dc51516b5d7e6dfe7997138.jpg", + "table_caption": [ + "Table 1. Comparison among different baselines and our RAD method" + ], + "table_footnote": [], + "table_body": "
MethodExact Match AccuracyMacro-F1Weighted-F1Partial Match ScoreOverall Score
Lynx (Fine-tuning)[48]0.15240.01670.06530.27680.1327
CogVLM (Fine-tuning)[40]0.21780.02040.11050.35630.1846
DriveLM (on LLaMA-LoRA-BIAS-7B)[36]0.14550.04480.12030.30280.1518
DriveLM (on LLaMA-BIAS-7B)[36]0.18960.04090.12120.34250.1693
DriveLM (on LLaMA-CAPTION-7B)[36]0.20340.03800.10800.39520.1896
GPT-4o (Official API)[21]0.29940.11270.22880.43770.2756
DriveVLM[37]0.37430.16710.33250.54620.3589
DriveVLM-Dual (cooperating with VAD[22])[37]0.40160.18540.35060.56130.3801
RAD (Ours, on Qwen-VL-2.5-7B)0.40960.19070.38130.58700.3956
", + "bbox": [ + 90, + 146, + 904, + 278 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "slightly to the left--are classified under the left group, while analogous rightward actions form the right group. Similarly, meta-actions indicating forward motion at varying speeds are categorized accordingly, with go straight slowly, slow down, and slow down rapidly mapping to the deceleration group, while both speed up and speed up rapidly mapping to acceleration group. Furthermore, unique behaviors such as go straight constantly, turn around, reverse, stop, and drive along the curve are collectively assigned to a separate unique group. If the generated meta-actions and the ground truth meta-actions are not identical but belong to the same semantic group (excluding the unique group), they are considered partially matched. Thus, the semantic similarity $S$ is defined as follows:", + "bbox": [ + 89, + 300, + 482, + 494 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\nS (i, \\hat {i}) = \\left\\{ \\begin{array}{l l} 1, & \\text {i f} \\hat {i} \\text {i s t h e s a m e a s} i. \\\\ 0. 5, & \\text {i f} \\hat {i} \\text {p a r t i a l l y m a t c h e s} i. \\\\ 0, & \\text {i f} \\hat {i} \\text {t o t a l l y d i f f e r s f r o m} i. \\end{array} \\right. 
\\tag{10}\n$$\n", + "text_format": "latex", + "bbox": [ + 127, + 502, + 480, + 552 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where, $i$ is the ground truth meta-action in one scene; and $\\hat{i}$ is the generated meta-action.", + "bbox": [ + 89, + 562, + 482, + 590 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Then, the average PartialMatchScore is obtained by averaging across all scenes:", + "bbox": [ + 89, + 590, + 482, + 619 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\text{PartialMatchScore} = \\frac{1}{N_{\\text{total}}} \\sum_{k=1}^{N_{\\text{total}}} S\\left(i_{k}, \\hat{i_{k}}\\right) \\tag{11}\n$$\n", + "text_format": "latex", + "bbox": [ + 115, + 626, + 482, + 665 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Finally, different weights are assigned to each metric to derive the comprehensive scoring formula OverallScore:", + "bbox": [ + 89, + 671, + 482, + 700 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} OverallScore = \\alpha \\cdot ExactMatchAccuracy \\\\ + \\beta \\cdot Macro\\text{-}F1 \\\\ + \\gamma \\cdot Weighted\\text{-}F1 \\\\ + \\delta \\cdot \\text{PartialMatchScore} \\tag{12} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 132, + 708, + 480, + 774 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where, $\\alpha$ is set to 0.4; $\\beta$, $\\gamma$, and $\\delta$ are all set to 0.2, which could be adjusted according to specific tasks.", + "bbox": [ + 89, + 783, + 482, + 812 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3. 
Comparative Experiments", + "text_level": 1, + "bbox": [ + 90, + 818, + 331, + 834 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We evaluate the performance of our proposed RAD framework on Qwen-VL-2.5-7B VLM and compare it against", + "bbox": [ + 89, + 838, + 482, + 866 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "several other state-of-the-art baseline methods: Lynx [48], CogVLM [40], DriveLM [36], GPT-4o [21] and DriveVLM [37]. Table 1 presents a thorough quantitative comparison between our proposed RAD and these baselines across multiple evaluation criteria. Our RAD consistently outperforms all baseline methods, demonstrating clear advantages in meta-action decision-making for autonomous driving. In particular, RAD achieves an ExactMatchAccuracy of 0.4096, substantially outperforming DriveVLM-Dual's 0.4016, and attains an OverallScore of 0.3956 compared to DriveVLM-Dual's 0.3801.", + "bbox": [ + 510, + 300, + 905, + 451 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "A deeper analysis of the remaining metrics further underscores RAD's strengths. Macro - F1, a balanced measure of model performance across all classes, achieves 0.1907, well above DriveVLM-Dual's 0.1854. Meanwhile, Weighted - F1 of 0.3813 indicates its effectiveness in scenarios where class imbalances exist, significantly outperforming all baselines and reflecting RAD's notable capabilities to handle diverse datasets. Also, PartialMatchScore of 0.5870 highlights RAD's fine-grained generative capability, which suggests that RAD not only excels at producing entirely correct answers, but also consistently captures partially correct information, an essential trait for more nuanced or multi-faceted decision-making tasks.", + "bbox": [ + 510, + 456, + 905, + 636 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The poor performance of the baseline methods is mainly due to their lack of task-specific training. 
As a result, these models exhibit limited spatial perception capabilities and poor BEV image comprehension. Additionally, the parameter size constraints and version limitations of the base models used in these baselines hinder their ability to achieve optimal results. However, RAD's superior performance over GPT-4o across all metrics demonstrates the feasibility of specialized VLMs with smaller parameter sizes that rival or even surpass large-scale general-purpose models in complex and domain-specific tasks.", + "bbox": [ + 510, + 640, + 905, + 793 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In summary, the results in Table 1 validate the efficacy and robustness of our RAD model. Through a combination of architectural innovations and targeted training strategies, RAD not only achieves profound performance across multiple metrics but also provides insights into how specialized", + "bbox": [ + 510, + 797, + 905, + 866 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 492, + 888, + 502, + 897 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/e3f388afce5664fe8f861a256039ed2136bb7f8572ca00a2dcbebc387c54e999.jpg", + "table_caption": [ + "Table 2. Ablation studies on fine-tuning VLMs and RAG pipeline" + ], + "table_footnote": [], + "table_body": "
VLMsMethodExact Match AccuracyMacro-F1Weighted-F1Partial Matching ScoreOverall Score
Qwen-VL-2-2B[9]Vanilla0.21880.03580.10130.43530.2020
Vanilla + RAG0.21450.10490.22780.43190.2387
Fine-tuning0.15430.05280.11940.30170.1565
Fine-tuning + RAG0.26100.13020.25560.45380.2723
Qwen-VL-2-7B[9]Vanilla0.28660.06540.17210.49410.2609
Vanilla + RAG0.34040.14600.32350.54240.3385
Fine-tuning0.29080.07170.19860.45620.2616
Fine-tuning + RAG0.34460.14600.30110.52130.3315
Qwen-VL-2.5-3B[2]Vanilla0.13180.03660.09550.38860.1568
Vanilla + RAG0.12400.02980.08140.38660.1491
Fine-tuning0.21640.05310.13980.39490.2041
Fine-tuning + RAG0.25390.10750.20900.45200.2552
Qwen-VL-2.5-7B[2]Vanilla0.28490.06440.17150.48930.2590
Vanilla + RAG0.35810.19810.33860.55440.3615
Fine-tuning0.34820.10850.28850.53600.3259
Fine-tuning + RAG0.40960.19070.38130.58700.3956
", + "bbox": [ + 92, + 146, + 905, + 368 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "VLMs can excel in intricate autonomous driving tasks.", + "bbox": [ + 90, + 391, + 453, + 405 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.4. Ablation Studies", + "text_level": 1, + "bbox": [ + 90, + 414, + 257, + 429 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In our ablation studies, we mainly investigate the impacts of fine-tuning VLMs and RAG pipeline for spatial perception enhancement based on Qwen-VL-2-2B [9], Qwen-VL-2-7B [9], Qwen-VL-2.5-3B [2] and Qwen-VL-2.5-7B [2] models. The performance of VLMs is evaluated using four distinct methods: vanilla (no fine-tuning), vanilla combined with RAG, only fine-tuning, and fine-tuning combined with RAG (our proposed RAD method).", + "bbox": [ + 89, + 436, + 482, + 546 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The results presented in Table 2 indicate that the combination of fine-tuning and RAG consistently achieves the highest scores across all evaluation metrics, including ExactMatchAccuracy, Macro - F1, Weighted - F1, PartialMatchScore, and OverallScore, for all model variants. Specifically, for Qwen-VL-2.5-7B, our RAD method achieves the highest OverallScore of 0.3956, marking a significant improvement over methods that deploy either fine-tuning or RAG separately. Furthermore, the incorporation of RAG consistently enhances performance for both vanilla and fine-tuned settings across most model scales, validating the effectiveness of retrieval-augmented strategies in improving model performance.", + "bbox": [ + 89, + 548, + 482, + 727 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Notably, for smaller models such as Qwen-VL-2-2B and Qwen-VL-2.5-3B, employing only fine-tuning leads to performance degradation, suggesting that their limited parameter sizes hinder effective learning of domain-specific knowledge through fine-tuning alone. 
Additionally, for Qwen-VL-2.5-3B model, using RAG without fine-tuning results in a performance drop, likely due to the unique pre-training characteristics of this model. Overall, while fine-tuning or RAG independently can enhance performance in larger-scale models, the best results are consistently achieved by", + "bbox": [ + 89, + 728, + 482, + 866 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "combining these two strategies, underscoring the importance of an integrated approach to maximize VLM effectiveness. From a practical perspective, the combination of fine-tuning and RAG proves particularly suitable for enhancing decision-making capabilities in VLMs. Deploying this optimal configuration can substantially improve VLM performance, with potential applications extending to semantic comprehension, trajectory planning, and other complex autonomous driving tasks.", + "bbox": [ + 510, + 391, + 905, + 516 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 512, + 527, + 633, + 542 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this work, we propose a RAD framework, a novel retrieval-augmented architecture designed to enhance the meta-action decision-making capabilities of VLMs for autonomous driving. Through the integration of fine-tuning VLMs for spatial perception enhancement and BEV image comprehension, RAD effectively enhances VLMs' capability of meta-action decision-making, ensuring higher accuracy, as demonstrated by notable performance gains across key metrics in extensive experimental evaluations.", + "bbox": [ + 510, + 551, + 905, + 675 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Moving forward, we aim to extend RAD in three key directions. First, we plan to incorporate more diverse and fine-grained datasets beyond the NuScenes dataset, encompassing more challenging corner cases and real-world scenarios, to further enhance model robustness. 
Second, we seek to generalize the RAD framework to additional driving tasks, especially trajectory planning and motion control. Third, integrating chain-of-thought and reinforcement learning into the framework will be crucial for improving decision-making depth and adaptability. While fine-tuning and RAG will remain essential for enhancing VLM generalization, these advancements will strengthen the robustness and reliability of autonomous driving systems by leveraging RAG methods to tackle complex real-world tasks.", + "bbox": [ + 510, + 676, + 907, + 866 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 492, + 888, + 504, + 898 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 92, + 124, + 186, + 137 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Jinze Bai, Shuai Bai, Yunfei Chu, Zeyu Cui, Kai Dang, Xiaodong Deng, Yang Fan, Wenbin Ge, Yu Han, Fei Huang, Binyuan Hui, Luo Ji, Mei Li, Junyang Lin, Runji Lin, Dayiheng Liu, Gao Liu, Chengqiang Lu, Keming Lu, Jianxin Ma, Rui Men, Xingzhang Ren, Xuancheng Ren, Chuanqi Tan, Sinan Tan, Jianhong Tu, Peng Wang, Shijie Wang, Wei Wang, Shengguang Wu, Benfeng Xu, Jin Xu, An Yang, Hao Yang, Jian Yang, Shusheng Yang, Yang Yao, Bowen Yu, Hongyi Yuan, Zheng Yuan, Jianwei Zhang, Xingxuan Zhang, Yichang Zhang, Zhenru Zhang, Chang Zhou, Jingren Zhou, Xiaohuan Zhou, and Tianhang Zhu. Qwen technical report. arXiv preprint arXiv:2309.16609, 2023. 4", + "[2] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, Humen Zhong, Yuanzhi Zhu, Mingkun Yang, Zhaohai Li, Jianqiang Wan, Pengfei Wang, Wei Ding, Zheren Fu, Yiheng Xu, Jiabo Ye, Xi Zhang, Tianbao Xie, Zesen Cheng, Hang Zhang, Zhibo Yang, Haiyang Xu, and Junyang Lin. Qwen2.5-v1 technical report. arXiv preprint arXiv:2502.13923, 2025. 
4, 8", + "[3] Holger Caesar, Varun Bankiti, Alex H Lang, Sourabh Vora, Venice Erin Liong, Qiang Xu, Anush Krishnan, Yu Pan, Giancarlo Baldan, and Oscar Beijbom. Nuscenes: A multimodal dataset for autonomous driving. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11621-11631, 2020. 2", + "[4] Tianhui Cai, Yifan Liu, Zewei Zhou, Haoxuan Ma, Seth Z Zhao, Zhiwen Wu, and Jiaqi Ma. Driving with regulation: Interpretable decision-making for autonomous vehicles with retrieval-augmented reasoning via llm. arXiv preprint arXiv:2410.04759, 2024. 2, 3", + "[5] Boyuan Chen, Zhuo Xu, Sean Kirmani, Brain Ichter, Dorsa Sadigh, Leonidas Guibas, and Fei Xia. Spatialvlm: Endowing vision-language models with spatial reasoning capabilities. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14455-14465, 2024. 4", + "[6] Long Chen, Oleg Sinavski, Jan Hunermann, Alice Karnsund, Andrew James Willmott, Danny Birch, Daniel Maund, and Jamie Shotton. Driving with llms: Fusing object-level vector modality for explainable autonomous driving. In Proceedings of the IEEE International Conference on Robotics and Automation, pages 14093-14100, 2024. 3", + "[7] Li Chen, Penghao Wu, Kashyap Chitta, Bernhard Jaeger, Andreas Geiger, and Hongyang Li. End-to-end autonomous driving: Challenges and frontiers. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2024. 1", + "[8] Pranav Singh Chib and Pravendra Singh. Recent advancements in end-to-end autonomous driving using deep learning: A survey. IEEE Transactions on Intelligent Vehicles, 9 (1):103-118, 2023. 1", + "[9] Yunfei Chu, Jin Xu, Qian Yang, Haojie Wei, Xipin Wei, Zhifang Guo, Yichong Leng, Yuanjun Lv, Jinzheng He, Junyang Lin, Chang Zhou, and Jingren Zhou. Qwen2-audio technical report. arXiv preprint arXiv:2407.10759, 2024. 
4, 8", + "[10] Can Cui, Yunsheng Ma, Xu Cao, Wenqian Ye, and Ziran" + ], + "bbox": [ + 92, + 147, + 482, + 866 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Wang. Receive, reason, and react: Drive as you say, with large language models in autonomous vehicles. IEEE Intelligent Transportation Systems Magazine, 16(4):81-94, 2024. 3", + "[11] Can Cui, Yunsheng Ma, Zichong Yang, Yupeng Zhou, Peiran Liu, Juanwu Lu, Lingxi Li, Yaobin Chen, Jitesh H. Panchal, Amr Abdelraouf, Rohit Gupta, Kyungtae Han, and Ziran Wang. Large language models for autonomous driving (llm4ad): Concept, benchmark, experiments, and challenges. arXiv preprint arXiv:2410.15281, 2024. 1", + "[12] Can Cui, Zichong Yang, Yupeng Zhou, Juntong Peng, Sung-Yeon Park, Cong Zhang, Yunsheng Ma, Xu Cao, Wenqian Ye, Yiheng Feng, Jitesh H. Panchal, Lingxi Li, Yaobin Chen, and Ziran Wang. On-board vision-language models for personalized autonomous vehicle motion control: System design and real-world validation. arXiv preprint arXiv:2411.11913, 2024. 3", + "[13] Daocheng Fu, Xin Li, Licheng Wen, Min Dou, Pinlong Cai, Botian Shi, and Yu Qiao. Drive like a human: Rethinking autonomous driving with large language models. In Proceedings of the Winter Conference on Applications of Computer Vision Workshops, pages 910-919. IEEE, 2024. 1, 3", + "[14] Yunfan Gao, Yun Xiong, Xinyu Gao, Kangxiang Jia, Jinliu Pan, Yuxi Bi, Yi Dai, Jiawei Sun, and Haofen Wang. Retrieval-augmented generation for large language models: A survey. arXiv preprint arXiv:2312.10997, 2023. 2", + "[15] Sorin Grigorescu, Bogdan Trasnea, Tiberiu Cocias, and Gigel Macesanu. A survey of deep learning techniques for autonomous driving. Journal of Field Robotics, 37(3):362-386, 2020. 1", + "[16] Xu Han, Zonglin Meng, Xin Xia, Xishun Liao, Brian Yueshuai He, Zhaoliang Zheng, Yutong Wang, Hao Xiang, Zewei Zhou, Letian Gao, Lili Fan, Yuke Li, and Jiaqi Ma. 
Foundation intelligence for smart infrastructure services in transportation 5.0. IEEE Transactions on Intelligent Vehicles, 9(1):39-47, 2024. 1", + "[17] Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. Lora: Low-rank adaptation of large language models. In Proceedings of the International Conference on Learning Representations, 2022. 4", + "[18] Yihan Hu, Jiazhi Yang, Li Chen, Keyu Li, Chonghao Sima, Xizhou Zhu, Siqi Chai, Senyao Du, Tianwei Lin, Wenhai Wang, et al. Planning-oriented autonomous driving. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 17853-17862, 2023. 1", + "[19] Yidong Huang, Jacob Sansom, Ziqiao Ma, Felix Gervits, and Joyce Chai. Drivlme: Enhancing llm-based autonomous driving agents with embodied and social experiences. In Proceedings of the IEEE/RSJ International Conference on Intelligent Robots and Systems, pages 3153-3160. IEEE, 2024.", + "[20] Mohamed Manzour Hussien, Angie Nataly Melo, Augusto Luis Ballardini, Carlota Salinas Maldonado, Ruben Izquierdo, and Miguel Angel Sotelo. Rag-based explainable prediction of road users behaviors for automated driving using knowledge graphs and large language models. Expert Systems with Applications, 265:125914, 2025. 2, 3" + ], + "bbox": [ + 515, + 127, + 904, + 866 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 492, + 888, + 504, + 898 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[21] Raisa Islam and Owana Marzia Moushi. Gpt-4o: The cutting-edge advancement in multimodal llm. Authorea Preprints, 2024. 7", + "[22] Bo Jiang, Shaoyu Chen, Qing Xu, Bencheng Liao, Jiajie Chen, Helong Zhou, Qian Zhang, Wenyu Liu, Chang Huang, and Xinggang Wang. Vad: Vectorized scene representation for efficient autonomous driving. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 8340-8350, 2023. 
1, 7", + "[23] Bo Jiang, Shaoyu Chen, Benchcheng Liao, Xingyu Zhang, Wei Yin, Qian Zhang, Chang Huang, Wenyu Liu, and Xinggang Wang. Senna: Bridging large vision-language models and end-to-end autonomous driving. arXiv preprint arXiv:2410.22313, 2024. 3", + "[24] Zhengbao Jiang, Frank F Xu, Luyu Gao, Zhiqing Sun, Qian Liu, Jane Dwivedi-Yu, Yiming Yang, Jamie Callan, and Graham Neubig. Active retrieval augmented generation. In Proceedings of the Conference on Empirical Methods in Natural Language Processing, pages 7969-7992, 2023. 3", + "[25] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. In Proceedings of the International Conference on Machine Learning, pages 19730-19742, 2023. 2", + "[26] Yixuan Li, Xuesong Wang, Tianyi Wang, and Qian Liu. Characteristics analysis of autonomous vehicle pre-crash scenarios. arXiv preprint arXiv:2502.20789, 2025. 1", + "[27] Zhiqi Li, Wenhai Wang, Hongyang Li, Enze Xie, Chonghao Sima, Tong Lu, Qiao Yu, and Jifeng Dai. Bevformer: Learning bird's-eye-view representation from lidar-camera via spatiotemporal transformers. IEEE Transactions on Pattern Analysis and Machine Intelligence, 47(3):2020-2036, 2025. 5", + "[28] Yunsheng Ma, Wenqian Ye, Can Cui, Haiming Zhang, Shuo Xing, Fucai Ke, Jinhong Wang, Chenglin Miao, Jintai Chen, Hamid Rezatofighi, Zhen Li, Guangtao Zheng, Chao Zheng, Tianjiao He, Manmohan Chandraker, Burhaneddin Yaman, Xin Ye, Hang Zhao, and Xu Cao. Position: Prospective of autonomous driving - multimodal llms world models embodied intelligence ai alignment and mamba. In Proceedings of the Winter Conference on Applications of Computer Vision Workshops, pages 1010-1026, 2025. 1", + "[29] Jiageng Mao, Yuxi Qian, Junjie Ye, Hang Zhao, and Yue Wang. Gpt-driver: Learning to drive with gpt. arXiv preprint arXiv:2310.01415, 2023. 1", + "[30] Xiangru Mu, Tong Qin, Songan Zhang, Chunjing Xu, and Ming Yang. 
Pix2planning: End-to-end planning by vision-language model for autonomous driving on carla simulator. In Proceedings of the IEEE Intelligent Vehicles Symposium, pages 2383-2390. IEEE, 2024. 3", + "[31] Chenbin Pan, Burhaneddin Yaman, Tommaso Nesti, Abhirup Mallik, Alessandro G Allievi, Senem Velipasalar, and Liu Ren. Vlp: Vision language planning for autonomous driving. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14760-14769, 2024. 1, 3", + "[32] Ori Ram, Yoav Levine, Itay Dalmedigos, Dor Muhlgay, Amnon Shashua, Kevin Leyton-Brown, and Yoav Shoham. In" + ], + "bbox": [ + 92, + 126, + 482, + 866 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "context retrieval-augmented language models. Transactions of the Association for Computational Linguistics, 11:1316-1331, 2023. 3", + "[33] Katrin Renz, Long Chen, Ana-Maria Marcu, Jan Hünermann, Benoit Hanotte, Alice Karsund, Jamie Shotton, Elahe Arani, and Oleg Sinavski. Carllava: Vision language models for camera-only closed-loop driving. arXiv preprint arXiv:2406.10165, 2024. 1", + "[34] Hao Shao, Yuxuan Hu, Letian Wang, Guanglu Song, Steven L Waslander, Yu Liu, and Hongsheng Li. Lmdrive: Closed-loop end-to-end driving with large language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 15120-15130, 2024. 1", + "[35] Zhihong Shao, Yeyun Gong, Yelong Shen, Minlie Huang, Nan Duan, and Weizhu Chen. Enhancing retrieval-augmented large language models with iterative retrieval-generation synergy. arXiv preprint arXiv:2305.15294, 2023. 3", + "[36] Chonghao Sima, Katrin Renz, Kashyap Chitta, Li Chen, Hanxue Zhang, Chengen Xie, Jens Beibwenger, Ping Luo, Andreas Geiger, and Hongyang Li. Drivelm: Driving with graph visual question answering. In Proceedings of the European Conference on Computer Vision, pages 256-274. Springer, 2024. 
1, 7", + "[37] Xiaoyu Tian, Junru Gu, Bailin Li, Yicheng Liu, Yang Wang, Zhiyong Zhao, Kun Zhan, Peng Jia, Xianpeng Lang, and Hang Zhao. Drivevm: The convergence of autonomous driving and large vision-language models. arXiv preprint arXiv:2402.12289, 2024. 3, 7", + "[38] Shiyi Wang, Yuxuan Zhu, Zhiheng Li, Yutong Wang, Li Li, and Zhengbing He. Chatgpt as your vehicle co-pilot: An initial attempt. IEEE Transactions on Intelligent Vehicles, 8 (12):4706-4721, 2023. 1", + "[39] Wenhai Wang, Jiangwei Xie, ChuanYang Hu, Haoming Zou, Jianan Fan, Wenwen Tong, Yang Wen, Silei Wu, Hanming Deng, Zhiqi Li, et al. Drivemlm: Aligning multi-modal large language models with behavioral planning states for autonomous driving. arXiv preprint arXiv:2312.09245, 2023. 3", + "[40] Weihan Wang, Qingsong Lv, Wenmeng Yu, Wenyi Hong, Ji Qi, Yan Wang, Junhui Ji, Zhuoyi Yang, Lei Zhao, Song XiXuan, et al. Cogvlm: Visual expert for pretrained language models. Advances in Neural Information Processing Systems, 37:121475-121499, 2024. 7", + "[41] Yangyang Wang and Tianyi Wang. Research on dual-clutch intelligent vehicle infrastructure cooperative control based on system delay prediction of two-lane highway on-ramp merging area. *Automotive Innovation*, 7:588–601, 2024. 1", + "[42] Yujin Wang, Quanfeng Liu, Jiaqi Fan, Jinlong Hong, Hongqing Chu, Mengjian Tian, Bingzhao Gao, and Hong Chen. Rac3: Retrieval-augmented corner case comprehension for autonomous driving with vision-language models. arXiv preprint arXiv:2412.11050, 2024. 2", + "[43] Yi Xu, Yuxin Hu, Zaiwei Zhang, Gregory P Meyer, Siva Karthik Mustikovela, Siddhartha Srinivasa, Eric M Wolff, and Xin Huang. Vlm-ad: End-to-end autonomous" + ], + "bbox": [ + 515, + 127, + 904, + 866 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 888, + 509, + 898 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "driving through vision-language model supervision. 
arXiv preprint arXiv:2412.14446, 2024. 4", + "[44] Shota Yamazaki, Chenyu Zhang, Takuya Nanri, Akio Shigekane, Siyuan Wang, Jo Nishiyama, Tao Chu, and Kohei Yokosawa. Explanation for trajectory planning using multimodal large language model for autonomous driving. arXiv preprint arXiv:2411.09971, 2024. 3", + "[45] Kairui Yang, Zihao Guo, Gengjie Lin, Haotian Dong, Zhao Huang, Yipeng Wu, Die Zuo, Jibin Peng, Ziyuan Zhong, Xin Wang, Qing Guo, Xiaosong Jia, Junchi Yan, and Di Lin. Trajectory-llm: A language-based data generator for trajectory prediction in autonomous driving. In Proceedings of the International Conference on Learning Representations, 2025. 3", + "[46] Jianhao Yuan, Shuyang Sun, Daniel Omeiza, Bo Zhao, Paul Newman, Lars Kunze, and Matthew Gadd. Rag-driver: Generalisable driving explanations with retrieval-augmented in-context learning in multi-modal large language model. arXiv preprint arXiv:2402.10828, 2024. 2, 3", + "[47] Ekim Yurtsever, Jacob Lambert, Alexander Carballo, and Kazuya Takeda. A survey of autonomous driving: Common practices and emerging technologies. IEEE Access, 8: 58443-58469, 2020. 1", + "[48] Yan Zeng, Hanbo Zhang, Jiani Zheng, Jiangnan Xia, Guoqiang Wei, Yang Wei, Yuchen Zhang, Tao Kong, and Ruihua Song. What matters in training a gpt4-style language model with multimodal inputs? In Proceedings of the Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 7930-7957, 2024. 7", + "[49] Miao Zhang, Zhenlong Fang, Tianyi Wang, Qian Zhang, Shuai Lu, Junfeng Jiao, and Tianyu Shi. A cascading cooperative multi-agent framework for on-ramp merging control integrating large language models. arXiv preprint arXiv:2503.08199, 2025. 1", + "[50] Juncheng Zheng, Meiyu Liang, Yang Yu, Yawen Li, and Zhe Xue. Knowledge graph enhanced multimodal transformer for image-text retrieval. In Proceedings of the IEEE International Conference on Data Engineering, pages 70-82. IEEE, 2024. 
3", + "[51] Yaowei Zheng, Richong Zhang, Junhao Zhang, Yanhan Ye, Zheyan Luo, Zhangchi Feng, and Yongqiang Ma. Llamafactory: Unified efficient fine-tuning of $100+$ language models. arXiv preprint arXiv:2403.13372, 2024. 4" + ], + "bbox": [ + 92, + 126, + 482, + 693 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 888, + 507, + 898 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/data/2025/2503_13xxx/2503.13861/24ebc1ba-af7b-4d3e-a5d9-ba11158e223d_model.json b/data/2025/2503_13xxx/2503.13861/24ebc1ba-af7b-4d3e-a5d9-ba11158e223d_model.json new file mode 100644 index 0000000000000000000000000000000000000000..7d1b85206970ae3f3d3055fb0ec061daa18ff4bc --- /dev/null +++ b/data/2025/2503_13xxx/2503.13861/24ebc1ba-af7b-4d3e-a5d9-ba11158e223d_model.json @@ -0,0 +1,2312 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.162, + 0.162, + 0.836, + 0.202 + ], + "angle": 0, + "content": "RAD: Retrieval-Augmented Decision-Making of Meta-Actions with Vision-Language Models in Autonomous Driving" + }, + { + "type": "text", + "bbox": [ + 0.203, + 0.228, + 0.348, + 0.246 + ], + "angle": 0, + "content": "Yujin Wang" + }, + { + "type": "text", + "bbox": [ + 0.225, + 0.246, + 0.333, + 0.261 + ], + "angle": 0, + "content": "Junfeng Jiao" + }, + { + "type": "text", + "bbox": [ + 0.203, + 0.261, + 0.348, + 0.278 + ], + "angle": 0, + "content": "Tongji University" + }, + { + "type": "text", + "bbox": [ + 0.349, + 0.229, + 0.468, + 0.246 + ], + "angle": 0, + "content": "Quanfeng Liu" + }, + { + "type": "text", + "bbox": [ + 0.373, + 0.246, + 0.509, + 0.261 + ], + "angle": 0, + "content": "Hongqing Chu1,*" + }, + { + "type": "text", + "bbox": [ + 0.388, + 0.262, + 0.516, + 0.278 + ], + "angle": 0, + "content": "\\(^{2}\\)Yale University" + }, + { + "type": "text", + "bbox": [ + 0.51, + 0.229, + 0.639, + 0.246 + ], + "angle": 0, + "content": "Zhengxin Jiang" + }, + { + "type": "text", + "bbox": [ + 0.55, + 0.246, + 0.671, + 0.261 + 
], + "angle": 0, + "content": "Bingzhao Gao" + }, + { + "type": "text", + "bbox": [ + 0.71, + 0.229, + 0.793, + 0.246 + ], + "angle": 0, + "content": "nyi Wang2" + }, + { + "type": "text", + "bbox": [ + 0.715, + 0.246, + 0.812, + 0.262 + ], + "angle": 0, + "content": "Hong Chen" + }, + { + "type": "text", + "bbox": [ + 0.558, + 0.262, + 0.792, + 0.278 + ], + "angle": 0, + "content": "3University of Texas at Austin" + }, + { + "type": "text", + "bbox": [ + 0.386, + 0.281, + 0.609, + 0.294 + ], + "angle": 0, + "content": "chuhongqing@tongji.edu.cn" + }, + { + "type": "title", + "bbox": [ + 0.249, + 0.325, + 0.326, + 0.34 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.356, + 0.483, + 0.688 + ], + "angle": 0, + "content": "Accurately understanding and deciding high-level meta- actions is essential for ensuring reliable and safe autonomous driving systems. While vision-language models (VLMs) have shown significant potential in various autonomous driving tasks, they often suffer from limitations such as inadequate spatial perception and hallucination, reducing their effectiveness in complex autonomous driving scenarios. To address these challenges, we propose a retrieval-augmented decision-making (RAD) framework, a novel architecture designed to enhance VLMs' capabilities to reliably generate meta-actions in autonomous driving scenes. RAD leverages a retrieval-augmented generation (RAG) pipeline to dynamically improve decision accuracy through a three-stage process consisting of the embedding flow, retrieving flow, and generating flow. Additionally, we fine-tune VLMs on a specifically curated dataset derived from the NuScenes dataset to enhance their spatial perception and bird's-eye view image comprehension capabilities. 
Extensive experimental evaluations on the curated NuScenes-based dataset demonstrate that RAD outperforms baseline methods across key evaluation metrics, including match accuracy, and F1 score, and self-defined overall score, highlighting its effectiveness in improving meta-action decision-making for autonomous driving tasks." + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.717, + 0.222, + 0.731 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.741, + 0.483, + 0.838 + ], + "angle": 0, + "content": "In recent years, the race towards fully autonomous vehicles has spurred extensive research into robust decision-making approaches, a fundamental task in autonomous driving systems [26, 41, 49]. Ensuring safe and efficient motion planning requires continuous interpretation of dynamic environments, real-time reasoning under uncertainty, and efficient integration of vast amounts of multimodal data [28]." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.839, + 0.484, + 0.868 + ], + "angle": 0, + "content": "Traditional autonomous driving systems adopt a modular development strategy, in which perception, prediction," + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.327, + 0.905, + 0.437 + ], + "angle": 0, + "content": "planning, and control are developed and optimized independently before being integrated into the vehicle system [15, 47]. However, as the information flow propagates across these modules, errors and delays can accumulate, potentially leading to suboptimal or even unreasonable driving decisions. To further mitigate these errors and improve computational efficiency, end-to-end autonomous driving has emerged as a prominent research direction [7, 8]." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.438, + 0.906, + 0.686 + ], + "angle": 0, + "content": "End-to-end refers to a model that directly receives input from sensor data (e.g., cameras, LiDAR) and directly outputs vehicle planning decisions. 
In recent studies [11, 18, 22], end-to-end autonomous driving algorithms have demonstrated their superiority in both simulation environments and real-world road tests. Moreover, the emergence of foundation models provides a promising solution to enhance motion planning performance, improve generalization across diverse scenarios, and increase interpretability in end-to-end autonomous driving [13, 16, 29, 38]. Trained on huge amounts of human knowledge, these models exhibit advanced comprehension and reasoning capabilities, highlighting the immense potential of artificial intelligence in complex decision-making tasks. Integrating such foundation models into autonomous driving systems could facilitate the development of human-like driving behaviors, advancing the field toward safer and more adaptable autonomous vehicles." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.687, + 0.906, + 0.839 + ], + "angle": 0, + "content": "Autonomous driving tasks require models with robust visual perception capabilities, making vision-language models (VLMs) particularly well-suited for this domain. VLMs trained on large-scale data often demonstrate strong reasoning capabilities, enabling them to infer the evolution of complex driving scenarios. Current research [19, 31, 33, 34, 36] has focused on fine-tuning pre-trained VLMs using visual question-answer (VQA) pairs composed of scene images and corresponding driving actions. This approach enables VLMs to generate feasible trajectories, enhancing their applicability in real-world autonomous driving tasks." 
+ }, + { + "type": "text", + "bbox": [ + 0.513, + 0.839, + 0.906, + 0.868 + ], + "angle": 0, + "content": "However, fine-tuning or even full-scale fine-tuning of VLMs using large-scale datasets requires substantial com" + }, + { + "type": "page_number", + "bbox": [ + 0.495, + 0.889, + 0.503, + 0.899 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.139, + 0.131, + 0.862, + 0.384 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.396, + 0.908, + 0.462 + ], + "angle": 0, + "content": "Figure 1. The overview of our RAD method. The framework consists of four working flows, namely embedding flow, retrieving flow, fine-tuning flow and generating flow. The embedding flow encodes front-view images and BEV images into a vector database. Given a query scene, the retrieving flow retrieves the most similar scene from the database. The fine-tuning flow involves fine-tuning VLMs to enhance spatial perception and BEV image comprehension. The generating flow guides VLMs in generating contextually appropriate meta-actions according to the query scene, the retrieved scene, its ground truth meta-action, and proper prompts." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.484, + 0.486, + 0.748 + ], + "angle": 0, + "content": "putational resources. Additionally, deploying VLMs with an extremely large number of parameters on vehicle-end hardware poses significant constraints. To address these challenges, retrieval-augmented generation (RAG) has emerged as a promising approach to enhance the decision-making capabilities of VLMs by incorporating external knowledge bases [14, 42]. The core idea of RAG is to augment generative models with a retrieval module that dynamically retrieves relevant textual information during the generation process. In vision-language tasks, RAG can effectively mitigate limitations caused by knowledge scarcity. 
By integrating external knowledge bases, models can not only extract information from images but also retrieve supplementary knowledge, thereby improving the robustness and accuracy of the generated outputs. Although the direct application of RAG to the decision-making process in autonomous driving remains limited, an increasing number of studies have explored its potential in specific tasks such as scene understanding and regulation retrieval [4, 20, 46]." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.752, + 0.485, + 0.821 + ], + "angle": 0, + "content": "In this work, we propose a retrieval-augmented decision-making (RAD) framework, introducing a novel approach to assist VLMs in generating meta-actions using RAG for the first time, as depicted in Figure 1. The main research contributions of this work are outlined as follows:" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.825, + 0.485, + 0.868 + ], + "angle": 0, + "content": "- Pre-Training VLMs for Spatial Perception Tasks: We construct obstacle perception tasks based on the NuScenes dataset [3], incorporating VQA pairs designed" + }, + { + "type": "text", + "bbox": [ + 0.526, + 0.484, + 0.907, + 0.553 + ], + "angle": 0, + "content": "to capture obstacle categories, positions, and other spatial information. This pre-training process enables VLMs to explicitly learn key geometric features such as the locations and sizes of obstacles, leading to improved performance in spatial perception tasks." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.553, + 0.909, + 0.678 + ], + "angle": 0, + "content": "- Establishing an External Knowledge Base with NuScenes Ground Truth Data: We select a subset of scenes containing navigation information, historical trajectory data, and future meta-action ground truth. Furthermore, we generate bird's-eye view (BEV) images corresponding to the scene images. 
The surround-view images from these scenes are then encoded into vector representations using BLIP-2 [25], alongside the BEV images, to form the knowledge base." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.678, + 0.909, + 0.803 + ], + "angle": 0, + "content": "- Developing a Retrieval and Generation Pipeline for Meta-Action Decision-Making using Fine-Tuned VLMs and RAG: We employ cosine similarity to retrieve the most similar scene from the external knowledge base including the front-view image of the current scene. The corresponding six surround-view images, speed information, navigation data, and ground truth trajectory are then used as auxiliary inputs, guiding the VLM in generating a trustworthy planning trajectory for the current scene." + }, + { + "type": "list", + "bbox": [ + 0.513, + 0.553, + 0.909, + 0.803 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.812, + 0.909, + 0.868 + ], + "angle": 0, + "content": "The remainder of this paper is organized as follows: In Section 2, the detailed literature review is conducted. In Section 3, four working flows of the proposed RAD framework are introduced. In Section 4, comparative experiments" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.889, + 0.505, + 0.899 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.092, + 0.127, + 0.484, + 0.154 + ], + "angle": 0, + "content": "and ablation studies are designed. Section 5 summarizes the work and discusses future research directions." + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.17, + 0.242, + 0.185 + ], + "angle": 0, + "content": "2. Related Works" + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.194, + 0.484, + 0.224 + ], + "angle": 0, + "content": "2.1. 
Multimodal Large Language Models in Autonomous Driving" + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.228, + 0.486, + 0.755 + ], + "angle": 0, + "content": "Utilizing multimodal large language models (MLLMs) in autonomous driving enhances decision-making by leveraging their extensive knowledge and reasoning capabilities through multisource information such as vision, language, and rules, significantly improving scene understanding, strategy generation, and interpretability. DriveMLM [39] employed MLLMs to generate high-level behavioral decisions (e.g., lane-changing, deceleration, acceleration, etc.), which were then integrated with traditional motion planning modules, balancing flexibility and interpretability. \"Drive as you speak\" [10] enriched large language models (LLMs) with comprehensive environmental data from different vehicle modules, leading to safer decisions. \"Driving with LLMs\" [6] introduced a LLM that generated 10,000 driving scenarios for agent training. \"Drive like a human\" [13] demonstrated LLMs' capabilities of understanding and interacting with environments in closed-loop systems, effectively navigating long-tail autonomous driving scenarios. DriveVLM [37] adopted a multistage reasoning chain that combined scene description, dynamic analysis, and hierarchical planning. Additionally, DriveVLM-Dual incorporated traditional 3D perception algorithms to ensure both cognitive depth and real-time control. Pix2Planning [30] formulated planning as an autoregressive sequence prediction problem, using a vision-language Transformer to generate trajectory points. VLP [31] incorporated linguistic descriptions into the training process and aligned them with visual features, significantly improving cross-city and cross-scenario generalization. To enhance interpretability, some studies [44, 45] introduced \"future trajectory images\", which were processed by multimodal models to generate natural language explanations. 
Senna [23] further refined the decision-making process by separating high-level meta-actions from low-level trajectory predictions. In this framework, VLMs first produced directional or speed-level decisions before end-to-end models executed precise paths, thereby achieving a hierarchical strategy that was similar to human driving behaviors." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.757, + 0.484, + 0.868 + ], + "angle": 0, + "content": "However, these methods are prone to hallucination, a limitation arising from the reliance of MLLMs on learned associations between visual inputs and language-based reasoning. As a result, they may misinterpret ambiguous or occluded objects, leading to incorrect high-level decision-making. This issue becomes particularly critical in long-tail scenarios, where the model encounters rare or underrepresented driving conditions not well-covered in the training" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.127, + 0.907, + 0.155 + ], + "angle": 0, + "content": "data. Such misinterpretations can ultimately compromise the reliability and safety of the autonomous driving system." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.167, + 0.907, + 0.196 + ], + "angle": 0, + "content": "2.2. Retrieval-Augmented Generation in Vision-Language Models" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.201, + 0.908, + 0.797 + ], + "angle": 0, + "content": "In vision-language tasks, RAG mitigates knowledge limitations by leveraging external knowledge bases, enabling models to extract insights from images while supplementing them with retrieved contextual data. This dual approach significantly helps mitigate model hallucination and improve planning accuracy. Jiang et al. [24] introduced a RAG-based framework for VLMs, demonstrating its effectiveness in complex tasks requiring extensive background knowledge. 
Their study underscored the limitations of conventional end-to-end VLMs when faced with knowledge deficiencies, whereas RAG facilitated richer contextual integration, enhancing both reasoning and generation. Building on this, Shao et al. [35] further investigated RAG's role in VQA tasks, showing that combining retrieval mechanisms with pre-trained VLMs significantly strengthened model performance in complex reasoning scenarios. Additionally, Ram et al. [32] examined RAG's impact on pre-training and fine-tuning, illustrating that incorporating large-scale external data sources during pre-training improved downstream performance by enhancing cross-modal reasoning, particularly in retrieval-based tasks. Meanwhile, Zheng et al. [50] emphasized RAG's broader advantages, particularly in improving generative flexibility and adaptability in multimodal tasks. Their findings highlighted RAG's effectiveness in handling scenarios lacking sufficient annotations or domain-specific knowledge, reinforcing its potential in bridging knowledge gaps for more informed and context-aware model outputs. Hussien et al. [20] illustrated how RAG-augmented VLMs enhanced cross-modal retrieval, particularly by strengthening associations between images and textual data. For performance optimization, Yuan et al. [46] introduced a dynamic knowledge retrieval mechanism, emphasizing real-time adjustments in retrieval and generation processes based on task-specific requirements. This adaptive approach allowed RAG to selectively retrieve the most relevant background knowledge, improving performance across various multimodal applications. Cai et al. [4] developed a traffic regulation retrieval agent based on RAG, enabling automatic retrieval of relevant traffic rules and guidelines based on the ego vehicle's status. Moreover, Cui et al. [12] incorporated a RAG-based memory module that continuously learned takeover preferences through human feedback to enhance motion planning." 
+ }, + { + "type": "text", + "bbox": [ + 0.513, + 0.798, + 0.909, + 0.869 + ], + "angle": 0, + "content": "Despite its strong potential, research on directly utilizing RAG to guide VLMs in meta-action decision-making remains limited. To address this gap, we propose the RAD framework, which, for the first time, integrates RAG with pre-training for spatial perception capabilities, enabling" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.889, + 0.505, + 0.899 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.091, + 0.127, + 0.411, + 0.14 + ], + "angle": 0, + "content": "more effective decision-making of meta-actions." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.153, + 0.226, + 0.169 + ], + "angle": 0, + "content": "3. Methodology" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.177, + 0.484, + 0.522 + ], + "angle": 0, + "content": "As shown in Figure 1, the proposed RAD framework comprises four work flows: embedding flow, retrieving flow, fine-tuning flow and generating flow. Among these, the fine-tuning flow operates independently, as its primary objective is to enhance the spatial perception capabilities of VLMs through separate fine-tuning. In the embedding flow, BEV images are generated to correspond with front-view scene images from the NuScenes dataset. These image pairs are encoded into a vector space using a frozen BLIP-2 model and the separate embeddings are then concatenated and stored in a vector database. In the retrieving flow, a new front-view image and its corresponding BEV image serve as a query. These images are encoded into the vector space using the same frozen BLIP-2 model. Cosine similarity is then computed between the query images and those stored in the database, enabling the retrieval of the most similar scene from the database. 
Furthermore, based on the relative positional relationships between consecutive scenes in the NuScenes dataset, the ground truth meta-actions executed in each scene can be extracted. Finally, in the generating flow, the query scene, retrieved scene, its ground truth meta-action, and proper prompts serve as inputs to the VLMs. These inputs guide the model to make decisions and generate meta-actions, ensuring more accurate and context-aware autonomous driving behaviors." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.522, + 0.456, + 0.535 + ], + "angle": 0, + "content": "All the extracted meta-actions are shown as follows:" + }, + { + "type": "text", + "bbox": [ + 0.106, + 0.546, + 0.415, + 0.56 + ], + "angle": 0, + "content": "(1) Speed up (rapidly) (2) Slow down (rapidly)" + }, + { + "type": "text", + "bbox": [ + 0.107, + 0.56, + 0.42, + 0.573 + ], + "angle": 0, + "content": "(3) Turn left/right (4) Drive along the curve" + }, + { + "type": "text", + "bbox": [ + 0.107, + 0.573, + 0.46, + 0.586 + ], + "angle": 0, + "content": "(5) Turn around (6) Change lane to the left/right" + }, + { + "type": "text", + "bbox": [ + 0.107, + 0.586, + 0.463, + 0.599 + ], + "angle": 0, + "content": "(7) Reverse (8) Shift slightly to the left/right" + }, + { + "type": "text", + "bbox": [ + 0.107, + 0.599, + 0.474, + 0.613 + ], + "angle": 0, + "content": "(9) Stop (10) Go straight constantly/slowly" + }, + { + "type": "list", + "bbox": [ + 0.106, + 0.546, + 0.474, + 0.613 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.639, + 0.298, + 0.654 + ], + "angle": 0, + "content": "3.1. The Fine-Tuning Flow" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.66, + 0.483, + 0.839 + ], + "angle": 0, + "content": "Making precise meta-action decisions in autonomous driving requires an accurate understanding of the environment. 
If a model lacks sufficient spatial perception capabilities, it may fail to construct a reliable environmental representation, potentially leading to obstacle avoidance failures in meta-action decision-making. VLMs typically rely on monocular or surround-view camera inputs and estimate depth information from single-frame images. However, in long-trail scenarios, monocular vision exhibits significant depth estimation errors [5]. Experimental results on the NuScenes dataset indicate that existing VLMs generally lack robust spatial perception, which severely impacts the safety of decision-making and motion control [43]." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.839, + 0.484, + 0.868 + ], + "angle": 0, + "content": "To address the aforementioned challenges, VLMs should first undergo fine-tuning to enhance their spatial perception" + }, + { + "type": "image", + "bbox": [ + 0.522, + 0.126, + 0.903, + 0.235 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.246, + 0.905, + 0.271 + ], + "angle": 0, + "content": "Figure 2. The process of generating a dataset for spatial perception enhancement based on the NuScenes dataset" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.296, + 0.906, + 0.379 + ], + "angle": 0, + "content": "capabilities. The structure of VLMs typically consists of a vision encoder and an LLM. In this work, we focus on fine-tuning only the LLM component to enhance its spatial perception. We utilize the NuScenes dataset to generate a specified dataset for spatial perception enhancement, following the process illustrated in Figure 2." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.379, + 0.906, + 0.475 + ], + "angle": 0, + "content": "During the image filtering process, it is necessary to ensure the uniqueness of the VQA pairs by cross-referencing the annotated data from the origin NuScenes dataset. 
The generated dataset for fine-tuning includes over 100,000 training samples, covering key spatial perception tasks such as object class recognition, object distance estimation and object size estimation." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.476, + 0.905, + 0.504 + ], + "angle": 0, + "content": "For spatial perception enhancement fine-tuning, the loss function for a single sample is defined as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.528, + 0.514, + 0.906, + 0.607 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} J = \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\left[ \\lambda_ {1, i} \\left(- \\sum_ {c = 1} ^ {n} y _ {c, i} \\log \\left(p _ {c, i}\\right)\\right) \\right. \\\\ \\left. + \\lambda_ {2, i} \\left(- \\frac {1}{3} \\sum_ {j = 1} ^ {3} \\left(z _ {j, i} - z _ {j, i} ^ {*}\\right) ^ {2}\\right) + \\lambda_ {3, i} \\left(x _ {i} - x _ {i} ^ {*}\\right) ^ {2} \\right] \\tag {1} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.618, + 0.906, + 0.825 + ], + "angle": 0, + "content": "where, \\(N\\) is the batch size during fine-tuning; \\(\\lambda_{1,i}\\) is the loss identifier for object class recognition in the \\(i\\)-th sample (if there is a corresponding class, \\(\\lambda_{1,i}\\) will be set to 1; and otherwise, \\(\\lambda_{1,i}\\) will be set to 0); \\(\\lambda_{2,i}\\) is the loss identifier for object size estimation; \\(\\lambda_{3,i}\\) is the loss identifier for object distance estimation; \\(n\\) is the total number of classes in the classification task; \\(y_{c,i}\\) is the label for the \\(i\\)-th sample belonging to class \\(c\\), represented by one-hot encoding; \\(p_{c,i}\\) is the probability of the \\(i\\)-th sample being classified as class \\(c\\) by the model; \\(z_{j,i}\\) is the output size of the \\(i\\)-th sample in the \\(j\\)-th dimension from the model; \\(z_{j,i}^{*}\\) is the ground truth size of the \\(i\\)-th sample in the \\(j\\)-th dimension; \\(x_{i}\\) is the 
model's output for the distance from the \\(i\\)-th sample to the reference object; and \\(x_{i}^{*}\\) is the ground truth distance from the \\(i\\)-th sample to the reference object." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.826, + 0.905, + 0.868 + ], + "angle": 0, + "content": "In this work, we fine-tune a series of VLMs, primarily from the Qwen family [1, 2, 9], using low-rank adaptation (LoRA) [17, 51]. The overall training is conducted for" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.889, + 0.504, + 0.899 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.098, + 0.13, + 0.396, + 0.282 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.406, + 0.134, + 0.886, + 0.278 + ], + "angle": 0, + "content": "System: This image illustrates the BEV view of a driving scene, showing the area of 60 meters ahead, 30 meters behind, 30 meters left and right of the ego vehicle. The units of longitudinal and lateral coordinates are meters. The ego vehicle is located at the center \\([0,0]\\), represented by a blue rectangle. The red rectangles represent the objects of vehicle type, including cars, trucks, etc. If there is an arrow on the red rectangle, it means that it will move in the direction of the arrow. The green dots represent pedestrians, and the green arrows also indicate the moving direction. Black dots are static obstacles, including roadblocks, traffic lights, etc." + }, + { + "type": "text", + "bbox": [ + 0.104, + 0.292, + 0.877, + 0.32 + ], + "angle": 0, + "content": "Question 1: What kind of object (pedestrian, vehicle, or static obstacle) is located within the coordinate [7.6,8.9] in this image?" + }, + { + "type": "text", + "bbox": [ + 0.104, + 0.321, + 0.568, + 0.335 + ], + "angle": 0, + "content": "Answer 1: There is a vehicle located within the coordinate [7.6,8.9]." 
+ }, + { + "type": "text", + "bbox": [ + 0.104, + 0.335, + 0.875, + 0.363 + ], + "angle": 0, + "content": "Question 2: What is the central position coordinate of the left-front static obstacle in this image? The result retains one decimal place after the decimal point." + }, + { + "type": "text", + "bbox": [ + 0.104, + 0.364, + 0.697, + 0.378 + ], + "angle": 0, + "content": "Answer 2: The central position coordinate of the left-front static obstacle is [24.5,17.2]." + }, + { + "type": "text", + "bbox": [ + 0.104, + 0.378, + 0.875, + 0.406 + ], + "angle": 0, + "content": "Question 3: What is the distance from the left-front static obstacle to the left pedestrian in this image? The result retains one decimal place after the decimal point." + }, + { + "type": "text", + "bbox": [ + 0.104, + 0.407, + 0.705, + 0.421 + ], + "angle": 0, + "content": "Answer 3: The distance from the left-front static obstacle to the left pedestrian is \\(16.3\\mathrm{m}\\)" + }, + { + "type": "list", + "bbox": [ + 0.104, + 0.292, + 0.877, + 0.421 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.284, + 0.435, + 0.715, + 0.449 + ], + "angle": 0, + "content": "Figure 3. The fine-tuning VQA paradigm for BEV image understanding" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.473, + 0.484, + 0.611 + ], + "angle": 0, + "content": "three epochs. Additionally, following the BEVFormer [27], we generate BEV images from the existing surround-view images in the NuScenes dataset. Intuitively, incorporating BEV images helps the model better understand the relative spatial relationships of objects in driving scenes. Therefore, it is also necessary to train VLMs to recognize and interpret BEV images effectively. The fine-tuning paradigm, as illustrated in Figure 3, follows a similar approach to the VQA pair construction method based on ground truth information to develop a robust ability to understand BEV images." 
+ }, + { + "type": "title", + "bbox": [ + 0.091, + 0.632, + 0.294, + 0.646 + ], + "angle": 0, + "content": "3.2. The Embedding Flow" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.656, + 0.484, + 0.794 + ], + "angle": 0, + "content": "In the embedding flow, we encode front-view images from the NuScenes dataset along with the pre-generated BEV images into a unified vector space. Since this embedding operation does not involve cross-modal content, the frozen BLIP-2 model weights can be directly utilized, ensuring computational efficiency and consistency. To maintain the one-to-one correspondence between front-view images and BEV images, their embedding vectors are concatenated within this flow. The resulting concatenated vectors are then uniformly stored in an indexed vector database." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.815, + 0.285, + 0.83 + ], + "angle": 0, + "content": "3.3. The Retrieving Flow" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.839, + 0.485, + 0.868 + ], + "angle": 0, + "content": "The core of the retrieving flow lies in the computation of cosine similarity. Given two image embeddings \\(\\mathbf{v}_i\\) and \\(\\mathbf{v}_j\\)," + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.473, + 0.718, + 0.486 + ], + "angle": 0, + "content": "cosine similarity is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.617, + 0.492, + 0.907, + 0.521 + ], + "angle": 0, + "content": "\\[\n\\text {s i m i l a r i t y} _ {i, j} = \\frac {\\mathbf {v} _ {i} \\cdot \\mathbf {v} _ {j}}{\\| \\mathbf {v} _ {i} \\| \\| \\mathbf {v} _ {j} \\|} \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.527, + 0.805, + 0.541 + ], + "angle": 0, + "content": "where, \\(\\| *\\|\\) represents the Euclidean norm." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.542, + 0.907, + 0.803 + ], + "angle": 0, + "content": "The main framework of the retrieving flow is illustrated in Figure 4. 
For a new scene, we first generate its BEV images from the surround-view images. The front-view image and BEV image of the new scene jointly trigger a query scene. The embeddings for the new front-view image and BEV image are then extracted using the frozen BLIP-2 model. Since the vector database stores concatenated embedding vectors, the embeddings for the front-view image and BEV image are retrieved through length decomposition. The cosine similarity between the new front-view image embeddings and those stored in the database is computed and denoted as \\( similarity_{fv} \\). Similarly, the cosine similarity between the new BEV image embeddings and those stored in the database is computed and denoted as \\( similarity_{bev} \\). To flexibly adjust the retrieval preference toward either the front-view image or the BEV image, a hyperparameter \\( \\omega \\) is introduced. In this work, \\( \\omega \\) is set to 0.5 as a balanced weight for retrieval. The overall similarity could be calculated as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.514, + 0.812, + 0.906, + 0.839 + ], + "angle": 0, + "content": "\\[\n\\text {s i m i l a r i t y} = (1 - \\omega) \\cdot \\text {s i m i l a r i t y} _ {f v} + \\omega \\cdot \\text {s i m i l a r i t y} _ {\\text {b e v}} \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.84, + 0.906, + 0.867 + ], + "angle": 0, + "content": "The scene with the highest overall similarity is then retrieved from the vector database. Using its index, we can" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.889, + 0.505, + 0.899 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.091, + 0.126, + 0.483, + 0.154 + ], + "angle": 0, + "content": "obtain the corresponding front-view image, BEV image, and pre-extracted ground truth meta-action." 
+ }, + { + "type": "image", + "bbox": [ + 0.118, + 0.172, + 0.47, + 0.422 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.13, + 0.436, + 0.444, + 0.45 + ], + "angle": 0, + "content": "Figure 4. The main framework of the retrieving flow" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.476, + 0.291, + 0.49 + ], + "angle": 0, + "content": "3.4. The Generating Flow" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.497, + 0.484, + 0.567 + ], + "angle": 0, + "content": "In the generating flow, we primarily employ prompt engineering to guide VLMs in reasoning based on the retrieved scene and its corresponding meta-action, enabling them to make accurate meta-action decisions for the new scene. The prompts should be divided into two key components:" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.569, + 0.483, + 0.597 + ], + "angle": 0, + "content": "- System Prompt: Guide VLMs to make meta-action decisions based on the provided images." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.597, + 0.483, + 0.625 + ], + "angle": 0, + "content": "- RAG-Specific Prompt: Instruct VLMs to understand the retrieved scene images and corresponding meta-actions." + }, + { + "type": "list", + "bbox": [ + 0.091, + 0.569, + 0.483, + 0.625 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.629, + 0.484, + 0.713 + ], + "angle": 0, + "content": "For this process, we primarily use the Qwen series of VLMs, as they support multiple image inputs, making prompt design more flexible and effective. With structured and well-designed prompts, the VLMs analyze the front-view image and BEV image of the current scene, ultimately generating a single meta-action as the final output." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.726, + 0.224, + 0.741 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.75, + 0.283, + 0.765 + ], + "angle": 0, + "content": "4.1. 
Dataset Preparation" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.77, + 0.484, + 0.867 + ], + "angle": 0, + "content": "We divide the 34,000 scenes from the NuScenes dataset into three subsets: 10,000 scenes are allocated for fine-tuning VLMs, focusing on enhancing spatial perception and BEV image understanding; 20,000 scenes are embedded in the vector database as prior information; and the remaining 4,000 scenes serve as the test set, used to evaluate the framework's effectiveness and the model's overall performance." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.126, + 0.697, + 0.139 + ], + "angle": 0, + "content": "4.2. Evaluation Metrics" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.146, + 0.906, + 0.228 + ], + "angle": 0, + "content": "To assess the performance, we employ traditional classification metrics such as accuracy, precision, recall and F1 score. Additionally, we introduce a customized partial match score to account for semantically similar but not entirely identical cases. Finally, we utilize a weighted method to compute a comprehensive performance score." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.229, + 0.906, + 0.27 + ], + "angle": 0, + "content": "We firstly adopt ExactMatchAccuracy to evaluate whether the model provides a fully correct meta-action for a given scene, which is formally defined as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.589, + 0.279, + 0.906, + 0.308 + ], + "angle": 0, + "content": "\\[\nE x a c t M a t c h A c c u r a c y = \\frac {N _ {m a t c h}}{N _ {t o t a l}} \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.315, + 0.906, + 0.355 + ], + "angle": 0, + "content": "where, \\( N_{match} \\) is the number of scenes where the generated meta-actions exactly match the ground truth; and \\( N_{total} \\) is the total number of scenes." 
+ }, + { + "type": "text", + "bbox": [ + 0.513, + 0.356, + 0.906, + 0.384 + ], + "angle": 0, + "content": "For each meta-action, Precision, Recall, and \\( F1 \\) can be used as evaluation metrics, which are defined as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.618, + 0.391, + 0.906, + 0.421 + ], + "angle": 0, + "content": "\\[\nP r e c i s i o n _ {i} = \\frac {T P _ {i}}{T P _ {i} + F P _ {i}} \\tag {5}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.63, + 0.428, + 0.906, + 0.457 + ], + "angle": 0, + "content": "\\[\n\\operatorname {R e c a l l} _ {i} = \\frac {T P _ {i}}{T P _ {i} + F N _ {i}} \\tag {6}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.595, + 0.461, + 0.905, + 0.49 + ], + "angle": 0, + "content": "\\[\nF 1 _ {i} = \\frac {2 \\times \\text {P r e c i s i o n} _ {i} \\times \\text {R e c a l l} _ {i}}{\\text {P r e c i s i o n} _ {i} + \\text {R e c a l l} _ {i}} \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.493, + 0.906, + 0.589 + ], + "angle": 0, + "content": "where, \\( TP_{i} \\) is the true positives, and the number of scenes where the generated meta-actions are \\( i \\) and the ground truth are also \\( i \\); \\( FP_{i} \\) is the false positives, and the number of scenes where the generated meta-actions are \\( i \\) but the ground truth are not \\( i \\); \\( FN_{i} \\) is the false negatives, and the number of scenes where the generated meta-actions are not \\( i \\) but the ground truth are \\( i \\)." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.59, + 0.906, + 0.672 + ], + "angle": 0, + "content": "To evaluate the overall performance across different meta-actions in the test set, Macro \\( -F1 \\) and Weighted \\( -F1 \\) scores are introduced. 
Macro \\( -F1 \\) is the unweighted average of \\( F1 \\) scores across all meta-actions, while Weighted \\( -F1 \\) is the weighted average of \\( F1 \\) scores, which are defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.615, + 0.681, + 0.905, + 0.719 + ], + "angle": 0, + "content": "\\[\nM a c r o - F 1 = \\frac {1}{K} \\sum_ {i = 1} ^ {K} F 1 _ {i} \\tag {8}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.583, + 0.728, + 0.905, + 0.766 + ], + "angle": 0, + "content": "\\[\nW e i g h t e d - F 1 = \\frac {1}{N _ {\\text {t o t a l}}} \\sum_ {i = 1} ^ {K} n _ {i} F 1 _ {i} \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.77, + 0.906, + 0.811 + ], + "angle": 0, + "content": "where, \\(K\\) represents the total number of meta-actions, which is set to 15; and \\(n_i\\) represents the number of scenes where the ground truth meta-action is \\(i\\)." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.812, + 0.906, + 0.868 + ], + "angle": 0, + "content": "To account for the semantic similarity between certain meta-actions, we introduce a PartialMatchScore. Specifically, meta-actions involving leftward maneuvers—such as turn left, change lane to the left and shift" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.889, + 0.504, + 0.899 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.292, + 0.124, + 0.706, + 0.138 + ], + "angle": 0, + "content": "Table 1. Comparison among different baselines and our RAD method" + }, + { + "type": "table", + "bbox": [ + 0.092, + 0.147, + 0.905, + 0.279 + ], + "angle": 0, + "content": "
MethodExact Match AccuracyMacro-F1Weighted-F1Partial Match ScoreOverall Score
Lynx (Fine-tuning)[48]0.15240.01670.06530.27680.1327
CogVLM (Fine-tuning)[40]0.21780.02040.11050.35630.1846
DriveLM (on LLaMA-LoRA-BIAS-7B)[36]0.14550.04480.12030.30280.1518
DriveLM (on LLaMA-BIAS-7B)[36]0.18960.04090.12120.34250.1693
DriveLM (on LLaMA-CAPTION-7B)[36]0.20340.03800.10800.39520.1896
GPT-4o (Official API)[21]0.29940.11270.22880.43770.2756
DriveVLM[37]0.37430.16710.33250.54620.3589
DriveVLM-Dual (cooperating with VAD[22])[37]0.40160.18540.35060.56130.3801
RAD (Ours, on Qwen-VL-2.5-7B)0.40960.19070.38130.58700.3956
" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.301, + 0.484, + 0.495 + ], + "angle": 0, + "content": "slightly to the left--are classified under the left group, while analogous rightward actions form the right group. Similarly, meta-actions indicating forward motion at varying speeds are categorized accordingly, with go straight slowly, slow down, and slow down rapidly mapping to the deceleration group, while both speed up and speed up rapidly mapping to acceleration group. Furthermore, unique behaviors such as go straight constantly, turn around, reverse, stop, and drive along the curve are collectively assigned to a separate unique group. If the generated meta-actions and the ground truth meta-actions are not identical but belong to the same semantic group (excluding the unique group), they are considered partially matched. Thus, the semantic similarity \\( S \\) is defined as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.129, + 0.503, + 0.482, + 0.554 + ], + "angle": 0, + "content": "\\[\nS (i, \\hat {i}) = \\left\\{ \\begin{array}{l l} 1, & \\text {i f} \\hat {i} \\text {i s t h e s a m e a s} i. \\\\ 0. 5, & \\text {i f} \\hat {i} \\text {p a r t i a l l y m a t c h e s} i. \\\\ 0, & \\text {i f} \\hat {i} \\text {t o t a l l y d i f f e r s f r o m} i. \\end{array} \\right. \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.563, + 0.483, + 0.592 + ], + "angle": 0, + "content": "where, \\( i \\) is the ground truth meta-action in one scene; and \\( \\hat{i} \\) is the generated meta-action." 
+ }, + { + "type": "text", + "bbox": [ + 0.09, + 0.591, + 0.484, + 0.62 + ], + "angle": 0, + "content": "Then, the average PartialMatchScore is obtained by averaging across all scenes:" + }, + { + "type": "equation", + "bbox": [ + 0.116, + 0.627, + 0.483, + 0.666 + ], + "angle": 0, + "content": "\\[\n\\text {P a r t i a l M a t c h S c o r e} = \\frac {1}{N _ {\\text {t o t a l}}} \\sum_ {k = 1} ^ {N _ {\\text {t o t a l}}} S \\left(i _ {k}, \\hat {i _ {k}}\\right) \\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.672, + 0.483, + 0.701 + ], + "angle": 0, + "content": "Finally, different weights are assigned to each metric to derive the comprehensive scoring formula OverallScore:" + }, + { + "type": "equation", + "bbox": [ + 0.134, + 0.709, + 0.482, + 0.775 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} O v e r a l l S c o r e = \\alpha \\cdot E x a c t M a t c h A c c u r a c y \\\\ + \\beta \\cdot M a c r o - F 1 \\\\ + \\gamma \\cdot W e i g h t e d - F 1 \\\\ + \\delta \\cdot \\text {P a r t i a l M a t c h S c o r e} \\tag {12} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.784, + 0.483, + 0.813 + ], + "angle": 0, + "content": "where, \\(\\alpha\\) is set to 0.4; \\(\\beta\\), \\(\\gamma\\), and \\(\\delta\\) are all set to 0.2, which could be adjusted according to specific tasks." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.819, + 0.332, + 0.835 + ], + "angle": 0, + "content": "4.3. Comparative Experiments" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.839, + 0.484, + 0.868 + ], + "angle": 0, + "content": "We evaluate the performance of our proposed RAD framework on Qwen-VL-2.5-7B VLM and compare it against" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.301, + 0.906, + 0.452 + ], + "angle": 0, + "content": "several other state-to-the-art baseline methods: Lynx [48], CogVLM [40], DriveLM [36], GPT-4o [21] and DriveVLM [37]. 
Table 1 presents a thorough quantitative comparison between our proposed RAD and these baselines across multiple evaluation criteria. Our RAD consistently outperforms all baseline methods, demonstrating clear advantages in meta-action decision-making for autonomous driving. In particular, RAD achieves an ExactMatchAccuracy of 0.4096, substantially outperforming DriveVLM-Dual's 0.4016, and attains an OverallScore of 0.3956 compared to DriveVLM-Dual's 0.3801."
 },
 {
 "type": "text",
 "bbox": [
 0.512,
 0.457,
 0.907,
 0.637
 ],
 "angle": 0,
 "content": "A deeper analysis of the remaining metrics further underscores RAD's strengths. Macro - F1, a balanced measure of model performance across all classes, achieves 0.1907, well above DriveVLM-Dual's 0.1854. Meanwhile, Weighted - F1 of 0.3813 indicates its effectiveness in scenarios where class imbalances exist, significantly outperforming all baselines and reflecting RAD's notable capabilities to handle diverse datasets. Also, PartialMatchScore of 0.5870 also highlights RAD's fine-grained generative capability, which suggests that RAD not only excels at producing entirely correct answers, but also consistently captures partially correct information, an essential trait for more nuanced or multi-faceted decision-making tasks."
 },
 {
 "type": "text",
 "bbox": [
 0.512,
 0.641,
 0.907,
 0.794
 ],
 "angle": 0,
 "content": "The poor performance of the baseline methods is mainly due to their lack of task-specific training. As a result, these models exhibit limited spatial perception capabilities and poor BEV image comprehension. Additionally, the parameter size constraints and version limitations of the base models used in these baselines hinder their ability to achieve optimal results. 
However, RAD's superior performance over GPT-4o across all metrics demonstrates the feasibility of specialized VLMs with smaller parameter sizes that rival or even surpass large-scale general-purpose models in complex and domain-specific tasks." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.798, + 0.906, + 0.868 + ], + "angle": 0, + "content": "In summary, the results in Table 1 validate the efficacy and robustness of our RAD model. Through a combination of architectural innovations and targeted training strategies, RAD not only achieves profound performance across multiple metrics but also provides insights into how specialized" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.889, + 0.504, + 0.898 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.304, + 0.124, + 0.694, + 0.138 + ], + "angle": 0, + "content": "Table 2. Ablation studies on fine-tuning VLMs and RAG pipeline" + }, + { + "type": "table", + "bbox": [ + 0.093, + 0.147, + 0.907, + 0.369 + ], + "angle": 0, + "content": "
VLMsMethodExact Match AccuracyMacro-F1Weighted-F1Partial Matching ScoreOverall Score
Qwen-VL-2-2B[9]Vanilla0.21880.03580.10130.43530.2020
Vanilla + RAG0.21450.10490.22780.43190.2387
Fine-tuning0.15430.05280.11940.30170.1565
Fine-tuning + RAG0.26100.13020.25560.45380.2723
Qwen-VL-2-7B[9]Vanilla0.28660.06540.17210.49410.2609
Vanilla + RAG0.34040.14600.32350.54240.3385
Fine-tuning0.29080.07170.19860.45620.2616
Fine-tuning + RAG0.34460.14600.30110.52130.3315
Qwen-VL-2.5-3B[2]Vanilla0.13180.03660.09550.38860.1568
Vanilla + RAG0.12400.02980.08140.38660.1491
Fine-tuning0.21640.05310.13980.39490.2041
Fine-tuning + RAG0.25390.10750.20900.45200.2552
Qwen-VL-2.5-7B[2]Vanilla0.28490.06440.17150.48930.2590
Vanilla + RAG0.35810.19810.33860.55440.3615
Fine-tuning0.34820.10850.28850.53600.3259
Fine-tuning + RAG0.40960.19070.38130.58700.3956
" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.392, + 0.454, + 0.406 + ], + "angle": 0, + "content": "VLMs can excel in intricate autonomous driving tasks." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.416, + 0.258, + 0.43 + ], + "angle": 0, + "content": "4.4. Ablation Studies" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.437, + 0.484, + 0.547 + ], + "angle": 0, + "content": "In our ablation studies, we mainly investigate the impacts of fine-tuning VLMs and RAG pipeline for spatial perception enhancement based on Qwen-VL-2-2B [9], Qwen-VL-2-7B [9], Qwen-VL-2.5-3B [2] and Qwen-VL-2.5-7B [2] models. The performance of VLMs is evaluated using four distinct methods: vanilla (no fine-tuning), vanilla combined with RAG, only fine-tuning, and fine-tuning combined with RAG (our proposed RAD method)." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.549, + 0.484, + 0.728 + ], + "angle": 0, + "content": "The results presented in Table 2 indicate that the combination of fine-tuning and RAG consistently achieves the highest scores across all evaluation metrics, including ExactMatchAccuracy, Macro - F1, Weighted - F1, PartialMatchScore, and OverallScore, for all model variants. Specifically, for Qwen-VL-2.5-7B, our RAD method achieves the highest OverallScore of 0.3956, marking a significant improvement over methods that deploy either fine-tuning or RAG separately. Furthermore, the incorporation of RAG consistently enhances performance for both vanilla and fine-tuned settings across most model scales, validating the effectiveness of retrieval-augmented strategies in improving model performance." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.729, + 0.484, + 0.868 + ], + "angle": 0, + "content": "Notably, for smaller models such as Qwen-VL-2-2B and Qwen-VL-2.5-3B, employing only fine-tuning leads to performance degradation, suggesting that their limited parameter sizes hinder effective learning of domain-specific knowledge through fine-tuning alone. 
Additionally, for Qwen-VL-2.5-3B model, using RAG without fine-tuning results in a performance drop, likely due to the unique pre-training characteristics of this model. Overall, while fine-tuning or RAG independently can enhance performance in larger-scale models, the best results are consistently achieved by" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.392, + 0.907, + 0.517 + ], + "angle": 0, + "content": "combining these two strategies, underscoring the importance of an integrated approach to maximize VLM effectiveness. From a practical perspective, the combination of fine-tuning and RAG proves particularly suitable for enhancing decision-making capabilities in VLMs. Deploying this optimal configuration can substantially improve VLM performance, with potential applications extending to semantic comprehension, trajectory planning, and other complex autonomous driving tasks." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.529, + 0.634, + 0.543 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.552, + 0.907, + 0.676 + ], + "angle": 0, + "content": "In this work, we propose a RAD framework, a novel retrieval-augmented architecture designed to enhance the meta-action decision-making capabilities of VLMs for autonomous driving. Through the integration of fine-tuning VLMs for spatial perception enhancement and BEV image comprehension, RAD effectively enhances VLMs' capability of meta-action decision-making, ensuring higher accuracy, as demonstrated by notable performance gains across key metrics in extensive experimental evaluations." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.677, + 0.909, + 0.867 + ], + "angle": 0, + "content": "Moving forward, we aim to extend RAD in three key directions. First, we plan to incorporate more diverse and fine-grained datasets beyond the NuScenes dataset, encompassing more challenging corner cases and real-world scenarios, to further enhance model robustness. 
Second, we seek to generalize the RAD framework to additional driving tasks, especially trajectory planning and motion control. Third, integrating chain-of-thought and reinforcement learning into the framework will be crucial for improving decision-making depth and adaptability. While fine-tuning and RAG will remain essential for enhancing VLM generalization, these advancements will strengthen the robustness and reliability of autonomous driving systems by leveraging RAG methods to tackle complex real-world tasks." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.889, + 0.505, + 0.899 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.093, + 0.125, + 0.188, + 0.139 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.148, + 0.483, + 0.3 + ], + "angle": 0, + "content": "[1] Jinze Bai, Shuai Bai, Yunfei Chu, Zeyu Cui, Kai Dang, Xiaodong Deng, Yang Fan, Wenbin Ge, Yu Han, Fei Huang, Binyuan Hui, Luo Ji, Mei Li, Junyang Lin, Runji Lin, Dayiheng Liu, Gao Liu, Chengqiang Lu, Keming Lu, Jianxin Ma, Rui Men, Xingzhang Ren, Xuancheng Ren, Chuanqi Tan, Sinan Tan, Jianhong Tu, Peng Wang, Shijie Wang, Wei Wang, Shengguang Wu, Benfeng Xu, Jin Xu, An Yang, Hao Yang, Jian Yang, Shusheng Yang, Yang Yao, Bowen Yu, Hongyi Yuan, Zheng Yuan, Jianwei Zhang, Xingxuan Zhang, Yichang Zhang, Zhenru Zhang, Chang Zhou, Jingren Zhou, Xiaohuan Zhou, and Tianhang Zhu. Qwen technical report. arXiv preprint arXiv:2309.16609, 2023. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.301, + 0.483, + 0.401 + ], + "angle": 0, + "content": "[2] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, Humen Zhong, Yuanzhi Zhu, Mingkun Yang, Zhaohai Li, Jianqiang Wan, Pengfei Wang, Wei Ding, Zheren Fu, Yiheng Xu, Jiabo Ye, Xi Zhang, Tianbao Xie, Zesen Cheng, Hang Zhang, Zhibo Yang, Haiyang Xu, and Junyang Lin. Qwen2.5-v1 technical report. 
arXiv preprint arXiv:2502.13923, 2025. 4, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.403, + 0.484, + 0.48 + ], + "angle": 0, + "content": "[3] Holger Caesar, Varun Bankiti, Alex H Lang, Sourabh Vora, Venice Erin Liong, Qiang Xu, Anush Krishnan, Yu Pan, Giancarlo Baldan, and Oscar Beijbom. Nuscenes: A multimodal dataset for autonomous driving. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11621-11631, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.481, + 0.483, + 0.543 + ], + "angle": 0, + "content": "[4] Tianhui Cai, Yifan Liu, Zewei Zhou, Haoxuan Ma, Seth Z Zhao, Zhiwen Wu, and Jiaqi Ma. Driving with regulation: Interpretable decision-making for autonomous vehicles with retrieval-augmented reasoning via llm. arXiv preprint arXiv:2410.04759, 2024. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.545, + 0.483, + 0.62 + ], + "angle": 0, + "content": "[5] Boyuan Chen, Zhuo Xu, Sean Kirmani, Brain Ichter, Dorsa Sadigh, Leonidas Guibas, and Fei Xia. Spatialvlm: Endowing vision-language models with spatial reasoning capabilities. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14455-14465, 2024. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.622, + 0.483, + 0.698 + ], + "angle": 0, + "content": "[6] Long Chen, Oleg Sinavski, Jan Hunermann, Alice Karnsund, Andrew James Willmott, Danny Birch, Daniel Maund, and Jamie Shotton. Driving with llms: Fusing object-level vector modality for explainable autonomous driving. In Proceedings of the IEEE International Conference on Robotics and Automation, pages 14093-14100, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.7, + 0.483, + 0.749 + ], + "angle": 0, + "content": "[7] Li Chen, Penghao Wu, Kashyap Chitta, Bernhard Jaeger, Andreas Geiger, and Hongyang Li. End-to-end autonomous driving: Challenges and frontiers. 
IEEE Transactions on Pattern Analysis and Machine Intelligence, 2024. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.751, + 0.483, + 0.801 + ], + "angle": 0, + "content": "[8] Pranav Singh Chib and Pravendra Singh. Recent advancements in end-to-end autonomous driving using deep learning: A survey. IEEE Transactions on Intelligent Vehicles, 9 (1):103-118, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.803, + 0.483, + 0.853 + ], + "angle": 0, + "content": "[9] Yunfei Chu, Jin Xu, Qian Yang, Haojie Wei, Xipin Wei, Zhifang Guo, Yichong Leng, Yuanjun Lv, Jinzheng He, Junyang Lin, Chang Zhou, and Jingren Zhou. Qwen2-audio technical report. arXiv preprint arXiv:2407.10759, 2024. 4, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.854, + 0.483, + 0.867 + ], + "angle": 0, + "content": "[10] Can Cui, Yunsheng Ma, Xu Cao, Wenqian Ye, and Ziran" + }, + { + "type": "list", + "bbox": [ + 0.094, + 0.148, + 0.484, + 0.867 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.548, + 0.128, + 0.905, + 0.177 + ], + "angle": 0, + "content": "Wang. Receive, reason, and react: Drive as you say, with large language models in autonomous vehicles. IEEE Intelligent Transportation Systems Magazine, 16(4):81-94, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.179, + 0.905, + 0.254 + ], + "angle": 0, + "content": "[11] Can Cui, Yunsheng Ma, Zichong Yang, Yupeng Zhou, Peiran Liu, Juanwu Lu, Lingxi Li, Yaobin Chen, Jitesh H. Panchal, Amr Abdelraouf, Rohit Gupta, Kyungtae Han, and Ziran Wang. Large language models for autonomous driving (llm4ad): Concept, benchmark, experiments, and challenges. arXiv preprint arXiv:2410.15281, 2024. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.255, + 0.905, + 0.343 + ], + "angle": 0, + "content": "[12] Can Cui, Zichong Yang, Yupeng Zhou, Juntong Peng, Sung-Yeon Park, Cong Zhang, Yunsheng Ma, Xu Cao, Wenqian Ye, Yiheng Feng, Jitesh H. 
Panchal, Lingxi Li, Yaobin Chen, and Ziran Wang. On-board vision-language models for personalized autonomous vehicle motion control: System design and real-world validation. arXiv preprint arXiv:2411.11913, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.344, + 0.905, + 0.407 + ], + "angle": 0, + "content": "[13] Daocheng Fu, Xin Li, Licheng Wen, Min Dou, Pinlong Cai, Botian Shi, and Yu Qiao. Drive like a human: Rethinking autonomous driving with large language models. In Proceedings of the Winter Conference on Applications of Computer Vision Workshops, pages 910-919. IEEE, 2024. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.408, + 0.905, + 0.459 + ], + "angle": 0, + "content": "[14] Yunfan Gao, Yun Xiong, Xinyu Gao, Kangxiang Jia, Jinliu Pan, Yuxi Bi, Yi Dai, Jiawei Sun, and Haofen Wang. Retrieval-augmented generation for large language models: A survey. arXiv preprint arXiv:2312.10997, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.459, + 0.905, + 0.509 + ], + "angle": 0, + "content": "[15] Sorin Grigorescu, Bogdan Trasnea, Tiberiu Cocias, and Gigel Macesanu. A survey of deep learning techniques for autonomous driving. Journal of Field Robotics, 37(3):362-386, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.51, + 0.905, + 0.586 + ], + "angle": 0, + "content": "[16] Xu Han, Zonglin Meng, Xin Xia, Xishun Liao, Brian Yueshuai He, Zhaoliang Zheng, Yutong Wang, Hao Xiang, Zewei Zhou, Letian Gao, Lili Fan, Yuke Li, and Jiaqi Ma. Foundation intelligence for smart infrastructure services in transportation 5.0. IEEE Transactions on Intelligent Vehicles, 9(1):39-47, 2024. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.587, + 0.905, + 0.649 + ], + "angle": 0, + "content": "[17] Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. Lora: Low-rank adaptation of large language models. 
In Proceedings of the International Conference on Learning Representations, 2022. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.65, + 0.905, + 0.714 + ], + "angle": 0, + "content": "[18] Yihan Hu, Jiazhi Yang, Li Chen, Keyu Li, Chonghao Sima, Xizhou Zhu, Siqi Chai, Senyao Du, Tianwei Lin, Wenhai Wang, et al. Planning-oriented autonomous driving. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 17853-17862, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.714, + 0.905, + 0.778 + ], + "angle": 0, + "content": "[19] Yidong Huang, Jacob Sansom, Ziqiao Ma, Felix Gervits, and Joyce Chai. Drivlme: Enhancing llm-based autonomous driving agents with embodied and social experiences. In Proceedings of the IEEE/RSJ International Conference on Intelligent Robots and Systems, pages 3153-3160. IEEE, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.79, + 0.905, + 0.867 + ], + "angle": 0, + "content": "[20] Mohamed Manzour Hussien, Angie Nataly Melo, Augusto Luis Ballardini, Carlota Salinas Maldonado, Ruben Izquierdo, and Miguel Angel Sotelo. Rag-based explainable prediction of road users behaviors for automated driving using knowledge graphs and large language models. Expert Systems with Applications, 265:125914, 2025. 2, 3" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.128, + 0.905, + 0.867 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.889, + 0.505, + 0.899 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.127, + 0.484, + 0.165 + ], + "angle": 0, + "content": "[21] Raisa Islam and Owana Marzia Moushi. Gpt-4o: The cutting-edge advancement in multimodal llm. Authorea Preprints, 2024. 
7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.166, + 0.484, + 0.242 + ], + "angle": 0, + "content": "[22] Bo Jiang, Shaoyu Chen, Qing Xu, Bencheng Liao, Jiajie Chen, Helong Zhou, Qian Zhang, Wenyu Liu, Chang Huang, and Xinggang Wang. Vad: Vectorized scene representation for efficient autonomous driving. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 8340-8350, 2023. 1, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.243, + 0.483, + 0.305 + ], + "angle": 0, + "content": "[23] Bo Jiang, Shaoyu Chen, Benchcheng Liao, Xingyu Zhang, Wei Yin, Qian Zhang, Chang Huang, Wenyu Liu, and Xinggang Wang. Senna: Bridging large vision-language models and end-to-end autonomous driving. arXiv preprint arXiv:2410.22313, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.306, + 0.483, + 0.37 + ], + "angle": 0, + "content": "[24] Zhengbao Jiang, Frank F Xu, Luyu Gao, Zhiqing Sun, Qian Liu, Jane Dwivedi-Yu, Yiming Yang, Jamie Callan, and Graham Neubig. Active retrieval augmented generation. In Proceedings of the Conference on Empirical Methods in Natural Language Processing, pages 7969-7992, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.371, + 0.483, + 0.433 + ], + "angle": 0, + "content": "[25] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. In Proceedings of the International Conference on Machine Learning, pages 19730-19742, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.434, + 0.483, + 0.471 + ], + "angle": 0, + "content": "[26] Yixuan Li, Xuesong Wang, Tianyi Wang, and Qian Liu. Characteristics analysis of autonomous vehicle pre-crash scenarios. arXiv preprint arXiv:2502.20789, 2025. 
1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.472, + 0.483, + 0.547 + ], + "angle": 0, + "content": "[27] Zhiqi Li, Wenhai Wang, Hongyang Li, Enze Xie, Chonghao Sima, Tong Lu, Qiao Yu, and Jifeng Dai. Bevformer: Learning bird's-eye-view representation from lidar-camera via spatiotemporal transformers. IEEE Transactions on Pattern Analysis and Machine Intelligence, 47(3):2020-2036, 2025. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.549, + 0.484, + 0.662 + ], + "angle": 0, + "content": "[28] Yunsheng Ma, Wenqian Ye, Can Cui, Haiming Zhang, Shuo Xing, Fucai Ke, Jinhong Wang, Chenglin Miao, Jintai Chen, Hamid Rezatofighi, Zhen Li, Guangtao Zheng, Chao Zheng, Tianjiao He, Manmohan Chandraker, Burhaneddin Yaman, Xin Ye, Hang Zhao, and Xu Cao. Position: Prospective of autonomous driving - multimodal llms world models embodied intelligence ai alignment and mamba. In Proceedings of the Winter Conference on Applications of Computer Vision Workshops, pages 1010-1026, 2025. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.663, + 0.483, + 0.7 + ], + "angle": 0, + "content": "[29] Jiageng Mao, Yuxi Qian, Junjie Ye, Hang Zhao, and Yue Wang. Gpt-driver: Learning to drive with gpt. arXiv preprint arXiv:2310.01415, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.701, + 0.483, + 0.764 + ], + "angle": 0, + "content": "[30] Xiangru Mu, Tong Qin, Songan Zhang, Chunjing Xu, and Ming Yang. Pix2planning: End-to-end planning by vision-language model for autonomous driving on carla simulator. In Proceedings of the IEEE Intelligent Vehicles Symposium, pages 2383-2390. IEEE, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.765, + 0.483, + 0.84 + ], + "angle": 0, + "content": "[31] Chenbin Pan, Burhaneddin Yaman, Tommaso Nesti, Abhirup Mallik, Alessandro G Allievi, Senem Velipasalar, and Liu Ren. Vlp: Vision language planning for autonomous driving. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14760-14769, 2024. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.842, + 0.483, + 0.867 + ], + "angle": 0, + "content": "[32] Ori Ram, Yoav Levine, Itay Dalmedigos, Dor Muhlgay, Amnon Shashua, Kevin Leyton-Brown, and Yoav Shoham. In" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.127, + 0.484, + 0.867 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.548, + 0.128, + 0.905, + 0.165 + ], + "angle": 0, + "content": "context retrieval-augmented language models. Transactions of the Association for Computational Linguistics, 11:1316-1331, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.167, + 0.905, + 0.23 + ], + "angle": 0, + "content": "[33] Katrin Renz, Long Chen, Ana-Maria Marcu, Jan Hünermann, Benoit Hanotte, Alice Karsund, Jamie Shotton, Elahe Arani, and Oleg Sinavski. Carllava: Vision language models for camera-only closed-loop driving. arXiv preprint arXiv:2406.10165, 2024. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.232, + 0.905, + 0.307 + ], + "angle": 0, + "content": "[34] Hao Shao, Yuxuan Hu, Letian Wang, Guanglu Song, Steven L Waslander, Yu Liu, and Hongsheng Li. Lmdrive: Closed-loop end-to-end driving with large language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 15120-15130, 2024. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.309, + 0.905, + 0.372 + ], + "angle": 0, + "content": "[35] Zhihong Shao, Yeyun Gong, Yelong Shen, Minlie Huang, Nan Duan, and Weizhu Chen. Enhancing retrieval-augmented large language models with iterative retrieval-generation synergy. arXiv preprint arXiv:2305.15294, 2023. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.374, + 0.905, + 0.451 + ], + "angle": 0, + "content": "[36] Chonghao Sima, Katrin Renz, Kashyap Chitta, Li Chen, Hanxue Zhang, Chengen Xie, Jens Beibwenger, Ping Luo, Andreas Geiger, and Hongyang Li. Drivelm: Driving with graph visual question answering. In Proceedings of the European Conference on Computer Vision, pages 256-274. Springer, 2024. 1, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.452, + 0.905, + 0.515 + ], + "angle": 0, + "content": "[37] Xiaoyu Tian, Junru Gu, Bailin Li, Yicheng Liu, Yang Wang, Zhiyong Zhao, Kun Zhan, Peng Jia, Xianpeng Lang, and Hang Zhao. Drivevm: The convergence of autonomous driving and large vision-language models. arXiv preprint arXiv:2402.12289, 2024. 3, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.517, + 0.905, + 0.568 + ], + "angle": 0, + "content": "[38] Shiyi Wang, Yuxuan Zhu, Zhiheng Li, Yutong Wang, Li Li, and Zhengbing He. Chatgpt as your vehicle co-pilot: An initial attempt. IEEE Transactions on Intelligent Vehicles, 8 (12):4706-4721, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.569, + 0.905, + 0.643 + ], + "angle": 0, + "content": "[39] Wenhai Wang, Jiangwei Xie, ChuanYang Hu, Haoming Zou, Jianan Fan, Wenwen Tong, Yang Wen, Silei Wu, Hanming Deng, Zhiqi Li, et al. Drivemlm: Aligning multi-modal large language models with behavioral planning states for autonomous driving. arXiv preprint arXiv:2312.09245, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.646, + 0.905, + 0.71 + ], + "angle": 0, + "content": "[40] Weihan Wang, Qingsong Lv, Wenmeng Yu, Wenyi Hong, Ji Qi, Yan Wang, Junhui Ji, Zhuoyi Yang, Lei Zhao, Song XiXuan, et al. Cogvlm: Visual expert for pretrained language models. Advances in Neural Information Processing Systems, 37:121475-121499, 2024. 
7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.712, + 0.905, + 0.763 + ], + "angle": 0, + "content": "[41] Yangyang Wang and Tianyi Wang. Research on dual-clutch intelligent vehicle infrastructure cooperative control based on system delay prediction of two-lane highway on-ramp merging area. *Automotive Innovation*, 7:588–601, 2024. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.764, + 0.905, + 0.828 + ], + "angle": 0, + "content": "[42] Yujin Wang, Quanfeng Liu, Jiaqi Fan, Jinlong Hong, Hongqing Chu, Mengjian Tian, Bingzhao Gao, and Hong Chen. Rac3: Retrieval-augmented corner case comprehension for autonomous driving with vision-language models. arXiv preprint arXiv:2412.11050, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.829, + 0.905, + 0.867 + ], + "angle": 0, + "content": "[43] Yi Xu, Yuxin Hu, Zaiwei Zhang, Gregory P Meyer, Siva Karthik Mustikovela, Siddhartha Srinivasa, Eric M Wolff, and Xin Huang. Vlm-ad: End-to-end autonomous" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.128, + 0.905, + 0.867 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.889, + 0.51, + 0.9 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.125, + 0.127, + 0.482, + 0.152 + ], + "angle": 0, + "content": "driving through vision-language model supervision. arXiv preprint arXiv:2412.14446, 2024. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.153, + 0.483, + 0.217 + ], + "angle": 0, + "content": "[44] Shota Yamazaki, Chenyu Zhang, Takuya Nanri, Akio Shigekane, Siyuan Wang, Jo Nishiyama, Tao Chu, and Kohei Yokosawa. Explanation for trajectory planning using multimodal large language model for autonomous driving. arXiv preprint arXiv:2411.09971, 2024. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.218, + 0.484, + 0.305 + ], + "angle": 0, + "content": "[45] Kairui Yang, Zihao Guo, Gengjie Lin, Haotian Dong, Zhao Huang, Yipeng Wu, Die Zuo, Jibin Peng, Ziyuan Zhong, Xin Wang, Qing Guo, Xiaosong Jia, Junchi Yan, and Di Lin. Trajectory-llm: A language-based data generator for trajectory prediction in autonomous driving. In Proceedings of the International Conference on Learning Representations, 2025. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.308, + 0.483, + 0.371 + ], + "angle": 0, + "content": "[46] Jianhao Yuan, Shuyang Sun, Daniel Omeiza, Bo Zhao, Paul Newman, Lars Kunze, and Matthew Gadd. Rag-driver: Generalisable driving explanations with retrieval-augmented in-context learning in multi-modal large language model. arXiv preprint arXiv:2402.10828, 2024. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.372, + 0.482, + 0.422 + ], + "angle": 0, + "content": "[47] Ekim Yurtsever, Jacob Lambert, Alexander Carballo, and Kazuya Takeda. A survey of autonomous driving: Common practices and emerging technologies. IEEE Access, 8: 58443-58469, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.424, + 0.484, + 0.512 + ], + "angle": 0, + "content": "[48] Yan Zeng, Hanbo Zhang, Jiani Zheng, Jiangnan Xia, Guoqiang Wei, Yang Wei, Yuchen Zhang, Tao Kong, and Ruihua Song. What matters in training a gpt4-style language model with multimodal inputs? In Proceedings of the Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 7930-7957, 2024. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.514, + 0.483, + 0.577 + ], + "angle": 0, + "content": "[49] Miao Zhang, Zhenlong Fang, Tianyi Wang, Qian Zhang, Shuai Lu, Junfeng Jiao, and Tianyu Shi. A cascading cooperative multi-agent framework for on-ramp merging control integrating large language models. arXiv preprint arXiv:2503.08199, 2025. 
1" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.579, + 0.482, + 0.64 + ], + "angle": 0, + "content": "[50] Juncheng Zheng, Meiyu Liang, Yang Yu, Yawen Li, and Zhe Xue. Knowledge graph enhanced multimodal transformer for image-text retrieval. In Proceedings of the IEEE International Conference on Data Engineering, pages 70-82. IEEE, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.643, + 0.482, + 0.694 + ], + "angle": 0, + "content": "[51] Yaowei Zheng, Richong Zhang, Junhao Zhang, Yanhan Ye, Zheyan Luo, Zhangchi Feng, and Yongqiang Ma. Llamafactory: Unified efficient fine-tuning of \\(100+\\) language models. arXiv preprint arXiv:2403.13372, 2024. 4" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.127, + 0.484, + 0.694 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.889, + 0.508, + 0.899 + ], + "angle": 0, + "content": "11" + } + ] +] \ No newline at end of file diff --git a/data/2025/2503_13xxx/2503.13861/24ebc1ba-af7b-4d3e-a5d9-ba11158e223d_origin.pdf b/data/2025/2503_13xxx/2503.13861/24ebc1ba-af7b-4d3e-a5d9-ba11158e223d_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..0668cbfd7480b50d5730e319bb09bb60b846b48b --- /dev/null +++ b/data/2025/2503_13xxx/2503.13861/24ebc1ba-af7b-4d3e-a5d9-ba11158e223d_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dc83d0962734f2630658e9c050aa327a1f11f5e2c04f3569f6f3f162de238ad5 +size 4819262 diff --git a/data/2025/2503_13xxx/2503.13861/full.md b/data/2025/2503_13xxx/2503.13861/full.md new file mode 100644 index 0000000000000000000000000000000000000000..3d60e061b2fb08bb6ce723477a8b3a5ee626b685 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13861/full.md @@ -0,0 +1,335 @@ +# RAD: Retrieval-Augmented Decision-Making of Meta-Actions with Vision-Language Models in Autonomous Driving + +Yujin Wang + +Junfeng Jiao + +Tongji University + +Quanfeng Liu + +Hongqing Chu1,* + +$^{2}$ Yale University + 
+Zhengxin Jiang + +Bingzhao Gao + +nyi Wang2 + +Hong Chen + +3University of Texas at Austin + +chuhongqing@tongji.edu.cn + +# Abstract + +Accurately understanding and deciding high-level meta- actions is essential for ensuring reliable and safe autonomous driving systems. While vision-language models (VLMs) have shown significant potential in various autonomous driving tasks, they often suffer from limitations such as inadequate spatial perception and hallucination, reducing their effectiveness in complex autonomous driving scenarios. To address these challenges, we propose a retrieval-augmented decision-making (RAD) framework, a novel architecture designed to enhance VLMs' capabilities to reliably generate meta-actions in autonomous driving scenes. RAD leverages a retrieval-augmented generation (RAG) pipeline to dynamically improve decision accuracy through a three-stage process consisting of the embedding flow, retrieving flow, and generating flow. Additionally, we fine-tune VLMs on a specifically curated dataset derived from the NuScenes dataset to enhance their spatial perception and bird's-eye view image comprehension capabilities. Extensive experimental evaluations on the curated NuScenes-based dataset demonstrate that RAD outperforms baseline methods across key evaluation metrics, including match accuracy, and F1 score, and self-defined overall score, highlighting its effectiveness in improving meta-action decision-making for autonomous driving tasks. + +# 1. Introduction + +In recent years, the race towards fully autonomous vehicles has spurred extensive research into robust decision-making approaches, a fundamental task in autonomous driving systems [26, 41, 49]. Ensuring safe and efficient motion planning requires continuous interpretation of dynamic environments, real-time reasoning under uncertainty, and efficient integration of vast amounts of multimodal data [28]. 
+ +Traditional autonomous driving systems adopt a modular development strategy, in which perception, prediction, + +planning, and control are developed and optimized independently before being integrated into the vehicle system [15, 47]. However, as the information flow propagates across these modules, errors and delays can accumulate, potentially leading to suboptimal or even unreasonable driving decisions. To further mitigate these errors and improve computational efficiency, end-to-end autonomous driving has emerged as a prominent research direction [7, 8]. + +End-to-end refers to a model that directly receives input from sensor data (e.g., cameras, LiDAR) and directly outputs vehicle planning decisions. In recent studies [11, 18, 22], end-to-end autonomous driving algorithms have demonstrated their superiority in both simulation environments and real-world road tests. Moreover, the emergence of foundation models provides a promising solution to enhance motion planning performance, improve generalization across diverse scenarios, and increase interpretability in end-to-end autonomous driving [13, 16, 29, 38]. Trained on huge amounts of human knowledge, these models exhibit advanced comprehension and reasoning capabilities, highlighting the immense potential of artificial intelligence in complex decision-making tasks. Integrating such foundation models into autonomous driving systems could facilitate the development of human-like driving behaviors, advancing the field toward safer and more adaptable autonomous vehicles. + +Autonomous driving tasks require models with robust visual perception capabilities, making vision-language models (VLMs) particularly well-suited for this domain. VLMs trained on large-scale data often demonstrate strong reasoning capabilities, enabling them to infer the evolution of complex driving scenarios. 
Current research [19, 31, 33, 34, 36] has focused on fine-tuning pre-trained VLMs using visual question-answer (VQA) pairs composed of scene images and corresponding driving actions. This approach enables VLMs to generate feasible trajectories, enhancing their applicability in real-world autonomous driving tasks. + +However, fine-tuning or even full-scale fine-tuning of VLMs using large-scale datasets requires substantial com + +![](images/2fcfd978558698a24c8389607cd68771a9521155361203e02c91eaff8017791f.jpg) +Figure 1. The overview of our RAD method. The framework consists of four working flows, namely embedding flow, retrieving flow, fine-tuning flow and generating flow. The embedding flow encodes front-view images and BEV images into a vector database. Given a query scene, the retrieving flow retrieves the most similar scene from the database. The fine-tuning flow involves fine-tuning VLMs to enhance spatial perception and BEV image comprehension. The generating flow guides VLMs in generating contextually appropriate meta-actions according to the query scene, the retrieved scene, its ground truth meta-action, and proper prompts. + +putational resources. Additionally, deploying VLMs with an extremely large number of parameters on vehicle-end hardware poses significant constraints. To address these challenges, retrieval-augmented generation (RAG) has emerged as a promising approach to enhance the decision-making capabilities of VLMs by incorporating external knowledge bases [14, 42]. The core idea of RAG is to augment generative models with a retrieval module that dynamically retrieves relevant textual information during the generation process. In vision-language tasks, RAG can effectively mitigate limitations caused by knowledge scarcity. By integrating external knowledge bases, models can not only extract information from images but also retrieve supplementary knowledge, thereby improving the robustness and accuracy of the generated outputs. 
Although the direct application of RAG to the decision-making process in autonomous driving remains limited, an increasing number of studies have explored its potential in specific tasks such as scene understanding and regulation retrieval [4, 20, 46]. + +In this work, we propose a retrieval-augmented decision-making (RAD) framework, introducing a novel approach to assist VLMs in generating meta-actions using RAG for the first time, as depicted in Figure 1. The main research contributions of this work are outlined as follows: + +- Pre-Training VLMs for Spatial Perception Tasks: We construct obstacle perception tasks based on the NuScenes dataset [3], incorporating VQA pairs designed + +to capture obstacle categories, positions, and other spatial information. This pre-training process enables VLMs to explicitly learn key geometric features such as the locations and sizes of obstacles, leading to improved performance in spatial perception tasks. + +- Establishing an External Knowledge Base with NuScenes Ground Truth Data: We select a subset of scenes containing navigation information, historical trajectory data, and future meta-action ground truth. Furthermore, we generate bird's-eye view (BEV) images corresponding to the scene images. The surround-view images from these scenes are then encoded into vector representations using BLIP-2 [25], alongside the BEV images, to form the knowledge base. +- Developing a Retrieval and Generation Pipeline for Meta-Action Decision-Making using Fine-Tuned VLMs and RAG: We employ cosine similarity to retrieve the most similar scene from the external knowledge base including the front-view image of the current scene. The corresponding six surround-view images, speed information, navigation data, and ground truth trajectory are then used as auxiliary inputs, guiding the VLM in generating a trustworthy planning trajectory for the current scene. 
+ +The remainder of this paper is organized as follows: In Section 2, the detailed literature review is conducted. In Section 3, four working flows of the proposed RAD framework are introduced. In Section 4, comparative experiments + +and ablation studies are designed. Section 5 summarizes the work and discusses future research directions. + +# 2. Related Works + +# 2.1. Multimodal Large Language Models in Autonomous Driving + +Utilizing multimodal large language models (MLLMs) in autonomous driving enhances decision-making by leveraging their extensive knowledge and reasoning capabilities through multisource information such as vision, language, and rules, significantly improving scene understanding, strategy generation, and interpretability. DriveMLM [39] employed MLLMs to generate high-level behavioral decisions (e.g., lane-changing, deceleration, acceleration, etc.), which were then integrated with traditional motion planning modules, balancing flexibility and interpretability. "Drive as you speak" [10] enriched large language models (LLMs) with comprehensive environmental data from different vehicle modules, leading to safer decisions. "Driving with LLMs" [6] introduced a LLM that generated 10,000 driving scenarios for agent training. "Drive like a human" [13] demonstrated LLMs' capabilities of understanding and interacting with environments in closed-loop systems, effectively navigating long-tail autonomous driving scenarios. DriveVLM [37] adopted a multistage reasoning chain that combined scene description, dynamic analysis, and hierarchical planning. Additionally, DriveVLM-Dual incorporated traditional 3D perception algorithms to ensure both cognitive depth and real-time control. Pix2Planning [30] formulated planning as an autoregressive sequence prediction problem, using a vision-language Transformer to generate trajectory points. 
VLP [31] incorporated linguistic descriptions into the training process and aligned them with visual features, significantly improving cross-city and cross-scenario generalization. To enhance interpretability, some studies [44, 45] introduced "future trajectory images", which were processed by multimodal models to generate natural language explanations. Senna [23] further refined the decision-making process by separating high-level meta-actions from low-level trajectory predictions. In this framework, VLMs first produced directional or speed-level decisions before end-to-end models executed precise paths, thereby achieving a hierarchical strategy that was similar to human driving behaviors. + +However, these methods are prone to hallucination, a limitation arising from the reliance of MLLMs on learned associations between visual inputs and language-based reasoning. As a result, they may misinterpret ambiguous or occluded objects, leading to incorrect high-level decision-making. This issue becomes particularly critical in long-tail scenarios, where the model encounters rare or underrepresented driving conditions not well-covered in the training + +data. Such misinterpretations can ultimately compromise the reliability and safety of the autonomous driving system. + +# 2.2. Retrieval-Augmented Generation in Vision-Language Models + +In vision-language tasks, RAG mitigates knowledge limitations by leveraging external knowledge bases, enabling models to extract insights from images while supplementing them with retrieved contextual data. This dual approach significantly helps mitigate model hallucination and improve planning accuracy. Jiang et al. [24] introduced a RAG-based framework for VLMs, demonstrating its effectiveness in complex tasks requiring extensive background knowledge. 
Their study underscored the limitations of conventional end-to-end VLMs when faced with knowledge deficiencies, whereas RAG facilitated richer contextual integration, enhancing both reasoning and generation. Building on this, Shao et al. [35] further investigated RAG's role in VQA tasks, showing that combining retrieval mechanisms with pre-trained VLMs significantly strengthened model performance in complex reasoning scenarios. Additionally, Ram et al. [32] examined RAG's impact on pre-training and fine-tuning, illustrating that incorporating large-scale external data sources during pre-training improved downstream performance by enhancing cross-modal reasoning, particularly in retrieval-based tasks. Meanwhile, Zheng et al. [50] emphasized RAG's broader advantages, particularly in improving generative flexibility and adaptability in multimodal tasks. Their findings highlighted RAG's effectiveness in handling scenarios lacking sufficient annotations or domain-specific knowledge, reinforcing its potential in bridging knowledge gaps for more informed and context-aware model outputs. Hussien et al. [20] illustrated how RAG-augmented VLMs enhanced cross-modal retrieval, particularly by strengthening associations between images and textual data. For performance optimization, Yuan et al. [46] introduced a dynamic knowledge retrieval mechanism, emphasizing real-time adjustments in retrieval and generation processes based on task-specific requirements. This adaptive approach allowed RAG to selectively retrieve the most relevant background knowledge, improving performance across various multimodal applications. Cai et al. [4] developed a traffic regulation retrieval agent based on RAG, enabling automatic retrieval of relevant traffic rules and guidelines based on the ego vehicle's status. Moreover, Cui et al. [12] incorporated a RAG-based memory module that continuously learned takeover preferences through human feedback to enhance motion planning. 
+ +Despite its strong potential, research on directly utilizing RAG to guide VLMs in meta-action decision-making remains limited. To address this gap, we propose the RAD framework, which, for the first time, integrates RAG with pre-training for spatial perception capabilities, enabling + +more effective decision-making of meta-actions. + +# 3. Methodology + +As shown in Figure 1, the proposed RAD framework comprises four work flows: embedding flow, retrieving flow, fine-tuning flow and generating flow. Among these, the fine-tuning flow operates independently, as its primary objective is to enhance the spatial perception capabilities of VLMs through separate fine-tuning. In the embedding flow, BEV images are generated to correspond with front-view scene images from the NuScenes dataset. These image pairs are encoded into a vector space using a frozen BLIP-2 model and the separate embeddings are then concatenated and stored in a vector database. In the retrieving flow, a new front-view image and its corresponding BEV image serve as a query. These images are encoded into the vector space using the same frozen BLIP-2 model. Cosine similarity is then computed between the query images and those stored in the database, enabling the retrieval of the most similar scene from the database. Furthermore, based on the relative positional relationships between consecutive scenes in the NuScenes dataset, the ground truth meta-actions executed in each scene can be extracted. Finally, in the generating flow, the query scene, retrieved scene, its ground truth meta-action, and proper prompts serve as inputs to the VLMs. These inputs guide the model to make decisions and generate meta-actions, ensuring more accurate and context-aware autonomous driving behaviors. 
+ 

All the extracted meta-actions are shown as follows: + 

(1) Speed up (rapidly) (2) Slow down (rapidly)
(3) Turn left/right (4) Drive along the curve
(5) Turn around (6) Change lane to the left/right
(7) Reverse (8) Shift slightly to the left/right
(9) Stop (10) Go straight constantly/slowly

# 3.1. The Fine-Tuning Flow

Making precise meta-action decisions in autonomous driving requires an accurate understanding of the environment. If a model lacks sufficient spatial perception capabilities, it may fail to construct a reliable environmental representation, potentially leading to obstacle avoidance failures in meta-action decision-making. VLMs typically rely on monocular or surround-view camera inputs and estimate depth information from single-frame images. However, in long-tail scenarios, monocular vision exhibits significant depth estimation errors [5]. Experimental results on the NuScenes dataset indicate that existing VLMs generally lack robust spatial perception, which severely impacts the safety of decision-making and motion control [43].

To address the aforementioned challenges, VLMs should first undergo fine-tuning to enhance their spatial perception + 

![](images/87a3dad6ccd70533953de17fe61bb786242837c776ed4147096cbd1507c1b284.jpg)
Figure 2. The process of generating a dataset for spatial perception enhancement based on the NuScenes dataset

capabilities. The structure of VLMs typically consists of a vision encoder and an LLM. In this work, we focus on fine-tuning only the LLM component to enhance its spatial perception. We utilize the NuScenes dataset to generate a specified dataset for spatial perception enhancement, following the process illustrated in Figure 2.

During the image filtering process, it is necessary to ensure the uniqueness of the VQA pairs by cross-referencing the annotated data from the original NuScenes dataset. 
The generated dataset for fine-tuning includes over 100,000 training samples, covering key spatial perception tasks such as object class recognition, object distance estimation and object size estimation. + 

For spatial perception enhancement fine-tuning, the loss function for a single sample is defined as follows: + 

$$
\begin{array}{l} J = \frac {1}{N} \sum_ {i = 1} ^ {N} \left[ \lambda_ {1, i} \left(- \sum_ {c = 1} ^ {n} y _ {c, i} \log \left(p _ {c, i}\right)\right) \right. \\ \left. + \lambda_ {2, i} \left(\frac {1}{3} \sum_ {j = 1} ^ {3} \left(z _ {j, i} - z _ {j, i} ^ {*}\right) ^ {2}\right) + \lambda_ {3, i} \left(x _ {i} - x _ {i} ^ {*}\right) ^ {2} \right] \tag {1} \\ \end{array}
$$

where, $N$ is the batch size during fine-tuning; $\lambda_{1,i}$ is the loss identifier for object class recognition in the $i$ -th sample (if there is a corresponding class, $\lambda_{1,i}$ will be set to 1; and otherwise, $\lambda_{1,i}$ will be set to 0); $\lambda_{2,i}$ is the loss identifier for object size estimation; $\lambda_{3,i}$ is the loss identifier for object distance estimation; $n$ is the total number of classes in the classification task; $y_{c,i}$ is the label for the $i$ -th sample belonging to class $c$ , represented by one-hot encoding; $p_{c,i}$ is the probability of the $i$ -th sample being classified as class $c$ by the model; $z_{j,i}$ is the output size of the $i$ -th sample in the $j$ -th dimension from the model; $z_{j,i}^{*}$ is the ground truth size of the $i$ -th sample in the $j$ -th dimension; $x_{i}$ is the model's output for the distance from the $i$ -th sample to the reference object; and $x_{i}^{*}$ is the ground truth distance from the $i$ -th sample to the reference object. + 

In this work, we fine-tune a series of VLMs, primarily from the Qwen family [1, 2, 9], using low-rank adaptation (LoRA) [17, 51]. 
The overall training is conducted for + +![](images/3b2c0252547851bd74c7c00b8d60c2fc30f5ecd67aa4851a41887284e269116f.jpg) +Figure 3. The fine-tuning VQA paradigm for BEV image understanding + +System: This image illustrates the BEV view of a driving scene, showing the area of 60 meters ahead, 30 meters behind, 30 meters left and right of the ego vehicle. The units of longitudinal and lateral coordinates are meters. The ego vehicle is located at the center $[0,0]$ , represented by a blue rectangle. The red rectangles represent the objects of vehicle type, including cars, trucks, etc. If there is an arrow on the red rectangle, it means that it will move in the direction of the arrow. The green dots represent pedestrians, and the green arrows also indicate the moving direction. Black dots are static obstacles, including roadblocks, traffic lights, etc. + +Question 1: What kind of object (pedestrian, vehicle, or static obstacle) is located within the coordinate [7.6,8.9] in this image? +Answer 1: There is a vehicle located within the coordinate [7.6,8.9]. +Question 2: What is the central position coordinate of the left-front static obstacle in this image? The result retains one decimal place after the decimal point. +Answer 2: The central position coordinate of the left-front static obstacle is [24.5,17.2]. +Question 3: What is the distance from the left-front static obstacle to the left pedestrian in this image? The result retains one decimal place after the decimal point. +Answer 3: The distance from the left-front static obstacle to the left pedestrian is $16.3\mathrm{m}$ + +three epochs. Additionally, following the BEVFormer [27], we generate BEV images from the existing surround-view images in the NuScenes dataset. Intuitively, incorporating BEV images helps the model better understand the relative spatial relationships of objects in driving scenes. Therefore, it is also necessary to train VLMs to recognize and interpret BEV images effectively. 
The fine-tuning paradigm, as illustrated in Figure 3, follows a similar approach to the VQA pair construction method based on ground truth information to develop a robust ability to understand BEV images. + +# 3.2. The Embedding Flow + +In the embedding flow, we encode front-view images from the NuScenes dataset along with the pre-generated BEV images into a unified vector space. Since this embedding operation does not involve cross-modal content, the frozen BLIP-2 model weights can be directly utilized, ensuring computational efficiency and consistency. To maintain the one-to-one correspondence between front-view images and BEV images, their embedding vectors are concatenated within this flow. The resulting concatenated vectors are then uniformly stored in an indexed vector database. + +# 3.3. The Retrieving Flow + +The core of the retrieving flow lies in the computation of cosine similarity. Given two image embeddings $\mathbf{v}_i$ and $\mathbf{v}_j$ , + +cosine similarity is defined as: + +$$ +\text {s i m i l a r i t y} _ {i, j} = \frac {\mathbf {v} _ {i} \cdot \mathbf {v} _ {j}}{\| \mathbf {v} _ {i} \| \| \mathbf {v} _ {j} \|} \tag {2} +$$ + +where, $\| *\|$ represents the Euclidean norm. + +The main framework of the retrieving flow is illustrated in Figure 4. For a new scene, we first generate its BEV images from the surround-view images. The front-view image and BEV image of the new scene jointly trigger a query scene. The embeddings for the new front-view image and BEV image are then extracted using the frozen BLIP-2 model. Since the vector database stores concatenated embedding vectors, the embeddings for the front-view image and BEV image are retrieved through length decomposition. The cosine similarity between the new front-view image embeddings and those stored in the database is computed and denoted as $similarity_{fv}$ . 
Similarly, the cosine similarity between the new BEV image embeddings and those stored in the database is computed and denoted as $similarity_{bev}$ . To flexibly adjust the retrieval preference toward either the front-view image or the BEV image, a hyperparameter $\omega$ is introduced. In this work, $\omega$ is set to 0.5 as a balanced weight for retrieval. The overall similarity could be calculated as follows: + +$$ +\text {s i m i l a r i t y} = (1 - \omega) \cdot \text {s i m i l a r i t y} _ {f v} + \omega \cdot \text {s i m i l a r i t y} _ {\text {b e v}} \tag {3} +$$ + +The scene with the highest overall similarity is then retrieved from the vector database. Using its index, we can + +obtain the corresponding front-view image, BEV image, and pre-extracted ground truth meta-action. + +![](images/f7ade894530b154e1b79cdbf72256b8bcb4124794ba45c0c288a410898e29806.jpg) +Figure 4. The main framework of the retrieving flow + +# 3.4. The Generating Flow + +In the generating flow, we primarily employ prompt engineering to guide VLMs in reasoning based on the retrieved scene and its corresponding meta-action, enabling them to make accurate meta-action decisions for the new scene. The prompts should be divided into two key components: + +- System Prompt: Guide VLMs to make meta-action decisions based on the provided images. +- RAG-Specific Prompt: Instruct VLMs to understand the retrieved scene images and corresponding meta-actions. + +For this process, we primarily use the Qwen series of VLMs, as they support multiple image inputs, making prompt design more flexible and effective. With structured and well-designed prompts, the VLMs analyze the front-view image and BEV image of the current scene, ultimately generating a single meta-action as the final output. + +# 4. Experiments + +# 4.1. 
Dataset Preparation + +We divide the 34,000 scenes from the NuScenes dataset into three subsets: 10,000 scenes are allocated for fine-tuning VLMs, focusing on enhancing spatial perception and BEV image understanding; 20,000 scenes are embedded in the vector database as prior information; and the remaining 4,000 scenes serve as the test set, used to evaluate the framework's effectiveness and the model's overall performance. + +# 4.2. Evaluation Metrics + +To assess the performance, we employ traditional classification metrics such as accuracy, precision, recall and F1 score. Additionally, we introduce a customized partial match score to account for semantically similar but not entirely identical cases. Finally, we utilize a weighted method to compute a comprehensive performance score. + +We firstly adopt ExactMatchAccuracy to evaluate whether the model provides a fully correct meta-action for a given scene, which is formally defined as follows: + +$$ +E x a c t M a t c h A c c u r a c y = \frac {N _ {m a t c h}}{N _ {t o t a l}} \tag {4} +$$ + +where, $N_{match}$ is the number of scenes where the generated meta-actions exactly match the ground truth; and $N_{total}$ is the total number of scenes. 
+ +For each meta-action, Precision, Recall, and $F1$ can be used as evaluation metrics, which are defined as follows: + +$$ +P r e c i s i o n _ {i} = \frac {T P _ {i}}{T P _ {i} + F P _ {i}} \tag {5} +$$ + +$$ +\operatorname {R e c a l l} _ {i} = \frac {T P _ {i}}{T P _ {i} + F N _ {i}} \tag {6} +$$ + +$$ +F 1 _ {i} = \frac {2 \times \text {P r e c i s i o n} _ {i} \times \text {R e c a l l} _ {i}}{\text {P r e c i s i o n} _ {i} + \text {R e c a l l} _ {i}} \tag {7} +$$ + +where, $TP_{i}$ is the true positives, and the number of scenes where the generated meta-actions are $i$ and the ground truth are also $i$ ; $FP_{i}$ is the false positives, and the number of scenes where the generated meta-actions are $i$ but the ground truth are not $i$ ; $FN_{i}$ is the false negatives, and the number of scenes where the generated meta-actions are not $i$ but the ground truth are $i$ . + +To evaluate the overall performance across different meta-actions in the test set, Macro $-F1$ and Weighted $-F1$ scores are introduced. Macro $-F1$ is the unweighted average of $F1$ scores across all meta-actions, while Weighted $-F1$ is the weighted average of $F1$ scores, which are defined as: + +$$ +M a c r o - F 1 = \frac {1}{K} \sum_ {i = 1} ^ {K} F 1 _ {i} \tag {8} +$$ + +$$ +W e i g h t e d - F 1 = \frac {1}{N _ {\text {t o t a l}}} \sum_ {i = 1} ^ {K} n _ {i} F 1 _ {i} \tag {9} +$$ + +where, $K$ represents the total number of meta-actions, which is set to 15; and $n_i$ represents the number of scenes where the ground truth meta-action is $i$ . + +To account for the semantic similarity between certain meta-actions, we introduce a PartialMatchScore. Specifically, meta-actions involving leftward maneuvers—such as turn left, change lane to the left and shift + +Table 1. Comparison among different baselines and our RAD method + +
MethodExact Match AccuracyMacro-F1Weighted-F1Partial Match ScoreOverall Score
Lynx (Fine-tuning)[48]0.15240.01670.06530.27680.1327
CogVLM (Fine-tuning)[40]0.21780.02040.11050.35630.1846
DriveLM (on LLaMA-LoRA-BIAS-7B)[36]0.14550.04480.12030.30280.1518
DriveLM (on LLaMA-BIAS-7B)[36]0.18960.04090.12120.34250.1693
DriveLM (on LLaMA-CAPTION-7B)[36]0.20340.03800.10800.39520.1896
GPT-4o (Official API)[21]0.29940.11270.22880.43770.2756
DriveVLM[37]0.37430.16710.33250.54620.3589
DriveVLM-Dual (cooperating with VAD[22])[37]0.40160.18540.35060.56130.3801
RAD (Ours, on Qwen-VL-2.5-7B)0.40960.19070.38130.58700.3956
+ 

slightly to the left—are classified under the left group, while analogous rightward actions form the right group. Similarly, meta-actions indicating forward motion at varying speeds are categorized accordingly, with go straight slowly, slow down, and slow down rapidly mapping to the deceleration group, while both speed up and speed up rapidly mapping to the acceleration group. Furthermore, unique behaviors such as go straight constantly, turn around, reverse, stop, and drive along the curve are collectively assigned to a separate unique group. If the generated meta-actions and the ground truth meta-actions are not identical but belong to the same semantic group (excluding the unique group), they are considered partially matched. Thus, the semantic similarity $S$ is defined as follows: + 

$$
S (i, \hat {i}) = \left\{ \begin{array}{l l} 1, & \text {if } \hat {i} \text { is the same as } i. \\ 0.5, & \text {if } \hat {i} \text { partially matches } i. \\ 0, & \text {if } \hat {i} \text { totally differs from } i. \end{array} \right. \tag {10}
$$

where, $i$ is the ground truth meta-action in one scene; and $\hat{i}$ is the generated meta-action. + 

Then, the average PartialMatchScore is obtained by averaging across all scenes: + 

$$
\text {PartialMatchScore} = \frac {1}{N _ {\text {total}}} \sum_ {k = 1} ^ {N _ {\text {total}}} S \left(i _ {k}, \hat {i} _ {k}\right) \tag {11}
$$

Finally, different weights are assigned to each metric to derive the comprehensive scoring formula OverallScore: + 

$$
\begin{array}{l} \text {OverallScore} = \alpha \cdot \text {ExactMatchAccuracy} \\ + \beta \cdot \text {Macro-} F 1 \\ + \gamma \cdot \text {Weighted-} F 1 \\ + \delta \cdot \text {PartialMatchScore} \tag {12} \\ \end{array}
$$

where, $\alpha$ is set to 0.4; $\beta$ , $\gamma$ , and $\delta$ are all set to 0.2, which could be adjusted according to specific tasks. 

# 4.3. 
Comparative Experiments

We evaluate the performance of our proposed RAD framework on Qwen-VL-2.5-7B VLM and compare it against + 

several other state-of-the-art baseline methods: Lynx [48], CogVLM [40], DriveLM [36], GPT-4o [21] and DriveVLM [37]. Table 1 presents a thorough quantitative comparison between our proposed RAD and these baselines across multiple evaluation criteria. Our RAD consistently outperforms all baseline methods, demonstrating clear advantages in meta-action decision-making for autonomous driving. In particular, RAD achieves an ExactMatchAccuracy of 0.4096, substantially outperforming DriveVLM-Dual's 0.4016, and attains an OverallScore of 0.3956 compared to DriveVLM-Dual's 0.3801. 

A deeper analysis of the remaining metrics further underscores RAD's strengths. Macro - F1, a balanced measure of model performance across all classes, achieves 0.1907, well above DriveVLM-Dual's 0.1854. Meanwhile, Weighted - F1 of 0.3813 indicates its effectiveness in scenarios where class imbalances exist, significantly outperforming all baselines and reflecting RAD's notable capabilities to handle diverse datasets. Also, PartialMatchScore of 0.5870 highlights RAD's fine-grained generative capability, which suggests that RAD not only excels at producing entirely correct answers, but also consistently captures partially correct information, an essential trait for more nuanced or multi-faceted decision-making tasks. 

The poor performance of the baseline methods is mainly due to their lack of task-specific training. As a result, these models exhibit limited spatial perception capabilities and poor BEV image comprehension. Additionally, the parameter size constraints and version limitations of the base models used in these baselines hinder their ability to achieve optimal results. 
However, RAD's superior performance over GPT-4o across all metrics demonstrates the feasibility of specialized VLMs with smaller parameter sizes that rival or even surpass large-scale general-purpose models in complex and domain-specific tasks. + +In summary, the results in Table 1 validate the efficacy and robustness of our RAD model. Through a combination of architectural innovations and targeted training strategies, RAD not only achieves profound performance across multiple metrics but also provides insights into how specialized + +Table 2. Ablation studies on fine-tuning VLMs and RAG pipeline + +
VLMsMethodExact Match AccuracyMacro-F1Weighted-F1Partial Matching ScoreOverall Score
Qwen-VL-2-2B[9]Vanilla0.21880.03580.10130.43530.2020
Vanilla + RAG0.21450.10490.22780.43190.2387
Fine-tuning0.15430.05280.11940.30170.1565
Fine-tuning + RAG0.26100.13020.25560.45380.2723
Qwen-VL-2-7B[9]Vanilla0.28660.06540.17210.49410.2609
Vanilla + RAG0.34040.14600.32350.54240.3385
Fine-tuning0.29080.07170.19860.45620.2616
Fine-tuning + RAG0.34460.14600.30110.52130.3315
Qwen-VL-2.5-3B[2]Vanilla0.13180.03660.09550.38860.1568
Vanilla + RAG0.12400.02980.08140.38660.1491
Fine-tuning0.21640.05310.13980.39490.2041
Fine-tuning + RAG0.25390.10750.20900.45200.2552
Qwen-VL-2.5-7B[2]Vanilla0.28490.06440.17150.48930.2590
Vanilla + RAG0.35810.19810.33860.55440.3615
Fine-tuning0.34820.10850.28850.53600.3259
Fine-tuning + RAG0.40960.19070.38130.58700.3956
+ +VLMs can excel in intricate autonomous driving tasks. + +# 4.4. Ablation Studies + +In our ablation studies, we mainly investigate the impacts of fine-tuning VLMs and RAG pipeline for spatial perception enhancement based on Qwen-VL-2-2B [9], Qwen-VL-2-7B [9], Qwen-VL-2.5-3B [2] and Qwen-VL-2.5-7B [2] models. The performance of VLMs is evaluated using four distinct methods: vanilla (no fine-tuning), vanilla combined with RAG, only fine-tuning, and fine-tuning combined with RAG (our proposed RAD method). + +The results presented in Table 2 indicate that the combination of fine-tuning and RAG consistently achieves the highest scores across all evaluation metrics, including ExactMatchAccuracy, Macro - F1, Weighted - F1, PartialMatchScore, and OverallScore, for all model variants. Specifically, for Qwen-VL-2.5-7B, our RAD method achieves the highest OverallScore of 0.3956, marking a significant improvement over methods that deploy either fine-tuning or RAG separately. Furthermore, the incorporation of RAG consistently enhances performance for both vanilla and fine-tuned settings across most model scales, validating the effectiveness of retrieval-augmented strategies in improving model performance. + +Notably, for smaller models such as Qwen-VL-2-2B and Qwen-VL-2.5-3B, employing only fine-tuning leads to performance degradation, suggesting that their limited parameter sizes hinder effective learning of domain-specific knowledge through fine-tuning alone. Additionally, for Qwen-VL-2.5-3B model, using RAG without fine-tuning results in a performance drop, likely due to the unique pre-training characteristics of this model. Overall, while fine-tuning or RAG independently can enhance performance in larger-scale models, the best results are consistently achieved by + +combining these two strategies, underscoring the importance of an integrated approach to maximize VLM effectiveness. 
From a practical perspective, the combination of fine-tuning and RAG proves particularly suitable for enhancing decision-making capabilities in VLMs. Deploying this optimal configuration can substantially improve VLM performance, with potential applications extending to semantic comprehension, trajectory planning, and other complex autonomous driving tasks. + +# 5. Conclusion + +In this work, we propose a RAD framework, a novel retrieval-augmented architecture designed to enhance the meta-action decision-making capabilities of VLMs for autonomous driving. Through the integration of fine-tuning VLMs for spatial perception enhancement and BEV image comprehension, RAD effectively enhances VLMs' capability of meta-action decision-making, ensuring higher accuracy, as demonstrated by notable performance gains across key metrics in extensive experimental evaluations. + +Moving forward, we aim to extend RAD in three key directions. First, we plan to incorporate more diverse and fine-grained datasets beyond the NuScenes dataset, encompassing more challenging corner cases and real-world scenarios, to further enhance model robustness. Second, we seek to generalize the RAD framework to additional driving tasks, especially trajectory planning and motion control. Third, integrating chain-of-thought and reinforcement learning into the framework will be crucial for improving decision-making depth and adaptability. While fine-tuning and RAG will remain essential for enhancing VLM generalization, these advancements will strengthen the robustness and reliability of autonomous driving systems by leveraging RAG methods to tackle complex real-world tasks. 
+ +# References + +[1] Jinze Bai, Shuai Bai, Yunfei Chu, Zeyu Cui, Kai Dang, Xiaodong Deng, Yang Fan, Wenbin Ge, Yu Han, Fei Huang, Binyuan Hui, Luo Ji, Mei Li, Junyang Lin, Runji Lin, Dayiheng Liu, Gao Liu, Chengqiang Lu, Keming Lu, Jianxin Ma, Rui Men, Xingzhang Ren, Xuancheng Ren, Chuanqi Tan, Sinan Tan, Jianhong Tu, Peng Wang, Shijie Wang, Wei Wang, Shengguang Wu, Benfeng Xu, Jin Xu, An Yang, Hao Yang, Jian Yang, Shusheng Yang, Yang Yao, Bowen Yu, Hongyi Yuan, Zheng Yuan, Jianwei Zhang, Xingxuan Zhang, Yichang Zhang, Zhenru Zhang, Chang Zhou, Jingren Zhou, Xiaohuan Zhou, and Tianhang Zhu. Qwen technical report. arXiv preprint arXiv:2309.16609, 2023. 4 +[2] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, Humen Zhong, Yuanzhi Zhu, Mingkun Yang, Zhaohai Li, Jianqiang Wan, Pengfei Wang, Wei Ding, Zheren Fu, Yiheng Xu, Jiabo Ye, Xi Zhang, Tianbao Xie, Zesen Cheng, Hang Zhang, Zhibo Yang, Haiyang Xu, and Junyang Lin. Qwen2.5-v1 technical report. arXiv preprint arXiv:2502.13923, 2025. 4, 8 +[3] Holger Caesar, Varun Bankiti, Alex H Lang, Sourabh Vora, Venice Erin Liong, Qiang Xu, Anush Krishnan, Yu Pan, Giancarlo Baldan, and Oscar Beijbom. Nuscenes: A multimodal dataset for autonomous driving. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11621-11631, 2020. 2 +[4] Tianhui Cai, Yifan Liu, Zewei Zhou, Haoxuan Ma, Seth Z Zhao, Zhiwen Wu, and Jiaqi Ma. Driving with regulation: Interpretable decision-making for autonomous vehicles with retrieval-augmented reasoning via llm. arXiv preprint arXiv:2410.04759, 2024. 2, 3 +[5] Boyuan Chen, Zhuo Xu, Sean Kirmani, Brain Ichter, Dorsa Sadigh, Leonidas Guibas, and Fei Xia. Spatialvlm: Endowing vision-language models with spatial reasoning capabilities. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14455-14465, 2024. 
4 +[6] Long Chen, Oleg Sinavski, Jan Hunermann, Alice Karnsund, Andrew James Willmott, Danny Birch, Daniel Maund, and Jamie Shotton. Driving with llms: Fusing object-level vector modality for explainable autonomous driving. In Proceedings of the IEEE International Conference on Robotics and Automation, pages 14093-14100, 2024. 3 +[7] Li Chen, Penghao Wu, Kashyap Chitta, Bernhard Jaeger, Andreas Geiger, and Hongyang Li. End-to-end autonomous driving: Challenges and frontiers. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2024. 1 +[8] Pranav Singh Chib and Pravendra Singh. Recent advancements in end-to-end autonomous driving using deep learning: A survey. IEEE Transactions on Intelligent Vehicles, 9 (1):103-118, 2023. 1 +[9] Yunfei Chu, Jin Xu, Qian Yang, Haojie Wei, Xipin Wei, Zhifang Guo, Yichong Leng, Yuanjun Lv, Jinzheng He, Junyang Lin, Chang Zhou, and Jingren Zhou. Qwen2-audio technical report. arXiv preprint arXiv:2407.10759, 2024. 4, 8 +[10] Can Cui, Yunsheng Ma, Xu Cao, Wenqian Ye, and Ziran + +Wang. Receive, reason, and react: Drive as you say, with large language models in autonomous vehicles. IEEE Intelligent Transportation Systems Magazine, 16(4):81-94, 2024. 3 +[11] Can Cui, Yunsheng Ma, Zichong Yang, Yupeng Zhou, Peiran Liu, Juanwu Lu, Lingxi Li, Yaobin Chen, Jitesh H. Panchal, Amr Abdelraouf, Rohit Gupta, Kyungtae Han, and Ziran Wang. Large language models for autonomous driving (llm4ad): Concept, benchmark, experiments, and challenges. arXiv preprint arXiv:2410.15281, 2024. 1 +[12] Can Cui, Zichong Yang, Yupeng Zhou, Juntong Peng, Sung-Yeon Park, Cong Zhang, Yunsheng Ma, Xu Cao, Wenqian Ye, Yiheng Feng, Jitesh H. Panchal, Lingxi Li, Yaobin Chen, and Ziran Wang. On-board vision-language models for personalized autonomous vehicle motion control: System design and real-world validation. arXiv preprint arXiv:2411.11913, 2024. 3 +[13] Daocheng Fu, Xin Li, Licheng Wen, Min Dou, Pinlong Cai, Botian Shi, and Yu Qiao. 
Drive like a human: Rethinking autonomous driving with large language models. In Proceedings of the Winter Conference on Applications of Computer Vision Workshops, pages 910-919. IEEE, 2024. 1, 3 +[14] Yunfan Gao, Yun Xiong, Xinyu Gao, Kangxiang Jia, Jinliu Pan, Yuxi Bi, Yi Dai, Jiawei Sun, and Haofen Wang. Retrieval-augmented generation for large language models: A survey. arXiv preprint arXiv:2312.10997, 2023. 2 +[15] Sorin Grigorescu, Bogdan Trasnea, Tiberiu Cocias, and Gigel Macesanu. A survey of deep learning techniques for autonomous driving. Journal of Field Robotics, 37(3):362-386, 2020. 1 +[16] Xu Han, Zonglin Meng, Xin Xia, Xishun Liao, Brian Yueshuai He, Zhaoliang Zheng, Yutong Wang, Hao Xiang, Zewei Zhou, Letian Gao, Lili Fan, Yuke Li, and Jiaqi Ma. Foundation intelligence for smart infrastructure services in transportation 5.0. IEEE Transactions on Intelligent Vehicles, 9(1):39-47, 2024. 1 +[17] Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. Lora: Low-rank adaptation of large language models. In Proceedings of the International Conference on Learning Representations, 2022. 4 +[18] Yihan Hu, Jiazhi Yang, Li Chen, Keyu Li, Chonghao Sima, Xizhou Zhu, Siqi Chai, Senyao Du, Tianwei Lin, Wenhai Wang, et al. Planning-oriented autonomous driving. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 17853-17862, 2023. 1 +[19] Yidong Huang, Jacob Sansom, Ziqiao Ma, Felix Gervits, and Joyce Chai. Drivlme: Enhancing llm-based autonomous driving agents with embodied and social experiences. In Proceedings of the IEEE/RSJ International Conference on Intelligent Robots and Systems, pages 3153-3160. IEEE, 2024. +[20] Mohamed Manzour Hussien, Angie Nataly Melo, Augusto Luis Ballardini, Carlota Salinas Maldonado, Ruben Izquierdo, and Miguel Angel Sotelo. 
Rag-based explainable prediction of road users behaviors for automated driving using knowledge graphs and large language models. Expert Systems with Applications, 265:125914, 2025. 2, 3 + +[21] Raisa Islam and Owana Marzia Moushi. Gpt-4o: The cutting-edge advancement in multimodal llm. Authorea Preprints, 2024. 7 +[22] Bo Jiang, Shaoyu Chen, Qing Xu, Bencheng Liao, Jiajie Chen, Helong Zhou, Qian Zhang, Wenyu Liu, Chang Huang, and Xinggang Wang. Vad: Vectorized scene representation for efficient autonomous driving. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 8340-8350, 2023. 1, 7 +[23] Bo Jiang, Shaoyu Chen, Benchcheng Liao, Xingyu Zhang, Wei Yin, Qian Zhang, Chang Huang, Wenyu Liu, and Xinggang Wang. Senna: Bridging large vision-language models and end-to-end autonomous driving. arXiv preprint arXiv:2410.22313, 2024. 3 +[24] Zhengbao Jiang, Frank F Xu, Luyu Gao, Zhiqing Sun, Qian Liu, Jane Dwivedi-Yu, Yiming Yang, Jamie Callan, and Graham Neubig. Active retrieval augmented generation. In Proceedings of the Conference on Empirical Methods in Natural Language Processing, pages 7969-7992, 2023. 3 +[25] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. In Proceedings of the International Conference on Machine Learning, pages 19730-19742, 2023. 2 +[26] Yixuan Li, Xuesong Wang, Tianyi Wang, and Qian Liu. Characteristics analysis of autonomous vehicle pre-crash scenarios. arXiv preprint arXiv:2502.20789, 2025. 1 +[27] Zhiqi Li, Wenhai Wang, Hongyang Li, Enze Xie, Chonghao Sima, Tong Lu, Qiao Yu, and Jifeng Dai. Bevformer: Learning bird's-eye-view representation from lidar-camera via spatiotemporal transformers. IEEE Transactions on Pattern Analysis and Machine Intelligence, 47(3):2020-2036, 2025. 
5 +[28] Yunsheng Ma, Wenqian Ye, Can Cui, Haiming Zhang, Shuo Xing, Fucai Ke, Jinhong Wang, Chenglin Miao, Jintai Chen, Hamid Rezatofighi, Zhen Li, Guangtao Zheng, Chao Zheng, Tianjiao He, Manmohan Chandraker, Burhaneddin Yaman, Xin Ye, Hang Zhao, and Xu Cao. Position: Prospective of autonomous driving - multimodal llms world models embodied intelligence ai alignment and mamba. In Proceedings of the Winter Conference on Applications of Computer Vision Workshops, pages 1010-1026, 2025. 1 +[29] Jiageng Mao, Yuxi Qian, Junjie Ye, Hang Zhao, and Yue Wang. Gpt-driver: Learning to drive with gpt. arXiv preprint arXiv:2310.01415, 2023. 1 +[30] Xiangru Mu, Tong Qin, Songan Zhang, Chunjing Xu, and Ming Yang. Pix2planning: End-to-end planning by vision-language model for autonomous driving on carla simulator. In Proceedings of the IEEE Intelligent Vehicles Symposium, pages 2383-2390. IEEE, 2024. 3 +[31] Chenbin Pan, Burhaneddin Yaman, Tommaso Nesti, Abhirup Mallik, Alessandro G Allievi, Senem Velipasalar, and Liu Ren. Vlp: Vision language planning for autonomous driving. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14760-14769, 2024. 1, 3 +[32] Ori Ram, Yoav Levine, Itay Dalmedigos, Dor Muhlgay, Amnon Shashua, Kevin Leyton-Brown, and Yoav Shoham. In + +context retrieval-augmented language models. Transactions of the Association for Computational Linguistics, 11:1316-1331, 2023. 3 +[33] Katrin Renz, Long Chen, Ana-Maria Marcu, Jan Hünermann, Benoit Hanotte, Alice Karsund, Jamie Shotton, Elahe Arani, and Oleg Sinavski. Carllava: Vision language models for camera-only closed-loop driving. arXiv preprint arXiv:2406.10165, 2024. 1 +[34] Hao Shao, Yuxuan Hu, Letian Wang, Guanglu Song, Steven L Waslander, Yu Liu, and Hongsheng Li. Lmdrive: Closed-loop end-to-end driving with large language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 15120-15130, 2024. 
1 +[35] Zhihong Shao, Yeyun Gong, Yelong Shen, Minlie Huang, Nan Duan, and Weizhu Chen. Enhancing retrieval-augmented large language models with iterative retrieval-generation synergy. arXiv preprint arXiv:2305.15294, 2023. 3 +[36] Chonghao Sima, Katrin Renz, Kashyap Chitta, Li Chen, Hanxue Zhang, Chengen Xie, Jens Beibwenger, Ping Luo, Andreas Geiger, and Hongyang Li. Drivelm: Driving with graph visual question answering. In Proceedings of the European Conference on Computer Vision, pages 256-274. Springer, 2024. 1, 7 +[37] Xiaoyu Tian, Junru Gu, Bailin Li, Yicheng Liu, Yang Wang, Zhiyong Zhao, Kun Zhan, Peng Jia, Xianpeng Lang, and Hang Zhao. Drivevm: The convergence of autonomous driving and large vision-language models. arXiv preprint arXiv:2402.12289, 2024. 3, 7 +[38] Shiyi Wang, Yuxuan Zhu, Zhiheng Li, Yutong Wang, Li Li, and Zhengbing He. Chatgpt as your vehicle co-pilot: An initial attempt. IEEE Transactions on Intelligent Vehicles, 8 (12):4706-4721, 2023. 1 +[39] Wenhai Wang, Jiangwei Xie, ChuanYang Hu, Haoming Zou, Jianan Fan, Wenwen Tong, Yang Wen, Silei Wu, Hanming Deng, Zhiqi Li, et al. Drivemlm: Aligning multi-modal large language models with behavioral planning states for autonomous driving. arXiv preprint arXiv:2312.09245, 2023. 3 +[40] Weihan Wang, Qingsong Lv, Wenmeng Yu, Wenyi Hong, Ji Qi, Yan Wang, Junhui Ji, Zhuoyi Yang, Lei Zhao, Song XiXuan, et al. Cogvlm: Visual expert for pretrained language models. Advances in Neural Information Processing Systems, 37:121475-121499, 2024. 7 +[41] Yangyang Wang and Tianyi Wang. Research on dual-clutch intelligent vehicle infrastructure cooperative control based on system delay prediction of two-lane highway on-ramp merging area. *Automotive Innovation*, 7:588–601, 2024. 1 +[42] Yujin Wang, Quanfeng Liu, Jiaqi Fan, Jinlong Hong, Hongqing Chu, Mengjian Tian, Bingzhao Gao, and Hong Chen. Rac3: Retrieval-augmented corner case comprehension for autonomous driving with vision-language models. 
arXiv preprint arXiv:2412.11050, 2024. 2 +[43] Yi Xu, Yuxin Hu, Zaiwei Zhang, Gregory P Meyer, Siva Karthik Mustikovela, Siddhartha Srinivasa, Eric M Wolff, and Xin Huang. Vlm-ad: End-to-end autonomous + +driving through vision-language model supervision. arXiv preprint arXiv:2412.14446, 2024. 4 +[44] Shota Yamazaki, Chenyu Zhang, Takuya Nanri, Akio Shigekane, Siyuan Wang, Jo Nishiyama, Tao Chu, and Kohei Yokosawa. Explanation for trajectory planning using multimodal large language model for autonomous driving. arXiv preprint arXiv:2411.09971, 2024. 3 +[45] Kairui Yang, Zihao Guo, Gengjie Lin, Haotian Dong, Zhao Huang, Yipeng Wu, Die Zuo, Jibin Peng, Ziyuan Zhong, Xin Wang, Qing Guo, Xiaosong Jia, Junchi Yan, and Di Lin. Trajectory-llm: A language-based data generator for trajectory prediction in autonomous driving. In Proceedings of the International Conference on Learning Representations, 2025. 3 +[46] Jianhao Yuan, Shuyang Sun, Daniel Omeiza, Bo Zhao, Paul Newman, Lars Kunze, and Matthew Gadd. Rag-driver: Generalisable driving explanations with retrieval-augmented in-context learning in multi-modal large language model. arXiv preprint arXiv:2402.10828, 2024. 2, 3 +[47] Ekim Yurtsever, Jacob Lambert, Alexander Carballo, and Kazuya Takeda. A survey of autonomous driving: Common practices and emerging technologies. IEEE Access, 8: 58443-58469, 2020. 1 +[48] Yan Zeng, Hanbo Zhang, Jiani Zheng, Jiangnan Xia, Guoqiang Wei, Yang Wei, Yuchen Zhang, Tao Kong, and Ruihua Song. What matters in training a gpt4-style language model with multimodal inputs? In Proceedings of the Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 7930-7957, 2024. 7 +[49] Miao Zhang, Zhenlong Fang, Tianyi Wang, Qian Zhang, Shuai Lu, Junfeng Jiao, and Tianyu Shi. A cascading cooperative multi-agent framework for on-ramp merging control integrating large language models. arXiv preprint arXiv:2503.08199, 2025. 
1 +[50] Juncheng Zheng, Meiyu Liang, Yang Yu, Yawen Li, and Zhe Xue. Knowledge graph enhanced multimodal transformer for image-text retrieval. In Proceedings of the IEEE International Conference on Data Engineering, pages 70-82. IEEE, 2024. 3 +[51] Yaowei Zheng, Richong Zhang, Junhao Zhang, Yanhan Ye, Zheyan Luo, Zhangchi Feng, and Yongqiang Ma. Llamafactory: Unified efficient fine-tuning of $100+$ language models. arXiv preprint arXiv:2403.13372, 2024. 4 \ No newline at end of file diff --git a/data/2025/2503_13xxx/2503.13861/images/09ed85780c8f75fb64c113fce7b65222368cbd404bd18f08b41e2bddf09c45b4.jpg b/data/2025/2503_13xxx/2503.13861/images/09ed85780c8f75fb64c113fce7b65222368cbd404bd18f08b41e2bddf09c45b4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3e0d7d211434fcd3df3c0316e5da46a5d0236032 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13861/images/09ed85780c8f75fb64c113fce7b65222368cbd404bd18f08b41e2bddf09c45b4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5fe6360ab3672c79ecb0b30bc02ca0c3e26b144d387803123aee781c3186c529 +size 5119 diff --git a/data/2025/2503_13xxx/2503.13861/images/29645f7b3daab9c7ce3cbb55863b61cbca30cc4276ffb7a25bf80e85d19ae6e5.jpg b/data/2025/2503_13xxx/2503.13861/images/29645f7b3daab9c7ce3cbb55863b61cbca30cc4276ffb7a25bf80e85d19ae6e5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e821f7c86f5b3401ba59b77bed7a47f17fbe6deb --- /dev/null +++ b/data/2025/2503_13xxx/2503.13861/images/29645f7b3daab9c7ce3cbb55863b61cbca30cc4276ffb7a25bf80e85d19ae6e5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:40a292e92058628a16b8f8756562a2b2ddd25625f663eb074639d549de83c680 +size 11385 diff --git a/data/2025/2503_13xxx/2503.13861/images/2d8d2883b76d7cd196db676b2df2edcba3e32a7592692122cf8ba7c15aeac6ab.jpg b/data/2025/2503_13xxx/2503.13861/images/2d8d2883b76d7cd196db676b2df2edcba3e32a7592692122cf8ba7c15aeac6ab.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..68bf5bafcb828c846135edae985f493347d07e6f --- /dev/null +++ b/data/2025/2503_13xxx/2503.13861/images/2d8d2883b76d7cd196db676b2df2edcba3e32a7592692122cf8ba7c15aeac6ab.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5d2101ea1012ef7bf9c63bb0376f9bc0ccd67356a17b41308ce83ced2a6e90ca +size 6260 diff --git a/data/2025/2503_13xxx/2503.13861/images/2fcfd978558698a24c8389607cd68771a9521155361203e02c91eaff8017791f.jpg b/data/2025/2503_13xxx/2503.13861/images/2fcfd978558698a24c8389607cd68771a9521155361203e02c91eaff8017791f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8a08cff18d1d5580f8c0d4ec057e9205504992e6 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13861/images/2fcfd978558698a24c8389607cd68771a9521155361203e02c91eaff8017791f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5077d55521a5e79d55b0c7be715cceb083d782154c7c101e6cf463528c770c82 +size 101186 diff --git a/data/2025/2503_13xxx/2503.13861/images/3b2c0252547851bd74c7c00b8d60c2fc30f5ecd67aa4851a41887284e269116f.jpg b/data/2025/2503_13xxx/2503.13861/images/3b2c0252547851bd74c7c00b8d60c2fc30f5ecd67aa4851a41887284e269116f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fdbbbd92aa429908eb279d955b29afea723f5b06 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13861/images/3b2c0252547851bd74c7c00b8d60c2fc30f5ecd67aa4851a41887284e269116f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:44c0b1183921935a826df1a888c7111acabc67260b60c25edcbbe48cea7fa1ef +size 15529 diff --git a/data/2025/2503_13xxx/2503.13861/images/3f61232938f867f2cdbad0347d3059b08c7fd819b074780e2d9aa04a664b586f.jpg b/data/2025/2503_13xxx/2503.13861/images/3f61232938f867f2cdbad0347d3059b08c7fd819b074780e2d9aa04a664b586f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e6f22e664a32c6cc6350b74b73a67fa514cd0f62 --- /dev/null +++ 
b/data/2025/2503_13xxx/2503.13861/images/3f61232938f867f2cdbad0347d3059b08c7fd819b074780e2d9aa04a664b586f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:90336aa3ca7e4eb08e956e1bb0c344fa62b490ddaa517ac4bc67c7181952eed8 +size 6012 diff --git a/data/2025/2503_13xxx/2503.13861/images/4dac6ad10347d403506bfc914ce4ef973aeec42a36f10d613b8d99977c95bf36.jpg b/data/2025/2503_13xxx/2503.13861/images/4dac6ad10347d403506bfc914ce4ef973aeec42a36f10d613b8d99977c95bf36.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0b9a0ee2e13e6a4083679f6a169b1e7acf2395a6 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13861/images/4dac6ad10347d403506bfc914ce4ef973aeec42a36f10d613b8d99977c95bf36.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5a1c9bb7054aa359bd50f13160446255b99ae77aed1febbb7edcb4a498e734ef +size 4834 diff --git a/data/2025/2503_13xxx/2503.13861/images/655a4581731b9427a0d6c399e2749e4943164e08be7faa3883bbde4bfe157228.jpg b/data/2025/2503_13xxx/2503.13861/images/655a4581731b9427a0d6c399e2749e4943164e08be7faa3883bbde4bfe157228.jpg new file mode 100644 index 0000000000000000000000000000000000000000..89c047fa17e9aaabc797b92baddbc50da52107d6 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13861/images/655a4581731b9427a0d6c399e2749e4943164e08be7faa3883bbde4bfe157228.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:da4d9bf125001b299708bfc0870e73e42cdba07d1f000df9a9ca82c2d20901e5 +size 7029 diff --git a/data/2025/2503_13xxx/2503.13861/images/7fa8a5818551c36b52d6e493c15ef043dab065c73dc51516b5d7e6dfe7997138.jpg b/data/2025/2503_13xxx/2503.13861/images/7fa8a5818551c36b52d6e493c15ef043dab065c73dc51516b5d7e6dfe7997138.jpg new file mode 100644 index 0000000000000000000000000000000000000000..60b3f9087ca1f0ad9035c3cecb631480f96c9b7f --- /dev/null +++ b/data/2025/2503_13xxx/2503.13861/images/7fa8a5818551c36b52d6e493c15ef043dab065c73dc51516b5d7e6dfe7997138.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:d26052fa2acd0f96dd0c51e31ef644b23bf7ac0acb2a06708bc4d1f071e2b3eb +size 83405 diff --git a/data/2025/2503_13xxx/2503.13861/images/81b2f018b737780464db4e962c3972beec1e545f54eb1e5a1dbc58090d016e89.jpg b/data/2025/2503_13xxx/2503.13861/images/81b2f018b737780464db4e962c3972beec1e545f54eb1e5a1dbc58090d016e89.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9e412e9a60938b92b240af19059ab8e4a1c3b0d4 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13861/images/81b2f018b737780464db4e962c3972beec1e545f54eb1e5a1dbc58090d016e89.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a65a8a646bdf47071231bd21ca68b53ae5afc37a3a8fe19d49678afb0160f4dc +size 7753 diff --git a/data/2025/2503_13xxx/2503.13861/images/87a3dad6ccd70533953de17fe61bb786242837c776ed4147096cbd1507c1b284.jpg b/data/2025/2503_13xxx/2503.13861/images/87a3dad6ccd70533953de17fe61bb786242837c776ed4147096cbd1507c1b284.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0ce5a1997b5ed57e6659e0bb9c8fb6127e3c9cac --- /dev/null +++ b/data/2025/2503_13xxx/2503.13861/images/87a3dad6ccd70533953de17fe61bb786242837c776ed4147096cbd1507c1b284.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a26b5afdc26d8c38d647f0492c76c9780290475c099438eb76eab4f154f16ae9 +size 30578 diff --git a/data/2025/2503_13xxx/2503.13861/images/8cb4d2ecd54a11f70060695e1d49571e4243a7a6e24b887c775f9f87609978cc.jpg b/data/2025/2503_13xxx/2503.13861/images/8cb4d2ecd54a11f70060695e1d49571e4243a7a6e24b887c775f9f87609978cc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..48d055ad1269555dc49f8ba2bf933acbea87f77f --- /dev/null +++ b/data/2025/2503_13xxx/2503.13861/images/8cb4d2ecd54a11f70060695e1d49571e4243a7a6e24b887c775f9f87609978cc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d63d600f2425135b667d1e8561ec84e3f08fcacecf70f36e717abec519bbdccd +size 4871 diff --git 
a/data/2025/2503_13xxx/2503.13861/images/a04a2037e80fd1c1c41cccc3fec74d73065625beb5a8c4249407686e02debda5.jpg b/data/2025/2503_13xxx/2503.13861/images/a04a2037e80fd1c1c41cccc3fec74d73065625beb5a8c4249407686e02debda5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bf0c1106b01d340cd6e400623a5b2f1b26e3224c --- /dev/null +++ b/data/2025/2503_13xxx/2503.13861/images/a04a2037e80fd1c1c41cccc3fec74d73065625beb5a8c4249407686e02debda5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3e178f282182bfbbab567b323940fa6fa9ea5f3edcc304508aae34ae8f9acf1f +size 13224 diff --git a/data/2025/2503_13xxx/2503.13861/images/c1ec6df0c98c8f6773b2670f9041739281f3f3f95f4fe3aa726afab718393c1b.jpg b/data/2025/2503_13xxx/2503.13861/images/c1ec6df0c98c8f6773b2670f9041739281f3f3f95f4fe3aa726afab718393c1b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0ab6017a58f7c3a55eaef576a875c0912cf2bdd1 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13861/images/c1ec6df0c98c8f6773b2670f9041739281f3f3f95f4fe3aa726afab718393c1b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ba810d5387d0c06e9d0d82bc69a6d075d256b325cbd5f176059689483122e4b2 +size 13965 diff --git a/data/2025/2503_13xxx/2503.13861/images/da248ba9dcd1bc9f1704559853a126ef1485a2dc0f5950b16e31408a69604135.jpg b/data/2025/2503_13xxx/2503.13861/images/da248ba9dcd1bc9f1704559853a126ef1485a2dc0f5950b16e31408a69604135.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2c8cdea6f7d97200082bfcbb0e6762b97008333f --- /dev/null +++ b/data/2025/2503_13xxx/2503.13861/images/da248ba9dcd1bc9f1704559853a126ef1485a2dc0f5950b16e31408a69604135.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:603bb99ebe4cd8cdd2248eccf1be84f84551a0805252528ffdf30b1152983f4d +size 4669 diff --git a/data/2025/2503_13xxx/2503.13861/images/e3f388afce5664fe8f861a256039ed2136bb7f8572ca00a2dcbebc387c54e999.jpg 
b/data/2025/2503_13xxx/2503.13861/images/e3f388afce5664fe8f861a256039ed2136bb7f8572ca00a2dcbebc387c54e999.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bc1d246a6f231cb5f1f224d70231c93d477d316e --- /dev/null +++ b/data/2025/2503_13xxx/2503.13861/images/e3f388afce5664fe8f861a256039ed2136bb7f8572ca00a2dcbebc387c54e999.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d496257a3d7d8d3fdffb4dab9415fbb9375a57c52c81bf49c2108ae4ff9cc196 +size 116901 diff --git a/data/2025/2503_13xxx/2503.13861/images/eb21cd003827e303ea11e9e88d96e90cd5e7ce759836936848ef07153bba86a5.jpg b/data/2025/2503_13xxx/2503.13861/images/eb21cd003827e303ea11e9e88d96e90cd5e7ce759836936848ef07153bba86a5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..18cb1752762610149c014d5e9cf907dd06a87ba5 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13861/images/eb21cd003827e303ea11e9e88d96e90cd5e7ce759836936848ef07153bba86a5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b1d1ae32cd8d31c8a1eab96f2982def84d5a6a95ccabeed1120991d131432673 +size 7965 diff --git a/data/2025/2503_13xxx/2503.13861/images/f7ade894530b154e1b79cdbf72256b8bcb4124794ba45c0c288a410898e29806.jpg b/data/2025/2503_13xxx/2503.13861/images/f7ade894530b154e1b79cdbf72256b8bcb4124794ba45c0c288a410898e29806.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2f20a10070b8a053de6a10fe3a19f55506a30e78 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13861/images/f7ade894530b154e1b79cdbf72256b8bcb4124794ba45c0c288a410898e29806.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e1d80c6cda1498e444e7960d019b03e530ee3983a520d7663840b6a6c17ee2f +size 53598 diff --git a/data/2025/2503_13xxx/2503.13861/layout.json b/data/2025/2503_13xxx/2503.13861/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..600b98d9f58666c9f17c3660cd1ef00e10d2e631 --- /dev/null +++ 
b/data/2025/2503_13xxx/2503.13861/layout.json @@ -0,0 +1,8146 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 96, + 136, + 497, + 169 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 136, + 497, + 169 + ], + "spans": [ + { + "bbox": [ + 96, + 136, + 497, + 169 + ], + "type": "text", + "content": "RAD: Retrieval-Augmented Decision-Making of Meta-Actions with Vision-Language Models in Autonomous Driving" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 120, + 191, + 207, + 206 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 191, + 207, + 206 + ], + "spans": [ + { + "bbox": [ + 120, + 191, + 207, + 206 + ], + "type": "text", + "content": "Yujin Wang" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 133, + 206, + 198, + 219 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 206, + 198, + 219 + ], + "spans": [ + { + "bbox": [ + 133, + 206, + 198, + 219 + ], + "type": "text", + "content": "Junfeng Jiao" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 120, + 219, + 207, + 233 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 219, + 207, + 233 + ], + "spans": [ + { + "bbox": [ + 120, + 219, + 207, + 233 + ], + "type": "text", + "content": "Tongji University" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 207, + 192, + 278, + 206 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 207, + 192, + 278, + 206 + ], + "spans": [ + { + "bbox": [ + 207, + 192, + 278, + 206 + ], + "type": "text", + "content": "Quanfeng Liu" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 221, + 206, + 302, + 219 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 221, + 206, + 302, + 219 + ], + "spans": [ + { + "bbox": [ + 221, + 206, + 302, + 219 + ], + "type": "text", + "content": "Hongqing Chu1,*" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 230, + 220, + 307, + 233 + ], + "type": "text", + "angle": 0, + "lines": [ + { 
+ "bbox": [ + 230, + 220, + 307, + 233 + ], + "spans": [ + { + "bbox": [ + 230, + 220, + 307, + 233 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 230, + 220, + 307, + 233 + ], + "type": "text", + "content": "Yale University" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 303, + 192, + 380, + 206 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 192, + 380, + 206 + ], + "spans": [ + { + "bbox": [ + 303, + 192, + 380, + 206 + ], + "type": "text", + "content": "Zhengxin Jiang" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 327, + 206, + 399, + 219 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 327, + 206, + 399, + 219 + ], + "spans": [ + { + "bbox": [ + 327, + 206, + 399, + 219 + ], + "type": "text", + "content": "Bingzhao Gao" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 422, + 192, + 471, + 206 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 422, + 192, + 471, + 206 + ], + "spans": [ + { + "bbox": [ + 422, + 192, + 471, + 206 + ], + "type": "text", + "content": "nyi Wang2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 425, + 206, + 483, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 425, + 206, + 483, + 220 + ], + "spans": [ + { + "bbox": [ + 425, + 206, + 483, + 220 + ], + "type": "text", + "content": "Hong Chen" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 332, + 220, + 471, + 233 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 332, + 220, + 471, + 233 + ], + "spans": [ + { + "bbox": [ + 332, + 220, + 471, + 233 + ], + "type": "text", + "content": "3University of Texas at Austin" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 229, + 236, + 362, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 236, + 362, + 247 + ], + "spans": [ + { + "bbox": [ + 229, + 236, + 362, + 247 + ], + "type": "text", + "content": "chuhongqing@tongji.edu.cn" + } + ] + } + ], 
+ "index": 12 + }, + { + "bbox": [ + 148, + 273, + 193, + 285 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 148, + 273, + 193, + 285 + ], + "spans": [ + { + "bbox": [ + 148, + 273, + 193, + 285 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 53, + 299, + 287, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 299, + 287, + 578 + ], + "spans": [ + { + "bbox": [ + 53, + 299, + 287, + 578 + ], + "type": "text", + "content": "Accurately understanding and deciding high-level meta- actions is essential for ensuring reliable and safe autonomous driving systems. While vision-language models (VLMs) have shown significant potential in various autonomous driving tasks, they often suffer from limitations such as inadequate spatial perception and hallucination, reducing their effectiveness in complex autonomous driving scenarios. To address these challenges, we propose a retrieval-augmented decision-making (RAD) framework, a novel architecture designed to enhance VLMs' capabilities to reliably generate meta-actions in autonomous driving scenes. RAD leverages a retrieval-augmented generation (RAG) pipeline to dynamically improve decision accuracy through a three-stage process consisting of the embedding flow, retrieving flow, and generating flow. Additionally, we fine-tune VLMs on a specifically curated dataset derived from the NuScenes dataset to enhance their spatial perception and bird's-eye view image comprehension capabilities. Extensive experimental evaluations on the curated NuScenes-based dataset demonstrate that RAD outperforms baseline methods across key evaluation metrics, including match accuracy, and F1 score, and self-defined overall score, highlighting its effectiveness in improving meta-action decision-making for autonomous driving tasks." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 54, + 602, + 132, + 614 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 602, + 132, + 614 + ], + "spans": [ + { + "bbox": [ + 54, + 602, + 132, + 614 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 53, + 623, + 287, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 623, + 287, + 704 + ], + "spans": [ + { + "bbox": [ + 53, + 623, + 287, + 704 + ], + "type": "text", + "content": "In recent years, the race towards fully autonomous vehicles has spurred extensive research into robust decision-making approaches, a fundamental task in autonomous driving systems [26, 41, 49]. Ensuring safe and efficient motion planning requires continuous interpretation of dynamic environments, real-time reasoning under uncertainty, and efficient integration of vast amounts of multimodal data [28]." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 53, + 705, + 287, + 729 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 705, + 287, + 729 + ], + "spans": [ + { + "bbox": [ + 53, + 705, + 287, + 729 + ], + "type": "text", + "content": "Traditional autonomous driving systems adopt a modular development strategy, in which perception, prediction," + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 275, + 538, + 367 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 275, + 538, + 367 + ], + "spans": [ + { + "bbox": [ + 304, + 275, + 538, + 367 + ], + "type": "text", + "content": "planning, and control are developed and optimized independently before being integrated into the vehicle system [15, 47]. However, as the information flow propagates across these modules, errors and delays can accumulate, potentially leading to suboptimal or even unreasonable driving decisions. 
To further mitigate these errors and improve computational efficiency, end-to-end autonomous driving has emerged as a prominent research direction [7, 8]." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 368, + 539, + 576 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 368, + 539, + 576 + ], + "spans": [ + { + "bbox": [ + 304, + 368, + 539, + 576 + ], + "type": "text", + "content": "End-to-end refers to a model that directly receives input from sensor data (e.g., cameras, LiDAR) and directly outputs vehicle planning decisions. In recent studies [11, 18, 22], end-to-end autonomous driving algorithms have demonstrated their superiority in both simulation environments and real-world road tests. Moreover, the emergence of foundation models provides a promising solution to enhance motion planning performance, improve generalization across diverse scenarios, and increase interpretability in end-to-end autonomous driving [13, 16, 29, 38]. Trained on huge amounts of human knowledge, these models exhibit advanced comprehension and reasoning capabilities, highlighting the immense potential of artificial intelligence in complex decision-making tasks. Integrating such foundation models into autonomous driving systems could facilitate the development of human-like driving behaviors, advancing the field toward safer and more adaptable autonomous vehicles." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 577, + 539, + 705 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 577, + 539, + 705 + ], + "spans": [ + { + "bbox": [ + 304, + 577, + 539, + 705 + ], + "type": "text", + "content": "Autonomous driving tasks require models with robust visual perception capabilities, making vision-language models (VLMs) particularly well-suited for this domain. VLMs trained on large-scale data often demonstrate strong reasoning capabilities, enabling them to infer the evolution of complex driving scenarios. 
Current research [19, 31, 33, 34, 36] has focused on fine-tuning pre-trained VLMs using visual question-answer (VQA) pairs composed of scene images and corresponding driving actions. This approach enables VLMs to generate feasible trajectories, enhancing their applicability in real-world autonomous driving tasks." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 305, + 705, + 539, + 729 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 705, + 539, + 729 + ], + "spans": [ + { + "bbox": [ + 305, + 705, + 539, + 729 + ], + "type": "text", + "content": "However, fine-tuning or even full-scale fine-tuning of VLMs using large-scale datasets requires substantial com" + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 747, + 299, + 756 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 747, + 299, + 756 + ], + "spans": [ + { + "bbox": [ + 294, + 747, + 299, + 756 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 82, + 110, + 512, + 322 + ], + "blocks": [ + { + "bbox": [ + 82, + 110, + 512, + 322 + ], + "lines": [ + { + "bbox": [ + 82, + 110, + 512, + 322 + ], + "spans": [ + { + "bbox": [ + 82, + 110, + 512, + 322 + ], + "type": "image", + "image_path": "2fcfd978558698a24c8389607cd68771a9521155361203e02c91eaff8017791f.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 53, + 333, + 540, + 388 + ], + "lines": [ + { + "bbox": [ + 53, + 333, + 540, + 388 + ], + "spans": [ + { + "bbox": [ + 53, + 333, + 540, + 388 + ], + "type": "text", + "content": "Figure 1. The overview of our RAD method. The framework consists of four working flows, namely embedding flow, retrieving flow, fine-tuning flow and generating flow. 
The embedding flow encodes front-view images and BEV images into a vector database. Given a query scene, the retrieving flow retrieves the most similar scene from the database. The fine-tuning flow involves fine-tuning VLMs to enhance spatial perception and BEV image comprehension. The generating flow guides VLMs in generating contextually appropriate meta-actions according to the query scene, the retrieved scene, its ground truth meta-action, and proper prompts." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 52, + 407, + 289, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 407, + 289, + 629 + ], + "spans": [ + { + "bbox": [ + 52, + 407, + 289, + 629 + ], + "type": "text", + "content": "putational resources. Additionally, deploying VLMs with an extremely large number of parameters on vehicle-end hardware poses significant constraints. To address these challenges, retrieval-augmented generation (RAG) has emerged as a promising approach to enhance the decision-making capabilities of VLMs by incorporating external knowledge bases [14, 42]. The core idea of RAG is to augment generative models with a retrieval module that dynamically retrieves relevant textual information during the generation process. In vision-language tasks, RAG can effectively mitigate limitations caused by knowledge scarcity. By integrating external knowledge bases, models can not only extract information from images but also retrieve supplementary knowledge, thereby improving the robustness and accuracy of the generated outputs. Although the direct application of RAG to the decision-making process in autonomous driving remains limited, an increasing number of studies have explored its potential in specific tasks such as scene understanding and regulation retrieval [4, 20, 46]." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 632, + 288, + 690 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 632, + 288, + 690 + ], + "spans": [ + { + "bbox": [ + 53, + 632, + 288, + 690 + ], + "type": "text", + "content": "In this work, we propose a retrieval-augmented decision-making (RAD) framework, introducing a novel approach to assist VLMs in generating meta-actions using RAG for the first time, as depicted in Figure 1. The main research contributions of this work are outlined as follows:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 54, + 693, + 288, + 729 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 693, + 288, + 729 + ], + "spans": [ + { + "bbox": [ + 54, + 693, + 288, + 729 + ], + "type": "text", + "content": "- Pre-Training VLMs for Spatial Perception Tasks: We construct obstacle perception tasks based on the NuScenes dataset [3], incorporating VQA pairs designed" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 312, + 407, + 539, + 465 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 407, + 539, + 465 + ], + "spans": [ + { + "bbox": [ + 312, + 407, + 539, + 465 + ], + "type": "text", + "content": "to capture obstacle categories, positions, and other spatial information. This pre-training process enables VLMs to explicitly learn key geometric features such as the locations and sizes of obstacles, leading to improved performance in spatial perception tasks." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 305, + 465, + 540, + 675 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 305, + 465, + 540, + 570 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 465, + 540, + 570 + ], + "spans": [ + { + "bbox": [ + 305, + 465, + 540, + 570 + ], + "type": "text", + "content": "- Establishing an External Knowledge Base with NuScenes Ground Truth Data: We select a subset of scenes containing navigation information, historical trajectory data, and future meta-action ground truth. Furthermore, we generate bird's-eye view (BEV) images corresponding to the scene images. The surround-view images from these scenes are then encoded into vector representations using BLIP-2 [25], alongside the BEV images, to form the knowledge base." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 305, + 570, + 540, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 570, + 540, + 675 + ], + "spans": [ + { + "bbox": [ + 305, + 570, + 540, + 675 + ], + "type": "text", + "content": "- Developing a Retrieval and Generation Pipeline for Meta-Action Decision-Making using Fine-Tuned VLMs and RAG: We employ cosine similarity to retrieve the most similar scene from the external knowledge base including the front-view image of the current scene. The corresponding six surround-view images, speed information, navigation data, and ground truth trajectory are then used as auxiliary inputs, guiding the VLM in generating a trustworthy planning trajectory for the current scene." 
+ } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 305, + 682, + 540, + 729 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 682, + 540, + 729 + ], + "spans": [ + { + "bbox": [ + 305, + 682, + 540, + 729 + ], + "type": "text", + "content": "The remainder of this paper is organized as follows: In Section 2, the detailed literature review is conducted. In Section 3, four working flows of the proposed RAD framework are introduced. In Section 4, comparative experiments" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 747, + 300, + 756 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 747, + 300, + 756 + ], + "spans": [ + { + "bbox": [ + 293, + 747, + 300, + 756 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 54, + 106, + 287, + 129 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 106, + 287, + 129 + ], + "spans": [ + { + "bbox": [ + 54, + 106, + 287, + 129 + ], + "type": "text", + "content": "and ablation studies are designed. Section 5 summarizes the work and discusses future research directions." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 54, + 142, + 143, + 155 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 142, + 143, + 155 + ], + "spans": [ + { + "bbox": [ + 54, + 142, + 143, + 155 + ], + "type": "text", + "content": "2. Related Works" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 54, + 163, + 287, + 188 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 163, + 287, + 188 + ], + "spans": [ + { + "bbox": [ + 54, + 163, + 287, + 188 + ], + "type": "text", + "content": "2.1. 
Multimodal Large Language Models in Autonomous Driving" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 191, + 289, + 634 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 191, + 289, + 634 + ], + "spans": [ + { + "bbox": [ + 55, + 191, + 289, + 634 + ], + "type": "text", + "content": "Utilizing multimodal large language models (MLLMs) in autonomous driving enhances decision-making by leveraging their extensive knowledge and reasoning capabilities through multisource information such as vision, language, and rules, significantly improving scene understanding, strategy generation, and interpretability. DriveMLM [39] employed MLLMs to generate high-level behavioral decisions (e.g., lane-changing, deceleration, acceleration, etc.), which were then integrated with traditional motion planning modules, balancing flexibility and interpretability. \"Drive as you speak\" [10] enriched large language models (LLMs) with comprehensive environmental data from different vehicle modules, leading to safer decisions. \"Driving with LLMs\" [6] introduced a LLM that generated 10,000 driving scenarios for agent training. \"Drive like a human\" [13] demonstrated LLMs' capabilities of understanding and interacting with environments in closed-loop systems, effectively navigating long-tail autonomous driving scenarios. DriveVLM [37] adopted a multistage reasoning chain that combined scene description, dynamic analysis, and hierarchical planning. Additionally, DriveVLM-Dual incorporated traditional 3D perception algorithms to ensure both cognitive depth and real-time control. Pix2Planning [30] formulated planning as an autoregressive sequence prediction problem, using a vision-language Transformer to generate trajectory points. VLP [31] incorporated linguistic descriptions into the training process and aligned them with visual features, significantly improving cross-city and cross-scenario generalization. 
To enhance interpretability, some studies [44, 45] introduced \"future trajectory images\", which were processed by multimodal models to generate natural language explanations. Senna [23] further refined the decision-making process by separating high-level meta-actions from low-level trajectory predictions. In this framework, VLMs first produced directional or speed-level decisions before end-to-end models executed precise paths, thereby achieving a hierarchical strategy that was similar to human driving behaviors." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 54, + 636, + 287, + 729 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 636, + 287, + 729 + ], + "spans": [ + { + "bbox": [ + 54, + 636, + 287, + 729 + ], + "type": "text", + "content": "However, these methods are prone to hallucination, a limitation arising from the reliance of MLLMs on learned associations between visual inputs and language-based reasoning. As a result, they may misinterpret ambiguous or occluded objects, leading to incorrect high-level decision-making. This issue becomes particularly critical in long-tail scenarios, where the model encounters rare or underrepresented driving conditions not well-covered in the training" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 305, + 106, + 539, + 130 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 106, + 539, + 130 + ], + "spans": [ + { + "bbox": [ + 305, + 106, + 539, + 130 + ], + "type": "text", + "content": "data. Such misinterpretations can ultimately compromise the reliability and safety of the autonomous driving system." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 305, + 140, + 539, + 164 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 140, + 539, + 164 + ], + "spans": [ + { + "bbox": [ + 305, + 140, + 539, + 164 + ], + "type": "text", + "content": "2.2. 
Retrieval-Augmented Generation in Vision-Language Models" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 305, + 169, + 540, + 670 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 169, + 540, + 670 + ], + "spans": [ + { + "bbox": [ + 305, + 169, + 540, + 670 + ], + "type": "text", + "content": "In vision-language tasks, RAG mitigates knowledge limitations by leveraging external knowledge bases, enabling models to extract insights from images while supplementing them with retrieved contextual data. This dual approach significantly helps mitigate model hallucination and improve planning accuracy. Jiang et al. [24] introduced a RAG-based framework for VLMs, demonstrating its effectiveness in complex tasks requiring extensive background knowledge. Their study underscored the limitations of conventional end-to-end VLMs when faced with knowledge deficiencies, whereas RAG facilitated richer contextual integration, enhancing both reasoning and generation. Building on this, Shao et al. [35] further investigated RAG's role in VQA tasks, showing that combining retrieval mechanisms with pre-trained VLMs significantly strengthened model performance in complex reasoning scenarios. Additionally, Ram et al. [32] examined RAG's impact on pre-training and fine-tuning, illustrating that incorporating large-scale external data sources during pre-training improved downstream performance by enhancing cross-modal reasoning, particularly in retrieval-based tasks. Meanwhile, Zheng et al. [50] emphasized RAG's broader advantages, particularly in improving generative flexibility and adaptability in multimodal tasks. Their findings highlighted RAG's effectiveness in handling scenarios lacking sufficient annotations or domain-specific knowledge, reinforcing its potential in bridging knowledge gaps for more informed and context-aware model outputs. Hussien et al. 
[20] illustrated how RAG-augmented VLMs enhanced cross-modal retrieval, particularly by strengthening associations between images and textual data. For performance optimization, Yuan et al. [46] introduced a dynamic knowledge retrieval mechanism, emphasizing real-time adjustments in retrieval and generation processes based on task-specific requirements. This adaptive approach allowed RAG to selectively retrieve the most relevant background knowledge, improving performance across various multimodal applications. Cai et al. [4] developed a traffic regulation retrieval agent based on RAG, enabling automatic retrieval of relevant traffic rules and guidelines based on the ego vehicle's status. Moreover, Cui et al. [12] incorporated a RAG-based memory module that continuously learned takeover preferences through human feedback to enhance motion planning." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 305, + 671, + 540, + 730 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 671, + 540, + 730 + ], + "spans": [ + { + "bbox": [ + 305, + 671, + 540, + 730 + ], + "type": "text", + "content": "Despite its strong potential, research on directly utilizing RAG to guide VLMs in meta-action decision-making remains limited. 
To address this gap, we propose the RAD framework, which, for the first time, integrates RAG with pre-training for spatial perception capabilities, enabling" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 747, + 300, + 756 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 747, + 300, + 756 + ], + "spans": [ + { + "bbox": [ + 293, + 747, + 300, + 756 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 54, + 106, + 244, + 117 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 106, + 244, + 117 + ], + "spans": [ + { + "bbox": [ + 54, + 106, + 244, + 117 + ], + "type": "text", + "content": "more effective decision-making of meta-actions." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 54, + 128, + 134, + 142 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 128, + 134, + 142 + ], + "spans": [ + { + "bbox": [ + 54, + 128, + 134, + 142 + ], + "type": "text", + "content": "3. Methodology" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 148, + 287, + 439 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 148, + 287, + 439 + ], + "spans": [ + { + "bbox": [ + 53, + 148, + 287, + 439 + ], + "type": "text", + "content": "As shown in Figure 1, the proposed RAD framework comprises four work flows: embedding flow, retrieving flow, fine-tuning flow and generating flow. Among these, the fine-tuning flow operates independently, as its primary objective is to enhance the spatial perception capabilities of VLMs through separate fine-tuning. In the embedding flow, BEV images are generated to correspond with front-view scene images from the NuScenes dataset. 
These image pairs are encoded into a vector space using a frozen BLIP-2 model and the separate embeddings are then concatenated and stored in a vector database. In the retrieving flow, a new front-view image and its corresponding BEV image serve as a query. These images are encoded into the vector space using the same frozen BLIP-2 model. Cosine similarity is then computed between the query images and those stored in the database, enabling the retrieval of the most similar scene from the database. Furthermore, based on the relative positional relationships between consecutive scenes in the NuScenes dataset, the ground truth meta-actions executed in each scene can be extracted. Finally, in the generating flow, the query scene, retrieved scene, its ground truth meta-action, and proper prompts serve as inputs to the VLMs. These inputs guide the model to make decisions and generate meta-actions, ensuring more accurate and context-aware autonomous driving behaviors." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 65, + 439, + 271, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 439, + 271, + 449 + ], + "spans": [ + { + "bbox": [ + 65, + 439, + 271, + 449 + ], + "type": "text", + "content": "All the extracted meta-actions are shown as follows:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 63, + 459, + 282, + 515 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 63, + 459, + 246, + 470 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 459, + 246, + 470 + ], + "spans": [ + { + "bbox": [ + 63, + 459, + 246, + 470 + ], + "type": "text", + "content": "(1) Speed up (rapidly) (2) Slow down (rapidly)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 63, + 470, + 249, + 481 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 470, + 249, + 481 + ], + "spans": [ + { + "bbox": [ + 63, + 470, + 249, + 481 + ], + "type": "text", + "content": "(3) Turn 
left/right (4) Drive along the curve" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 63, + 481, + 273, + 492 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 481, + 273, + 492 + ], + "spans": [ + { + "bbox": [ + 63, + 481, + 273, + 492 + ], + "type": "text", + "content": "(5) Turn around (6) Change lane to the left/right" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 63, + 492, + 275, + 503 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 492, + 275, + 503 + ], + "spans": [ + { + "bbox": [ + 63, + 492, + 275, + 503 + ], + "type": "text", + "content": "(7) Reverse (8) Shift slightly to the left/right" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 63, + 503, + 282, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 503, + 282, + 515 + ], + "spans": [ + { + "bbox": [ + 63, + 503, + 282, + 515 + ], + "type": "text", + "content": "(9) Stop (10) Go straight constantly/slowly" + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 54, + 537, + 177, + 550 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 537, + 177, + 550 + ], + "spans": [ + { + "bbox": [ + 54, + 537, + 177, + 550 + ], + "type": "text", + "content": "3.1. The Fine-Tuning Flow" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 53, + 555, + 287, + 705 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 555, + 287, + 705 + ], + "spans": [ + { + "bbox": [ + 53, + 555, + 287, + 705 + ], + "type": "text", + "content": "Making precise meta-action decisions in autonomous driving requires an accurate understanding of the environment. If a model lacks sufficient spatial perception capabilities, it may fail to construct a reliable environmental representation, potentially leading to obstacle avoidance failures in meta-action decision-making. 
VLMs typically rely on monocular or surround-view camera inputs and estimate depth information from single-frame images. However, in long-trail scenarios, monocular vision exhibits significant depth estimation errors [5]. Experimental results on the NuScenes dataset indicate that existing VLMs generally lack robust spatial perception, which severely impacts the safety of decision-making and motion control [43]." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 53, + 705, + 287, + 729 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 705, + 287, + 729 + ], + "spans": [ + { + "bbox": [ + 53, + 705, + 287, + 729 + ], + "type": "text", + "content": "To address the aforementioned challenges, VLMs should first undergo fine-tuning to enhance their spatial perception" + } + ] + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 310, + 105, + 537, + 197 + ], + "blocks": [ + { + "bbox": [ + 310, + 105, + 537, + 197 + ], + "lines": [ + { + "bbox": [ + 310, + 105, + 537, + 197 + ], + "spans": [ + { + "bbox": [ + 310, + 105, + 537, + 197 + ], + "type": "image", + "image_path": "87a3dad6ccd70533953de17fe61bb786242837c776ed4147096cbd1507c1b284.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 206, + 538, + 227 + ], + "lines": [ + { + "bbox": [ + 305, + 206, + 538, + 227 + ], + "spans": [ + { + "bbox": [ + 305, + 206, + 538, + 227 + ], + "type": "text", + "content": "Figure 2. The process of generating a dataset for spatial perception enhancement based on the NuScenes dataset" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 248, + 539, + 318 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 248, + 539, + 318 + ], + "spans": [ + { + "bbox": [ + 304, + 248, + 539, + 318 + ], + "type": "text", + "content": "capabilities. 
The structure of VLMs typically consists of a vision encoder and an LLM. In this work, we focus on fine-tuning only the LLM component to enhance its spatial perception. We utilize the NuScenes dataset to generate a specified dataset for spatial perception enhancement, following the process illustrated in Figure 2." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 318, + 539, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 318, + 539, + 399 + ], + "spans": [ + { + "bbox": [ + 304, + 318, + 539, + 399 + ], + "type": "text", + "content": "During the image filtering process, it is necessary to ensure the uniqueness of the VQA pairs by cross-referencing the annotated data from the origin NuScenes dataset. The generated dataset for fine-tuning includes over 100,000 training samples, covering key spatial perception tasks such as object class recognition, object distance estimation and object size estimation." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 305, + 400, + 538, + 423 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 400, + 538, + 423 + ], + "spans": [ + { + "bbox": [ + 305, + 400, + 538, + 423 + ], + "type": "text", + "content": "For spatial perception enhancement fine-tuning, the loss function for a single sample is defined as follows:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 314, + 432, + 539, + 510 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 432, + 539, + 510 + ], + "spans": [ + { + "bbox": [ + 314, + 432, + 539, + 510 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} J = \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\left[ \\lambda_ {1, i} \\left(- \\sum_ {c = 1} ^ {n} y _ {c, i} \\log \\left(p _ {c, i}\\right)\\right) \\right. \\\\ \\left. 
+ \\lambda_ {2, i} \\left(- \\frac {1}{3} \\sum_ {j = 1} ^ {3} \\left(z _ {j, i} - z _ {j, i} ^ {*}\\right) ^ {2}\\right) + \\lambda_ {3, i} \\left(x _ {i} - x _ {i} ^ {*}\\right) ^ {2} \\right] \\tag {1} \\\\ \\end{array}", + "image_path": "c1ec6df0c98c8f6773b2670f9041739281f3f3f95f4fe3aa726afab718393c1b.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 519, + 539, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 519, + 539, + 693 + ], + "spans": [ + { + "bbox": [ + 304, + 519, + 539, + 693 + ], + "type": "text", + "content": "where, " + }, + { + "bbox": [ + 304, + 519, + 539, + 693 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 304, + 519, + 539, + 693 + ], + "type": "text", + "content": " is the batch size during fine-tuning; " + }, + { + "bbox": [ + 304, + 519, + 539, + 693 + ], + "type": "inline_equation", + "content": "\\lambda_{1,i}" + }, + { + "bbox": [ + 304, + 519, + 539, + 693 + ], + "type": "text", + "content": " is the loss identifier for object class recognition in the " + }, + { + "bbox": [ + 304, + 519, + 539, + 693 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 304, + 519, + 539, + 693 + ], + "type": "text", + "content": "-th sample (if there is a corresponding class, " + }, + { + "bbox": [ + 304, + 519, + 539, + 693 + ], + "type": "inline_equation", + "content": "\\lambda_{1,i}" + }, + { + "bbox": [ + 304, + 519, + 539, + 693 + ], + "type": "text", + "content": " will be set to 1; and otherwise, " + }, + { + "bbox": [ + 304, + 519, + 539, + 693 + ], + "type": "inline_equation", + "content": "\\lambda_{1,i}" + }, + { + "bbox": [ + 304, + 519, + 539, + 693 + ], + "type": "text", + "content": " will be set to 0); " + }, + { + "bbox": [ + 304, + 519, + 539, + 693 + ], + "type": "inline_equation", + "content": "\\lambda_{2,i}" + }, + { + "bbox": [ + 304, + 519, + 539, + 693 + ], + "type": "text", + "content": " is the loss identifier for object size 
estimation; " + }, + { + "bbox": [ + 304, + 519, + 539, + 693 + ], + "type": "inline_equation", + "content": "\\lambda_{3,i}" + }, + { + "bbox": [ + 304, + 519, + 539, + 693 + ], + "type": "text", + "content": " is the loss identifier for object distance estimation; " + }, + { + "bbox": [ + 304, + 519, + 539, + 693 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 304, + 519, + 539, + 693 + ], + "type": "text", + "content": " is the total number of classes in the classification task; " + }, + { + "bbox": [ + 304, + 519, + 539, + 693 + ], + "type": "inline_equation", + "content": "y_{c,i}" + }, + { + "bbox": [ + 304, + 519, + 539, + 693 + ], + "type": "text", + "content": " is the label for the " + }, + { + "bbox": [ + 304, + 519, + 539, + 693 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 304, + 519, + 539, + 693 + ], + "type": "text", + "content": "-th sample belonging to class " + }, + { + "bbox": [ + 304, + 519, + 539, + 693 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 304, + 519, + 539, + 693 + ], + "type": "text", + "content": ", represented by one-hot encoding; " + }, + { + "bbox": [ + 304, + 519, + 539, + 693 + ], + "type": "inline_equation", + "content": "p_{c,i}" + }, + { + "bbox": [ + 304, + 519, + 539, + 693 + ], + "type": "text", + "content": " is the probability of the " + }, + { + "bbox": [ + 304, + 519, + 539, + 693 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 304, + 519, + 539, + 693 + ], + "type": "text", + "content": "-th sample being classified as class " + }, + { + "bbox": [ + 304, + 519, + 539, + 693 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 304, + 519, + 539, + 693 + ], + "type": "text", + "content": " by the model; " + }, + { + "bbox": [ + 304, + 519, + 539, + 693 + ], + "type": "inline_equation", + "content": "z_{j,i}" + }, + { + "bbox": [ + 304, + 519, + 539, + 693 + ], + "type": "text", + "content": " 
is the output size of the " + }, + { + "bbox": [ + 304, + 519, + 539, + 693 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 304, + 519, + 539, + 693 + ], + "type": "text", + "content": "-th sample in the " + }, + { + "bbox": [ + 304, + 519, + 539, + 693 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 304, + 519, + 539, + 693 + ], + "type": "text", + "content": "-th dimension from the model; " + }, + { + "bbox": [ + 304, + 519, + 539, + 693 + ], + "type": "inline_equation", + "content": "z_{j,i}^{*}" + }, + { + "bbox": [ + 304, + 519, + 539, + 693 + ], + "type": "text", + "content": " is the ground truth size of the " + }, + { + "bbox": [ + 304, + 519, + 539, + 693 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 304, + 519, + 539, + 693 + ], + "type": "text", + "content": "-th sample in the " + }, + { + "bbox": [ + 304, + 519, + 539, + 693 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 304, + 519, + 539, + 693 + ], + "type": "text", + "content": "-th dimension; " + }, + { + "bbox": [ + 304, + 519, + 539, + 693 + ], + "type": "inline_equation", + "content": "x_{i}" + }, + { + "bbox": [ + 304, + 519, + 539, + 693 + ], + "type": "text", + "content": " is the model's output for the distance from the " + }, + { + "bbox": [ + 304, + 519, + 539, + 693 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 304, + 519, + 539, + 693 + ], + "type": "text", + "content": "-th sample to the reference object; and " + }, + { + "bbox": [ + 304, + 519, + 539, + 693 + ], + "type": "inline_equation", + "content": "x_{i}^{*}" + }, + { + "bbox": [ + 304, + 519, + 539, + 693 + ], + "type": "text", + "content": " is the ground truth distance from the " + }, + { + "bbox": [ + 304, + 519, + 539, + 693 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 304, + 519, + 539, + 693 + ], + "type": "text", + "content": "-th sample to the reference 
object." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 694, + 538, + 729 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 694, + 538, + 729 + ], + "spans": [ + { + "bbox": [ + 304, + 694, + 538, + 729 + ], + "type": "text", + "content": "In this work, we fine-tune a series of VLMs, primarily from the Qwen family [1, 2, 9], using low-rank adaptation (LoRA) [17, 51]. The overall training is conducted for" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 747, + 299, + 756 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 747, + 299, + 756 + ], + "spans": [ + { + "bbox": [ + 293, + 747, + 299, + 756 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 58, + 109, + 235, + 237 + ], + "blocks": [ + { + "bbox": [ + 58, + 109, + 235, + 237 + ], + "lines": [ + { + "bbox": [ + 58, + 109, + 235, + 237 + ], + "spans": [ + { + "bbox": [ + 58, + 109, + 235, + 237 + ], + "type": "image", + "image_path": "3b2c0252547851bd74c7c00b8d60c2fc30f5ecd67aa4851a41887284e269116f.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 168, + 365, + 425, + 377 + ], + "lines": [ + { + "bbox": [ + 168, + 365, + 425, + 377 + ], + "spans": [ + { + "bbox": [ + 168, + 365, + 425, + 377 + ], + "type": "text", + "content": "Figure 3. 
The fine-tuning VQA paradigm for BEV image understanding" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 241, + 112, + 527, + 233 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 241, + 112, + 527, + 233 + ], + "spans": [ + { + "bbox": [ + 241, + 112, + 527, + 233 + ], + "type": "text", + "content": "System: This image illustrates the BEV view of a driving scene, showing the area of 60 meters ahead, 30 meters behind, 30 meters left and right of the ego vehicle. The units of longitudinal and lateral coordinates are meters. The ego vehicle is located at the center " + }, + { + "bbox": [ + 241, + 112, + 527, + 233 + ], + "type": "inline_equation", + "content": "[0,0]" + }, + { + "bbox": [ + 241, + 112, + 527, + 233 + ], + "type": "text", + "content": ", represented by a blue rectangle. The red rectangles represent the objects of vehicle type, including cars, trucks, etc. If there is an arrow on the red rectangle, it means that it will move in the direction of the arrow. The green dots represent pedestrians, and the green arrows also indicate the moving direction. Black dots are static obstacles, including roadblocks, traffic lights, etc." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 61, + 245, + 521, + 354 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 61, + 245, + 521, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 245, + 521, + 269 + ], + "spans": [ + { + "bbox": [ + 61, + 245, + 521, + 269 + ], + "type": "text", + "content": "Question 1: What kind of object (pedestrian, vehicle, or static obstacle) is located within the coordinate [7.6,8.9] in this image?" 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 61, + 269, + 337, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 269, + 337, + 281 + ], + "spans": [ + { + "bbox": [ + 61, + 269, + 337, + 281 + ], + "type": "text", + "content": "Answer 1: There is a vehicle located within the coordinate [7.6,8.9]." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 61, + 281, + 520, + 305 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 281, + 520, + 305 + ], + "spans": [ + { + "bbox": [ + 61, + 281, + 520, + 305 + ], + "type": "text", + "content": "Question 2: What is the central position coordinate of the left-front static obstacle in this image? The result retains one decimal place after the decimal point." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 61, + 306, + 414, + 317 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 306, + 414, + 317 + ], + "spans": [ + { + "bbox": [ + 61, + 306, + 414, + 317 + ], + "type": "text", + "content": "Answer 2: The central position coordinate of the left-front static obstacle is [24.5,17.2]." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 61, + 317, + 520, + 341 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 317, + 520, + 341 + ], + "spans": [ + { + "bbox": [ + 61, + 317, + 520, + 341 + ], + "type": "text", + "content": "Question 3: What is the distance from the left-front static obstacle to the left pedestrian in this image? The result retains one decimal place after the decimal point." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 61, + 342, + 419, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 342, + 419, + 354 + ], + "spans": [ + { + "bbox": [ + 61, + 342, + 419, + 354 + ], + "type": "text", + "content": "Answer 3: The distance from the left-front static obstacle to the left pedestrian is " + }, + { + "bbox": [ + 61, + 342, + 419, + 354 + ], + "type": "inline_equation", + "content": "16.3\\mathrm{m}" + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 53, + 397, + 287, + 513 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 397, + 287, + 513 + ], + "spans": [ + { + "bbox": [ + 53, + 397, + 287, + 513 + ], + "type": "text", + "content": "three epochs. Additionally, following the BEVFormer [27], we generate BEV images from the existing surround-view images in the NuScenes dataset. Intuitively, incorporating BEV images helps the model better understand the relative spatial relationships of objects in driving scenes. Therefore, it is also necessary to train VLMs to recognize and interpret BEV images effectively. The fine-tuning paradigm, as illustrated in Figure 3, follows a similar approach to the VQA pair construction method based on ground truth information to develop a robust ability to understand BEV images." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 54, + 531, + 174, + 543 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 531, + 174, + 543 + ], + "spans": [ + { + "bbox": [ + 54, + 531, + 174, + 543 + ], + "type": "text", + "content": "3.2. 
The Embedding Flow" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 53, + 551, + 287, + 667 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 551, + 287, + 667 + ], + "spans": [ + { + "bbox": [ + 53, + 551, + 287, + 667 + ], + "type": "text", + "content": "In the embedding flow, we encode front-view images from the NuScenes dataset along with the pre-generated BEV images into a unified vector space. Since this embedding operation does not involve cross-modal content, the frozen BLIP-2 model weights can be directly utilized, ensuring computational efficiency and consistency. To maintain the one-to-one correspondence between front-view images and BEV images, their embedding vectors are concatenated within this flow. The resulting concatenated vectors are then uniformly stored in an indexed vector database." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 54, + 685, + 169, + 698 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 685, + 169, + 698 + ], + "spans": [ + { + "bbox": [ + 54, + 685, + 169, + 698 + ], + "type": "text", + "content": "3.3. The Retrieving Flow" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 53, + 705, + 288, + 729 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 705, + 288, + 729 + ], + "spans": [ + { + "bbox": [ + 53, + 705, + 288, + 729 + ], + "type": "text", + "content": "The core of the retrieving flow lies in the computation of cosine similarity. 
Given two image embeddings " + }, + { + "bbox": [ + 53, + 705, + 288, + 729 + ], + "type": "inline_equation", + "content": "\\mathbf{v}_i" + }, + { + "bbox": [ + 53, + 705, + 288, + 729 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 53, + 705, + 288, + 729 + ], + "type": "inline_equation", + "content": "\\mathbf{v}_j" + }, + { + "bbox": [ + 53, + 705, + 288, + 729 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 305, + 397, + 427, + 408 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 397, + 427, + 408 + ], + "spans": [ + { + "bbox": [ + 305, + 397, + 427, + 408 + ], + "type": "text", + "content": "cosine similarity is defined as:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 367, + 413, + 539, + 438 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 367, + 413, + 539, + 438 + ], + "spans": [ + { + "bbox": [ + 367, + 413, + 539, + 438 + ], + "type": "interline_equation", + "content": "\\text {s i m i l a r i t y} _ {i, j} = \\frac {\\mathbf {v} _ {i} \\cdot \\mathbf {v} _ {j}}{\\| \\mathbf {v} _ {i} \\| \\| \\mathbf {v} _ {j} \\|} \\tag {2}", + "image_path": "09ed85780c8f75fb64c113fce7b65222368cbd404bd18f08b41e2bddf09c45b4.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 305, + 443, + 478, + 454 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 443, + 478, + 454 + ], + "spans": [ + { + "bbox": [ + 305, + 443, + 478, + 454 + ], + "type": "text", + "content": "where, " + }, + { + "bbox": [ + 305, + 443, + 478, + 454 + ], + "type": "inline_equation", + "content": "\\| *\\|" + }, + { + "bbox": [ + 305, + 443, + 478, + 454 + ], + "type": "text", + "content": " represents the Euclidean norm." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 305, + 455, + 539, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 455, + 539, + 675 + ], + "spans": [ + { + "bbox": [ + 305, + 455, + 539, + 675 + ], + "type": "text", + "content": "The main framework of the retrieving flow is illustrated in Figure 4. For a new scene, we first generate its BEV images from the surround-view images. The front-view image and BEV image of the new scene jointly trigger a query scene. The embeddings for the new front-view image and BEV image are then extracted using the frozen BLIP-2 model. Since the vector database stores concatenated embedding vectors, the embeddings for the front-view image and BEV image are retrieved through length decomposition. The cosine similarity between the new front-view image embeddings and those stored in the database is computed and denoted as " + }, + { + "bbox": [ + 305, + 455, + 539, + 675 + ], + "type": "inline_equation", + "content": "similarity_{fv}" + }, + { + "bbox": [ + 305, + 455, + 539, + 675 + ], + "type": "text", + "content": ". Similarly, the cosine similarity between the new BEV image embeddings and those stored in the database is computed and denoted as " + }, + { + "bbox": [ + 305, + 455, + 539, + 675 + ], + "type": "inline_equation", + "content": "similarity_{bev}" + }, + { + "bbox": [ + 305, + 455, + 539, + 675 + ], + "type": "text", + "content": ". To flexibly adjust the retrieval preference toward either the front-view image or the BEV image, a hyperparameter " + }, + { + "bbox": [ + 305, + 455, + 539, + 675 + ], + "type": "inline_equation", + "content": "\\omega" + }, + { + "bbox": [ + 305, + 455, + 539, + 675 + ], + "type": "text", + "content": " is introduced. 
In this work, " + }, + { + "bbox": [ + 305, + 455, + 539, + 675 + ], + "type": "inline_equation", + "content": "\\omega" + }, + { + "bbox": [ + 305, + 455, + 539, + 675 + ], + "type": "text", + "content": " is set to 0.5 as a balanced weight for retrieval. The overall similarity could be calculated as follows:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 305, + 682, + 539, + 705 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 682, + 539, + 705 + ], + "spans": [ + { + "bbox": [ + 305, + 682, + 539, + 705 + ], + "type": "interline_equation", + "content": "\\text {s i m i l a r i t y} = (1 - \\omega) \\cdot \\text {s i m i l a r i t y} _ {f v} + \\omega \\cdot \\text {s i m i l a r i t y} _ {\\text {b e v}} \\tag {3}", + "image_path": "655a4581731b9427a0d6c399e2749e4943164e08be7faa3883bbde4bfe157228.jpg" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 305, + 706, + 539, + 729 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 706, + 539, + 729 + ], + "spans": [ + { + "bbox": [ + 305, + 706, + 539, + 729 + ], + "type": "text", + "content": "The scene with the highest overall similarity is then retrieved from the vector database. 
Using its index, we can" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 747, + 300, + 756 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 747, + 300, + 756 + ], + "spans": [ + { + "bbox": [ + 293, + 747, + 300, + 756 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 54, + 105, + 287, + 129 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 105, + 287, + 129 + ], + "spans": [ + { + "bbox": [ + 54, + 105, + 287, + 129 + ], + "type": "text", + "content": "obtain the corresponding front-view image, BEV image, and pre-extracted ground truth meta-action." + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 70, + 144, + 279, + 354 + ], + "blocks": [ + { + "bbox": [ + 70, + 144, + 279, + 354 + ], + "lines": [ + { + "bbox": [ + 70, + 144, + 279, + 354 + ], + "spans": [ + { + "bbox": [ + 70, + 144, + 279, + 354 + ], + "type": "image", + "image_path": "f7ade894530b154e1b79cdbf72256b8bcb4124794ba45c0c288a410898e29806.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 366, + 264, + 378 + ], + "lines": [ + { + "bbox": [ + 77, + 366, + 264, + 378 + ], + "spans": [ + { + "bbox": [ + 77, + 366, + 264, + 378 + ], + "type": "text", + "content": "Figure 4. The main framework of the retrieving flow" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 54, + 400, + 173, + 412 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 400, + 173, + 412 + ], + "spans": [ + { + "bbox": [ + 54, + 400, + 173, + 412 + ], + "type": "text", + "content": "3.4. 
The Generating Flow" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 417, + 287, + 476 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 417, + 287, + 476 + ], + "spans": [ + { + "bbox": [ + 53, + 417, + 287, + 476 + ], + "type": "text", + "content": "In the generating flow, we primarily employ prompt engineering to guide VLMs in reasoning based on the retrieved scene and its corresponding meta-action, enabling them to make accurate meta-action decisions for the new scene. The prompts should be divided into two key components:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 54, + 478, + 287, + 525 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 54, + 478, + 287, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 478, + 287, + 502 + ], + "spans": [ + { + "bbox": [ + 54, + 478, + 287, + 502 + ], + "type": "text", + "content": "- System Prompt: Guide VLMs to make meta-action decisions based on the provided images." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 54, + 502, + 287, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 502, + 287, + 525 + ], + "spans": [ + { + "bbox": [ + 54, + 502, + 287, + 525 + ], + "type": "text", + "content": "- RAG-Specific Prompt: Instruct VLMs to understand the retrieved scene images and corresponding meta-actions." + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 53, + 528, + 287, + 599 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 528, + 287, + 599 + ], + "spans": [ + { + "bbox": [ + 53, + 528, + 287, + 599 + ], + "type": "text", + "content": "For this process, we primarily use the Qwen series of VLMs, as they support multiple image inputs, making prompt design more flexible and effective. 
With structured and well-designed prompts, the VLMs analyze the front-view image and BEV image of the current scene, ultimately generating a single meta-action as the final output." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 54, + 610, + 133, + 623 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 610, + 133, + 623 + ], + "spans": [ + { + "bbox": [ + 54, + 610, + 133, + 623 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 54, + 630, + 168, + 643 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 630, + 168, + 643 + ], + "spans": [ + { + "bbox": [ + 54, + 630, + 168, + 643 + ], + "type": "text", + "content": "4.1. Dataset Preparation" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 53, + 647, + 287, + 729 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 647, + 287, + 729 + ], + "spans": [ + { + "bbox": [ + 53, + 647, + 287, + 729 + ], + "type": "text", + "content": "We divide the 34,000 scenes from the NuScenes dataset into three subsets: 10,000 scenes are allocated for fine-tuning VLMs, focusing on enhancing spatial perception and BEV image understanding; 20,000 scenes are embedded in the vector database as prior information; and the remaining 4,000 scenes serve as the test set, used to evaluate the framework's effectiveness and the model's overall performance." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 105, + 414, + 116 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 105, + 414, + 116 + ], + "spans": [ + { + "bbox": [ + 305, + 105, + 414, + 116 + ], + "type": "text", + "content": "4.2. 
Evaluation Metrics" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 305, + 122, + 539, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 122, + 539, + 191 + ], + "spans": [ + { + "bbox": [ + 305, + 122, + 539, + 191 + ], + "type": "text", + "content": "To assess the performance, we employ traditional classification metrics such as accuracy, precision, recall and F1 score. Additionally, we introduce a customized partial match score to account for semantically similar but not entirely identical cases. Finally, we utilize a weighted method to compute a comprehensive performance score." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 305, + 192, + 539, + 227 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 192, + 539, + 227 + ], + "spans": [ + { + "bbox": [ + 305, + 192, + 539, + 227 + ], + "type": "text", + "content": "We firstly adopt ExactMatchAccuracy to evaluate whether the model provides a fully correct meta-action for a given scene, which is formally defined as follows:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 350, + 234, + 539, + 259 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 350, + 234, + 539, + 259 + ], + "spans": [ + { + "bbox": [ + 350, + 234, + 539, + 259 + ], + "type": "interline_equation", + "content": "E x a c t M a t c h A c c u r a c y = \\frac {N _ {m a t c h}}{N _ {t o t a l}} \\tag {4}", + "image_path": "3f61232938f867f2cdbad0347d3059b08c7fd819b074780e2d9aa04a664b586f.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 305, + 264, + 539, + 298 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 264, + 539, + 298 + ], + "spans": [ + { + "bbox": [ + 305, + 264, + 539, + 298 + ], + "type": "text", + "content": "where, " + }, + { + "bbox": [ + 305, + 264, + 539, + 298 + ], + "type": "inline_equation", + "content": "N_{match}" + }, + { + "bbox": [ + 305, + 264, + 539, + 298 + ], + "type": "text", + 
"content": " is the number of scenes where the generated meta-actions exactly match the ground truth; and " + }, + { + "bbox": [ + 305, + 264, + 539, + 298 + ], + "type": "inline_equation", + "content": "N_{total}" + }, + { + "bbox": [ + 305, + 264, + 539, + 298 + ], + "type": "text", + "content": " is the total number of scenes." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 305, + 299, + 539, + 322 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 299, + 539, + 322 + ], + "spans": [ + { + "bbox": [ + 305, + 299, + 539, + 322 + ], + "type": "text", + "content": "For each meta-action, Precision, Recall, and " + }, + { + "bbox": [ + 305, + 299, + 539, + 322 + ], + "type": "inline_equation", + "content": "F1" + }, + { + "bbox": [ + 305, + 299, + 539, + 322 + ], + "type": "text", + "content": " can be used as evaluation metrics, which are defined as follows:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 367, + 328, + 539, + 354 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 367, + 328, + 539, + 354 + ], + "spans": [ + { + "bbox": [ + 367, + 328, + 539, + 354 + ], + "type": "interline_equation", + "content": "P r e c i s i o n _ {i} = \\frac {T P _ {i}}{T P _ {i} + F P _ {i}} \\tag {5}", + "image_path": "8cb4d2ecd54a11f70060695e1d49571e4243a7a6e24b887c775f9f87609978cc.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 374, + 359, + 539, + 384 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 374, + 359, + 539, + 384 + ], + "spans": [ + { + "bbox": [ + 374, + 359, + 539, + 384 + ], + "type": "interline_equation", + "content": "\\operatorname {R e c a l l} _ {i} = \\frac {T P _ {i}}{T P _ {i} + F N _ {i}} \\tag {6}", + "image_path": "da248ba9dcd1bc9f1704559853a126ef1485a2dc0f5950b16e31408a69604135.jpg" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 354, + 387, + 538, + 412 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": 
[ + 354, + 387, + 538, + 412 + ], + "spans": [ + { + "bbox": [ + 354, + 387, + 538, + 412 + ], + "type": "interline_equation", + "content": "F 1 _ {i} = \\frac {2 \\times \\text {P r e c i s i o n} _ {i} \\times \\text {R e c a l l} _ {i}}{\\text {P r e c i s i o n} _ {i} + \\text {R e c a l l} _ {i}} \\tag {7}", + "image_path": "81b2f018b737780464db4e962c3972beec1e545f54eb1e5a1dbc58090d016e89.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 305, + 414, + 539, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 414, + 539, + 495 + ], + "spans": [ + { + "bbox": [ + 305, + 414, + 539, + 495 + ], + "type": "text", + "content": "where, " + }, + { + "bbox": [ + 305, + 414, + 539, + 495 + ], + "type": "inline_equation", + "content": "TP_{i}" + }, + { + "bbox": [ + 305, + 414, + 539, + 495 + ], + "type": "text", + "content": " is the true positives, and the number of scenes where the generated meta-actions are " + }, + { + "bbox": [ + 305, + 414, + 539, + 495 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 305, + 414, + 539, + 495 + ], + "type": "text", + "content": " and the ground truth are also " + }, + { + "bbox": [ + 305, + 414, + 539, + 495 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 305, + 414, + 539, + 495 + ], + "type": "text", + "content": "; " + }, + { + "bbox": [ + 305, + 414, + 539, + 495 + ], + "type": "inline_equation", + "content": "FP_{i}" + }, + { + "bbox": [ + 305, + 414, + 539, + 495 + ], + "type": "text", + "content": " is the false positives, and the number of scenes where the generated meta-actions are " + }, + { + "bbox": [ + 305, + 414, + 539, + 495 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 305, + 414, + 539, + 495 + ], + "type": "text", + "content": " but the ground truth are not " + }, + { + "bbox": [ + 305, + 414, + 539, + 495 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 305, + 414, + 539, + 
495 + ], + "type": "text", + "content": "; " + }, + { + "bbox": [ + 305, + 414, + 539, + 495 + ], + "type": "inline_equation", + "content": "FN_{i}" + }, + { + "bbox": [ + 305, + 414, + 539, + 495 + ], + "type": "text", + "content": " is the false negatives, and the number of scenes where the generated meta-actions are not " + }, + { + "bbox": [ + 305, + 414, + 539, + 495 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 305, + 414, + 539, + 495 + ], + "type": "text", + "content": " but the ground truth are " + }, + { + "bbox": [ + 305, + 414, + 539, + 495 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 305, + 414, + 539, + 495 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 305, + 496, + 539, + 565 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 496, + 539, + 565 + ], + "spans": [ + { + "bbox": [ + 305, + 496, + 539, + 565 + ], + "type": "text", + "content": "To evaluate the overall performance across different meta-actions in the test set, Macro " + }, + { + "bbox": [ + 305, + 496, + 539, + 565 + ], + "type": "inline_equation", + "content": "-F1" + }, + { + "bbox": [ + 305, + 496, + 539, + 565 + ], + "type": "text", + "content": " and Weighted " + }, + { + "bbox": [ + 305, + 496, + 539, + 565 + ], + "type": "inline_equation", + "content": "-F1" + }, + { + "bbox": [ + 305, + 496, + 539, + 565 + ], + "type": "text", + "content": " scores are introduced. 
Macro " + }, + { + "bbox": [ + 305, + 496, + 539, + 565 + ], + "type": "inline_equation", + "content": "-F1" + }, + { + "bbox": [ + 305, + 496, + 539, + 565 + ], + "type": "text", + "content": " is the unweighted average of " + }, + { + "bbox": [ + 305, + 496, + 539, + 565 + ], + "type": "inline_equation", + "content": "F1" + }, + { + "bbox": [ + 305, + 496, + 539, + 565 + ], + "type": "text", + "content": " scores across all meta-actions, while Weighted " + }, + { + "bbox": [ + 305, + 496, + 539, + 565 + ], + "type": "inline_equation", + "content": "-F1" + }, + { + "bbox": [ + 305, + 496, + 539, + 565 + ], + "type": "text", + "content": " is the weighted average of " + }, + { + "bbox": [ + 305, + 496, + 539, + 565 + ], + "type": "inline_equation", + "content": "F1" + }, + { + "bbox": [ + 305, + 496, + 539, + 565 + ], + "type": "text", + "content": " scores, which are defined as:" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 365, + 572, + 538, + 604 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 365, + 572, + 538, + 604 + ], + "spans": [ + { + "bbox": [ + 365, + 572, + 538, + 604 + ], + "type": "interline_equation", + "content": "M a c r o - F 1 = \\frac {1}{K} \\sum_ {i = 1} ^ {K} F 1 _ {i} \\tag {8}", + "image_path": "4dac6ad10347d403506bfc914ce4ef973aeec42a36f10d613b8d99977c95bf36.jpg" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 346, + 612, + 538, + 644 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 346, + 612, + 538, + 644 + ], + "spans": [ + { + "bbox": [ + 346, + 612, + 538, + 644 + ], + "type": "interline_equation", + "content": "W e i g h t e d - F 1 = \\frac {1}{N _ {\\text {t o t a l}}} \\sum_ {i = 1} ^ {K} n _ {i} F 1 _ {i} \\tag {9}", + "image_path": "2d8d2883b76d7cd196db676b2df2edcba3e32a7592692122cf8ba7c15aeac6ab.jpg" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 305, + 647, + 539, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 
305, + 647, + 539, + 682 + ], + "spans": [ + { + "bbox": [ + 305, + 647, + 539, + 682 + ], + "type": "text", + "content": "where, " + }, + { + "bbox": [ + 305, + 647, + 539, + 682 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 305, + 647, + 539, + 682 + ], + "type": "text", + "content": " represents the total number of meta-actions, which is set to 15; and " + }, + { + "bbox": [ + 305, + 647, + 539, + 682 + ], + "type": "inline_equation", + "content": "n_i" + }, + { + "bbox": [ + 305, + 647, + 539, + 682 + ], + "type": "text", + "content": " represents the number of scenes where the ground truth meta-action is " + }, + { + "bbox": [ + 305, + 647, + 539, + 682 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 305, + 647, + 539, + 682 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 305, + 682, + 539, + 729 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 682, + 539, + 729 + ], + "spans": [ + { + "bbox": [ + 305, + 682, + 539, + 729 + ], + "type": "text", + "content": "To account for the semantic similarity between certain meta-actions, we introduce a PartialMatchScore. 
Specifically, meta-actions involving leftward maneuvers—such as turn left, change lane to the left and shift" + } + ] + } + ], + "index": 26 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 747, + 299, + 756 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 747, + 299, + 756 + ], + "spans": [ + { + "bbox": [ + 293, + 747, + 299, + 756 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 54, + 123, + 538, + 234 + ], + "blocks": [ + { + "bbox": [ + 173, + 104, + 420, + 116 + ], + "lines": [ + { + "bbox": [ + 173, + 104, + 420, + 116 + ], + "spans": [ + { + "bbox": [ + 173, + 104, + 420, + 116 + ], + "type": "text", + "content": "Table 1. Comparison among different baselines and our RAD method" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 54, + 123, + 538, + 234 + ], + "lines": [ + { + "bbox": [ + 54, + 123, + 538, + 234 + ], + "spans": [ + { + "bbox": [ + 54, + 123, + 538, + 234 + ], + "type": "table", + "html": "
MethodExact Match AccuracyMacro-F1Weighted-F1Partial Match ScoreOverall Score
Lynx (Fine-tuning)[48]0.15240.01670.06530.27680.1327
CogVLM (Fine-tuning)[40]0.21780.02040.11050.35630.1846
DriveLM (on LLaMA-LoRA-BIAS-7B)[36]0.14550.04480.12030.30280.1518
DriveLM (on LLaMA-BIAS-7B)[36]0.18960.04090.12120.34250.1693
DriveLM (on LLaMA-CAPTION-7B)[36]0.20340.03800.10800.39520.1896
GPT-4o (Official API)[21]0.29940.11270.22880.43770.2756
DriveVLM[37]0.37430.16710.33250.54620.3589
DriveVLM-Dual (cooperating with VAD[22])[37]0.40160.18540.35060.56130.3801
RAD (Ours, on Qwen-VL-2.5-7B)0.40960.19070.38130.58700.3956
", + "image_path": "7fa8a5818551c36b52d6e493c15ef043dab065c73dc51516b5d7e6dfe7997138.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 253, + 287, + 416 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 253, + 287, + 416 + ], + "spans": [ + { + "bbox": [ + 53, + 253, + 287, + 416 + ], + "type": "text", + "content": "slightly to the left--are classified under the left group, while analogous rightward actions form the right group. Similarly, meta-actions indicating forward motion at varying speeds are categorized accordingly, with go straight slowly, slow down, and slow down rapidly mapping to the deceleration group, while both speed up and speed up rapidly mapping to acceleration group. Furthermore, unique behaviors such as go straight constantly, turn around, reverse, stop, and drive along the curve are collectively assigned to a separate unique group. If the generated meta-actions and the ground truth meta-actions are not identical but belong to the same semantic group (excluding the unique group), they are considered partially matched. Thus, the semantic similarity " + }, + { + "bbox": [ + 53, + 253, + 287, + 416 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 53, + 253, + 287, + 416 + ], + "type": "text", + "content": " is defined as follows:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 76, + 423, + 286, + 465 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 423, + 286, + 465 + ], + "spans": [ + { + "bbox": [ + 76, + 423, + 286, + 465 + ], + "type": "interline_equation", + "content": "S (i, \\hat {i}) = \\left\\{ \\begin{array}{l l} 1, & \\text {i f} \\hat {i} \\text {i s t h e s a m e a s} i. \\\\ 0. 5, & \\text {i f} \\hat {i} \\text {p a r t i a l l y m a t c h e s} i. \\\\ 0, & \\text {i f} \\hat {i} \\text {t o t a l l y d i f f e r s f r o m} i. \\end{array} \\right. 
\\tag {10}", + "image_path": "29645f7b3daab9c7ce3cbb55863b61cbca30cc4276ffb7a25bf80e85d19ae6e5.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 473, + 287, + 497 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 473, + 287, + 497 + ], + "spans": [ + { + "bbox": [ + 53, + 473, + 287, + 497 + ], + "type": "text", + "content": "where, " + }, + { + "bbox": [ + 53, + 473, + 287, + 497 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 53, + 473, + 287, + 497 + ], + "type": "text", + "content": " is the ground truth meta-action in one scene; and " + }, + { + "bbox": [ + 53, + 473, + 287, + 497 + ], + "type": "inline_equation", + "content": "\\hat{i}" + }, + { + "bbox": [ + 53, + 473, + 287, + 497 + ], + "type": "text", + "content": " is the generated meta-action." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 497, + 287, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 497, + 287, + 521 + ], + "spans": [ + { + "bbox": [ + 53, + 497, + 287, + 521 + ], + "type": "text", + "content": "Then, the average PartialMatchScore is obtained by averaging across all scenes:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 527, + 287, + 560 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 527, + 287, + 560 + ], + "spans": [ + { + "bbox": [ + 69, + 527, + 287, + 560 + ], + "type": "interline_equation", + "content": "\\text {P a r t i a l M a t c h S c o r e} = \\frac {1}{N _ {\\text {t o t a l}}} \\sum_ {k = 1} ^ {N _ {\\text {t o t a l}}} S \\left(i _ {k}, \\hat {i _ {k}}\\right) \\tag {11}", + "image_path": "eb21cd003827e303ea11e9e88d96e90cd5e7ce759836936848ef07153bba86a5.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 565, + 287, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 565, + 287, + 589 + ], + "spans": [ + { + "bbox": [ + 53, + 565, + 287, + 589 + ], + "type": "text", + 
"content": "Finally, different weights are assigned to each metric to derive the comprehensive scoring formula OverallScore:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 79, + 596, + 286, + 651 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 596, + 286, + 651 + ], + "spans": [ + { + "bbox": [ + 79, + 596, + 286, + 651 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} O v e r a l l S c o r e = \\alpha \\cdot E x a c t M a t c h A c c u r a c y \\\\ + \\beta \\cdot M a c r o - F 1 \\\\ + \\gamma \\cdot W e i g h t e d - F 1 \\\\ + \\delta \\cdot \\text {P a r t i a l M a t c h S c o r e} \\tag {12} \\\\ \\end{array}", + "image_path": "a04a2037e80fd1c1c41cccc3fec74d73065625beb5a8c4249407686e02debda5.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 659, + 287, + 683 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 659, + 287, + 683 + ], + "spans": [ + { + "bbox": [ + 53, + 659, + 287, + 683 + ], + "type": "text", + "content": "where, " + }, + { + "bbox": [ + 53, + 659, + 287, + 683 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 53, + 659, + 287, + 683 + ], + "type": "text", + "content": " is set to 0.4; " + }, + { + "bbox": [ + 53, + 659, + 287, + 683 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 53, + 659, + 287, + 683 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 53, + 659, + 287, + 683 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 53, + 659, + 287, + 683 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 53, + 659, + 287, + 683 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 53, + 659, + 287, + 683 + ], + "type": "text", + "content": " are all set to 0.2, which could be adjusted according to specific tasks." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 54, + 688, + 197, + 702 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 688, + 197, + 702 + ], + "spans": [ + { + "bbox": [ + 54, + 688, + 197, + 702 + ], + "type": "text", + "content": "4.3. Comparative Experiments" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 53, + 705, + 287, + 729 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 705, + 287, + 729 + ], + "spans": [ + { + "bbox": [ + 53, + 705, + 287, + 729 + ], + "type": "text", + "content": "We evaluate the performance of our proposed RAD framework on Qwen-VL-2.5-7B VLM and compare it against" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 253, + 539, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 253, + 539, + 380 + ], + "spans": [ + { + "bbox": [ + 304, + 253, + 539, + 380 + ], + "type": "text", + "content": "several other state-to-the-art baseline methods: Lynx [48], CogVLM [40], DriveLM [36], GPT-4o [21] and DriveVLM [37]. Table 1 presents a thorough quantitative comparison between our proposed RAD and these baselines across multiple evaluation criteria. Our RAD consistently outperforms all baseline methods, demonstrating clear advantages in meta-action decision-making for autonomous driving. In particular, RAD achieves an ExactMatchAccuracy of 0.4096, substantially outperforming DriveVLM-Dual's 0.4016, and attains an OverallScore of 0.3956 compared to DriveVLM-Dual's 0.3801." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 384, + 539, + 535 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 384, + 539, + 535 + ], + "spans": [ + { + "bbox": [ + 304, + 384, + 539, + 535 + ], + "type": "text", + "content": "A deeper analysis of the remaining metrics further underscores RAD's strengths. Macro - F1, a balanced measure of model performance across all classes, achieves 0.1907, well above DriveVLM-Dual's 01854. 
Meanwhile, Weighted - F1 of 0.3813 indicates its effectiveness in scenarios where class imbalances exist, significantly outperforming all baselines and reflecting RAD's notable capabilities to handle diverse datasets. Also, PartialMatchScore of 0.5870 also highlights RAD's fine-grained generative capability, which suggests that RAD not only excels at producing entirely correct answers, but also consistently captures partially correct information, an essential trait for more nuanced or multi-faceted decision-making tasks." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 539, + 539, + 667 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 539, + 539, + 667 + ], + "spans": [ + { + "bbox": [ + 304, + 539, + 539, + 667 + ], + "type": "text", + "content": "The poor performance of the baseline methods is mainly due to their lack of task-specific training. As a result, these models exhibit limited spatial perception capabilities and poor BEV image comprehension. Additionally, the parameter size constraints and version limitations of the base models used in these baselines hinder their ability to achieve optimal results. However, RAD's superior performance over GPT-4o across all metrics demonstrates the feasibility of specialized VLMs with smaller parameter sizes that rival or even surpass large-scale general-purpose models in complex and domain-specific tasks." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 671, + 539, + 729 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 671, + 539, + 729 + ], + "spans": [ + { + "bbox": [ + 304, + 671, + 539, + 729 + ], + "type": "text", + "content": "In summary, the results in Table 1 validate the efficacy and robustness of our RAD model. 
Through a combination of architectural innovations and targeted training strategies, RAD not only achieves profound performance across multiple metrics but also provides insights into how specialized" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 747, + 299, + 755 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 747, + 299, + 755 + ], + "spans": [ + { + "bbox": [ + 293, + 747, + 299, + 755 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 55, + 123, + 539, + 310 + ], + "blocks": [ + { + "bbox": [ + 180, + 104, + 412, + 116 + ], + "lines": [ + { + "bbox": [ + 180, + 104, + 412, + 116 + ], + "spans": [ + { + "bbox": [ + 180, + 104, + 412, + 116 + ], + "type": "text", + "content": "Table 2. Ablation studies on fine-tuning VLMs and RAG pipeline" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 55, + 123, + 539, + 310 + ], + "lines": [ + { + "bbox": [ + 55, + 123, + 539, + 310 + ], + "spans": [ + { + "bbox": [ + 55, + 123, + 539, + 310 + ], + "type": "table", + "html": "
VLMsMethodExact Match AccuracyMacro-F1Weighted-F1Partial Matching ScoreOverall Score
Qwen-VL-2-2B[9]Vanilla0.21880.03580.10130.43530.2020
Vanilla + RAG0.21450.10490.22780.43190.2387
Fine-tuning0.15430.05280.11940.30170.1565
Fine-tuning + RAG0.26100.13020.25560.45380.2723
Qwen-VL-2-7B[9]Vanilla0.28660.06540.17210.49410.2609
Vanilla + RAG0.34040.14600.32350.54240.3385
Fine-tuning0.29080.07170.19860.45620.2616
Fine-tuning + RAG0.34460.14600.30110.52130.3315
Qwen-VL-2.5-3B[2]Vanilla0.13180.03660.09550.38860.1568
Vanilla + RAG0.12400.02980.08140.38660.1491
Fine-tuning0.21640.05310.13980.39490.2041
Fine-tuning + RAG0.25390.10750.20900.45200.2552
Qwen-VL-2.5-7B[2]Vanilla0.28490.06440.17150.48930.2590
Vanilla + RAG0.35810.19810.33860.55440.3615
Fine-tuning0.34820.10850.28850.53600.3259
Fine-tuning + RAG0.40960.19070.38130.58700.3956
", + "image_path": "e3f388afce5664fe8f861a256039ed2136bb7f8572ca00a2dcbebc387c54e999.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 54, + 329, + 270, + 341 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 329, + 270, + 341 + ], + "spans": [ + { + "bbox": [ + 54, + 329, + 270, + 341 + ], + "type": "text", + "content": "VLMs can excel in intricate autonomous driving tasks." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 54, + 349, + 153, + 361 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 349, + 153, + 361 + ], + "spans": [ + { + "bbox": [ + 54, + 349, + 153, + 361 + ], + "type": "text", + "content": "4.4. Ablation Studies" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 367, + 287, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 367, + 287, + 460 + ], + "spans": [ + { + "bbox": [ + 53, + 367, + 287, + 460 + ], + "type": "text", + "content": "In our ablation studies, we mainly investigate the impacts of fine-tuning VLMs and RAG pipeline for spatial perception enhancement based on Qwen-VL-2-2B [9], Qwen-VL-2-7B [9], Qwen-VL-2.5-3B [2] and Qwen-VL-2.5-7B [2] models. The performance of VLMs is evaluated using four distinct methods: vanilla (no fine-tuning), vanilla combined with RAG, only fine-tuning, and fine-tuning combined with RAG (our proposed RAD method)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 461, + 287, + 612 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 461, + 287, + 612 + ], + "spans": [ + { + "bbox": [ + 53, + 461, + 287, + 612 + ], + "type": "text", + "content": "The results presented in Table 2 indicate that the combination of fine-tuning and RAG consistently achieves the highest scores across all evaluation metrics, including ExactMatchAccuracy, Macro - F1, Weighted - F1, PartialMatchScore, and OverallScore, for all model variants. 
Specifically, for Qwen-VL-2.5-7B, our RAD method achieves the highest OverallScore of 0.3956, marking a significant improvement over methods that deploy either fine-tuning or RAG separately. Furthermore, the incorporation of RAG consistently enhances performance for both vanilla and fine-tuned settings across most model scales, validating the effectiveness of retrieval-augmented strategies in improving model performance." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 613, + 287, + 729 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 613, + 287, + 729 + ], + "spans": [ + { + "bbox": [ + 53, + 613, + 287, + 729 + ], + "type": "text", + "content": "Notably, for smaller models such as Qwen-VL-2-2B and Qwen-VL-2.5-3B, employing only fine-tuning leads to performance degradation, suggesting that their limited parameter sizes hinder effective learning of domain-specific knowledge through fine-tuning alone. Additionally, for Qwen-VL-2.5-3B model, using RAG without fine-tuning results in a performance drop, likely due to the unique pre-training characteristics of this model. Overall, while fine-tuning or RAG independently can enhance performance in larger-scale models, the best results are consistently achieved by" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 329, + 539, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 329, + 539, + 434 + ], + "spans": [ + { + "bbox": [ + 304, + 329, + 539, + 434 + ], + "type": "text", + "content": "combining these two strategies, underscoring the importance of an integrated approach to maximize VLM effectiveness. From a practical perspective, the combination of fine-tuning and RAG proves particularly suitable for enhancing decision-making capabilities in VLMs. 
Deploying this optimal configuration can substantially improve VLM performance, with potential applications extending to semantic comprehension, trajectory planning, and other complex autonomous driving tasks." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 305, + 444, + 377, + 456 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 444, + 377, + 456 + ], + "spans": [ + { + "bbox": [ + 305, + 444, + 377, + 456 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 464, + 539, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 464, + 539, + 568 + ], + "spans": [ + { + "bbox": [ + 304, + 464, + 539, + 568 + ], + "type": "text", + "content": "In this work, we propose a RAD framework, a novel retrieval-augmented architecture designed to enhance the meta-action decision-making capabilities of VLMs for autonomous driving. Through the integration of fine-tuning VLMs for spatial perception enhancement and BEV image comprehension, RAD effectively enhances VLMs' capability of meta-action decision-making, ensuring higher accuracy, as demonstrated by notable performance gains across key metrics in extensive experimental evaluations." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 569, + 540, + 729 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 569, + 540, + 729 + ], + "spans": [ + { + "bbox": [ + 304, + 569, + 540, + 729 + ], + "type": "text", + "content": "Moving forward, we aim to extend RAD in three key directions. First, we plan to incorporate more diverse and fine-grained datasets beyond the NuScenes dataset, encompassing more challenging corner cases and real-world scenarios, to further enhance model robustness. Second, we seek to generalize the RAD framework to additional driving tasks, especially trajectory planning and motion control. 
Third, integrating chain-of-thought and reinforcement learning into the framework will be crucial for improving decision-making depth and adaptability. While fine-tuning and RAG will remain essential for enhancing VLM generalization, these advancements will strengthen the robustness and reliability of autonomous driving systems by leveraging RAG methods to tackle complex real-world tasks." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 747, + 300, + 756 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 747, + 300, + 756 + ], + "spans": [ + { + "bbox": [ + 293, + 747, + 300, + 756 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 105, + 111, + 116 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 105, + 111, + 116 + ], + "spans": [ + { + "bbox": [ + 55, + 105, + 111, + 116 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 124, + 287, + 729 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 60, + 124, + 287, + 252 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 124, + 287, + 252 + ], + "spans": [ + { + "bbox": [ + 60, + 124, + 287, + 252 + ], + "type": "text", + "content": "[1] Jinze Bai, Shuai Bai, Yunfei Chu, Zeyu Cui, Kai Dang, Xiaodong Deng, Yang Fan, Wenbin Ge, Yu Han, Fei Huang, Binyuan Hui, Luo Ji, Mei Li, Junyang Lin, Runji Lin, Dayiheng Liu, Gao Liu, Chengqiang Lu, Keming Lu, Jianxin Ma, Rui Men, Xingzhang Ren, Xuancheng Ren, Chuanqi Tan, Sinan Tan, Jianhong Tu, Peng Wang, Shijie Wang, Wei Wang, Shengguang Wu, Benfeng Xu, Jin Xu, An Yang, Hao Yang, Jian Yang, Shusheng Yang, Yang Yao, Bowen Yu, Hongyi Yuan, Zheng Yuan, Jianwei Zhang, Xingxuan Zhang, Yichang Zhang, Zhenru Zhang, Chang Zhou, Jingren Zhou, Xiaohuan 
Zhou, and Tianhang Zhu. Qwen technical report. arXiv preprint arXiv:2309.16609, 2023. 4" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 60, + 253, + 287, + 337 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 253, + 287, + 337 + ], + "spans": [ + { + "bbox": [ + 60, + 253, + 287, + 337 + ], + "type": "text", + "content": "[2] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, Humen Zhong, Yuanzhi Zhu, Mingkun Yang, Zhaohai Li, Jianqiang Wan, Pengfei Wang, Wei Ding, Zheren Fu, Yiheng Xu, Jiabo Ye, Xi Zhang, Tianbao Xie, Zesen Cheng, Hang Zhang, Zhibo Yang, Haiyang Xu, and Junyang Lin. Qwen2.5-v1 technical report. arXiv preprint arXiv:2502.13923, 2025. 4, 8" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 60, + 338, + 287, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 338, + 287, + 403 + ], + "spans": [ + { + "bbox": [ + 60, + 338, + 287, + 403 + ], + "type": "text", + "content": "[3] Holger Caesar, Varun Bankiti, Alex H Lang, Sourabh Vora, Venice Erin Liong, Qiang Xu, Anush Krishnan, Yu Pan, Giancarlo Baldan, and Oscar Beijbom. Nuscenes: A multimodal dataset for autonomous driving. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11621-11631, 2020. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 60, + 404, + 287, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 404, + 287, + 456 + ], + "spans": [ + { + "bbox": [ + 60, + 404, + 287, + 456 + ], + "type": "text", + "content": "[4] Tianhui Cai, Yifan Liu, Zewei Zhou, Haoxuan Ma, Seth Z Zhao, Zhiwen Wu, and Jiaqi Ma. Driving with regulation: Interpretable decision-making for autonomous vehicles with retrieval-augmented reasoning via llm. arXiv preprint arXiv:2410.04759, 2024. 
2, 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 60, + 458, + 287, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 458, + 287, + 521 + ], + "spans": [ + { + "bbox": [ + 60, + 458, + 287, + 521 + ], + "type": "text", + "content": "[5] Boyuan Chen, Zhuo Xu, Sean Kirmani, Brain Ichter, Dorsa Sadigh, Leonidas Guibas, and Fei Xia. Spatialvlm: Endowing vision-language models with spatial reasoning capabilities. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14455-14465, 2024. 4" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 60, + 523, + 287, + 587 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 523, + 287, + 587 + ], + "spans": [ + { + "bbox": [ + 60, + 523, + 287, + 587 + ], + "type": "text", + "content": "[6] Long Chen, Oleg Sinavski, Jan Hunermann, Alice Karnsund, Andrew James Willmott, Danny Birch, Daniel Maund, and Jamie Shotton. Driving with llms: Fusing object-level vector modality for explainable autonomous driving. In Proceedings of the IEEE International Conference on Robotics and Automation, pages 14093-14100, 2024. 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 60, + 588, + 287, + 629 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 588, + 287, + 629 + ], + "spans": [ + { + "bbox": [ + 60, + 588, + 287, + 629 + ], + "type": "text", + "content": "[7] Li Chen, Penghao Wu, Kashyap Chitta, Bernhard Jaeger, Andreas Geiger, and Hongyang Li. End-to-end autonomous driving: Challenges and frontiers. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2024. 1" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 60, + 631, + 287, + 673 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 631, + 287, + 673 + ], + "spans": [ + { + "bbox": [ + 60, + 631, + 287, + 673 + ], + "type": "text", + "content": "[8] Pranav Singh Chib and Pravendra Singh. 
Recent advancements in end-to-end autonomous driving using deep learning: A survey. IEEE Transactions on Intelligent Vehicles, 9 (1):103-118, 2023. 1" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 60, + 675, + 287, + 717 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 675, + 287, + 717 + ], + "spans": [ + { + "bbox": [ + 60, + 675, + 287, + 717 + ], + "type": "text", + "content": "[9] Yunfei Chu, Jin Xu, Qian Yang, Haojie Wei, Xipin Wei, Zhifang Guo, Yichong Leng, Yuanjun Lv, Jinzheng He, Junyang Lin, Chang Zhou, and Jingren Zhou. Qwen2-audio technical report. arXiv preprint arXiv:2407.10759, 2024. 4, 8" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 718, + 287, + 729 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 718, + 287, + 729 + ], + "spans": [ + { + "bbox": [ + 55, + 718, + 287, + 729 + ], + "type": "text", + "content": "[10] Can Cui, Yunsheng Ma, Xu Cao, Wenqian Ye, and Ziran" + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 107, + 538, + 729 + ], + "type": "list", + "angle": 0, + "index": 23, + "blocks": [ + { + "bbox": [ + 326, + 107, + 538, + 148 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 107, + 538, + 148 + ], + "spans": [ + { + "bbox": [ + 326, + 107, + 538, + 148 + ], + "type": "text", + "content": "Wang. Receive, reason, and react: Drive as you say, with large language models in autonomous vehicles. IEEE Intelligent Transportation Systems Magazine, 16(4):81-94, 2024. 3" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 307, + 150, + 538, + 213 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 150, + 538, + 213 + ], + "spans": [ + { + "bbox": [ + 307, + 150, + 538, + 213 + ], + "type": "text", + "content": "[11] Can Cui, Yunsheng Ma, Zichong Yang, Yupeng Zhou, Peiran Liu, Juanwu Lu, Lingxi Li, Yaobin Chen, Jitesh H. 
Panchal, Amr Abdelraouf, Rohit Gupta, Kyungtae Han, and Ziran Wang. Large language models for autonomous driving (llm4ad): Concept, benchmark, experiments, and challenges. arXiv preprint arXiv:2410.15281, 2024. 1" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 214, + 538, + 288 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 214, + 538, + 288 + ], + "spans": [ + { + "bbox": [ + 307, + 214, + 538, + 288 + ], + "type": "text", + "content": "[12] Can Cui, Zichong Yang, Yupeng Zhou, Juntong Peng, Sung-Yeon Park, Cong Zhang, Yunsheng Ma, Xu Cao, Wenqian Ye, Yiheng Feng, Jitesh H. Panchal, Lingxi Li, Yaobin Chen, and Ziran Wang. On-board vision-language models for personalized autonomous vehicle motion control: System design and real-world validation. arXiv preprint arXiv:2411.11913, 2024. 3" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 308, + 289, + 538, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 289, + 538, + 342 + ], + "spans": [ + { + "bbox": [ + 308, + 289, + 538, + 342 + ], + "type": "text", + "content": "[13] Daocheng Fu, Xin Li, Licheng Wen, Min Dou, Pinlong Cai, Botian Shi, and Yu Qiao. Drive like a human: Rethinking autonomous driving with large language models. In Proceedings of the Winter Conference on Applications of Computer Vision Workshops, pages 910-919. IEEE, 2024. 1, 3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 343, + 538, + 386 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 343, + 538, + 386 + ], + "spans": [ + { + "bbox": [ + 308, + 343, + 538, + 386 + ], + "type": "text", + "content": "[14] Yunfan Gao, Yun Xiong, Xinyu Gao, Kangxiang Jia, Jinliu Pan, Yuxi Bi, Yi Dai, Jiawei Sun, and Haofen Wang. Retrieval-augmented generation for large language models: A survey. arXiv preprint arXiv:2312.10997, 2023. 
2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 386, + 538, + 428 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 386, + 538, + 428 + ], + "spans": [ + { + "bbox": [ + 308, + 386, + 538, + 428 + ], + "type": "text", + "content": "[15] Sorin Grigorescu, Bogdan Trasnea, Tiberiu Cocias, and Gigel Macesanu. A survey of deep learning techniques for autonomous driving. Journal of Field Robotics, 37(3):362-386, 2020. 1" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 428, + 538, + 492 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 428, + 538, + 492 + ], + "spans": [ + { + "bbox": [ + 308, + 428, + 538, + 492 + ], + "type": "text", + "content": "[16] Xu Han, Zonglin Meng, Xin Xia, Xishun Liao, Brian Yueshuai He, Zhaoliang Zheng, Yutong Wang, Hao Xiang, Zewei Zhou, Letian Gao, Lili Fan, Yuke Li, and Jiaqi Ma. Foundation intelligence for smart infrastructure services in transportation 5.0. IEEE Transactions on Intelligent Vehicles, 9(1):39-47, 2024. 1" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 493, + 538, + 545 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 493, + 538, + 545 + ], + "spans": [ + { + "bbox": [ + 308, + 493, + 538, + 545 + ], + "type": "text", + "content": "[17] Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. Lora: Low-rank adaptation of large language models. In Proceedings of the International Conference on Learning Representations, 2022. 4" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 546, + 538, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 546, + 538, + 600 + ], + "spans": [ + { + "bbox": [ + 308, + 546, + 538, + 600 + ], + "type": "text", + "content": "[18] Yihan Hu, Jiazhi Yang, Li Chen, Keyu Li, Chonghao Sima, Xizhou Zhu, Siqi Chai, Senyao Du, Tianwei Lin, Wenhai Wang, et al. 
Planning-oriented autonomous driving. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 17853-17862, 2023. 1" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 600, + 538, + 654 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 600, + 538, + 654 + ], + "spans": [ + { + "bbox": [ + 308, + 600, + 538, + 654 + ], + "type": "text", + "content": "[19] Yidong Huang, Jacob Sansom, Ziqiao Ma, Felix Gervits, and Joyce Chai. Drivlme: Enhancing llm-based autonomous driving agents with embodied and social experiences. In Proceedings of the IEEE/RSJ International Conference on Intelligent Robots and Systems, pages 3153-3160. IEEE, 2024." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 664, + 538, + 729 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 664, + 538, + 729 + ], + "spans": [ + { + "bbox": [ + 307, + 664, + 538, + 729 + ], + "type": "text", + "content": "[20] Mohamed Manzour Hussien, Angie Nataly Melo, Augusto Luis Ballardini, Carlota Salinas Maldonado, Ruben Izquierdo, and Miguel Angel Sotelo. Rag-based explainable prediction of road users behaviors for automated driving using knowledge graphs and large language models. Expert Systems with Applications, 265:125914, 2025. 
2, 3" + } + ] + } + ], + "index": 22 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 747, + 300, + 756 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 747, + 300, + 756 + ], + "spans": [ + { + "bbox": [ + 293, + 747, + 300, + 756 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 106, + 287, + 729 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 55, + 106, + 287, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 106, + 287, + 138 + ], + "spans": [ + { + "bbox": [ + 55, + 106, + 287, + 138 + ], + "type": "text", + "content": "[21] Raisa Islam and Owana Marzia Moushi. Gpt-4o: The cutting-edge advancement in multimodal llm. Authorea Preprints, 2024. 7" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 139, + 287, + 203 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 139, + 287, + 203 + ], + "spans": [ + { + "bbox": [ + 55, + 139, + 287, + 203 + ], + "type": "text", + "content": "[22] Bo Jiang, Shaoyu Chen, Qing Xu, Bencheng Liao, Jiajie Chen, Helong Zhou, Qian Zhang, Wenyu Liu, Chang Huang, and Xinggang Wang. Vad: Vectorized scene representation for efficient autonomous driving. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 8340-8350, 2023. 1, 7" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 204, + 287, + 256 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 204, + 287, + 256 + ], + "spans": [ + { + "bbox": [ + 55, + 204, + 287, + 256 + ], + "type": "text", + "content": "[23] Bo Jiang, Shaoyu Chen, Benchcheng Liao, Xingyu Zhang, Wei Yin, Qian Zhang, Chang Huang, Wenyu Liu, and Xinggang Wang. Senna: Bridging large vision-language models and end-to-end autonomous driving. 
arXiv preprint arXiv:2410.22313, 2024. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 257, + 287, + 311 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 257, + 287, + 311 + ], + "spans": [ + { + "bbox": [ + 55, + 257, + 287, + 311 + ], + "type": "text", + "content": "[24] Zhengbao Jiang, Frank F Xu, Luyu Gao, Zhiqing Sun, Qian Liu, Jane Dwivedi-Yu, Yiming Yang, Jamie Callan, and Graham Neubig. Active retrieval augmented generation. In Proceedings of the Conference on Empirical Methods in Natural Language Processing, pages 7969-7992, 2023. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 312, + 287, + 364 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 312, + 287, + 364 + ], + "spans": [ + { + "bbox": [ + 55, + 312, + 287, + 364 + ], + "type": "text", + "content": "[25] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. In Proceedings of the International Conference on Machine Learning, pages 19730-19742, 2023. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 364, + 287, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 364, + 287, + 396 + ], + "spans": [ + { + "bbox": [ + 55, + 364, + 287, + 396 + ], + "type": "text", + "content": "[26] Yixuan Li, Xuesong Wang, Tianyi Wang, and Qian Liu. Characteristics analysis of autonomous vehicle pre-crash scenarios. arXiv preprint arXiv:2502.20789, 2025. 1" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 396, + 287, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 396, + 287, + 460 + ], + "spans": [ + { + "bbox": [ + 55, + 396, + 287, + 460 + ], + "type": "text", + "content": "[27] Zhiqi Li, Wenhai Wang, Hongyang Li, Enze Xie, Chonghao Sima, Tong Lu, Qiao Yu, and Jifeng Dai. 
Bevformer: Learning bird's-eye-view representation from lidar-camera via spatiotemporal transformers. IEEE Transactions on Pattern Analysis and Machine Intelligence, 47(3):2020-2036, 2025. 5" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 461, + 287, + 556 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 461, + 287, + 556 + ], + "spans": [ + { + "bbox": [ + 55, + 461, + 287, + 556 + ], + "type": "text", + "content": "[28] Yunsheng Ma, Wenqian Ye, Can Cui, Haiming Zhang, Shuo Xing, Fucai Ke, Jinhong Wang, Chenglin Miao, Jintai Chen, Hamid Rezatofighi, Zhen Li, Guangtao Zheng, Chao Zheng, Tianjiao He, Manmohan Chandraker, Burhaneddin Yaman, Xin Ye, Hang Zhao, and Xu Cao. Position: Prospective of autonomous driving - multimodal llms world models embodied intelligence ai alignment and mamba. In Proceedings of the Winter Conference on Applications of Computer Vision Workshops, pages 1010-1026, 2025. 1" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 557, + 287, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 557, + 287, + 588 + ], + "spans": [ + { + "bbox": [ + 55, + 557, + 287, + 588 + ], + "type": "text", + "content": "[29] Jiageng Mao, Yuxi Qian, Junjie Ye, Hang Zhao, and Yue Wang. Gpt-driver: Learning to drive with gpt. arXiv preprint arXiv:2310.01415, 2023. 1" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 589, + 287, + 642 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 589, + 287, + 642 + ], + "spans": [ + { + "bbox": [ + 55, + 589, + 287, + 642 + ], + "type": "text", + "content": "[30] Xiangru Mu, Tong Qin, Songan Zhang, Chunjing Xu, and Ming Yang. Pix2planning: End-to-end planning by vision-language model for autonomous driving on carla simulator. In Proceedings of the IEEE Intelligent Vehicles Symposium, pages 2383-2390. IEEE, 2024. 
3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 643, + 287, + 706 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 643, + 287, + 706 + ], + "spans": [ + { + "bbox": [ + 55, + 643, + 287, + 706 + ], + "type": "text", + "content": "[31] Chenbin Pan, Burhaneddin Yaman, Tommaso Nesti, Abhirup Mallik, Alessandro G Allievi, Senem Velipasalar, and Liu Ren. Vlp: Vision language planning for autonomous driving. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14760-14769, 2024. 1, 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 55, + 708, + 287, + 729 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 708, + 287, + 729 + ], + "spans": [ + { + "bbox": [ + 55, + 708, + 287, + 729 + ], + "type": "text", + "content": "[32] Ori Ram, Yoav Levine, Itay Dalmedigos, Dor Muhlgay, Amnon Shashua, Kevin Leyton-Brown, and Yoav Shoham. In" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 107, + 538, + 729 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 326, + 107, + 538, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 107, + 538, + 138 + ], + "spans": [ + { + "bbox": [ + 326, + 107, + 538, + 138 + ], + "type": "text", + "content": "context retrieval-augmented language models. Transactions of the Association for Computational Linguistics, 11:1316-1331, 2023. 3" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 140, + 538, + 193 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 140, + 538, + 193 + ], + "spans": [ + { + "bbox": [ + 307, + 140, + 538, + 193 + ], + "type": "text", + "content": "[33] Katrin Renz, Long Chen, Ana-Maria Marcu, Jan Hünermann, Benoit Hanotte, Alice Karsund, Jamie Shotton, Elahe Arani, and Oleg Sinavski. Carllava: Vision language models for camera-only closed-loop driving. 
arXiv preprint arXiv:2406.10165, 2024. 1" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 195, + 538, + 258 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 195, + 538, + 258 + ], + "spans": [ + { + "bbox": [ + 307, + 195, + 538, + 258 + ], + "type": "text", + "content": "[34] Hao Shao, Yuxuan Hu, Letian Wang, Guanglu Song, Steven L Waslander, Yu Liu, and Hongsheng Li. Lmdrive: Closed-loop end-to-end driving with large language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 15120-15130, 2024. 1" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 259, + 538, + 312 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 259, + 538, + 312 + ], + "spans": [ + { + "bbox": [ + 307, + 259, + 538, + 312 + ], + "type": "text", + "content": "[35] Zhihong Shao, Yeyun Gong, Yelong Shen, Minlie Huang, Nan Duan, and Weizhu Chen. Enhancing retrieval-augmented large language models with iterative retrieval-generation synergy. arXiv preprint arXiv:2305.15294, 2023. 3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 314, + 538, + 379 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 314, + 538, + 379 + ], + "spans": [ + { + "bbox": [ + 307, + 314, + 538, + 379 + ], + "type": "text", + "content": "[36] Chonghao Sima, Katrin Renz, Kashyap Chitta, Li Chen, Hanxue Zhang, Chengen Xie, Jens Beibwenger, Ping Luo, Andreas Geiger, and Hongyang Li. Drivelm: Driving with graph visual question answering. In Proceedings of the European Conference on Computer Vision, pages 256-274. Springer, 2024. 
1, 7" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 380, + 538, + 433 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 380, + 538, + 433 + ], + "spans": [ + { + "bbox": [ + 307, + 380, + 538, + 433 + ], + "type": "text", + "content": "[37] Xiaoyu Tian, Junru Gu, Bailin Li, Yicheng Liu, Yang Wang, Zhiyong Zhao, Kun Zhan, Peng Jia, Xianpeng Lang, and Hang Zhao. Drivevm: The convergence of autonomous driving and large vision-language models. arXiv preprint arXiv:2402.12289, 2024. 3, 7" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 434, + 538, + 477 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 434, + 538, + 477 + ], + "spans": [ + { + "bbox": [ + 307, + 434, + 538, + 477 + ], + "type": "text", + "content": "[38] Shiyi Wang, Yuxuan Zhu, Zhiheng Li, Yutong Wang, Li Li, and Zhengbing He. Chatgpt as your vehicle co-pilot: An initial attempt. IEEE Transactions on Intelligent Vehicles, 8 (12):4706-4721, 2023. 1" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 478, + 538, + 540 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 478, + 538, + 540 + ], + "spans": [ + { + "bbox": [ + 307, + 478, + 538, + 540 + ], + "type": "text", + "content": "[39] Wenhai Wang, Jiangwei Xie, ChuanYang Hu, Haoming Zou, Jianan Fan, Wenwen Tong, Yang Wen, Silei Wu, Hanming Deng, Zhiqi Li, et al. Drivemlm: Aligning multi-modal large language models with behavioral planning states for autonomous driving. arXiv preprint arXiv:2312.09245, 2023. 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 543, + 538, + 597 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 543, + 538, + 597 + ], + "spans": [ + { + "bbox": [ + 307, + 543, + 538, + 597 + ], + "type": "text", + "content": "[40] Weihan Wang, Qingsong Lv, Wenmeng Yu, Wenyi Hong, Ji Qi, Yan Wang, Junhui Ji, Zhuoyi Yang, Lei Zhao, Song XiXuan, et al. 
Cogvlm: Visual expert for pretrained language models. Advances in Neural Information Processing Systems, 37:121475-121499, 2024. 7" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 598, + 538, + 641 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 598, + 538, + 641 + ], + "spans": [ + { + "bbox": [ + 307, + 598, + 538, + 641 + ], + "type": "text", + "content": "[41] Yangyang Wang and Tianyi Wang. Research on dual-clutch intelligent vehicle infrastructure cooperative control based on system delay prediction of two-lane highway on-ramp merging area. *Automotive Innovation*, 7:588–601, 2024. 1" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 642, + 538, + 696 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 642, + 538, + 696 + ], + "spans": [ + { + "bbox": [ + 307, + 642, + 538, + 696 + ], + "type": "text", + "content": "[42] Yujin Wang, Quanfeng Liu, Jiaqi Fan, Jinlong Hong, Hongqing Chu, Mengjian Tian, Bingzhao Gao, and Hong Chen. Rac3: Retrieval-augmented corner case comprehension for autonomous driving with vision-language models. arXiv preprint arXiv:2412.11050, 2024. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 697, + 538, + 729 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 697, + 538, + 729 + ], + "spans": [ + { + "bbox": [ + 307, + 697, + 538, + 729 + ], + "type": "text", + "content": "[43] Yi Xu, Yuxin Hu, Zaiwei Zhang, Gregory P Meyer, Siva Karthik Mustikovela, Siddhartha Srinivasa, Eric M Wolff, and Xin Huang. 
Vlm-ad: End-to-end autonomous" + } + ] + } + ], + "index": 24 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 292, + 747, + 303, + 756 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 747, + 303, + 756 + ], + "spans": [ + { + "bbox": [ + 292, + 747, + 303, + 756 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 106, + 287, + 583 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 74, + 106, + 286, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 106, + 286, + 127 + ], + "spans": [ + { + "bbox": [ + 74, + 106, + 286, + 127 + ], + "type": "text", + "content": "driving through vision-language model supervision. arXiv preprint arXiv:2412.14446, 2024. 4" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 128, + 287, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 128, + 287, + 182 + ], + "spans": [ + { + "bbox": [ + 55, + 128, + 287, + 182 + ], + "type": "text", + "content": "[44] Shota Yamazaki, Chenyu Zhang, Takuya Nanri, Akio Shigekane, Siyuan Wang, Jo Nishiyama, Tao Chu, and Kohei Yokosawa. Explanation for trajectory planning using multimodal large language model for autonomous driving. arXiv preprint arXiv:2411.09971, 2024. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 183, + 287, + 256 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 183, + 287, + 256 + ], + "spans": [ + { + "bbox": [ + 55, + 183, + 287, + 256 + ], + "type": "text", + "content": "[45] Kairui Yang, Zihao Guo, Gengjie Lin, Haotian Dong, Zhao Huang, Yipeng Wu, Die Zuo, Jibin Peng, Ziyuan Zhong, Xin Wang, Qing Guo, Xiaosong Jia, Junchi Yan, and Di Lin. Trajectory-llm: A language-based data generator for trajectory prediction in autonomous driving. 
In Proceedings of the International Conference on Learning Representations, 2025. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 259, + 287, + 312 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 259, + 287, + 312 + ], + "spans": [ + { + "bbox": [ + 55, + 259, + 287, + 312 + ], + "type": "text", + "content": "[46] Jianhao Yuan, Shuyang Sun, Daniel Omeiza, Bo Zhao, Paul Newman, Lars Kunze, and Matthew Gadd. Rag-driver: Generalisable driving explanations with retrieval-augmented in-context learning in multi-modal large language model. arXiv preprint arXiv:2402.10828, 2024. 2, 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 312, + 286, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 312, + 286, + 354 + ], + "spans": [ + { + "bbox": [ + 55, + 312, + 286, + 354 + ], + "type": "text", + "content": "[47] Ekim Yurtsever, Jacob Lambert, Alexander Carballo, and Kazuya Takeda. A survey of autonomous driving: Common practices and emerging technologies. IEEE Access, 8: 58443-58469, 2020. 1" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 356, + 287, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 356, + 287, + 430 + ], + "spans": [ + { + "bbox": [ + 55, + 356, + 287, + 430 + ], + "type": "text", + "content": "[48] Yan Zeng, Hanbo Zhang, Jiani Zheng, Jiangnan Xia, Guoqiang Wei, Yang Wei, Yuchen Zhang, Tao Kong, and Ruihua Song. What matters in training a gpt4-style language model with multimodal inputs? In Proceedings of the Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 7930-7957, 2024. 
7" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 432, + 287, + 485 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 432, + 287, + 485 + ], + "spans": [ + { + "bbox": [ + 55, + 432, + 287, + 485 + ], + "type": "text", + "content": "[49] Miao Zhang, Zhenlong Fang, Tianyi Wang, Qian Zhang, Shuai Lu, Junfeng Jiao, and Tianyu Shi. A cascading cooperative multi-agent framework for on-ramp merging control integrating large language models. arXiv preprint arXiv:2503.08199, 2025. 1" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 486, + 286, + 538 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 486, + 286, + 538 + ], + "spans": [ + { + "bbox": [ + 55, + 486, + 286, + 538 + ], + "type": "text", + "content": "[50] Juncheng Zheng, Meiyu Liang, Yang Yu, Yawen Li, and Zhe Xue. Knowledge graph enhanced multimodal transformer for image-text retrieval. In Proceedings of the IEEE International Conference on Data Engineering, pages 70-82. IEEE, 2024. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 540, + 286, + 583 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 540, + 286, + 583 + ], + "spans": [ + { + "bbox": [ + 55, + 540, + 286, + 583 + ], + "type": "text", + "content": "[51] Yaowei Zheng, Richong Zhang, Junhao Zhang, Yanhan Ye, Zheyan Luo, Zhangchi Feng, and Yongqiang Ma. Llamafactory: Unified efficient fine-tuning of " + }, + { + "bbox": [ + 55, + 540, + 286, + 583 + ], + "type": "inline_equation", + "content": "100+" + }, + { + "bbox": [ + 55, + 540, + 286, + 583 + ], + "type": "text", + "content": " language models. arXiv preprint arXiv:2403.13372, 2024. 
4" + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 292, + 747, + 302, + 756 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 747, + 302, + 756 + ], + "spans": [ + { + "bbox": [ + 292, + 747, + 302, + 756 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2503_13xxx/2503.13881/f8ea68e7-ed15-4c1e-a85c-7872bf8b0c7c_content_list.json b/data/2025/2503_13xxx/2503.13881/f8ea68e7-ed15-4c1e-a85c-7872bf8b0c7c_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..6f8fb2fc13ebf3f0480f9d7461b92184210187dd --- /dev/null +++ b/data/2025/2503_13xxx/2503.13881/f8ea68e7-ed15-4c1e-a85c-7872bf8b0c7c_content_list.json @@ -0,0 +1,2941 @@ +[ + { + "type": "text", + "text": "MMR: A LARGE-SCALE BENCHMARK DATASET FOR MULTI-TARGET AND MULTI-GRANULARITY REASONING SEGMENTATION", + "text_level": 1, + "bbox": [ + 171, + 99, + 823, + 174 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Donggon Jang* Yucheol Cho* Suin Lee Taehyeon Kim Dae-Shik Kim† \nDepartment of Electrical Engineering, KAIST", + "bbox": [ + 179, + 194, + 689, + 223 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{jdg900,yc_cho,suinlee,rlaxogus0814,daeshik}@kaist.ac.kr", + "bbox": [ + 179, + 223, + 725, + 239 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ABSTRACT", + "text_level": 1, + "bbox": [ + 450, + 273, + 547, + 289 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The fusion of Large Language Models (LLMs) with vision models is pioneering new possibilities in user-interactive vision-language tasks. A notable application is reasoning segmentation, where models generate pixel-level segmentation masks by comprehending implicit meanings in human instructions. 
However, seamless human-AI interaction demands more than just object-level recognition; it requires understanding both objects and the functions of their detailed parts, particularly in multi-target scenarios. For example, when instructing a robot to \"turn on the TV\", there could be various ways to accomplish this command. Recognizing multiple objects capable of turning on the TV, such as the TV itself or a remote control (multi-target), provides more flexible options and aids in finding the optimized scenario. Furthermore, understanding specific parts of these objects, like the TV's button or the remote's button (part-level), is important for completing the action. Unfortunately, current reasoning segmentation datasets predominantly focus on a single target object-level reasoning, which limits the detailed recognition of an object's parts in multi-target contexts. To address this gap, we construct a large-scale dataset called Multi-target and Multi-granularity Reasoning (MMR). MMR comprises 194K complex and implicit instructions that consider multi-target, object-level, and part-level aspects, based on pre-existing image-mask sets. This dataset supports diverse and context-aware interactions by hierarchically providing object and part information. Moreover, we propose a straightforward yet effective framework for multi-target, object-level, and part-level reasoning segmentation. Experimental results on MMR show that the proposed method can reason effectively in multi-target and multi-granularity scenarios, while the existing reasoning segmentation model still has room for improvement. 
The dataset is available at https://github.com/jdg900/MMR.", + "bbox": [ + 228, + 305, + 769, + 654 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 INTRODUCTION", + "text_level": 1, + "bbox": [ + 173, + 680, + 336, + 695 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Human-machine interaction is a key focus in AI for real-world applications, driving interest in multimodal perception models that integrate vision and language modalities. The model perceives the context within the image related to explicit text query inputs and predicts pixel-level masks or bounding boxes accordingly. For example, Open Vocabulary Segmentation (OVS) (Liang et al., 2023; Cho et al., 2023; Xu et al., 2023), leveraging models like CLIP (Radford et al., 2021), generates segmentation masks from open-set text categories. Similarly, Referring Expression Segmentation (RES) (Wang et al., 2023; Hu et al., 2023; Liu et al., 2023a; Yang et al., 2022) predicts the segmentation mask corresponding to the objects referenced by the text input within the image. However, these models encounter challenges with implicit and complex text queries, limiting their effectiveness in real-world scenarios.", + "bbox": [ + 169, + 712, + 826, + 852 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The emergence of Large Language Models (LLMs) (Zheng et al., 2024; Roumeliotis & Tselikas, 2023; Achiam et al., 2023; Zhang et al., 2023a) offers promising solutions to this challenge. 
Recent", + "bbox": [ + 169, + 857, + 823, + 887 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Equal Contribution †Corresponding Author", + "bbox": [ + 189, + 896, + 336, + 922 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2503.13881v1 [cs.CV] 18 Mar 2025", + "bbox": [ + 22, + 258, + 57, + 705 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "studies (Bai et al., 2023; Li et al., 2023; Liu et al., 2024; Zhu et al., 2023; Zhang et al., 2023b; Chen et al., 2023; You et al., 2023) have witnessed that multimodal LLMs with superior reasoning capabilities can effectively perform vision tasks when given implicit text inputs. However, current multimodal LLMs primarily provide information corresponding to images or regions in text form, lacking pixel-level mask generation.", + "bbox": [ + 169, + 103, + 823, + 174 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To address these limitations, LISA (Lai et al., 2023) introduces reasoning segmentation. Unlike previous tasks that rely on explicit text (e.g., \"steak\"), reasoning segmentation handles implicit queries that require intricate reasoning or world knowledge (e.g., \"the food with most protein\"), by combining LLMs with the Segment Anything Model (SAM) (Kirillov et al., 2023) that has robust mask generation capabilities. LISA also introduces ReasonSeg, a benchmark dataset for reasoning segmentation. ReasonSeg consists of 1,218 image-instruction pairs, each containing implicit text question-answer pairs that involve complex reasoning for each image. 
Nevertheless, ReasonSeg has two limitations: 1) It does not adequately address scenarios involving multiple targets, and 2) it primarily focuses on object-level reasoning, treating part-level targets ambiguously. Although the recently proposed MUSE dataset by PixelLM (Ren et al., 2023) addresses multi-target object-level reasoning, it does not consider part-level reasoning. These observations underscore that current datasets for reasoning segmentation overlook the complexities of multiple targets and part-level scenarios, concentrating instead solely on object-level reasoning. This limitation restricts more advanced functionalities in reasoning segmentation.", + "bbox": [ + 169, + 180, + 826, + 377 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this paper, we introduce a Multi-target and Multi-granularity Reasoning segmentation (MMR) dataset to overcome these limitations, which covers both multiple targets and fine-grained part-level reasoning. We collect image and mask annotations from the publicly available PACO-LVIS dataset (Ramanathan et al., 2023). These annotations include class names and bounding box information of objects and parts. Then, inspired by LLaVA (Liu et al., 2024), we generate intricate question-answer pairs using the GPT-4V API (Achiam et al., 2023). Through this, the MMR dataset contains a vast collection of 194K complex and implicit instructions for comprehensive reasoning segmentation. A distinguishing characteristic of the proposed MMR dataset is its ability to handle multiple objects and diverse parts in the question-answer pairs. 
This diverse granularity enables models to reason and comprehend complex questions about both multiple target objects and their parts within a single query, providing more meaningful and high-quality masks.", + "bbox": [ + 169, + 381, + 823, + 536 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Moreover, we propose a simple yet effective model, Multi-target and Multi-granularity Segmentation Assistant (M²SA), for multi-target, object-level, and part-level reasoning segmentation. The M²SA model incorporates an early local feature fusion and multiple [SEG] tokens, which enables the model to enhance fine-grained visual understanding and consider multi-target segmentation. Experimental results on benchmarks, such as MMR, single-target referring expression segmentation datasets, and a multi-granularity referring expression segmentation dataset, demonstrate that M²SA outperforms existing state-of-the-art methods. We believe that our dataset and model serve as a valuable resource for potential applications in real-world reasoning segmentation tasks, offering enhanced versatility and robustness.", + "bbox": [ + 169, + 541, + 826, + 667 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our contributions are summarized as follows:", + "bbox": [ + 169, + 672, + 473, + 688 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We construct the MMR dataset, which includes 194K complex and implicit question pairs for multi-target, object-level, and part-level reasoning segmentation. This dataset facilitates advanced reasoning segmentation tasks in open-world scenarios.", + "- We propose $\\mathbf{M}^2\\mathbf{SA}$ for multi-target, object-level, and part-level reasoning segmentation. 
It incorporates an early local feature fusion and multiple [SEG] tokens to improve fine-grained visual understanding and segment multiple targets.", + "- Experimental results on MMR and other benchmarks show that $\\mathbf{M}^2\\mathbf{SA}$ outperforms state-of-the-art methods, validating the effectiveness of its components." + ], + "bbox": [ + 215, + 698, + 821, + 815 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 RELATED WORK", + "text_level": 1, + "bbox": [ + 171, + 837, + 344, + 852 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Multimodal Large Language Models Recent advancements (Peng et al., 2023; Taori et al., 2023; Touvron et al., 2023; Zhang et al., 2022) in multimodal Large Language Models (LLMs) have greatly improved the integration between language models and vision tasks by comprehensively understanding and recognizing multiple modalities. Recently proposed models such as BLIP-2 (Li et al., 2023),", + "bbox": [ + 169, + 867, + 823, + 925 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 946, + 504, + 959 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Flamingo (Alayrac et al., 2022), MiniGPT-4 (Zhu et al., 2023), llama-adapter (Gao et al., 2023; Zhang et al., 2023a), LLaVA (Liu et al., 2024), InstructBLIP (Dai et al., 2024), InternGPT (Liu et al., 2023b), and QwenVL (Bai et al., 2023) have shown superiority at multimodal tasks such as visual question-answering and captioning, leveraging the multimodal understanding capability of LLMs. While these methods have demonstrated improved performance in vision-language tasks through instructional tuning, they only provide the text output about the visual target and focus on a holistic understanding of global information in the image. 
Therefore, their applicability is limited in tasks requiring finer-grained understanding at the pixel level.", + "bbox": [ + 169, + 103, + 826, + 217 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Reasoning Segmentation The task of reasoning segmentation, introduced by LISA (Lai et al., 2023), is understanding implicit text instruction and providing a corresponding mask for the answer. This task is more challenging and important than the referring expression segmentation task which deals with explicit and simple text queries. For instance, when a user wants to segment a pepper in an image, handling an implicit query like 'the food with a spicy taste' instead of a direct reference such as 'the pepper' is significant for improving human-AI interaction. To tackle this, LISA introduces ReasonSeg, a benchmark containing implicit text queries that require complex reasoning for each image. Recently, PixelLM (Ren et al., 2023), has addressed the limitation of ReasonSeg which considers only a single target in a query text. PixelLM constructs MUSE, a new dataset with multiple target objects in the text instructions. However, both studies are still limited to object-level reasoning segmentation. Methods such as GSVA (Xia et al., 2024) and GLaMM (Rasheed et al., 2024) have also been proposed, but they focus on frameworks for object-level reasoning segmentation rather than introducing new datasets. In this paper, we extend these existing tasks and propose a new benchmark dataset that considers both part-level and object-level reasoning.", + "bbox": [ + 169, + 229, + 826, + 426 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Part-level Segmentation Recent research (Li et al., 2022; Kirillov et al., 2019; Michieli et al., 2020; Zhou et al., 2021; Pan et al., 2023) has delved into a fine-grained understanding of objects at the part-level. For the part-level visual understanding, datasets with detailed annotations for each part are required. 
To this end, some initial studies (Gong et al., 2017; Li et al., 2017; Yang et al., 2019; Wah et al., 2011; Jia et al., 2020; Zheng et al., 2018) have introduced datasets with part-level masks on specific domains, such as human body parts (Gong et al., 2017; Li et al., 2017; Yang et al., 2019), bird parts (Wah et al., 2011), and fashion cloth parts (Jia et al., 2020; Zheng et al., 2018). Moreover, recognizing the need for annotations on general objects, some approaches (Chen et al., 2014; Mo et al., 2019; He et al., 2022; Zhou et al., 2019; Meletis et al., 2020; Ramanathan et al., 2023; Wei et al., 2024) have extended the existing object-level datasets by including more fine-grained annotations. Furthermore, there has been an attempt (Wang et al., 2023) to extend the previous Referring Expression Segmentation (RES) task to provide part-level segmentation masks matching explicit text queries. In line with this effort, our work introduces a new dataset that includes multiple target parts and diverse implicit text queries for multi-granularity reasoning segmentation.", + "bbox": [ + 169, + 439, + 826, + 636 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 MMR DATASET", + "text_level": 1, + "bbox": [ + 171, + 654, + 341, + 671 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Current publicly available datasets for reasoning segmentation primarily emphasize object-level reasoning. Consequently, Multimodal Large Language Models (MLLMs) often struggle with questions that involve multiple targets or require reasoning at both the object- and part-levels. To address these limitations, we introduce the Multi-target and Multi-granularity Reasoning (MMR) dataset. MMR includes multi-target, object-level, and part-level reasoning scenarios. This dataset comprises images and masks from the publicly available PACO dataset (Ramanathan et al., 2023), supplemented with implicit and complex question-answer pairs generated by the GPT-API (Achiam et al., 2023). 
Unlike existing datasets, MMR includes large-scale question-answer pairs that consider multiple target cases and require reasoning at both the object- and part-levels, enhancing its versatility and applicability. In the following sections, we detail the dataset generation process (Sec. 3.1), describe the data filtering process (Sec. 3.2), provide a statistical analysis of MMR (Sec. 3.3), and highlight its distinctiveness compared to existing datasets (Sec. 3.4).", + "bbox": [ + 169, + 686, + 826, + 854 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1 DATA GENERATION", + "text_level": 1, + "bbox": [ + 171, + 869, + 352, + 883 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To generate a multi-target, object-level, and part-level reasoning segmentation dataset, we leverage the PACO-LVIS dataset (Ramanathan et al., 2023). PACO-LVIS includes 456 object-specific part", + "bbox": [ + 169, + 895, + 823, + 926 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/e98e400479af6f6c5ba0198fac9f6a5a8eabd87b74f6f8c3931fa032e5554076.jpg", + "image_caption": [ + "Figure 1: The prompt used in our data creation process with GPT-4V." + ], + "image_footnote": [], + "bbox": [ + 250, + 103, + 750, + 303 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "classes across 75 object categories, offering 502K part-level masks and bounding boxes annotated across 273K object-level masks and bounding boxes. By utilizing these comprehensive images and multi-granularity mask annotations, we can reduce annotation costs while ensuring detailed and accurate segmentation data. 
To create intricate and implicit question-answer pairs for multiple target and multi-granularity reasoning, we employ a GPT-assisted data generation scheme similar to LLaVA (Liu et al., 2024). Specifically, we adopt GPT-4V API which has robust visual understanding capabilities. Fig. 1 illustrates the entire data generation process.", + "bbox": [ + 169, + 363, + 823, + 460 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To guide the GPT-4V API effectively, we carefully craft prompts that include GPT role, object and part information, task prompts, and requirements. GPT role defines the persona of the GPT-4V API, informing it about the context and objectives of the data generation process. Object & part information provides comprehensive annotations, such as object and part names within the image and their corresponding bounding box coordinates. Task prompt informs the GPT-4V API about the task definition and considerations for generating question-answer pairs. Requirements set the rules and patterns that the GPT-4V API should follow when generating question-answer pairs (e.g., \"questions should avoid direct mention of coordinates of objects or parts\" or \"Q&A pairs should contain multiple objects or parts\"). Please see the Appendix A.5 for the detailed prompt.", + "bbox": [ + 169, + 468, + 823, + 592 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The GPT-4V-assisted data generation follows a two-step process: 1) Global Caption Generation: GPT-4V API first generates a global caption based on the image to foster a deep understanding of its context. 2) Question-Answer Pair Generation: Leveraging this global caption along with object and part information, GPT-4V autonomously crafts multi-target, multi-granularity question-answer pairs. 
Carefully designed prompts and a two-step generation process enable GPT-4V to deeply comprehend image context and generate contextually relevant question-answer pairs.", + "bbox": [ + 169, + 599, + 823, + 684 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2 DATA FILTERING", + "text_level": 1, + "bbox": [ + 171, + 700, + 333, + 713 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Despite meticulously crafted prompts for guiding GPT-4V, occasional deviations from established rules result in the generation of subpar question-answer pairs. These deviations include questions that reveal explicit target coordinates or provide overly direct hints, as well as answers that offer irrelevant information or omit essential details. To enhance the reliability of the question-answer pairs in our dataset, a rigorous filtering process is essential. Therefore, we engage four skilled human inspectors to review the dataset according to strict criteria:", + "bbox": [ + 169, + 726, + 823, + 809 + ], + "page_idx": 3 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Logicality and Reasoning: Questions should avoid explicit target coordinates or strong hints. Non-compliant questions and their corresponding answers are removed. For example, a question like \"Which part of this animal [coordinates] uses its sense of smell?\" would be excluded.", + "- Coherence and Relevance: Answers lacking essential target information or containing irrelevant details are corrected for precision and relevance. This includes cases where answers mention objects or parts not provided in the annotations." 
+ ], + "bbox": [ + 215, + 821, + 823, + 924 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/3a43e4506a5918098ff444f068e5cb49842f6b8b3de45cc8ae4635bc58eb33b9.jpg", + "image_caption": [ + "Figure 2: An example from the MMR dataset generated through our data creation process. The left and right pictures show the object- and part-level segmentation masks, respectively." + ], + "image_footnote": [ + "Caption: A knife is inserted vertically into a sandwich on a cutting board, with another knife lying beside it and bottles in the background." + ], + "bbox": [ + 240, + 101, + 488, + 234 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/dba0b0f92114ba306981b6085c8e195b4cca087c05a93e961e6e03f14f63fb7c.jpg", + "image_caption": [], + "image_footnote": [ + "Question1: What item on the table is designed to be held at one end while the other end is meant for cutting through food? Answer1: The knife_1 [204.79, 2.4, 238.63, 226.53] is designed with a knife_1's handle [205, 2, 239, 126] to be held while the knife_1's blade [213, 121, 237, 226] is meant for cutting through food.", + "Question2: Which object on the table appears to be in the process of being used to keep a sandwich upright? Answer2: The knife_2 [304.84, 320.65, 615.34, 427.0] with its knife_2's blade [305, 321, 601, 422] inserted into the sandwich is being used to keep it upright.", + "Question3: If I wanted to read the product information on a container in view, which part should I look at? 
Answer3: To read the product information, you should look at the bottle_1's label [460, 105, 500, 282] or the bottle_2's label [300, 4, 413, 176].", + "Question4: Which objects in the scene are meant to contain liquids, and what part of them touches the surface they rest on? Answer4: The objects meant to contain liquids are bottle_1[459.07, 0.0, 603.49, 315.78] and bottle_2[296.85, 1.15, 416.19, 242.2]. The part that touches the surface they rest on is bottle_1's base [463, 287, 596, 316] and bottle_2's base [307, 220, 400, 241]." + ], + "bbox": [ + 490, + 101, + 754, + 236 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "- Clarity and Precision: Questions and answers should be clear, concise, and free of ambiguity. For example, ill-defined data, such as asking about the function of an object or part from a segmentation perspective, is removed (e.g., \"What is the function of object_1?\"). Answers should provide precise information that directly addresses the question without causing confusion.", + "bbox": [ + 215, + 429, + 823, + 500 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Originally, 222K question-answer pairs are generated. Of these, $12.6\\%$ are filtered out through a review process conducted by the four inspectors, resulting in the final MMR dataset. Since dataset generation is a key contribution to our work, each inspector thoroughly reviews the entire set of 222K question-answer pairs. To minimize human error, we only filter out question-answer pairs flagged by two or more inspectors. This meticulous filtering regimen ensures the integrity and trustworthiness of the MMR dataset. An example of the generated question-answer pairs is illustrated in Fig. 
2.", + "bbox": [ + 169, + 512, + 823, + 597 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3 DATA STATISTICS", + "text_level": 1, + "bbox": [ + 171, + 613, + 339, + 627 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The MMR dataset includes 194,398 intricate and implicit question-answer pairs with 57,643 corresponding images and masks selected from PACO-LVIS. The entire dataset is split into distinct sets for training (154,127 pairs), validation (8,194 pairs), and test (32,077 pairs). Moreover, the test set is further categorized into three subsets: object-only, part-only, and mixed sets, providing a benchmark for evaluating multi-granularity reasoning segmentation capabilities.", + "bbox": [ + 169, + 638, + 823, + 709 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Additionally, our dataset inherits a rich coverage of 75 object categories and 445 part categories from PACO-LVIS, enhancing its diversity and utility. We delve into the frequency distribution per object and part category across question-answer pairs. Fig. 3 (b) and (d) provide a comprehensive overview of the number of questions per object category and part category, respectively. The results show that our dataset encompasses a wide range of categories, ensuring that the question-answer pairs are not biased toward specific categories and exhibit a high level of diversity. Furthermore, the word clouds illustrated in Fig. 3 (a) and (c) highlight the prevalent head object and part categories, respectively. These word clouds demonstrate that our question-answer pairs are grounded in common and general objects and their associated parts. Fig. 3 (e) presents statistics on the number of targets in each question-answer pair. On average, there are 1.8 targets per answer, with the maximum number of targets in a single pair being 16. This demonstrates that our dataset can consider multiple targets in an image and cover diverse target reasoning. 
To evaluate the comprehensiveness of both objects and parts in the proposed dataset, we compare their occurrences within the total question-answer pairs. As depicted in Fig. 3 (f), there are 114,704 descriptions for objects and 226,869 for parts, maintaining a ratio of approximately 1:2. This ratio is reasonable because objects typically", + "bbox": [ + 169, + 715, + 826, + 925 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/6af68df24ea5fe2bcd2e8a1c6b52f8c4a4dbe4f613d06f3921cae1f077f251c6.jpg", + "image_caption": [ + "(a)" + ], + "image_footnote": [], + "bbox": [ + 287, + 108, + 398, + 191 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/49d95cca8a99e7183736adb72e5992742a6708b40215ddab704ed4836b67c487.jpg", + "image_caption": [ + "(c)" + ], + "image_footnote": [], + "bbox": [ + 452, + 103, + 568, + 191 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/75402cb6ca335d4d37dec42055f59df83bb9e5047c9952f863c51e27ad68564e.jpg", + "image_caption": [ + "(e)" + ], + "image_footnote": [], + "bbox": [ + 596, + 112, + 740, + 194 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/27ebdc79105d81dce6346da94f8ea4a8348668259ea47a09d9ab995347bce18b.jpg", + "image_caption": [ + "Figure 3: Statistics of the proposed MMR dataset. (a) the word cloud for the object categories, (b) the number of objects per each object category in questions (log scale), (c) the word cloud for the part categories, (d) the number of parts per each part category in questions (log scale), (e) the distribution of target count in answers, and (f) the total number of expressions of objects and parts." 
+ ], + "image_footnote": [], + "bbox": [ + 256, + 205, + 406, + 292 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/727c972b90b1ecc04fd743aed83f08e577ab1f60f02b3b714179cc2aabd64bfc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 421, + 205, + 573, + 292 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/78c36daf8e08ee87adcae76de957465e2efddd4d69ab427ee4a382fa5cb691f7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 586, + 205, + 740, + 292 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/28ee61eabb716ada21fdfdfa49acf467f7748bdb90d86f1e4ef1c603b2d049a3.jpg", + "table_caption": [ + "Table 1: Comparison among several reasoning segmentation datasets, including ReasonSeg (Lai et al., 2023), MUSE (Ren et al., 2023), and the proposed MMR. Here, part-level is an expression that refers to various parts of an object that appear in the image." + ], + "table_footnote": [], + "table_body": "
DatasetsObject-levelPart-levelMulti-target# of Q&A pairsGPT models
ReasonSeg×1.2KGPT-3.5
MUSE×214KGPT-4V
MMR194KGPT-4V
", + "bbox": [ + 253, + 444, + 743, + 494 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "consist of multiple parts. Therefore, it reflects a balanced distribution, contributing to the dataset's comprehensiveness and facilitating multi-granularity knowledge understanding.", + "bbox": [ + 169, + 527, + 823, + 556 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.4 COMPARISON WITH EXISTING REASONING SEGMENTATION DATASETS", + "text_level": 1, + "bbox": [ + 171, + 577, + 702, + 590 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Tab. 1 presents a comparative overview of existing reasoning segmentation datasets and the proposed MMR dataset. As observed, MMR offers several notable advantages over existing datasets.", + "bbox": [ + 169, + 604, + 823, + 632 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "First, MMR contains 194K question-answer pairs, comparable to MUSE (Ren et al., 2023), and far exceeds ReasonSeg (Lai et al., 2023) which has only 1,218 question-answer pairs primarily designed for validation and testing purposes. This extensive scale facilitates both training and evaluation for reasoning segmentation.", + "bbox": [ + 169, + 638, + 823, + 695 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Second, MMR supports question-answer pairs covering multi-target and multi-granularity (object-level and part-level) visual reasoning. Although MUSE includes multi-target instances, its coverage is limited to object-level reasoning. This lack of part-level detail reduces its effectiveness in fine-grained visual tasks. Part-level reasoning in MMR enables a more comprehensive understanding of visual contexts and hierarchical relationships between parts and objects. While ReasonSeg appears to include part-level reasoning, ReasonSeg often has ambiguous boundaries between objects and their parts because it doesn't specify which object a part belongs to. 
For instance, in a scene with a \"car\" and a \"tire\", ReasonSeg considers the \"tire\" as part of the \"car\", even if the tire is not attached. In contrast, MMR clearly distinguishes the boundaries between objects and their parts by specifying hierarchy like which object a part belongs to based on their spatial context. Additionally, unlike ReasonSeg, MMR distinguishes multiple objects of the same class within a single image at the instance level. For example, ReasonSeg might group all buses in a scene under a single \"Bus\" label. On the other hand, MMR treats them as distinct entities like \"Bus_1,\" \"Bus_2\", etc. Also, ReasonSeg treats all screens simply as \"screen,\" whereas MMR would specify \"laptop_1's screen,\" \"laptop_2's screen,\" and so forth. This allows MMR to handle objects or parts of the same class separately by considering their spatial context within the image.", + "bbox": [ + 169, + 702, + 826, + 924 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Third, MMR leverages the advanced visual understanding capabilities of GPT-4V for question-answer generation. GPT-4V receives the image along with information such as class names and bounding boxes of objects and parts, enabling detailed and contextually accurate question-answer generation. 
In comparison, ReasonSeg generates questions using the language-specialized GPT-3.5 and pre-trained image tagging models, which do not fully capture the visual context, leading to less relevant question-answer pairs with the image.",
    "bbox": [
      169,
      103,
      823,
      188
    ],
    "page_idx": 6
  },
  {
    "type": "text",
    "text": "In summary, MMR provides a substantial improvement over ReasonSeg and MUSE by including large-scale, multi-target, and multi-granularity question-answer pairs. It strengthens real-world applicability, making it a valuable asset for advancing research in reasoning-based segmentation tasks.",
    "bbox": [
      169,
      194,
      823,
      238
    ],
    "page_idx": 6
  },
  {
    "type": "text",
    "text": "4 BASELINE FRAMEWORK",
    "text_level": 1,
    "bbox": [
      171,
      262,
      397,
      277
    ],
    "page_idx": 6
  },
  {
    "type": "text",
    "text": "We propose a novel baseline framework for multi-target and multi-granularity reasoning segmentation, $\\mathbf{M}^2\\mathbf{SA}$ . $\\mathbf{M}^2\\mathbf{SA}$ enhances the LISA framework with two key features: 1) Early Local Feature Fusion and 2) multiple [SEG] tokens. For Early Local Feature Fusion, we extract local features from the early layer of the SAM's vision encoder, which contains fine-grained details such as image edges and boundaries. These local features are fused with the global semantic context features from the last layer of SAM's vi",
    "bbox": [
      169,
      297,
      388,
      532
    ],
    "page_idx": 6
  },
  {
    "type": "image",
    "img_path": "images/cb1cb6ee8f4d1046e00e0441d898b982ead7bd659cd7608fd7812d75c403cf50.jpg",
    "image_caption": [
      "Figure 4: The overview of $\\mathbf{M}^2\\mathrm{SA}$ framework."
    ],
    "image_footnote": [],
    "bbox": [
      401,
      324,
      826,
      488
    ],
    "page_idx": 6
  },
  {
    "type": "text",
    "text": "sion encoder for more informative visual features in the mask decoder. 
Multiple [SEG] tokens overcome the LISA framework's limitation of a single [SEG] token, which struggles to segment multiple targets simultaneously. To overcome this, we propose utilizing multiple [SEG] tokens. In our MMR dataset, we append a [SEG] token to each target object and part in the answer annotations (e.g., \"When closing the laptop, laptop computer's screen [SEG] would come into contact with laptop computer's base panel [SEG].\"). This approach enables the model to predict separate [SEG] tokens for each target, reducing ambiguity among multiple targets.",
    "bbox": [
      169,
      532,
      823,
      630
    ],
    "page_idx": 6
  },
  {
    "type": "text",
    "text": "Model Architecture Fig. 4 presents the overall architecture of the proposed $\\mathbf{M}^2\\mathbf{SA}$ framework, which integrates two core components: Segment Anything Model (SAM) (Kirillov et al., 2023) and Multimodal Large Language Model (MLLM), specifically LLaVA (Liu et al., 2024). SAM module consists of SAM Vision Encoder $(E)$ and SAM Mask Decoder $(D)$ , while the MLLM comprises CLIP Vision Encoder $(I)$ , vision-to-text projector $(\\psi)$ , and Large Language Model $(F)$ . The image $x_{img} \\in R^{h \\times w \\times 3}$ is fed into the SAM Vision Encoder $(E)$ , which generates global context features $v_g = E(x_{img}) \\in R^{h/16 \\times w/16 \\times c}$ and early local features $v_l = E_l(x_{img}) \\in R^{h/16 \\times w/16 \\times c'}$ . To align the channel dimensions of $v_l$ with $v_g$ , the early local features $v_l$ are passed through two convolution layers, resulting in refined features $\\hat{v}_l \\in R^{h/16 \\times w/16 \\times c}$ . $v_g$ and $\\hat{v}_l$ are then summed to obtain visual features $v_{seg} \\in R^{h/16 \\times w/16 \\times c}$ for segmentation. 
Simultaneously, the image $x_{img}$ is input into the CLIP Vision Encoder $(I)$ , producing visual token embeddings $f_{img} = \\psi(I(x_{img})) \\in R^{N_{img} \\times d}$ , which are mapped to the LLM input space using the vision-to-text projector $\\psi$ . In parallel, the text queries $x_{txt}$ are tokenized by the $F$ 's tokenizer, producing text token embeddings $f_{txt} \\in R^{N_{txt} \\times d}$ . The visual token embeddings $f_{img}$ and text token embeddings $f_{txt}$ are concatenated and processed by LLM $F$ , resulting in output response $\\hat{y}_{txt} = F(\\text{concat}(f_{img}, f_{txt}))$ . $\\hat{y}_{txt}$ contains the textual response to the text query and special [SEG] tokens that correspond to each target entity to be segmented. These multiple [SEG] token embeddings are extracted and projected into SAM's prompt space via the projector $\\phi$ , resulting in embeddings $f_{seg} = \\phi(\\hat{y}_{txt}[SEG]) \\in R^{N_{seg} \\times c}$ . Finally, the SAM Mask Decoder $(D)$ takes the visual features $v_{seg}$ and the multiple [SEG] token embeddings", + "bbox": [ + 169, + 650, + 826, + 925 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "$f_{seg}$ as input to generate the segmentation mask $M = D(\\text{concat}(v_{seg}, f_{seg}))$ , which identifies the target regions in the image corresponding to the text queries.", + "bbox": [ + 169, + 103, + 823, + 132 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "**Optimization** Our model is trained end-to-end through two sources of supervision. For the text generation, we compute auto-regressive cross-entropy loss $L_{txt}$ between the text output $\\hat{y}_{txt}$ and the ground-truth text answer $y_{txt}$ . 
For the high-quality segmentation mask generation, the mask loss $L_{mask}$ is calculated between the output mask $\\hat{M}$ and the ground-truth mask $M$ . The mask loss $L_{mask}$ is a weighted sum of per-pixel binary cross-entropy loss $L_{bce}$ and a DICE loss $L_{dice}$ , determined by $\\lambda_{bce}$ and $\\lambda_{dice}$ . The overall loss $L$ is formulated as follows:", + "bbox": [ + 169, + 146, + 823, + 232 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\nL = L _ {t x t} + L _ {m a s k},\n$$\n", + "text_format": "latex", + "bbox": [ + 423, + 255, + 553, + 268 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\nL _ {m a s k} = \\lambda_ {b c e} L _ {b c e} + \\lambda_ {d i c e} L _ {d i c e}, \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 390, + 265, + 823, + 286 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "where $\\lambda_{bce}$ and $\\lambda_{dice}$ are set to 0.5 and 2.0, respectively.", + "bbox": [ + 169, + 290, + 544, + 306 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5 EXPERIMENT", + "text_level": 1, + "bbox": [ + 171, + 324, + 318, + 339 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.1 EXPERIMENTAL SETUP", + "text_level": 1, + "bbox": [ + 171, + 354, + 375, + 369 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Implementation Details We use pre-trained LLaVA-7B (Liu et al., 2024) and LLaVA-Llama2-13B with CLIP-ViT-L/14 (Radford et al., 2021) and Vicuna-7B (Chiang et al., 2023)/Llama2-13B (Touvron et al., 2023) to form Multimodal Large Language Model (MLLM). We adopt the pre-trained SAM-ViT-H (Kirillov et al., 2023) for the segmentation model. For CLIP-ViT-L/14, input image $x_{img}$ is resized to $224 \\times 224 \\times 3$ and processed with a patch size of 14, resulting in $N_{img} = 256$ . LLM dimensions $d$ are set to 4096 and 5120 for Vicuna-7B and Llama2-13B. For SAM-ViT-H, $c$ and $c'$ are 256 and 1280, respectively. 
Efficient fine-tuning of the MLLM is facilitated using LoRA (Hu et al., 2021). The trainable components in $\\mathbf{M}^2\\mathbf{SA}$ include the SAM Mask Decoder $D$ , the projector $\\phi$ , two convolution layers, the LoRA adapter in MLLM, and the token embeddings. We use features from the 8th layer in the SAM Vision Encoder $E$ for early layer feature fusion. Our model is trained for 10 epochs, with each epoch consisting of 5,000 steps. We employ the AdamW (Loshchilov & Hutter, 2017) optimizer with a learning rate of 0.0003 and set gradient accumulation to 10 steps per update. Additionally, we use WarmupDecayLR as the learning rate scheduler. The learning rate is linearly decayed after 100 steps. The batch size and LoRA rank are set to 2 and 8, respectively. All experiments are conducted using 4 NVIDIA RTX A6000 GPUs. The results reported in the paper are the average values obtained from experiments conducted with 3 different random seeds.", + "bbox": [ + 169, + 380, + 826, + 617 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Datasets For model training, we adopt the mixed training dataset composition scheme proposed by LISA (Lai et al., 2023), comprising four types: semantic segmentation datasets (ADE20K (Zhou et al., 2019), COCO-Stuff (Caesar et al., 2018), Mapillary (Neuhold et al., 2017), PACO-LVIS (Ramanathan et al., 2023), and PASCAL-Part (Chen et al., 2014)), referring expression segmentation datasets (RefCOCO (Kazemzadeh et al., 2014), RefCOCO+ (Kazemzadeh et al., 2014), RefCOCOg (Mao et al., 2016), and RefCLEF (Kazemzadeh et al., 2014)), a visual question answering dataset (LLaVA-Instruct-150K (Liu et al., 2024)), and the proposed MMR dataset for multi-target and multi-granularity reasoning segmentation. 
We sample the data from the mixed training dataset in a ratio of 2:9:2:6, where 2 represents semantic segmentation datasets, 9 represents referring expression segmentation datasets, 2 represents the visual question answering dataset, and 6 represents the proposed MMR dataset.", + "bbox": [ + 169, + 631, + 826, + 785 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Baseline Methods To validate the effectiveness of the $\\mathbf{M}^2\\mathbf{SA}$ for a multi-target and multi-granularity reasoning segmentation task, we adopt LISA (Lai et al., 2023), GSVA (Xia et al., 2024), and GLaMM (Rasheed et al., 2024) along with their variants. The pre-trained models refer to those trained solely on their respective datasets. In contrast, the variant models referred to as $\\mathrm{model}_{tr}$ , are trained from scratch on a mixed training set that includes the MMR dataset. Due to issues with the publicly available code from the PixelLM, we exclude PixelLM from the baseline methods to ensure reliable and consistent comparison results. For a Multi-granularity Referring Expression Segmentation (MRES) task, we additionally adopt the class RES models (Yang et al., 2022; Liu et al., 2023a; Wang et al., 2023; 2022) and the general models (Zhu et al., 2022; Zou et al., 2023; 2024).", + "bbox": [ + 169, + 797, + 823, + 925 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/00c67b8bab549d0d741c5a3bfe54aac53625989b1e503526d215c1f6bddaf286.jpg", + "table_caption": [ + "Table 2: Results on MMR benchmark. The gIoU and cIoU metrics are reported for the comparison. Obj & Part, Obj, and Part denote multi-granularity, object-only, and part-only evaluation settings. The best results are highlighted in bold." 
+ ], + "table_footnote": [], + "table_body": "
Methodsvaltest
Obj & PartObjPartObj & Part
gIoUcIoUgIoUcIoUgIoUcIoUgIoUcIoU
LISA-7B (Lai et al., 2023)13.818.323.525.16.67.914.517.9
LISA-7Btr19.431.634.741.88.013.119.527.1
GSVA-7B (Xia et al., 2024)14.625.126.434.36.011.615.524.8
GSVA-7Btr19.838.930.241.18.018.621.234.5
GLaMM (Rasheed et al., 2024)12.619.223.731.93.96.413.318.7
GLaMMtr26.947.140.354.212.125.530.345.0
M2SA-7B27.848.641.055.613.527.030.946.8
LISA-Llama2-13B (Lai et al., 2023)15.420.026.127.97.48.416.119.8
LISA-Llama2-13Btr22.333.440.245.210.716.423.029.2
M2SA-Llama2-13B28.449.142.357.613.627.231.647.6
", + "bbox": [ + 279, + 159, + 720, + 281 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/955d59c28fc9ac1c5a98ff89a36a38fbe9c2e717d2f8675830a553efaa127b29.jpg", + "table_caption": [ + "Table 3: Referring expression segmentation results on RefCOCO, RefCOCO+ (Kazemzadeh et al., 2014) and RefCOCOg (Mao et al., 2016) among $\\mathbf{M}^2\\mathbf{SA}$ and existing methods. For a fair comparison with previous methods, the cIoU metrics are adopted. The best results are highlighted in bold." + ], + "table_footnote": [], + "table_body": "
MethodsRefCOCORefCOCO+RefCOCOg
valtestAtestBvaltestAtestBval(U)test(U)
MCN (Luo et al., 2020)62.464.259.750.655.544.749.249.4
VLT (Ding et al., 2021)67.570.565.256.361.050.155.057.0
CRIS (Wang et al., 2022)70.573.266.162.368.153.759.960.4
LAVT (Yang et al., 2022)72.775.868.862.168.455.161.262.1
ReLA (Liu et al., 2023a)73.876.570.266.071.057.765.066.0
X-Decoder (Zou et al., 2023)------64.6-
SEEM (Zou et al., 2024)------65.7-
LISA-7B (Lai et al., 2023)74.176.571.162.467.456.566.468.5
GSVA-7B (Xia et al., 2024)76.477.472.864.567.758.671.172.0
GLaMM (Rasheed et al., 2024)79.583.276.972.678.764.674.274.9
M2SA-7B74.076.869.763.167.256.167.068.3
LISA-Llama2-13B (Lai et al., 2023)73.677.370.563.268.257.067.068.4
M2SA-Llama2-13B74.677.671.064.068.157.669.069.3
", + "bbox": [ + 279, + 358, + 720, + 496 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Evaluation Metrics Following the implementation of the referring expression segmentation works, we adopt gIoU and cIoU scores to assess the quality of the segmentation mask. gIoU denotes the mean IoU for each mask, whereas cIoU is computed by the cumulative intersection area over the cumulative union area across the entire dataset. Given that cIoU may exhibit bias towards large-area objects, gIoU is preferable for evaluating part regions.", + "bbox": [ + 169, + 527, + 823, + 599 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5.2 RESULTS ON BENCHMARK DATASETS", + "text_level": 1, + "bbox": [ + 171, + 616, + 477, + 630 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Comparison on MMR Tab. 2 compares $\\mathbf{M}^2\\mathrm{SA}$ and the baseline models in a multi-target and multi-granularity reasoning segmentation task (MMR dataset). The pre-trained models perform poorly on the proposed MMR dataset, particularly struggling with the part-only set due to its lack of detailed part-level understanding. Conversely, $\\mathrm{LISA}_{tr}$ , $\\mathrm{GSVA}_{tr}$ , and $\\mathrm{GLaMM}_{tr}$ , trained using the proposed MMR dataset, exhibit superior performance as they acquire both object-level and part-level knowledge. However, its ability to handle multi-target and fine-detail reasoning remains limited. In contrast, the proposed $\\mathbf{M}^2\\mathrm{SA}$ shows highly competitive performance, effectively managing multi-target scenarios and fine-detail tasks, thus showcasing its strength in comprehensive reasoning segmentation. Qualitative results are provided in the Appendix A.13.", + "bbox": [ + 169, + 642, + 826, + 768 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Comparison on Referring Expression Segmentation Task Tab. 3 presents the single-target object-level RefCOCO series dataset results. 
While $\\mathbf{M}^2\\mathbf{SA}$ achieves commendable performance, it is important to note that single-target referring expression segmentation is a relatively simple task, involving explicit queries that focus on identifying a single object. The true strength of $\\mathbf{M}^2\\mathbf{SA}$ lies in its ability to excel in more complex and challenging tasks, such as multi-target referring expression segmentation and multi-granularity referring segmentation. To evaluate its performance on multi-target referring expression segmentation, we curate text queries for multi-target objects using annotation information from the RefCOCO-series datasets. Each query is constructed by randomly selecting 4 to 6 object categories from each image and generating text prompts like \"Can you segment the class 1, class 2, ..., and class n?\" We then compare $\\mathbf{M}^2\\mathbf{SA}$ 's performance against LISA,", + "bbox": [ + 169, + 784, + 826, + 925 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/1bc0cfe22f88aa17d1c7853a7780b317f2a15b12f7e2ba9be4caab3220821938.jpg", + "table_caption": [ + "Table 4: Multi-referring expression segmentation results. We adopt the cIoU metric for comparison. The best results are highlighted in bold." + ], + "table_footnote": [], + "table_body": "
MethodsMulti-RefCOCOMulti-RefCOCO+Multi-RefCOCOg
valtestAtestBvaltestAtestBval(U)test(U)
LISA-7B (Lai et al., 2023)34.032.736.428.228.628.545.248.7
GSVA-7B (Xia et al., 2024)50.753.347.844.847.440.647.748.6
GLaMM (Rasheed et al., 2024)30.832.030.028.829.627.232.535.0
M2SA-7B71.373.367.261.865.355.862.063.6
LISA-Llama2-13B (Lai et al., 2023)33.232.632.427.729.926.744.047.1
M2SA-Llama2-13B72.075.668.062.367.156.165.465.8
", + "bbox": [ + 279, + 143, + 722, + 220 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/0889fdd67ae440737f6ceb95d6e3c377a2dd148baa0b5268b372c0db34ac76ac.jpg", + "table_caption": [ + "Table 5: Multi-granularity referring expression segmentation results on RefCOCom (Wang et al., 2023). For a fair comparison with previous methods, the mIoU metrics are adopted. Part denotes part-only evaluation, and Obj & Part denotes multi-granularity evaluation. The best results are highlighted in bold." + ], + "table_footnote": [], + "table_body": "
MethodsvaltestAtestB
PartObj & PartPartObj & PartPartObj & Part
SeqTR (Zhu et al., 2022)13.928.212.122.818.134.7
CRIS (Wang et al., 2022)10.625.410.121.212.930.0
LAVT (Yang et al., 2022)15.329.913.224.418.735.5
X-Decoder (Zou et al., 2023)16.229.513.623.620.333.8
SEEM (Zou et al., 2024)16.129.413.623.420.433.9
UniRES (Wang et al., 2023)19.634.316.427.825.241.7
LISA-7B (Lai et al., 2023)21.334.318.528.625.740.1
GSVA-7B (Xia et al., 2024)11.423.19.219.216.828.2
GLaMM (Rasheed et al., 2024)21.435.318.629.526.941.1
M²SA-7B22.435.519.930.127.141.4
LISA-Llama2-13B (Lai et al., 2023)22.135.219.429.727.241.6
M²SA-Llama2-13B24.537.321.931.928.542.7
", + "bbox": [ + 279, + 321, + 722, + 452 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "GSVA, and GLaMM. As shown in Tab. 4, $\\mathbf{M}^2\\mathbf{SA}$ significantly outperforms these methods, showcasing its ability to reason about multiple objects simultaneously and effectively leverage its multi [SEG] tokens for diverse and intricate queries.", + "bbox": [ + 169, + 494, + 823, + 537 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Additionally, we evaluate $\\mathbf{M}^2\\mathrm{SA}$ on RefCOCOM, a multi-granularity referring segmentation dataset. As demonstrated in Tab. 5, $\\mathbf{M}^2\\mathrm{SA}$ surpasses existing methods in this task, though the performance improvement is less pronounced. This is likely because the MMR dataset does not include the person class, which constitutes a significant portion of the categories in RefCOCOM. These results emphasize the versatility and effectiveness of $\\mathbf{M}^2\\mathrm{SA}$ in addressing complex, real-world scenarios, extending well beyond simple single-target segmentation tasks.", + "bbox": [ + 169, + 542, + 826, + 628 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "6 CONCLUSION", + "text_level": 1, + "bbox": [ + 171, + 660, + 320, + 676 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "This paper addresses the limitations of current reasoning segmentation datasets, which often overlook multi-target or part-level reasoning. To resolve these issues, we introduce the Multi-target and Multi-granularity Reasoning (MMR) dataset, providing 194K comprehensive question-answer pairs that cover multi-target, object-level, and part-level aspects, enhancing diverse and context-aware interactions. We also propose the $\\mathbf{M}^2\\mathbf{SA}$ model, designed for multi-target, object-level, and part-level reasoning segmentation. 
$\\mathbf{M}^2\\mathbf{SA}$ incorporates early local feature fusion and multiple [SEG] tokens, improving fine-grained visual understanding and multi-target segmentation. Experimental results show that $\\mathbf{M}^2\\mathbf{SA}$ outperforms existing models on the MMR benchmark. The MMR dataset aims to drive progress in reasoning segmentation by emphasizing the importance of multi-target and part-level aspects in human-AI interactions.", + "bbox": [ + 169, + 699, + 826, + 839 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "ACKNOWLEDGMENTS", + "text_level": 1, + "bbox": [ + 171, + 871, + 357, + 886 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "This research has been supported by the LG Electronics Corporation. (Project No. G01230381)", + "bbox": [ + 171, + 907, + 800, + 925 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 946, + 509, + 959 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 174, + 102, + 287, + 117 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023.", + "Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katherine Millican, Malcolm Reynolds, et al. Flamingo: a visual language model for few-shot learning. Advances in neural information processing systems, 35:23716-23736, 2022.", + "Jinze Bai, Shuai Bai, Shusheng Yang, Shijie Wang, Sinan Tan, Peng Wang, Junyang Lin, Chang Zhou, and Jingren Zhou. 
Qwen-vl: A frontier large vision-language model with versatile abilities. arXiv preprint arXiv:2308.12966, 2023.",
      "Holger Caesar, Jasper Uijlings, and Vittorio Ferrari. Coco-stuff: Thing and stuff classes in context. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 1209-1218, 2018.",
      "Keqin Chen, Zhao Zhang, Weili Zeng, Richong Zhang, Feng Zhu, and Rui Zhao. Shikra: Unleashing multimodal llm's referential dialogue magic. arXiv preprint arXiv:2306.15195, 2023.",
      "Xianjie Chen, Roozbeh Mottaghi, Xiaobai Liu, Sanja Fidler, Raquel Urtasun, and Alan Yuille. Detect what you can: Detecting and representing objects using holistic models and body parts. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 1971-1978, 2014.",
      "Wei-Lin Chiang, Zhuohan Li, Zi Lin, Ying Sheng, Zhanghao Wu, Hao Zhang, Lianmin Zheng, Siyuan Zhuang, Yonghao Zhuang, Joseph E Gonzalez, et al. Vicuna: An open-source chatbot impressing gpt-4 with $90\\%$ chatgpt quality. See https://vicuna.lmsys.org (accessed 14 April 2023), 2(3):6, 2023.",
      "Seokju Cho, Heeseong Shin, Sunghwan Hong, Seungjun An, Seungjun Lee, Anurag Arnab, Paul Hongsuck Seo, and Seungryong Kim. Cat-seg: Cost aggregation for open-vocabulary semantic segmentation. arXiv preprint arXiv:2303.11797, 2023.",
      "Marius Cordts, Mohamed Omran, Sebastian Ramos, Timo Rehfeld, Markus Enzweiler, Rodrigo Benenson, Uwe Franke, Stefan Roth, and Bernt Schiele. The cityscapes dataset for semantic urban scene understanding. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 3213-3223, 2016.",
      "Wenliang Dai, Junnan Li, Dongxu Li, Anthony Meng Huat Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale N Fung, and Steven Hoi. Instructblip: Towards general-purpose vision-language models with instruction tuning. Advances in Neural Information Processing Systems, 36, 2024.",
      "Henghui Ding, Chang Liu, Suchen Wang, and Xudong Jiang. 
Vision-language transformer and query generation for referring segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 16321-16330, 2021.", + "Peng Gao, Jiaming Han, Renrui Zhang, Ziyi Lin, Shijie Geng, Aojun Zhou, Wei Zhang, Pan Lu, Conghui He, Xiangyu Yue, et al. Llama-adapter v2: Parameter-efficient visual instruction model. arXiv preprint arXiv:2304.15010, 2023.", + "Ke Gong, Xiaodan Liang, Dongyu Zhang, Xiaohui Shen, and Liang Lin. Look into person: Self-supervised structure-sensitive learning and a new benchmark for human parsing. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 932-940, 2017.", + "Ju He, Shuo Yang, Shaokang Yang, Adam Kortylewski, Xiaoding Yuan, Jie-Neng Chen, Shuai Liu, Cheng Yang, Qihang Yu, and Alan Yuille. Partimagenet: A large, high-quality dataset of parts. In European Conference on Computer Vision, pp. 128-145. Springer, 2022." + ], + "bbox": [ + 171, + 126, + 825, + 924 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 948, + 506, + 959 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. Lora: Low-rank adaptation of large language models. arXiv preprint arXiv:2106.09685, 2021.", + "Yutao Hu, Qixiong Wang, Wenqi Shao, Enze Xie, Zhenguo Li, Jungong Han, and Ping Luo. Beyond one-to-one: Rethinking the referring image segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 4067-4077, 2023.", + "Menglin Jia, Mengyun Shi, Mikhail Sirotenko, Yin Cui, Claire Cardie, Bharath Hariharan, Hartwig Adam, and Serge Belongie. Fashionpedia: Ontology, segmentation, and an attribute localization dataset. 
In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part I 16, pp. 316-332. Springer, 2020.", + "Sahar Kazemzadeh, Vicente Ordonez, Mark Matten, and Tamara Berg. Referitgame: Referring to objects in photographs of natural scenes. In Proceedings of the 2014 conference on empirical methods in natural language processing (EMNLP), pp. 787-798, 2014.", + "Alexander Kirillov, Kaiming He, Ross Girshick, Carsten Rother, and Piotr Dólar. Panoptic segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 9404-9413, 2019.", + "Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 4015-4026, 2023.", + "Xin Lai, Zhuotao Tian, Yukang Chen, Yanwei Li, Yuhui Yuan, Shu Liu, and Jiaya Jia. Lisa: Reasoning segmentation via large language model. arXiv preprint arXiv:2308.00692, 2023.", + "Jianshu Li, Jian Zhao, Yunchao Wei, Congyan Lang, Yidong Li, Terence Sim, Shuicheng Yan, and Jiashi Feng. Multiple-human parsing in the wild. arXiv preprint arXiv:1705.07206, 2017.", + "Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. In International conference on machine learning, pp. 19730–19742. PMLR, 2023.", + "Xiangtai Li, Shilin Xu, Yibo Yang, Guangliang Cheng, Yunhai Tong, and Dacheng Tao. Panoptic-partformer: Learning a unified model for panoptic part segmentation. In European Conference on Computer Vision, pp. 729-747. Springer, 2022.", + "Feng Liang, Bichen Wu, Xiaoliang Dai, Kunpeng Li, Yinan Zhao, Hang Zhang, Peizhao Zhang, Peter Vajda, and Diana Marculescu. Open-vocabulary semantic segmentation with mask-adapted clip. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 7061-7070, 2023.", + "Chang Liu, Henghui Ding, and Xudong Jiang. Gres: Generalized referring expression segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 23592-23601, 2023a.", + "Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. Advances in neural information processing systems, 36, 2024.", + "Zhaoyang Liu, Yinan He, Wenhai Wang, Weiyun Wang, Yi Wang, Shoufa Chen, Qinglong Zhang, Yang Yang, Qingyun Li, Jiashuo Yu, et al. Internchat: Solving vision-centric tasks by interacting with chatbots beyond language. arXiv preprint arXiv:2305.05662, 2023b.", + "Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017.", + "Gen Luo, Yiyi Zhou, Xiaoshuai Sun, Liujuan Cao, Chenglin Wu, Cheng Deng, and Rongrong Ji. Multi-task collaborative network for joint referring expression comprehension and segmentation. In Proceedings of the IEEE/CVF Conference on computer vision and pattern recognition, pp. 10034-10043, 2020." + ], + "bbox": [ + 171, + 102, + 825, + 922 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Junhua Mao, Jonathan Huang, Alexander Toshev, Oana Camburu, Alan L Yuille, and Kevin Murphy. Generation and comprehension of unambiguous object descriptions. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 11-20, 2016.", + "Panagiotis Meletis, Xiaoxiao Wen, Chenyang Lu, Daan de Geus, and Gijs Dubbelman. Cityscapes-panoptic-parts and Pascal-panoptic-parts datasets for scene understanding. 
arXiv preprint arXiv:2004.07944, 2020.", + "Umberto Michieli, Edoardo Borsato, Luca Rossi, and Pietro Zanuttigh. Gmnet: Graph matching network for large scale part semantic segmentation in the wild. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part VIII 16, pp. 397-414. Springer, 2020.", + "Kaichun Mo, Shilin Zhu, Angel X Chang, Li Yi, Subarna Tripathi, Leonidas J Guibas, and Hao Su. Partnet: A large-scale benchmark for fine-grained and hierarchical part-level 3d object understanding. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 909-918, 2019.", + "Gerhard Neuhold, Tobias Ollmann, Samuel Rota Bulo, and Peter Kontschieder. The mapillary vistas dataset for semantic understanding of street scenes. In Proceedings of the IEEE international conference on computer vision, pp. 4990-4999, 2017.", + "Tai-Yu Pan, Qing Liu, Wei-Lun Chao, and Brian Price. Towards open-world segmentation of parts. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 15392-15401, 2023.", + "Baolin Peng, Chunyuan Li, Pengcheng He, Michel Galley, and Jianfeng Gao. Instruction tuning with gpt-4. arXiv preprint arXiv:2304.03277, 2023.", + "Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pp. 8748-8763. PMLR, 2021.", + "Vignesh Ramanathan, Anmol Kalia, Vladan Petrovic, Yi Wen, Baixue Zheng, Baishan Guo, Rui Wang, Aaron Marquez, Rama Kovvuri, Abhishek Kadian, et al. Paco: Parts and attributes of common objects. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 
7141-7151, 2023.", + "Hanoona Rasheed, Muhammad Maaz, Sahal Shaji, Abdelrahman Shaker, Salman Khan, Hisham Cholakkal, Rao M Anwer, Eric Xing, Ming-Hsuan Yang, and Fahad S Khan. Glamm: Pixel grounding large multimodal model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 13009-13018, 2024.", + "Zhongwei Ren, Zhicheng Huang, Yunchao Wei, Yao Zhao, Dongmei Fu, Jiashi Feng, and Xiaojie Jin. Pixel reasoning with large multimodal model. arXiv preprint arXiv:2312.02228, 2023.", + "Stephan R Richter, Vibhav Vineet, Stefan Roth, and Vladlen Koltun. Playing for data: Ground truth from computer games. In Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part II 14, pp. 102-118. Springer, 2016.", + "Konstantinos I Roumeliotis and Nikolaos D Tselikas. Chatgpt and open-ai models: A preliminary review. Future Internet, 15(6):192, 2023.", + "Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li, Carlos Guestrin, Percy Liang, and Tatsunori B Hashimoto. Stanford alpaca: An instruction-following llama model, 2023.", + "Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, et al. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971, 2023.", + "Catherine Wah, Steve Branson, Peter Welinder, Pietro Perona, and Serge Belongie. The caltech-ucsd birds-200-2011 dataset. 2011." 
+ ], + "bbox": [ + 171, + 102, + 825, + 924 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Wenxuan Wang, Tongtian Yue, Yisi Zhang, Longteng Guo, Xingjian He, Xinlong Wang, and Jing Liu. Unveiling parts beyond objects: Towards finer-granularity referring expression segmentation. arXiv preprint arXiv:2312.08007, 2023.", + "Zhaoqing Wang, Yu Lu, Qiang Li, Xunqiang Tao, Yandong Guo, Mingming Gong, and Tongliang Liu. Cris: Clip-driven referring image segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 11686-11695, 2022.", + "Meng Wei, Xiaoyu Yue, Wenwei Zhang, Shu Kong, Xihui Liu, and Jiangmiao Pang. Ov-parts: Towards open-vocabulary part segmentation. Advances in Neural Information Processing Systems, 36, 2024.", + "Zhuofan Xia, Dongchen Han, Yizeng Han, Xuran Pan, Shiji Song, and Gao Huang. Gsva: Generalized segmentation via multimodal large language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 3858-3869, 2024.", + "Jilan Xu, Junlin Hou, Yuejie Zhang, Rui Feng, Yi Wang, Yu Qiao, and Weidi Xie. Learning open-vocabulary semantic segmentation models from natural language supervision. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 2935-2944, 2023.", + "Lu Yang, Qing Song, Zhihui Wang, and Ming Jiang. Parsing r-cnn for instance-level human analysis. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 364-373, 2019.", + "Zhao Yang, Jiaqi Wang, Yansong Tang, Kai Chen, Hengshuang Zhao, and Philip HS Torr. Lavt: Language-aware vision transformer for referring image segmentation. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 18155-18165, 2022.", + "Haoxuan You, Haotian Zhang, Zhe Gan, Xianzhi Du, Bowen Zhang, Zirui Wang, Liangliang Cao, Shih-Fu Chang, and Yinfei Yang. Ferret: Refer and ground anything anywhere at any granularity. arXiv preprint arXiv:2310.07704, 2023.", + "Renrui Zhang, Jiaming Han, Chris Liu, Peng Gao, Aojun Zhou, Xiangfei Hu, Shilin Yan, Pan Lu, Hongsheng Li, and Yu Qiao. Llama-adapter: Efficient fine-tuning of language models with zero-init attention. arXiv preprint arXiv:2303.16199, 2023a.", + "Shilong Zhang, Peize Sun, Shoufa Chen, Min Xiao, Wenqi Shao, Wenwei Zhang, Kai Chen, and Ping Luo. Gpt4roi: Instruction tuning large language model on region-of-interest. arXiv preprint arXiv:2307.03601, 2023b.", + "Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen, Christopher Dewan, Mona Diab, Xian Li, Xi Victoria Lin, et al. Opt: Open pre-trained transformer language models. arXiv preprint arXiv:2205.01068, 2022.", + "Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zi Lin, Zhuohan Li, Dacheng Li, Eric Xing, et al. Judging llm-as-a-judge with mt-bench and chatbot arena. Advances in Neural Information Processing Systems, 36, 2024.", + "Shuai Zheng, Fan Yang, M Hadi Kiapour, and Robinson Piramuthu. Modanet: A large-scale street fashion dataset with polygon annotations. In Proceedings of the 26th ACM international conference on Multimedia, pp. 1670-1678, 2018.", + "Bolei Zhou, Hang Zhao, Xavier Puig, Tete Xiao, Sanja Fidler, Adela Barriuso, and Antonio Torralba. Semantic understanding of scenes through the ade20k dataset. International Journal of Computer Vision, 127:302-321, 2019.", + "Tianfei Zhou, Wenguan Wang, Si Liu, Yi Yang, and Luc Van Gool. Differentiable multi-granularity human representation learning for instance-aware human semantic parsing. 
In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 1622-1631, 2021.", + "Chaoyang Zhu, Yiyi Zhou, Yunhang Shen, Gen Luo, Xingjia Pan, Mingbao Lin, Chao Chen, Liujuan Cao, Xiaoshuai Sun, and Rongrong Ji. Seqtr: A simple yet universal network for visual grounding. In European Conference on Computer Vision, pp. 598-615. Springer, 2022." + ], + "bbox": [ + 171, + 102, + 826, + 925 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Deyao Zhu, Jun Chen, Xiaoqian Shen, Xiang Li, and Mohamed Elhoseiny. Minigpt-4: Enhancing vision-language understanding with advanced large language models. arXiv preprint arXiv:2304.10592, 2023.", + "Xueyan Zou, Zi-Yi Dou, Jianwei Yang, Zhe Gan, Linjie Li, Chunyuan Li, Xiyang Dai, Harkirat Behl, Jianfeng Wang, Lu Yuan, et al. Generalized decoding for pixel, image, and language. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 15116-15127, 2023.", + "Xueyan Zou, Jianwei Yang, Hao Zhang, Feng Li, Linjie Li, Jianfeng Wang, Lijuan Wang, Jianfeng Gao, and Yong Jae Lee. Segment everything everywhere all at once. Advances in Neural Information Processing Systems, 36, 2024." 
+ ], + "bbox": [ + 171, + 102, + 825, + 263 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "A APPENDIX", + "text_level": 1, + "bbox": [ + 171, + 102, + 299, + 118 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "A.1 LIMITATION", + "text_level": 1, + "bbox": [ + 171, + 133, + 303, + 148 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "While PACO-LVIS provides diverse and comprehensive object-part mask annotations for common objects, it lacks information on the human class and its parts. Consequently, our question-answer pairs generated based on PACO-LVIS do not consider reasoning about human class and its parts, which is a drawback. Therefore, there is a need for future dataset expansion to include a wider range of objects and parts that exist in real-world environments. Additionally, although we carefully design the prompts to ensure the diversity and quality of the dataset, the content of the question-answer pairs is inherently dependent on the pre-trained knowledge of ChatGPT.", + "bbox": [ + 169, + 161, + 823, + 258 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "A.2 ETHICS CONCERN", + "text_level": 1, + "bbox": [ + 171, + 277, + 346, + 291 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "The MMR dataset is constructed based on the publicly available PACO-LVIS dataset (Ramanathan et al., 2023), which helps mitigate privacy concerns. As the objects and parts within the images are already annotated, we only add text question-answer pairs, ensuring that potential privacy issues remain minimal. These question-answer pairs are generated using the ChatGPT/GPT-4V API (Achiam et al., 2023). 
While there is a risk of bias from the training data of the ChatGPT/GPT-4V API, we have implemented a thorough data filtering process to remove any ethically problematic content.", + "bbox": [ + 169, + 303, + 823, + 388 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "A.3 LICENSE", + "text_level": 1, + "bbox": [ + 171, + 405, + 281, + 419 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "We utilize the released code from LISA (Lai et al., 2023) for the baseline model code construction. Since LISA follows Apache License 2.0, our code is also licensed under Apache License 2.0. Additionally, the PACO-LVIS dataset is licensed under a Creative Commons Attribution 4.0 (CC BY 4.0) license. Consequently, our MMR dataset is also licensed under Creative Commons Attribution 4.0 (CC BY 4.0). To download the PACO-LVIS dataset (Ramanathan et al., 2023), we utilize author-released code under the MIT license. We use ChatGPT/GPT-4V API (Achiam et al., 2023) developed by OpenAI to generate the question-answer pairs in the MMR dataset. 
Specific licensing information for the ChatGPT/GPT-4V API model is proprietary to OpenAI.", + "bbox": [ + 169, + 431, + 823, + 544 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "A.4 THE SPECIFIC DETAILS OF CHATGPT API", + "text_level": 1, + "bbox": [ + 171, + 561, + 514, + 575 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "The specific command to use the ChatGPT API (Achiam et al., 2023) for generating question-answer pairs in MMR is as follows:", + "bbox": [ + 169, + 587, + 823, + 617 + ], + "page_idx": 15 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "response = open aiCompletion.create \n( \n model=\"gpt-4-vision-preview\", \n messages=prompt, \n temperature=0.7, \n max_tokens=850, \n)", + "guess_lang": "python", + "bbox": [ + 369, + 636, + 630, + 731 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Figure 5: To generate question-answer pairs in MMR dataset, we use gpt-4-vision-preview model. For the hyper-parameters, we set the temperature to 0.7 and max_tokens to 850.", + "bbox": [ + 169, + 758, + 823, + 787 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "A.5 PROMPTS AND EXAMPLES", + "text_level": 1, + "bbox": [ + 171, + 814, + 401, + 828 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "General MMR Dataset The MMR dataset fundamentally includes multi-target (both objects and parts) answers to each question. In this section, we discuss the full prompt not covered in the main manuscript. Fig. 6 illustrates the prompt used to generate the train, validation, and test datasets. Both text and image prompts are input into GPT-4V (Achiam et al., 2023), resulting in the creation of question-answer pairs that encompass various information about objects and parts. As shown in Fig. 2, the output includes a global caption and question-answer pairs for the image. 
The", + "bbox": [ + 169, + 839, + 823, + 925 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 490, + 948, + 508, + 959 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "\"You are an AI visual assistant capable of analyzing a single image. You receive the specific object locations and part locations within the image, along with detailed coordinates. These coordinates are in the form of bounding boxes, represented as (x1, y1, x2, y2). These values correspond to the top left x, top left y, bottom right x, and bottom right y. The height and width of the image you receive are 427 and 640, respectively. Additionally, there may be multiple objects of the same category in the image. To resolve this ambiguity, we use \"object_number\" such as \"person_1\" and \"person_2\" to differentiate between objects of the same category. If a region is a part of an object, the category name is described as \"object's part\", like \"person's body\" and \"bus's wheel\". 
The category names and bounding box coordinates of objects and parts are as follow:", + "bbox": [ + 174, + 102, + 823, + 160 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/24150db4c2490cc27e374eb463bb6d334ecf8dcc379e7e6b81cb2cbbca7a6563.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 468, + 167, + 772, + 325 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "```c\n```\nbottle_1 [459.07, 0.0, 603.49, 315.78];\nbottle_1's label [460, 105, 500, 282];\nbottle_1's neck [470, 0, 593, 62];\nbottle_1's shoulder [461, 56, 603, 103];\nbottle_1's body [460, 94, 604, 291];\nbottle_1's base [463, 287, 596, 316];\nbottle_2 [296.85, 1.15, 416.19, 242.2];\nbottle_2's base [307, 220, 400, 241];\nbottle_2's label [300, 4, 413, 176];\nbottle_2's body [307, 172, 403, 231];\nknife_1 [204.79, 2.4, 238.63, 226.53];\nknife_1's blade [213, 121, 237, 226];\nknife_1's handle [205, 2, 239, 126];\nknife_2 [304.84, 320.65, 615.34, 427.0\nknife_2's blade [305, 321, 601, 422];\nknife_2's handle [529, 399, 616, 426];", + "bbox": [ + 174, + 166, + 346, + 328 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "You first need to create a global caption for the image without given information. A global caption should summarize the content of the image within a maximum of two sentences. The format for the global caption strictly follows: \"Global caption: GLOBAL_CAPTION_FOR_the_IMAGE. What you need to do next is create question-answers-pairs using the information of objects and parts given above. However, when the corresponding object and part name appear in the answers, \"name [coordinates]\" is used as the given information above without changing its form. The goal of generating the question-answers-pair is to use the provided information about objects and object's parts, create a plausible and challenging question about the image, and provide the answer in detail for the image reasoning segmentation. 
The content of the question must address one of the following two:", + "bbox": [ + 174, + 339, + 821, + 405 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1) the relationship between parts within the image or the relationship between a part and an object.", + "2) the function or the general information about the parts." + ], + "bbox": [ + 176, + 405, + 609, + 422 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "The question should be implicit and require commonsense reasoning, rather than explicitly mentioning the names of the object and part. In other words, it's important to make the question challenging by not directly including visual content details. The answer should include multiple object's parts. You must build at least 3 rounds of natural question-answer pairs, and if there is sufficient information create up to 5 rounds of question-answer pairs. In addition, please follow the format strictly: The order must be attached to the questions and answers like Question 1: and Answer 1.: In the answer, the coordinates referring to the target part or object must be attached to the object name or part name in the format: object_1[x1, y1, x2, y2] and object_1's part [x1, y1, x2, y2]. Do not use other format such as \"a part of object_1\". Here are some additional requirements about generated question and answers:", + "bbox": [ + 174, + 431, + 821, + 494 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Do not mention that the information source is provided in text description. Always answer as if you are directly looking at the image.", + "2. Do not ask the question you are not confident to answer. Only include question that have definite answer.", + "3. Do not mention the coordinates of a part and an object directly in the question.", + "4. Make the questions and answers concise and easy to understand, avoiding overly complex and ambiguous sentences.", + "5. 
The question should describe a complete activity, a function, or general information.", + "6. The answer to the generated question should include at least two object's parts and explicitly describe the names of the part and the object. Implied other potential parts is strictly prohibited.", + "7. Even if the image includes the real people and the brand name, or is not associated with the mentioned information, make sure to still create the question-answer pairs.", + "8. Avoid using incorrectly formatted object names or part names, such as located at [coordinates] or a part [object_1's part [coordinates]]. In other words, use it as it appears in the object and part information given above. ### For example: shoe_1's outsole [42, 332, 62, 336], not an outsole [shoe_1's outsole [42, 332, 62, 336]].", + "9. All generated answers must include the given object or part information, without changing the format." + ], + "bbox": [ + 176, + 496, + 816, + 614 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Figure 6: The text and image prompt used in our data creation for MMR dataset with GPT-4V.", + "text_level": 1, + "bbox": [ + 187, + 642, + 807, + 657 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "segmentation mask information for the objects or parts mentioned in the answers is sourced from PACO-LVIS (Ramanathan et al., 2023) to create new annotations.", + "bbox": [ + 171, + 686, + 823, + 715 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Part-only MMR Test Dataset The MMR dataset includes a substantial amount of information on parts to enhance part-level recognition, which has been overlooked in existing reasoning segmentation datasets. Consequently, we create a part-level test dataset to evaluate part-level recognition separately. Using the text and image prompts shown in Fig. 7, we generate a part-only test dataset from 2000 images with extensive part-level information from PACO-LVIS annotations. As shown in Fig. 
8, the output includes a global caption and question-answer pairs for the image. The segmentation mask information for the parts mentioned in the answers is sourced from the PACO-LVIS test dataset to create new annotations.", + "bbox": [ + 169, + 734, + 826, + 847 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Object-only MMR Test Dataset To evaluate recognition separately for object-level, we create an MMR test dataset that includes only information on objects. We generate an object-only test dataset using the text and image prompts shown in Fig. 9, selecting 2000 images with minimal part-level information. As shown in Fig. 10, the output includes a global caption and question-answer pairs for", + "bbox": [ + 169, + 867, + 826, + 925 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "\"You are an AI visual assistant capable of analyzing a single image. You receive the specific object's part locations within the image, along with detailed coordinates. These coordinates are in the form of bounding boxes, represented as (x1, y1, x2, y2). These values correspond to the top left x, top left y, bottom right x, and bottom right y. The height and width of the image you receive are 428 and 640, respectively. Additionally, there may be multiple objects of the same category in the image. To resolve this ambiguity, we use \"object_number\" such as \"person_1\" and \"person_2\" to differentiate between objects of the same category. If a region is a part of an object, the category name is described as \"object's part\", like \"person's body\" and \"bus's wheel\". 
The category names and bounding box coordinates of parts are as follow:", + "bbox": [ + 174, + 237, + 820, + 292 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/25f739dd3a4a947b306e9cb4eb83141e8bb97b0f0b8399766bf64388dd9abc7a.jpg", + "image_caption": [ + "Figure 7: The text and image prompt used in our data creation for the part-only MMR test dataset with GPT-4V." + ], + "image_footnote": [], + "bbox": [ + 493, + 297, + 754, + 431 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "```\n```\ndog_1's eye [235, 67, 291, 100];\ndog_1's ear [324, 36, 426, 145];\ndog_1's nose [184, 98, 212, 127];\ndog_1's teeth [245, 146, 285, 171];\ndog_1's head [169, 20, 427, 202];\ndog_1's foot [337, 204, 510, 407];\ndog_1's leg [212, 95, 542, 356];\ndog_1's body [243, 20, 503, 328];\nbowl_1's rim [143, 298, 369, 378];\nbowl_1's inner_body [150, 302, 361,\nbowl_1's bottom [194, 362, 308, 376\nbowl_1's body [153, 351, 354, 422];", + "bbox": [ + 174, + 299, + 336, + 420 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "You first need to create a global caption for the image without given information. A global caption should summarize the content of the image within a maximum of two sentences. The format for the global caption strictly follows: \"Global caption: GLOBAL_CAPTION_FOR_the_IMAGE. What you need to do next is create question-answers-pairs using the information of object's parts given above. However, when the corresponding object's part name appear in the answers, \"name [coordinates]\" is used as the given information above without changing its form. The goal of generating the question-answers-pair is to use the provided information about object's parts, create a plausible and challenging question about the image, and provide the answer in detail for the image reasoning segmentation. 
The content of the question must address one of the following two:", + "bbox": [ + 174, + 436, + 818, + 491 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "1) the relationship between different parts within the image. 2) the function or the general information about the parts.", + "bbox": [ + 176, + 493, + 436, + 506 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "The question should be implicit and require commonsense reasoning, rather than explicitly mentioning the names of the object and part. In other words, it's important to make the question challenging by not directly including visual content details. The answer should include multiple object's parts. You must build at least 3 rounds of natural question-answer pairs, and if there is sufficient information create up to 5 rounds of question-answer pairs. In addition, please follow the format strictly: The order must be attached to the questions and answers like Question 1: and Answer 1.: In the answer, the coordinates referring to the target part must be attached to the part name in the format: object_1's part [x1, y1, x2, y2]. Do not use other format such as \"a part of object_1. Here are some additional requirements about generated question and answers:", + "bbox": [ + 174, + 517, + 815, + 571 + ], + "page_idx": 17 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Do not mention that the information source is provided in text description. Always answer as if you are directly looking at the image.", + "2. Do not ask the question you are not confident to answer. Only include question that have definite answer.", + "3. Do not mention the coordinates of a part and an object directly in the question.", + "4. Make the questions and answers concise and easy to understand, avoiding overly complex and ambiguous sentences.", + "5. The question should describe a complete activity, a function, or general information.", + "6. 
The answer to the generated question should include at least two object's parts and explicitly describe the names of the part. Implied other potential parts is strictly prohibited.", + "7. Even if the image includes the real people and the brand name, or is not associated with the mentioned information, make sure to still create the question-answer pairs.", + "8. Avoid using incorrectly formatted part names, such as located at [coordinates] or a part [object_1's part [coordinates]]. In other words, use it as it appears in the part information given above. ## For example: shoe_1's outsole [42, 332, 62, 336], not an outsole [shoe_1's outsole [42, 332, 62, 336]].", + "9. All generated answers must include the given part information, without changing the format.", + "10. When creating questions, ask only questions about the object's parts given above without directly mentioning the part name in the question. Please keep in mind that other parts should not dominate the answer.", + "11. If the number of object's parts given for an image is large enough, create a question so that each round's answer includes different object's parts.", + "12. Do not create questions that are answered by parts other than the part information given above.", + "13. If that part doesn't directly answer the question, do not mention it in the answer." + ], + "bbox": [ + 176, + 571, + 816, + 734 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "the image. 
The segmentation mask information for the objects mentioned in the answers is sourced from the PACO-LVIS test dataset to create new annotations.", + "bbox": [ + 169, + 103, + 823, + 132 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/5ec30dc5b184ebbebb46389d3aa5a525ef1b4b56476504c52bca311256093ad4.jpg", + "image_caption": [ + "Figure 8: An example from the part-only MMR test dataset generated through the prompt in Fig. 7. This example includes information of some object's parts. The left and right pictures show the original image and part-level segmentation masks, respectively." + ], + "image_footnote": [ + "Caption: A dog is sitting next to a bowl, possibly after a drink of water or a meal.", + "Question1: What features of the animal suggest that it may have just had a drink?", + "Answer1: The dog 1's nose [184, 98, 212, 127] seems moist, and this often indicates the dog has just been drinking. In addition, the presence of a bowl 1's rim [143, 298, 369, 378] next to the dog suggests that it could have contained water or food that the dog might have consumed.", + "Question2: Considering the animal's current position, which part seems to be supporting its weight?", + "Answer2: The dog_1's leg [212, 95, 542, 356] and the dog_1's foot [337, 204, 510, 407] are likely bearing the most weight, considering the dog's sitting position adjacent to the bowl_1's body [153, 351, 354, 422].", + "Question3: If the animal were to listen for approaching sounds, which physical feature would be most engaged?", + "Answer3: The dog_1's ear [324, 36, 426, 145] would be most engaged in detecting sounds as ears are responsible for auditory perception in dogs." + ], + "bbox": [ + 176, + 145, + 823, + 313 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "A.6 DATA FORMAT", + "text_level": 1, + "bbox": [ + 171, + 522, + 321, + 535 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "The MMR dataset is given in JSON format. 
The JSON file for each instance is organized as shown in Fig. 11.", + "bbox": [ + 169, + 547, + 823, + 578 + ], + "page_idx": 18 + }, + { + "type": "table", + "img_path": "images/29ddc08ffe7a3ed20803202d06a14a5728b0071d5ae94c19106e4e5dc3234b80.jpg", + "table_caption": [ + "Table 6: The effect of multiple [SEG] Tokens and Early Local Feature Fusion in $\\mathbf{M}^2\\mathbf{SA}-7\\mathbf{B}$ on MMR benchmark. Obj & Part, Obj, and Part denote multi-granularity, object-only, and part-only evaluation settings." + ], + "table_footnote": [], + "table_body": "
multiple [SEG] TokensEarly Local Feature Fusionvaltest
Obj & PartObjPartObj & Part
gIoUcIoUgIoUcIoUgIoUcIoUgIoUcIoU
××19.431.634.741.88.013.119.527.1
×26.047.739.555.411.725.228.445.2
27.948.541.055.613.527.031.046.8
", + "bbox": [ + 176, + 664, + 823, + 736 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "A.7 EFFECTIVENESS OF THE MULTIPLE [SEG] TAXENS AND EARLY LOCAL FEATURE FUSION", + "text_level": 1, + "bbox": [ + 171, + 771, + 779, + 799 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "We conduct an ablation study to verify the effectiveness of the multiple [SEG] tokens and Early Local Feature Fusion proposed in $\\mathbf{M}^2\\mathbf{SA}$ . Tab. 6 demonstrates that merely adding multiple [SEG] tokens results in significant performance improvements in MMR evaluation benchmarks. This finding suggests that using single [SEG] tokens in the LISA is inadequate to fully capture the segmentation capability. Moreover, performance improvements are evident when Early Local Feature Fusion is incorporated. Notably, there is a substantial performance enhancement in the part-only evaluation setting of the MMR test set. This improvement likely arises because Early Layer features contain local detail information (e.g., edges or boundaries), which aids in part and fine-level segmentation.", + "bbox": [ + 169, + 811, + 826, + 925 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "\"You are an AI visual assistant capable of analyzing a single image. You receive the specific object locations within the image, along with detailed coordinates. These coordinates are in the form of bounding boxes, represented as (x1, y1, x2, y2). These values correspond to the top left x, top left y, bottom right x, and bottom right y. The height and width of the image you receive are 333 and 500, respectively. Additionally, there may be multiple objects of the same category in the image. 
To resolve this ambiguity, we use \"object_number\" such as \"person_1\" and \"person_2\" to differentiate between objects of the same category. The category names and bounding box coordinates of objects are as follow:", + "bbox": [ + 174, + 261, + 812, + 310 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/7cb98bd825e0ff0d51981b9af1c68c0d0b8b01824220fdb5bb54d96a5b1df5e3.jpg", + "image_caption": [ + "Figure 9: The text and image prompt used in our data creation for the object-only MMR test dataset with GPT-4V." + ], + "image_footnote": [], + "bbox": [ + 537, + 315, + 754, + 426 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "mirror_1 [304.9, 35.42, 476.09, 146.49]; \npillow_1 [169.86, 180.73, 221.21, 229.7]; \npillow_2 [370.81, 175.9, 436.81, 231.85];", + "bbox": [ + 176, + 362, + 356, + 391 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "You first need to create a global caption for the image without given information. A global caption should summarize the content of the image within a maximum of two sentences. The format for the global caption strictly follows: \"Global caption: GLOBAL_CAPTION_FOR_the_IMAGE. What you need to do next is create question-answers-pairs using the information of objects given above. However, when the corresponding object name appear in the answers, \"name [coordinates]\" is used as the given information above without changing its form. The goal of generating the question-answers-pair is to use the provided information about objects, create a plausible and challenging question about the image, and provide the answer in detail for the image reasoning segmentation. The content of the question must address one of the following two:", + "bbox": [ + 174, + 435, + 818, + 489 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "1) the relationship between objects within the image. 
\n2) the function or the general information about the objects.", + "bbox": [ + 176, + 489, + 439, + 508 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "The question should be implicit and require commonsense reasoning, rather than explicitly mentioning the names of the object. In other words, it's important to make the question challenging by not directly including visual content details. Some of the answers of the rounds should include multiple different types of objects. You must build at least 3 rounds of natural question-answer pairs, and if there is sufficient information create up to 5 rounds of question-answer pairs. In addition, please follow the format strictly: The order must be attached to the questions and answers like Question 1: and Answer 1.: In the answer, the coordinates referring to the target object must be attached to the object name in the format: object_1[x1, y1, x2, y2]. Here are some additional requirements about generated question and answers:", + "bbox": [ + 174, + 517, + 816, + 574 + ], + "page_idx": 19 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Do not mention that the information source is provided in text description. Always answer as if you are directly looking at the image.", + "2. Do not ask the question you are not confident to answer. Only include question that have definite answer.", + "3. Do not mention the coordinates of an object directly in the question.", + "4. Make the questions and answers concise and easy to understand, avoiding overly complex and ambiguous sentences.", + "5. The question should describe a complete activity, a function, or general information.", + "6. The answer to the generated question should include at least two objects and explicitly describe the names of the object. Implied other potential objects is strictly prohibited.", + "7. 
Even if the image includes the real people and the brand name, or is not associated with the mentioned information, make sure to still create the question-answer pairs.", + "8. Avoid using incorrectly formatted object names, such as located at [coordinates] or an object_1 [object_1 [coordinates]]. In other words, use it as it appears in the object information given above.", + "9. All generated answers must include the given object information, without changing the format.", + "10. When creating questions, ask only questions about the objects given above without directly mentioning the object name in the question. Please keep in mind that other objects should not dominate the answer.", + "11. If the number of objects given for an image is large enough, create a question so that each round's answer includes different objects.\"" + ], + "bbox": [ + 176, + 580, + 818, + 717 + ], + "page_idx": 19 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 946, + 509, + 959 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/9fe2a7877d3f23e47deec1f7c697a3c4f4090e80d25a2f9730e1cac4cb517f93.jpg", + "image_caption": [ + "Figure 10: An example from the object-only MMR test dataset generated through the prompt in Fig. 9. This example includes information of objects. The left and right pictures show the original image and object-level segmentation masks, respectively." 
+ ], + "image_footnote": [], + "bbox": [ + 176, + 130, + 823, + 297 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Caption: A cozy living room interior with a large mirror on the wall and decorative pillows on furniture.", + "bbox": [ + 178, + 299, + 611, + 309 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Question1: Where might someone rest their head while sitting on the furniture to gain extra comfort?", + "bbox": [ + 178, + 316, + 609, + 325 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Anr: a t r comfort while sitting on the furniture.", + "bbox": [ + 178, + 325, + 808, + 344 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Question2: In what part of the room could someone check their appearance before leaving the house?", + "bbox": [ + 178, + 352, + 619, + 362 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Answer2: Someone could check their appearance in the mirror_1 [304.9, 35.42, 476.09, 146.49], which is located on the wall.", + "bbox": [ + 178, + 362, + 715, + 371 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Question3: If a person were to rearrange the decorative cushions, which items would they be handling?", + "bbox": [ + 178, + 378, + 624, + 388 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Answer3: If a person were to rearrange the decorative cushions, they would be handling pillow_1 [169.86, 180.73, 221.21, 229.7] and pillow_2 [370.81, 175.9, 436.81, 231.85].", + "bbox": [ + 178, + 388, + 790, + 407 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/d77ec1a7075ac8a0007df97381c66350ab54740ba78c5a9a039a6d9e2c909c36.jpg", + "image_caption": [ + "Figure 11: MMR dataset format" + ], + "image_footnote": [], + "bbox": [ + 176, + 545, + 823, + 854 + ], + "page_idx": 20 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 20 + }, + { + "type": "page_number", 
+ "text": "21", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 20 + }, + { + "type": "table", + "img_path": "images/3d73614a9c442d59351a50ed39e49728c94da6cb8d0b2dfbb31559ef802203d8.jpg", + "table_caption": [ + "Table 7: Comparison of computational complexity on LISA, GSVA, and GLaMM, and ${\\mathrm{M}}^{2}\\mathrm{{SA}}$ ." + ], + "table_footnote": [], + "table_body": "
MethodsGPU Memory Usage (GB)TFLOPs
LISA-7B (Lai et al., 2023)30.5832.59
GSVA-7B (Xia et al., 2024)30.39203.77
GLaMM (Rasheed et al., 2024)17.14349.28
M2SA-7B30.6032.62
LISA-Llama2-13B (Lai et al., 2023)55.2056.64
M2SA-Llama2-13B55.2356.67
", + "bbox": [ + 233, + 132, + 764, + 237 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "A.8 TRAINING TIME", + "text_level": 1, + "bbox": [ + 171, + 268, + 331, + 282 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "The training takes approximately 40 hours for the $\\mathbf{M}^2\\mathrm{SA - 7B}$ and about 52 hours for the $\\mathbf{M}^2\\mathrm{SA - }$ Llama2-13B, respectively.", + "bbox": [ + 169, + 295, + 823, + 325 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "A.9 COMPUTATIONAL COMPLEXITY", + "text_level": 1, + "bbox": [ + 171, + 342, + 439, + 357 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "We aim to compare the computational complexity of the proposed $\\mathbf{M}^2\\mathrm{SA}$ with LISA, GSVA, and GLaMM. For this comparison, we measure GPU memory usage and TFLOPs. As shown in Tab. 7, while the addition of Early Local Feature Fusion and multiple [SEG] tokens leads to a slight increase in GPU memory usage and TFLOPs, $\\mathbf{M}^2\\mathrm{SA}$ demonstrates a significant improvement in handling multiple targets and fine-grained part-level segmentation compared to LISA. However, despite these performance improvements, there is still room for enhancement from the perspective of computational efficiency. Since $\\mathbf{M}^2\\mathrm{SA}$ is built upon both MLLM and SAM, it requires substantial memory resources. Future research could focus on optimizing the efficiency of the mask decoder, which predicts the final mask by integrating vision and language information.", + "bbox": [ + 169, + 369, + 826, + 494 + ], + "page_idx": 21 + }, + { + "type": "table", + "img_path": "images/2e464580e9e1dd1464f9ef076f2348b3b1cdbe0b94c2b7f536ca87733c66910c.jpg", + "table_caption": [ + "Table 8: Multi-object referring segmentation results on GTAV and Cityscapes validation sets. We adopt mIoU metric for comparison. We evaluate the zero-shot performance of LISA, GSVA, GLaMM, and $\\mathbf{M}^2\\mathbf{SA}$ . 
The best results are highlighted in bold." + ], + "table_footnote": [], + "table_body": "
MethodsGTAV-valCityscapes-val
LISA-7B (Lai et al., 2023)3.76.1
GSVA-7B (Xia et al., 2024)15.714.6
GLaMM (Rasheed et al., 2024)12.612.6
M2SA-7B35.141.3
LISA-Llama2-13B (Lai et al., 2023)2.43.4
M2SA-Llama2-13B38.244.0
", + "bbox": [ + 267, + 566, + 730, + 671 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "A.10 GENERALIZATION ON UNSEEN DATA", + "text_level": 1, + "bbox": [ + 171, + 703, + 483, + 717 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "To assess $\\mathbf{M}^2\\mathrm{SA}$ 's generalization to unseen data, we conduct additional experiments. Although OVPARTS (Wei et al., 2024) was recently proposed for open-vocabulary part-level segmentation using Pascal-Part (Chen et al., 2014) and ADE20K (Zhou et al., 2019), both datasets were used during training. Therefore, we evaluate the model's generalization performance using semantic segmentation datasets from driving scenes, specifically Cityscapes (Cordts et al., 2016) and GTAV (Richter et al., 2016), which were not used during training and pose a more challenging test environment. Since these datasets lack part-level mask annotations, we focus on evaluating multi-target object cases. Furthermore, we curate custom text prompts using predefined category names as they do not provide corresponding text queries. For each query, we randomly select 4 to 6 object categories from an image and create prompts such as \"Can you segment the class 1, class 2, ..., and class n?\" The model generates masks for the specified objects, and we compute the mIoU score to compare its performance with LISA. As shown in Tab. 8, $\\mathbf{M}^2\\mathrm{SA}$ performs robustly even on datasets from entirely different domains. Notably, while the existing methods struggle with multi-target cases, $\\mathbf{M}^2\\mathrm{SA}$ handles them effectively. 
This demonstrates that the use of multiple [SEG] tokens, combined", + "bbox": [ + 169, + 729, + 826, + 925 + ], + "page_idx": 21 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 488, + 946, + 509, + 960 + ], + "page_idx": 21 + }, + { + "type": "table", + "img_path": "images/07decf95af415a3da631f490c7ade3a9745b2bb0391c6ea66e340caaa63f3043.jpg", + "table_caption": [ + "Table 9: Comparison between LISA-7B (Lai et al., 2023) trained on MMR dataset and LISA-7B trained on ReasonSeg (Lai et al., 2023). We measure the performance on ReasonSeg validation set" + ], + "table_footnote": [], + "table_body": "
MethodsgIoUcIoU
LISA-7B w/ ReasonSeg44.446.0
LISA-7B w/ MMR49.955.6
", + "bbox": [ + 354, + 143, + 643, + 191 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "with early local feature fusion, enables $\\mathbf{M}^2\\mathbf{SA}$ to generalize well to unseen domains by improving its ability to manage multi-target cases and fine-grained segmentation tasks.", + "bbox": [ + 169, + 223, + 823, + 252 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "A.11 MMR AND REASONSEG", + "text_level": 1, + "bbox": [ + 171, + 268, + 397, + 282 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "To validate the comprehensiveness and effectiveness of the MMR dataset, we conduct a comparative evaluation with ReasonSeg using the LISA-7B model. Specifically, we train the model in two configurations: one using ReasonSeg and the other using MMR instead of ReasonSeg. As shown in Tab. 9, the model trained on MMR shows superior performance on the ReasonSeg validation set than the model trained on ReasonSeg. This improvement highlights the comprehensiveness of the MMR dataset. By incorporating multi-target and part-level annotations alongside object-level data, MMR provides a more robust knowledge for addressing complex reasoning segmentation tasks.", + "bbox": [ + 169, + 295, + 823, + 393 + ], + "page_idx": 22 + }, + { + "type": "table", + "img_path": "images/1c6932d80c07652a78d886872a4dd1afc5ca8809e1a16475f4ada3ac643a9465.jpg", + "table_caption": [ + "Table 10: Performance of M2SA on frequently appearing and infrequently appearing object categories. From the total of 75 categories, question-answer pairs containing the top 10 most frequent (upper) and bottom 10 least frequent (lower) categories are extracted to construct the upper and lower subsets, respectively." + ], + "table_footnote": [], + "table_body": "
MethodsMMR test
Obj-only (total)Obj-only (upper)Obj-only (lower)
gIoUcIoUgIoUcIoUgIoUcIoU
M2SA-7B41.055.641.054.839.439.7
", + "bbox": [ + 272, + 477, + 728, + 537 + ], + "page_idx": 22 + }, + { + "type": "table", + "img_path": "images/3216553d07de328f5daf032f72cf7a31f3d843795076dd6ec93c97a4adca5f81.jpg", + "table_caption": [ + "Table 11: Performance of M2SA on frequently appearing and infrequently appearing part categories. From the total of 445 categories, question-answer pairs containing the top 10 most frequent (upper) and bottom 10 least frequent (lower) categories are extracted to construct the upper and lower subsets, respectively." + ], + "table_footnote": [], + "table_body": "
MethodsMMR test
Part-only (total)Part-only (upper)Part-only (lower)
gIoUcIoUgIoUcIoUgIoUcIoU
M2SA-7B13.527.012.824.813.328.1
", + "bbox": [ + 272, + 635, + 728, + 694 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "A.12 ANALYSIS OF THE LONG-TAIL PHENOMENON IN M2SA", + "text_level": 1, + "bbox": [ + 171, + 723, + 612, + 738 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "To investigate whether $\\mathbf{M}^2\\mathrm{SA}$ trained on the MMR dataset exhibits a long-tail phenomenon, we evaluate its performance on frequently and infrequently occurring object and part categories. To this end, we construct subsets of the MMR test set by isolating question-answer pairs based on category frequency. Specifically, we extract the top 10 most frequent (upper) and bottom 10 least frequent (lower) categories for both object-only and part-only test sets. This results in four subsets: object-only (upper: 10/75), object-only (lower: 10/75), part-only (upper: 10/445), and part-only (lower: 10/445). The MMR dataset includes a total of 75 object categories and 445 part categories, respectively. The performance comparison is shown in Tab. 10 and Tab. 11.", + "bbox": [ + 169, + 750, + 825, + 861 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "For the object-only dataset, $\\mathbf{M}^2\\mathbf{SA}$ 's performance on frequently occurring (upper) object categories closely aligns with its overall performance across all object categories (gIoU: 41.0, cIoU: 54.8 vs. gIoU: 41.0, cIoU: 55.6). However, for infrequent object categories (lower), the performance declines, with cIoU dropping from 55.6 to 39.7 and gIoU from 41.0 to 39.4. 
In contrast, for the", + "bbox": [ + 169, + 868, + 823, + 925 + ], + "page_idx": 22 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "part-only dataset, $\\mathbf{M}^2\\mathrm{SA}$ demonstrates consistent performance across both frequent and infrequent categories. The gIoU scores are 12.8 (upper), 13.3 (lower), and 13.5 (overall), while the cIoU scores are 24.8 (upper), 28.1 (lower), and 27.0 (overall). These findings suggest that $\\mathbf{M}^2\\mathrm{SA}$ is less sensitive to the long-tail distribution in part categories than in object categories.", + "bbox": [ + 169, + 103, + 823, + 161 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "This analysis highlights the strengths and limitations of $\\mathbf{M}^2\\mathrm{SA}$ when addressing long-tail distributions. While $\\mathbf{M}^2\\mathrm{SA}$ demonstrates robust performance across frequent and infrequent part categories, its reduced performance on infrequent object categories indicates potential areas for improvement. Future work could explore strategies to mitigate the impact of long-tail distributions in object categories while preserving its strengths in part-level reasoning tasks.", + "bbox": [ + 169, + 166, + 823, + 238 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "A.13 QUALITATIVE RESULTS", + "text_level": 1, + "bbox": [ + 171, + 253, + 392, + 268 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Qualitative results of $\\mathbf{M}^2\\mathbf{SA}$ on the MMR benchmark are visualized in Fig. 12, Fig. 13, and Fig. 
14.", + "bbox": [ + 169, + 279, + 823, + 295 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/780bb26b58d04d0ce06307e4a065a037cc9ae0f76ca4642cc1999d365355bba6.jpg", + "image_caption": [ + "Figure 12: Qualitative result of $\\mathbf{M}^2\\mathrm{SA}$ on MMR test set." + ], + "image_footnote": [], + "bbox": [ + 183, + 313, + 815, + 520 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/7b400df9ac2094828eb2b7cbbbccba0f1161001ec9e031962516e85228822e7f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 187, + 534, + 241, + 575 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Question: If someone wants to send an e-mail, which equipments on the desk would they be utilizing?", + "bbox": [ + 250, + 537, + 718, + 575 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/f87b1455be084fe49fafc094a3ab25add745daccf0b46690442fe1cde225d8f2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 196, + 595, + 241, + 638 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Answer: They would be utilizing the laptop_computer for typing and viewing the screen, and the mouse for navigating and interacting with the computer.", + "bbox": [ + 254, + 598, + 785, + 656 + ], + "page_idx": 23 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/51f912496d4858a857465483c94ca2327c9526cb16cccd5e558cb4d25ef277b2.jpg", + "image_caption": [ + "Image" + ], + "image_footnote": [], + "bbox": [ + 183, + 140, + 815, + 325 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/73ce8ea7b46495d2a5ef432c1dd123d83b993d3b8e847c67c8b4cf55e1719b32.jpg", + "image_caption": [ + "Figure 13: Qualitative result of $\\mathbf{M}^2\\mathbf{SA}$ on 
MMR test set." + ], + "image_footnote": [], + "bbox": [ + 192, + 338, + 240, + 378 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Question: Where could someone sit while waiting for transportation, and which part provides support for their back?", + "bbox": [ + 250, + 339, + 805, + 378 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/d836da9c1793237c660f8e73f4567737cbd798e73a218a7c0fe2eef6f9998711.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 196, + 398, + 240, + 441 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Answer: The bench's seat provides a place to sit, and the bench's back offers support for the back", + "bbox": [ + 254, + 398, + 759, + 439 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/9bd09c601ae16a1d155426840c2c9f0a133d5256b0e47e2df2f60bdb65dae879.jpg", + "image_caption": [ + "Image" + ], + "image_footnote": [], + "bbox": [ + 183, + 556, + 815, + 742 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/58908b471a9e25e9ab603d4d78941437acd61ebd7534794de643b0305ce333e3.jpg", + "image_caption": [ + "Figure 14: Qualitative result of $\\mathbf{M}^2\\mathrm{SA}$ on MMR test set." 
+ ], + "image_footnote": [], + "bbox": [ + 192, + 752, + 240, + 792 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Question: If I need to check how much time is left before my meal is ready, which part of this appliance should I look at?", + "bbox": [ + 250, + 753, + 782, + 792 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/1b1e851165818a2027e98d3a46c169232b0f133a88469a1ed9ccbad48b6bfcef.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 197, + 811, + 240, + 856 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Answer: You should look at the microwave_oven's time_display to check the remaining time.", + "bbox": [ + 254, + 814, + 699, + 853 + ], + "page_idx": 24 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "A.14 ADDITIONAL EXAMPLES OF MMR", + "text_level": 1, + "bbox": [ + 171, + 104, + 470, + 118 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "To facilitate a quick and intuitive understanding of the MMR dataset's characteristics, we present additional examples in Figure 15.", + "bbox": [ + 169, + 128, + 826, + 159 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/e2251577118d3f5db074751bf55d95dcfbb091ce7441c2b75eb649453c5695a9.jpg", + "image_caption": [], + "image_footnote": [ + "Global Caption: A laptop is opened and set on a table next to a computer mouse, suggesting a typical workspace setup.", + "Question1: If one were to begin typing a document, which two areas of this device would they interact with first?", + "Answer: They would primarily interact with the laptop_computer_1's keyboard [195, 276, 418, 325] to type and laptop_computer_1's touchpad [113, 290, 231, 312] to navigate within the document.", + "Question2: 
Where can one find the manufacturer's branding on the devices pictured here?", + "Answer2: The manufacturer's branding can be found on the laptop_computer_1's logo [354, 281, 370, 288] and on the mouse_(computer_equipment)_1's logo [314, 403, 345, 416]." + ], + "bbox": [ + 305, + 181, + 687, + 292 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/158ac0b1214dc77ea85da060158af18dc79beb3e86b9df0e817e0f798e34d4dd.jpg", + "image_caption": [ + "Figure 15: Additional Examples of MMR dataset." + ], + "image_footnote": [ + "Question3: To move the cursor on the screen without touching the laptop, which part of the computer equipment would one use?", + "Answer3: One would use the mouse (.computer_equipment) 1's body [260, 379, 516, 477] along with either the mouse (.computer_equipment) 1's left button [413, 380, 480, 401] or mouse (.computer_equipment) 1's right button [451, 393, 519, 429] to click and interact with the cursor on the screen.", + "Question4: After finishing work and deciding to pack up, which two parts of the laptop would come into contact?", + "Answer4: When closing the laptop, laptop_computer_1's screen [295, 34, 510, 305] would come into contact with laptop_computer_1's base-panel [77, 271, 479, 352].", + "Global Caption: A plate with a slice of quiche and a side of home fries is ready to be eaten, with a knife resting on the side.", + "Question1: During a meal, what would you typically use to cut a portion of food and how is it structured for ease of use?", + "Answer 1: You would typically use a knife_1 [10.27, 86.49, 258.23, 115.61] to cut a portion of food. It is structured with a knife_1's blade [10, 92, 150, 115] for slicing through food and a knife_1's handle [150, 87, 254, 109] to provide a comfortable grip for handling.", + "Question2: If I wanted to contain a main dish and sides separately on a table, what items could effectively serve this purpose?" 
+ ], + "bbox": [ + 305, + 416, + 687, + 527 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/12341a737e9db676387e93334d996b5f288beff9471119476b53f4bc40c8980c.jpg", + "image_caption": [], + "image_footnote": [ + "Answer2: To contain a main dish and sides separately, you could use plate_1[33.38, 74.25, 640.0, 480.0], which has a plate_1's inner_wall [33, 75, 639, 479] to hold the food and prevent it from spilling, and a separate plate_2[0.0, 23.86, 145.25, 200.7] with plate_2's inner_wall [0, 28, 141, 190] and plate_2's rim [0, 24, 145, 201] to hold another portion, like sides or appetizers.", + "Question3: What part of the tableware should one be cautious of to avoid spills while serving food?", + "Answer: 3: One should be cautious of the plate_1's inner_wall [33, 75, 639, 479] of a plate_1 [33, 38, 74, 25, 640, 480,0] and the plate_2's inner_wall [0, 28, 141, 190] of a plate_2 [0.0, 23.86, 145, 25, 200.7] to avoid spillps, as these parts help to contain the food within the boundaries of the plates.", + "Global Caption: A dog wearing a hat is resting on a pillow.", + "Question1: Where would this animal most likely register scents and how would it express alertness or curiosity?", + "Answer: This animal would most likely register scents using its dog_1's nose [175, 206, 221, 246], and express alertness or curiosity by adjusting the position of its dog_1's ear [329, 101, 398, 212] and dog_1's head [175, 92, 397, 280].", + "Question2: Can you describe the area that supports the dog while it's lying down?", + "Answer 2: The area that supports the dog while it's lying down is [218, 202, 514, 374], particularly emphasized where the dog_1's leg [174, 326, 520, 397] and dog_1's foot [146, 373, 331, 426] make contact with the pillow_1 [5.32, 268.85, 632.27, 427.0].", + "Question3: When this canine looks around its environment, what are the primary features involved in its vision?", + "Answer3: The primary features involved in this canine's vision when 
looking around its environment are the dog 1's eye [201, 145, 294, 177] and the movement of the dog 1's head [175, 92, 397, 280].", + "Question4: In the case of this dog getting up from its resting position, which parts would engage initially to lift its body?", + "Answer: To get up from its resting position, the dog would initially engage its dog_1's leg [174, 326, 520, 397] and dog_1's foot [146, 373, 331, 426] to lift its dog_1's body [218, 202, 514, 374]." + ], + "bbox": [ + 285, + 641, + 712, + 753 + ], + "page_idx": 25 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 25 + }, + { + "type": "page_number", + "text": "26", + "bbox": [ + 488, + 948, + 508, + 959 + ], + "page_idx": 25 + } +] \ No newline at end of file diff --git a/data/2025/2503_13xxx/2503.13881/f8ea68e7-ed15-4c1e-a85c-7872bf8b0c7c_model.json b/data/2025/2503_13xxx/2503.13881/f8ea68e7-ed15-4c1e-a85c-7872bf8b0c7c_model.json new file mode 100644 index 0000000000000000000000000000000000000000..2b51913b75d441b69fd58d35ee7b0784db03b527 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13881/f8ea68e7-ed15-4c1e-a85c-7872bf8b0c7c_model.json @@ -0,0 +1,4586 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.1, + 0.825, + 0.175 + ], + "angle": 0, + "content": "MMR: A LARGE-SCALE BENCHMARK DATASET FOR MULTI-TARGET AND MULTI-GRANULARITY REASONING SEGMENTATION" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.195, + 0.691, + 0.224 + ], + "angle": 0, + "content": "Donggon Jang* Yucheol Cho* Suin Lee Taehyeon Kim Dae-Shik Kim† \nDepartment of Electrical Engineering, KAIST" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.224, + 0.726, + 0.24 + ], + "angle": 0, + "content": "{jdg900,yc_cho,suinlee,rlaxogus0814,daeshik}@kaist.ac.kr" + }, + { + "type": "title", + 
"bbox": [ + 0.451, + 0.275, + 0.548, + 0.29 + ], + "angle": 0, + "content": "ABSTRACT" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.306, + 0.77, + 0.655 + ], + "angle": 0, + "content": "The fusion of Large Language Models (LLMs) with vision models is pioneering new possibilities in user-interactive vision-language tasks. A notable application is reasoning segmentation, where models generate pixel-level segmentation masks by comprehending implicit meanings in human instructions. However, seamless human-AI interaction demands more than just object-level recognition; it requires understanding both objects and the functions of their detailed parts, particularly in multi-target scenarios. For example, when instructing a robot to \"turn on the TV\", there could be various ways to accomplish this command. Recognizing multiple objects capable of turning on the TV, such as the TV itself or a remote control (multi-target), provides more flexible options and aids in finding the optimized scenario. Furthermore, understanding specific parts of these objects, like the TV's button or the remote's button (part-level), is important for completing the action. Unfortunately, current reasoning segmentation datasets predominantly focus on a single target object-level reasoning, which limits the detailed recognition of an object's parts in multi-target contexts. To address this gap, we construct a large-scale dataset called Multi-target and Multi-granularity Reasoning (MMR). MMR comprises 194K complex and implicit instructions that consider multi-target, object-level, and part-level aspects, based on pre-existing image-mask sets. This dataset supports diverse and context-aware interactions by hierarchically providing object and part information. Moreover, we propose a straightforward yet effective framework for multi-target, object-level, and part-level reasoning segmentation. 
Experimental results on MMR show that the proposed method can reason effectively in multi-target and multi-granularity scenarios, while the existing reasoning segmentation model still has room for improvement. The dataset is available at https://github.com/jdg900/MMR." + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.681, + 0.338, + 0.696 + ], + "angle": 0, + "content": "1 INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.713, + 0.827, + 0.853 + ], + "angle": 0, + "content": "Human-machine interaction is a key focus in AI for real-world applications, driving interest in multimodal perception models that integrate vision and language modalities. The model perceives the context within the image related to explicit text query inputs and predicts pixel-level masks or bounding boxes accordingly. For example, Open Vocabulary Segmentation (OVS) (Liang et al., 2023; Cho et al., 2023; Xu et al., 2023), leveraging models like CLIP (Radford et al., 2021), generates segmentation masks from open-set text categories. Similarly, Referring Expression Segmentation (RES) (Wang et al., 2023; Hu et al., 2023; Liu et al., 2023a; Yang et al., 2022) predicts the segmentation mask corresponding to the objects referenced by the text input within the image. However, these models encounter challenges with implicit and complex text queries, limiting their effectiveness in real-world scenarios." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.858, + 0.825, + 0.888 + ], + "angle": 0, + "content": "The emergence of Large Language Models (LLMs) (Zheng et al., 2024; Roumeliotis & Tselikas, 2023; Achiam et al., 2023; Zhang et al., 2023a) offers promising solutions to this challenge. 
Recent" + }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.897, + 0.338, + 0.924 + ], + "angle": 0, + "content": "*Equal Contribution †Corresponding Author" + }, + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.26, + 0.058, + 0.707 + ], + "angle": 270, + "content": "arXiv:2503.13881v1 [cs.CV] 18 Mar 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.495, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.825, + 0.175 + ], + "angle": 0, + "content": "studies (Bai et al., 2023; Li et al., 2023; Liu et al., 2024; Zhu et al., 2023; Zhang et al., 2023b; Chen et al., 2023; You et al., 2023) have witnessed that multimodal LLMs with superior reasoning capabilities can effectively perform vision tasks when given implicit text inputs. However, current multimodal LLMs primarily provide information corresponding to images or regions in text form, lacking pixel-level mask generation." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.181, + 0.827, + 0.378 + ], + "angle": 0, + "content": "To address these limitations, LISA (Lai et al., 2023) introduces reasoning segmentation. Unlike previous tasks that rely on explicit text (e.g., \"steak\"), reasoning segmentation handles implicit queries that require intricate reasoning or world knowledge (e.g., \"the food with most protein\"), by combining LLMs with the Segment Anything Model (SAM) (Kirillov et al., 2023) that has robust mask generation capabilities. LISA also introduces ReasonSeg, a benchmark dataset for reasoning segmentation. ReasonSeg consists of 1,218 image-instruction pairs, each containing implicit text question-answer pairs that involve complex reasoning for each image. 
Nevertheless, ReasonSeg has two limitations: 1) It does not adequately address scenarios involving multiple targets, and 2) it primarily focuses on object-level reasoning, treating part-level targets ambiguously. Although the recently proposed MUSE dataset by PixelLM (Ren et al., 2023) addresses multi-target object-level reasoning, it does not consider part-level reasoning. These observations underscore that current datasets for reasoning segmentation overlook the complexities of multiple targets and part-level scenarios, concentrating instead solely on object-level reasoning. This limitation restricts more advanced functionalities in reasoning segmentation." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.382, + 0.825, + 0.537 + ], + "angle": 0, + "content": "In this paper, we introduce a Multi-target and Multi-granularity Reasoning segmentation (MMR) dataset to overcome these limitations, which covers both multiple targets and fine-grained part-level reasoning. We collect image and mask annotations from the publicly available PACO-LVIS dataset (Ramanathan et al., 2023). These annotations include class names and bounding box information of objects and parts. Then, inspired by LLaVA (Liu et al., 2024), we generate intricate question-answer pairs using the GPT-4V API (Achiam et al., 2023). Through this, the MMR dataset contains a vast collection of 194K complex and implicit instructions for comprehensive reasoning segmentation. A distinguishing characteristic of the proposed MMR dataset is its ability to handle multiple objects and diverse parts in the question-answer pairs. This diverse granularity enables models to reason and comprehend complex questions about both multiple target objects and their parts within a single query, providing more meaningful and high-quality masks." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.542, + 0.827, + 0.669 + ], + "angle": 0, + "content": "Moreover, we propose a simple yet effective model, Multi-target and Multi-granularity Segmentation Assistant (M²SA), for multi-target, object-level, and part-level reasoning segmentation. The M²SA model incorporates an early local feature fusion and multiple [SEG] tokens, which enables the model to enhance fine-grained visual understanding and consider multi-target segmentation. Experimental results on benchmarks, such as MMR, single-target referring expression segmentation datasets, and a multi-granularity referring expression segmentation dataset, demonstrate that M²SA outperforms existing state-of-the-art methods. We believe that our dataset and model serve as a valuable resource for potential applications in real-world reasoning segmentation tasks, offering enhanced versatility and robustness." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.674, + 0.474, + 0.689 + ], + "angle": 0, + "content": "Our contributions are summarized as follows:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.699, + 0.822, + 0.74 + ], + "angle": 0, + "content": "- We construct the MMR dataset, which includes 194K complex and implicit question pairs for multi-target, object-level, and part-level reasoning segmentation. This dataset facilitates advanced reasoning segmentation tasks in open-world scenarios." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.745, + 0.822, + 0.785 + ], + "angle": 0, + "content": "- We propose \\(\\mathbf{M}^2\\mathbf{SA}\\) for multi-target, object-level, and part-level reasoning segmentation. It incorporates an early local feature fusion and multiple [SEG] tokens to improve fine-grained visual understanding and segment multiple targets." 
+ }, + { + "type": "text", + "bbox": [ + 0.217, + 0.79, + 0.822, + 0.816 + ], + "angle": 0, + "content": "- Experimental results on MMR and other benchmarks show that \\(\\mathbf{M}^2\\mathbf{SA}\\) outperforms state-of-the-art methods, validating the effectiveness of its components." + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.699, + 0.822, + 0.816 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.838, + 0.346, + 0.853 + ], + "angle": 0, + "content": "2 RELATED WORK" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.868, + 0.825, + 0.926 + ], + "angle": 0, + "content": "Multimodal Large Language Models Recent advancements (Peng et al., 2023; Taori et al., 2023; Touvron et al., 2023; Zhang et al., 2022) in multimodal Large Language Models (LLMs) have greatly improved the integration between language models and vision tasks by comprehensively understanding and recognizing multiple modalities. Recently proposed models such as BLIP-2 (Li et al., 2023)," + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.948, + 0.506, + 0.96 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.218 + ], + "angle": 0, + "content": "Flamingo (Alayrac et al., 2022), MiniGPT-4 (Zhu et al., 2023), llama-adapter (Gao et al., 2023; Zhang et al., 2023a), LLaVA (Liu et al., 2024), InstructBLIP (Dai et al., 2024), InternGPT (Liu et al., 2023b), and QwenVL (Bai et al., 2023) have shown superiority at multimodal tasks such as visual question-answering and captioning, leveraging the multimodal understanding capability of LLMs. 
While these methods have demonstrated improved performance in vision-language tasks through instructional tuning, they only provide the text output about the visual target and focus on a holistic understanding of global information in the image. Therefore, their applicability is limited in tasks requiring finer-grained understanding at the pixel level." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.231, + 0.828, + 0.427 + ], + "angle": 0, + "content": "Reasoning Segmentation The task of reasoning segmentation, introduced by LISA (Lai et al., 2023), is understanding implicit text instruction and providing a corresponding mask for the answer. This task is more challenging and important than the referring expression segmentation task which deals with explicit and simple text queries. For instance, when a user wants to segment a pepper in an image, handling an implicit query like 'the food with a spicy taste' instead of a direct reference such as 'the pepper' is significant for improving human-AI interaction. To tackle this, LISA introduces ReasonSeg, a benchmark containing implicit text queries that require complex reasoning for each image. Recently, PixelLM (Ren et al., 2023), has addressed the limitation of ReasonSeg which considers only a single target in a query text. PixelLM constructs MUSE, a new dataset with multiple target objects in the text instructions. However, both studies are still limited to object-level reasoning segmentation. Methods such as GSVA (Xia et al., 2024) and GLaMM (Rasheed et al., 2024) have also been proposed, but they focus on frameworks for object-level reasoning segmentation rather than introducing new datasets. In this paper, we extend these existing tasks and propose a new benchmark dataset that considers both part-level and object-level reasoning." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.44, + 0.828, + 0.637 + ], + "angle": 0, + "content": "Part-level Segmentation Recent research (Li et al., 2022; Kirillov et al., 2019; Michieli et al., 2020; Zhou et al., 2021; Pan et al., 2023) has delved into a fine-grained understanding of objects at the part-level. For the part-level visual understanding, datasets with detailed annotations for each part are required. To this end, some initial studies (Gong et al., 2017; Li et al., 2017; Yang et al., 2019; Wah et al., 2011; Jia et al., 2020; Zheng et al., 2018) have introduced datasets with part-level masks on specific domains, such as human body parts (Gong et al., 2017; Li et al., 2017; Yang et al., 2019), bird parts (Wah et al., 2011), and fashion cloth parts (Jia et al., 2020; Zheng et al., 2018). Moreover, recognizing the need for annotations on general objects, some approaches (Chen et al., 2014; Mo et al., 2019; He et al., 2022; Zhou et al., 2019; Meletis et al., 2020; Ramanathan et al., 2023; Wei et al., 2024) have extended the existing object-level datasets by including more fine-grained annotations. Furthermore, there has been an attempt (Wang et al., 2023) to extend the previous Referring Expression Segmentation (RES) task to provide part-level segmentation masks matching explicit text queries. In line with this effort, our work introduces a new dataset that includes multiple target parts and diverse implicit text queries for multi-granularity reasoning segmentation." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.655, + 0.342, + 0.672 + ], + "angle": 0, + "content": "3 MMR DATASET" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.687, + 0.827, + 0.855 + ], + "angle": 0, + "content": "Current publicly available datasets for reasoning segmentation primarily emphasize object-level reasoning. 
Consequently, Multimodal Large Language Models (MLLMs) often struggle with questions that involve multiple targets or require reasoning at both the object- and part-levels. To address these limitations, we introduce the Multi-target and Multi-granularity Reasoning (MMR) dataset. MMR includes multi-target, object-level, and part-level reasoning scenarios. This dataset comprises images and masks from the publicly available PACO dataset (Ramanathan et al., 2023), supplemented with implicit and complex question-answer pairs generated by the GPT-API (Achiam et al., 2023). Unlike existing datasets, MMR includes large-scale question-answer pairs that consider multiple target cases and require reasoning at both the object- and part-levels, enhancing its versatility and applicability. In the following sections, we detail the dataset generation process (Sec. 3.1), describe the data filtering process (Sec. 3.2), provide a statistical analysis of MMR (Sec. 3.3), and highlight its distinctiveness compared to existing datasets (Sec. 3.4)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.87, + 0.353, + 0.884 + ], + "angle": 0, + "content": "3.1 DATA GENERATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.825, + 0.927 + ], + "angle": 0, + "content": "To generate a multi-target, object-level, and part-level reasoning segmentation dataset, we leverage the PACO-LVIS dataset (Ramanathan et al., 2023). 
PACO-LVIS includes 456 object-specific part" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "image", + "bbox": [ + 0.251, + 0.104, + 0.75, + 0.304 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.269, + 0.326, + 0.726, + 0.341 + ], + "angle": 0, + "content": "Figure 1: The prompt used in our data creation process with GPT-4V." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.364, + 0.825, + 0.462 + ], + "angle": 0, + "content": "classes across 75 object categories, offering 502K part-level masks and bounding boxes annotated across 273K object-level masks and bounding boxes. By utilizing these comprehensive images and multi-granularity mask annotations, we can reduce annotation costs while ensuring detailed and accurate segmentation data. To create intricate and implicit question-answer pairs for multiple target and multi-granularity reasoning, we employ a GPT-assisted data generation scheme similar to LLaVA (Liu et al., 2024). Specifically, we adopt GPT-4V API which has robust visual understanding capabilities. Fig. 1 illustrates the entire data generation process." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.469, + 0.825, + 0.593 + ], + "angle": 0, + "content": "To guide the GPT-4V API effectively, we carefully craft prompts that include GPT role, object and part information, task prompts, and requirements. GPT role defines the persona of the GPT-4V API, informing it about the context and objectives of the data generation process. Object & part information provides comprehensive annotations, such as object and part names within the image and their corresponding bounding box coordinates. 
Task prompt informs the GPT-4V API about the task definition and considerations for generating question-answer pairs. Requirements set the rules and patterns that the GPT-4V API should follow when generating question-answer pairs (e.g., \"questions should avoid direct mention of coordinates of objects or parts\" or \"Q&A pairs should contain multiple objects or parts\"). Please see the Appendix A.5 for the detailed prompt." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.6, + 0.825, + 0.685 + ], + "angle": 0, + "content": "The GPT-4V-assisted data generation follows a two-step process: 1) Global Caption Generation: GPT-4V API first generates a global caption based on the image to foster a deep understanding of its context. 2) Question-Answer Pair Generation: Leveraging this global caption along with object and part information, GPT-4V autonomously crafts multi-target, multi-granularity question-answer pairs. Carefully designed prompts and a two-step generation process enable GPT-4V to deeply comprehend image context and generate contextually relevant question-answer pairs." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.701, + 0.334, + 0.714 + ], + "angle": 0, + "content": "3.2 DATA FILTERING" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.727, + 0.825, + 0.81 + ], + "angle": 0, + "content": "Despite meticulously crafted prompts for guiding GPT-4V, occasional deviations from established rules result in the generation of subpar question-answer pairs. These deviations include questions that reveal explicit target coordinates or provide overly direct hints, as well as answers that offer irrelevant information or omit essential details. To enhance the reliability of the question-answer pairs in our dataset, a rigorous filtering process is essential. 
Therefore, we engage four skilled human inspectors to review the dataset according to strict criteria:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.822, + 0.825, + 0.877 + ], + "angle": 0, + "content": "- Logicality and Reasoning: Questions should avoid explicit target coordinates or strong hints. Non-compliant questions and their corresponding answers are removed. For example, a question like \"Which part of this animal [coordinates] uses its sense of smell?\" would be excluded." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.882, + 0.825, + 0.925 + ], + "angle": 0, + "content": "- Coherence and Relevance: Answers lacking essential target information or containing irrelevant details are corrected for precision and relevance. This includes cases where answers mention objects or parts not provided in the annotations." + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.822, + 0.825, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "image", + "bbox": [ + 0.241, + 0.102, + 0.489, + 0.235 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.491, + 0.102, + 0.756, + 0.237 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.241, + 0.236, + 0.71, + 0.246 + ], + "angle": 0, + "content": "Caption: A knife is inserted vertically into a sandwich on a cutting board, with another knife lying beside it and bottles in the background." + }, + { + "type": "image_footnote", + "bbox": [ + 0.241, + 0.251, + 0.751, + 0.274 + ], + "angle": 0, + "content": "Question1: What item on the table is designed to be held at one end while the other end is meant for cutting through food? 
Answer1: The knife_1 [204.79, 2.4, 238.63, 226.53] is designed with a knife_1's handle [205, 2, 239, 126] to be held while the knife_1's blade [213, 121, 237, 226] is meant for cutting through food." + }, + { + "type": "image_footnote", + "bbox": [ + 0.241, + 0.279, + 0.754, + 0.303 + ], + "angle": 0, + "content": "Question2: Which object on the table appears to be in the process of being used to keep a sandwich upright? Answer2: The knife_2 [304.84, 320.65, 615.34, 427.0] with its knife_2's blade [305, 321, 601, 422] inserted into the sandwich is being used to keep it upright." + }, + { + "type": "image_footnote", + "bbox": [ + 0.241, + 0.308, + 0.731, + 0.325 + ], + "angle": 0, + "content": "Question3: If I wanted to read the product information on a container in view, which part should I look at? Answer3: To read the product information, you should look at the bottle_1's label [460, 105, 500, 282] or the bottle_2's label [300, 4, 413, 176]." + }, + { + "type": "image_footnote", + "bbox": [ + 0.241, + 0.329, + 0.742, + 0.353 + ], + "angle": 0, + "content": "Question4: Which objects in the scene are meant to contain liquids, and what part of them touches the surface they rest on? Answer4: The objects meant to contain liquids are bottle_1[459.07, 0.0, 603.49, 315.78] and bottle_2[296.85, 1.15, 416.19, 242.2]. The part that touches the surface they rest on is bottle_1's base [463, 287, 596, 316] and bottle_2's base [307, 220, 400, 241]." + }, + { + "type": "list", + "bbox": [ + 0.241, + 0.236, + 0.754, + 0.353 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.377, + 0.825, + 0.406 + ], + "angle": 0, + "content": "Figure 2: An example from the MMR dataset generated through our data creation process. The left and right pictures show the object- and part-level segmentation masks, respectively." 
+ }, + { + "type": "text", + "bbox": [ + 0.217, + 0.43, + 0.825, + 0.501 + ], + "angle": 0, + "content": "- Clarity and Precision: Questions and answers should be clear, concise, and free of ambiguity. For example, ill-defined data, such as asking about the function of an object or part from a segmentation perspective, is removed (e.g., \"What is the function of object_1?\"). Answers should provide precise information that directly addresses the question without causing confusion." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.513, + 0.825, + 0.598 + ], + "angle": 0, + "content": "Originally, 222K question-answer pairs are generated. Of these, \\(12.6\\%\\) are filtered out through a review process conducted by the four inspectors, resulting in the final MMR dataset. Since dataset generation is a key contribution to our work, each inspector thoroughly reviews the entire set of 222K question-answer pairs. To minimize human error, we only filter out question-answer pairs flagged by two or more inspectors. This meticulous filtering regimen ensures the integrity and trustworthiness of the MMR dataset. An example of the generated question-answer pairs is illustrated in Fig. 2." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.614, + 0.34, + 0.628 + ], + "angle": 0, + "content": "3.3 DATA STATISTICS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.639, + 0.825, + 0.71 + ], + "angle": 0, + "content": "The MMR dataset includes 194,398 intricate and implicit question-answer pairs with 57,643 corresponding images and masks selected from PACO-LVIS. The entire dataset is split into distinct sets for training (154,127 pairs), validation (8,194 pairs), and test (32,077 pairs). Moreover, the test set is further categorized into three subsets: object-only, part-only, and mixed sets, providing a benchmark for evaluating multi-granularity reasoning segmentation capabilities." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.716, + 0.827, + 0.926 + ], + "angle": 0, + "content": "Additionally, our dataset inherits a rich coverage of 75 object categories and 445 part categories from PACO-LVIS, enhancing its diversity and utility. We delve into the frequency distribution per object and part category across question-answer pairs. Fig. 3 (b) and (d) provide a comprehensive overview of the number of questions per object category and part category, respectively. The results show that our dataset encompasses a wide range of categories, ensuring that the question-answer pairs are not biased toward specific categories and exhibit a high level of diversity. Furthermore, the word clouds illustrated in Fig. 3 (a) and (c) highlight the prevalent head object and part categories, respectively. These word clouds demonstrate that our question-answer pairs are grounded in common and general objects and their associated parts. Fig. 3 (e) presents statistics on the number of targets in each question-answer pair. On average, there are 1.8 targets per answer, with the maximum number of targets in a single pair being 16. This demonstrates that our dataset can consider multiple targets in an image and cover diverse target reasoning. To evaluate the comprehensiveness of both objects and parts in the proposed dataset, we compare their occurrences within the total question-answer pairs. As depicted in Fig. 3 (f), there are 114,704 descriptions for objects and 226,869 for parts, maintaining a ratio of approximately 1:2. 
This ratio is reasonable because objects typically" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "image_caption", + "bbox": [ + 0.257, + 0.111, + 0.272, + 0.121 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.288, + 0.109, + 0.4, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.423, + 0.111, + 0.437, + 0.121 + ], + "angle": 0, + "content": "(c)" + }, + { + "type": "image", + "bbox": [ + 0.453, + 0.104, + 0.57, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.586, + 0.112, + 0.601, + 0.121 + ], + "angle": 0, + "content": "(e)" + }, + { + "type": "image", + "bbox": [ + 0.597, + 0.113, + 0.741, + 0.195 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.258, + 0.206, + 0.408, + 0.294 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.423, + 0.206, + 0.574, + 0.293 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.587, + 0.206, + 0.741, + 0.293 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.315, + 0.825, + 0.373 + ], + "angle": 0, + "content": "Figure 3: Statistics of the proposed MMR dataset. (a) the word cloud for the object categories, (b) the number of objects per each object category in questions (log scale), (c) the word cloud for the part categories, (d) the number of parts per each part category in questions (log scale), (e) the distribution of target count in answers, and (f) the total number of expressions of objects and parts." 
+ }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.399, + 0.825, + 0.441 + ], + "angle": 0, + "content": "Table 1: Comparison among several reasoning segmentation datasets, including ReasonSeg (Lai et al., 2023), MUSE (Ren et al., 2023), and the proposed MMR. Here, part-level is an expression that refers to various parts of an object that appear in the image." + }, + { + "type": "table", + "bbox": [ + 0.254, + 0.445, + 0.744, + 0.496 + ], + "angle": 0, + "content": "
DatasetsObject-levelPart-levelMulti-target# of Q&A pairsGPT models
ReasonSeg×1.2KGPT-3.5
MUSE×214KGPT-4V
MMR194KGPT-4V
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.529, + 0.825, + 0.558 + ], + "angle": 0, + "content": "consist of multiple parts. Therefore, it reflects a balanced distribution, contributing to the dataset's comprehensiveness and facilitating multi-granularity knowledge understanding." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.578, + 0.704, + 0.591 + ], + "angle": 0, + "content": "3.4 COMPARISON WITH EXISTING REASONING SEGMENTATION DATASETS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.605, + 0.825, + 0.633 + ], + "angle": 0, + "content": "Tab. 1 presents a comparative overview of existing reasoning segmentation datasets and the proposed MMR dataset. As observed, MMR offers several notable advantages over existing datasets." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.639, + 0.825, + 0.696 + ], + "angle": 0, + "content": "First, MMR contains 194K question-answer pairs, comparable to MUSE (Ren et al., 2023), and far exceeds ReasonSeg (Lai et al., 2023) which has only 1,218 question-answer pairs primarily designed for validation and testing purposes. This extensive scale facilitates both training and evaluation for reasoning segmentation." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.703, + 0.827, + 0.925 + ], + "angle": 0, + "content": "Second, MMR supports question-answer pairs covering multi-target and multi-granularity (object-level and part-level) visual reasoning. Although MUSE includes multi-target instances, its coverage is limited to object-level reasoning. This lack of part-level detail reduces its effectiveness in fine-grained visual tasks. Part-level reasoning in MMR enables a more comprehensive understanding of visual contexts and hierarchical relationships between parts and objects. While ReasonSeg appears to include part-level reasoning, ReasonSeg often has ambiguous boundaries between objects and their parts because it doesn't specify which object a part belongs to. 
For instance, in a scene with a \"car\" and a \"tire\", ReasonSeg considers the \"tire\" as part of the \"car\", even if the tire is not attached. In contrast, MMR clearly distinguishes the boundaries between objects and their parts by specifying hierarchy like which object a part belongs to based on their spatial context. Additionally, unlike ReasonSeg, MMR distinguishes multiple objects of the same class within a single image at the instance level. For example, ReasonSeg might group all buses in a scene under a single \"Bus\" label. On the other hand, MMR treats them as distinct entities like \"Bus_1,\" \"Bus_2\", etc. Also, ReasonSeg treats all screens simply as \"screen,\" whereas MMR would specify \"laptop_1's screen,\" \"laptop_2's screen,\" and so forth. This allows MMR to handle objects or parts of the same class separately by considering their spatial context within the image." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.825, + 0.189 + ], + "angle": 0, + "content": "Third, MMR leverages the advanced visual understanding capabilities of GPT-4V for question-answer generation. GPT-4V receives the image along with information such as class names and bounding boxes of objects and parts, enabling detailed and contextually accurate question-answer generation. In comparison, ReasonSeg generates questions using the language-specialized GPT-3.5 and pre-trained image tagging models, which do not fully capture the visual context, leading to less relevant question-answer pairs with the image." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.195, + 0.825, + 0.239 + ], + "angle": 0, + "content": "In summary, MMR provides a substantial improvement over ReasonSeg and MUSE by including large-scale, multi-target, and multi-granularity question-answer pairs. It strengthens real-world applicability, making it a valuable asset for advancing research in reasoning-based segmentation tasks." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.263, + 0.398, + 0.279 + ], + "angle": 0, + "content": "4 BASELINE FRAMEWORK" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.298, + 0.389, + 0.534 + ], + "angle": 0, + "content": "We propose a novel baseline framework for multi-target and multi-granularity reasoning segmentation, \\(\\mathbf{M}^2\\mathbf{SA}\\). \\(\\mathbf{M}^2\\mathbf{SA}\\) enhances the LISA framework with two key features: 1) Early Local Feature Fusion and 2) multiple [SEG] tokens. For Early Local Feature Fusion, we extract local features from the early layer of the SAM's vision encoder, which contains fine-grained details such as image edges and boundaries. These local features are fused with the global semantic context features from the last layer of SAM's vi" + }, + { + "type": "image", + "bbox": [ + 0.402, + 0.325, + 0.828, + 0.489 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.461, + 0.506, + 0.761, + 0.521 + ], + "angle": 0, + "content": "Figure 4: The overview of \\(\\mathbf{M}^2\\mathrm{SA}\\) framework." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.534, + 0.825, + 0.631 + ], + "angle": 0, + "content": "sion encoder for more informative visual features in the mask decoder. Multiple [SEG] tokens overcome the LISA framework's limitation of a single [SEG] token, which struggles to segment multiple targets simultaneously. To overcome this, we propose utilizing multiple [SEG] tokens. 
In our MMR dataset, we append a [SEG] token to each target object and part in the answer annotations (e.g., \"When closing the laptop, laptop computer's screen [SEG] would come into contact with laptop computer's base panel [SEG].\"). This approach enables the model to predict separate [SEG] tokens for each target, reducing ambiguity among multiple targets." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.651, + 0.827, + 0.926 + ], + "angle": 0, + "content": "Model Architecture Fig. 4 presents the overall architecture of the proposed \\(\\mathbf{M}^2\\mathbf{SA}\\) framework, which integrates two core components: Segment Anything Model (SAM)(Kirillov et al., 2023) and Multimodal Large Language Model (MLLM), specifically LLaVA(Liu et al., 2024). SAM module consists of SAM Vision Encoder \\((E)\\) and SAM Mask Decoder \\((D)\\), while the MLLM comprises CLIP Vision Encoder \\((I)\\), vision-to-text projector \\((\\psi)\\), and Large Language Model \\((F)\\). The image \\(x_{img} \\in R^{h \\times w \\times 3}\\) is fed into the SAM Vision Encoder \\((E)\\), which generates global context features \\(v_g = E(x_{img}) \\in R^{h/16 \\times w/16 \\times c}\\) and early local features \\(v_l = E_l(x_{img}) \\in R^{h/16 \\times w/16 \\times c'}\\). To align the channel dimensions of \\(v_l\\) with \\(v_g\\), the early local features \\(v_l\\) are passed through two convolution layers, resulting in refined features \\(\\hat{v}_l \\in R^{h/16 \\times w/16 \\times c}\\). \\(v_g\\) and \\(\\hat{v}_l\\) are then summed to obtain visual features \\(v_{seg} \\in R^{h/16 \\times w/16 \\times c}\\) for segmentation. Simultaneously, the image \\(x_{img}\\) is input into the CLIP Vision Encoder \\((I)\\), producing visual token embeddings \\(f_{img} = \\psi(I(x_{img})) \\in R^{N_{img} \\times d}\\), which are mapped to the LLM input space using the vision-to-text projector \\(\\psi\\). 
In parallel, the text queries \\(x_{txt}\\) are tokenized by the \\(F\\)'s tokenizer, producing text token embeddings \\(f_{txt} \\in R^{N_{txt} \\times d}\\). The visual token embeddings \\(f_{img}\\) and text token embeddings \\(f_{txt}\\) are concatenated and processed by LLM \\(F\\), resulting in output response \\(\\hat{y}_{txt} = F(\\text{concat}(f_{img}, f_{txt}))\\). \\(\\hat{y}_{txt}\\) contains the textual response to the text query and special [SEG] tokens that correspond to each target entity to be segmented. These multiple [SEG] token embeddings are extracted and projected into SAM's prompt space via the projector \\(\\phi\\), resulting in embeddings \\(f_{seg} = \\phi(\\hat{y}_{txt}[SEG]) \\in R^{N_{seg} \\times c}\\). Finally, the SAM Mask Decoder \\((D)\\) takes the visual features \\(v_{seg}\\) and the multiple [SEG] token embeddings" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.133 + ], + "angle": 0, + "content": "\\(f_{seg}\\) as input to generate the segmentation mask \\(M = D(\\text{concat}(v_{seg}, f_{seg}))\\), which identifies the target regions in the image corresponding to the text queries." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.147, + 0.825, + 0.233 + ], + "angle": 0, + "content": "**Optimization** Our model is trained end-to-end through two sources of supervision. For the text generation, we compute auto-regressive cross-entropy loss \\( L_{txt} \\) between the text output \\( \\hat{y}_{txt} \\) and the ground-truth text answer \\( y_{txt} \\). For the high-quality segmentation mask generation, the mask loss \\( L_{mask} \\) is calculated between the output mask \\( \\hat{M} \\) and the ground-truth mask \\( M \\). 
The mask loss \\( L_{mask} \\) is a weighted sum of per-pixel binary cross-entropy loss \\( L_{bce} \\) and a DICE loss \\( L_{dice} \\), determined by \\( \\lambda_{bce} \\) and \\( \\lambda_{dice} \\). The overall loss \\( L \\) is formulated as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.424, + 0.256, + 0.555, + 0.27 + ], + "angle": 0, + "content": "\\[\nL = L _ {t x t} + L _ {m a s k},\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.392, + 0.266, + 0.824, + 0.287 + ], + "angle": 0, + "content": "\\[\nL _ {m a s k} = \\lambda_ {b c e} L _ {b c e} + \\lambda_ {d i c e} L _ {d i c e}, \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.291, + 0.545, + 0.307 + ], + "angle": 0, + "content": "where \\(\\lambda_{bce}\\) and \\(\\lambda_{dice}\\) are set to 0.5 and 2.0, respectively." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.325, + 0.319, + 0.34 + ], + "angle": 0, + "content": "5 EXPERIMENT" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.356, + 0.377, + 0.37 + ], + "angle": 0, + "content": "5.1 EXPERIMENTAL SETUP" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.381, + 0.827, + 0.618 + ], + "angle": 0, + "content": "Implementation Details We use pre-trained LLaVA-7B (Liu et al., 2024) and LLaVA-Llama2-13B with CLIP-ViT-L/14 (Radford et al., 2021) and Vicuna-7B (Chiang et al., 2023)/Llama2-13B (Touvron et al., 2023) to form Multimodal Large Language Model (MLLM). We adopt the pre-trained SAM-ViT-H (Kirillov et al., 2023) for the segmentation model. For CLIP-ViT-L/14, input image \\( x_{img} \\) is resized to \\( 224 \\times 224 \\times 3 \\) and processed with a patch size of 14, resulting in \\( N_{img} = 256 \\). LLM dimensions \\( d \\) are set to 4096 and 5120 for Vicuna-7B and Llama2-13B. For SAM-ViT-H, \\( c \\) and \\( c' \\) are 256 and 1280, respectively. Efficient fine-tuning of the MLLM is facilitated using LoRA (Hu et al., 2021). 
The trainable components in \\( \\mathbf{M}^2\\mathbf{SA} \\) include the SAM Mask Decoder \\( D \\), the projector \\( \\phi \\), two convolution layers, the LoRA adapter in MLLM, and the token embeddings. We use features from the 8th layer in the SAM Vision Encoder \\( E \\) for early layer feature fusion. Our model is trained for 10 epochs, with each epoch consisting of 5,000 steps. We employ the AdamW (Loshchilov & Hutter, 2017) optimizer with a learning rate of 0.0003 and set gradient accumulation to 10 steps per update. Additionally, we use WarmupDecayLR as the learning rate scheduler. The learning rate is linearly decayed after 100 steps. The batch size and LoRA rank are set to 2 and 8, respectively. All experiments are conducted using 4 NVIDIA RTX A6000 GPUs. The results reported in the paper are the average values obtained from experiments conducted with 3 different random seeds." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.632, + 0.827, + 0.786 + ], + "angle": 0, + "content": "Datasets For model training, we adopt the mixed training dataset composition scheme proposed by LISA (Lai et al., 2023), comprising four types: semantic segmentation datasets (ADE20K (Zhou et al., 2019), COCO-Stuff (Caesar et al., 2018), Mapillary (Neuhold et al., 2017), PACO-LVIS (Ramanathan et al., 2023), and PASCAL-Part (Chen et al., 2014)), referring expression segmentation datasets (RefCOCO (Kazemzadeh et al., 2014), RefCOCO+ (Kazemzadeh et al., 2014), RefCOCOg (Mao et al., 2016), and RefCLEF (Kazemzadeh et al., 2014)), a visual question answering dataset (LLaVA-Instruct-150K (Liu et al., 2024)), and the proposed MMR dataset for multi-target and multi-granularity reasoning segmentation. We sample the data from the mixed training dataset in a ratio of 2:9:2:6, where 2 represents semantic segmentation datasets, 9 represents referring expression segmentation datasets, 2 represents the visual question answering dataset, and 6 represents the proposed MMR dataset." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.799, + 0.825, + 0.926 + ], + "angle": 0, + "content": "Baseline Methods To validate the effectiveness of the \\(\\mathbf{M}^2\\mathbf{SA}\\) for a multi-target and multi-granularity reasoning segmentation task, we adopt LISA (Lai et al., 2023), GSVA (Xia et al., 2024), and GLaMM (Rasheed et al., 2024) along with their variants. The pre-trained models refer to those trained solely on their respective datasets. In contrast, the variant models referred to as \\(\\mathrm{model}_{tr}\\), are trained from scratch on a mixed training set that includes the MMR dataset. Due to issues with the publicly available code from the PixelLM, we exclude PixelLM from the baseline methods to ensure reliable and consistent comparison results. For a Multi-granularity Referring Expression Segmentation (MRES) task, we additionally adopt the class RES models (Yang et al., 2022; Liu et al., 2023a; Wang et al., 2023; 2022) and the general models (Zhu et al., 2022; Zou et al., 2023; 2024)." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.113, + 0.825, + 0.157 + ], + "angle": 0, + "content": "Table 2: Results on MMR benchmark. The gIoU and cIoU metrics are reported for the comparison. Obj & Part, Obj, and Part denote multi-granularity, object-only, and part-only evaluation settings. The best results are highlighted in bold." + }, + { + "type": "table", + "bbox": [ + 0.28, + 0.16, + 0.722, + 0.282 + ], + "angle": 0, + "content": "
Methodsvaltest
Obj & PartObjPartObj & Part
gIoUcIoUgIoUcIoUgIoUcIoUgIoUcIoU
LISA-7B (Lai et al., 2023)13.818.323.525.16.67.914.517.9
LISA-7Btr19.431.634.741.88.013.119.527.1
GSVA-7B (Xia et al., 2024)14.625.126.434.36.011.615.524.8
GSVA-7Btr19.838.930.241.18.018.621.234.5
GLaMM (Rasheed et al., 2024)12.619.223.731.93.96.413.318.7
GLaMMtr26.947.140.354.212.125.530.345.0
M2SA-7B27.848.641.055.613.527.030.946.8
LISA-Llama2-13B (Lai et al., 2023)15.420.026.127.97.48.416.119.8
LISA-Llama2-13Btr22.333.440.245.210.716.423.029.2
M2SA-Llama2-13B28.449.142.357.613.627.231.647.6
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.313, + 0.825, + 0.357 + ], + "angle": 0, + "content": "Table 3: Referring expression segmentation results on RefCOCO, RefCOCO+ (Kazemzadeh et al., 2014) and RefCOCOg (Mao et al., 2016) among \\(\\mathbf{M}^2\\mathbf{SA}\\) and existing methods. For a fair comparison with previous methods, the cIoU metrics are adopted. The best results are highlighted in bold." + }, + { + "type": "table", + "bbox": [ + 0.28, + 0.359, + 0.722, + 0.497 + ], + "angle": 0, + "content": "
MethodsRefCOCORefCOCO+RefCOCOg
valtestAtestBvaltestAtestBval(U)test(U)
MCN (Luo et al., 2020)62.464.259.750.655.544.749.249.4
VLT (Ding et al., 2021)67.570.565.256.361.050.155.057.0
CRIS (Wang et al., 2022)70.573.266.162.368.153.759.960.4
LAVT (Yang et al., 2022)72.775.868.862.168.455.161.262.1
ReLA (Liu et al., 2023a)73.876.570.266.071.057.765.066.0
X-Decoder (Zou et al., 2023)------64.6-
SEEM (Zou et al., 2024)------65.7-
LISA-7B (Lai et al., 2023)74.176.571.162.467.456.566.468.5
GSVA-7B (Xia et al., 2024)76.477.472.864.567.758.671.172.0
GLaMM (Rasheed et al., 2024)79.583.276.972.678.764.674.274.9
M2SA-7B74.076.869.763.167.256.167.068.3
LISA-Llama2-13B (Lai et al., 2023)73.677.370.563.268.257.067.068.4
M2SA-Llama2-13B74.677.671.064.068.157.669.069.3
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.529, + 0.825, + 0.601 + ], + "angle": 0, + "content": "Evaluation Metrics Following the implementation of the referring expression segmentation works, we adopt gIoU and cIoU scores to assess the quality of the segmentation mask. gIoU denotes the mean IoU for each mask, whereas cIoU is computed by the cumulative intersection area over the cumulative union area across the entire dataset. Given that cIoU may exhibit bias towards large-area objects, gIoU is preferable for evaluating part regions." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.617, + 0.478, + 0.631 + ], + "angle": 0, + "content": "5.2 RESULTS ON BENCHMARK DATASETS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.643, + 0.827, + 0.77 + ], + "angle": 0, + "content": "Comparison on MMR Tab. 2 compares \\(\\mathbf{M}^2\\mathrm{SA}\\) and the baseline models in a multi-target and multi-granularity reasoning segmentation task (MMR dataset). The pre-trained models perform poorly on the proposed MMR dataset, particularly struggling with the part-only set due to its lack of detailed part-level understanding. Conversely, \\(\\mathrm{LISA}_{tr}\\), \\(\\mathrm{GSVA}_{tr}\\), and \\(\\mathrm{GLaMM}_{tr}\\), trained using the proposed MMR dataset, exhibit superior performance as they acquire both object-level and part-level knowledge. However, its ability to handle multi-target and fine-detail reasoning remains limited. In contrast, the proposed \\(\\mathbf{M}^2\\mathrm{SA}\\) shows highly competitive performance, effectively managing multi-target scenarios and fine-detail tasks, thus showcasing its strength in comprehensive reasoning segmentation. Qualitative results are provided in the Appendix A.13." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.785, + 0.827, + 0.926 + ], + "angle": 0, + "content": "Comparison on Referring Expression Segmentation Task Tab. 3 presents the single-target object-level RefCOCO series dataset results. 
While \\(\\mathbf{M}^2\\mathbf{SA}\\) achieves commendable performance, it is important to note that single-target referring expression segmentation is a relatively simple task, involving explicit queries that focus on identifying a single object. The true strength of \\(\\mathbf{M}^2\\mathbf{SA}\\) lies in its ability to excel in more complex and challenging tasks, such as multi-target referring expression segmentation and multi-granularity referring segmentation. To evaluate its performance on multi-target referring expression segmentation, we curate text queries for multi-target objects using annotation information from the RefCOCO-series datasets. Each query is constructed by randomly selecting 4 to 6 object categories from each image and generating text prompts like \"Can you segment the class 1, class 2, ..., and class n?\" We then compare \\(\\mathbf{M}^2\\mathbf{SA}\\)'s performance against LISA," + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.113, + 0.825, + 0.141 + ], + "angle": 0, + "content": "Table 4: Multi-referring expression segmentation results. We adopt the cIoU metric for comparison. The best results are highlighted in bold." + }, + { + "type": "table", + "bbox": [ + 0.28, + 0.145, + 0.723, + 0.221 + ], + "angle": 0, + "content": "
MethodsMulti-RefCOCOMulti-RefCOCO+Multi-RefCOCOg
valtestAtestBvaltestAtestBval(U)test(U)
LISA-7B (Lai et al., 2023)34.032.736.428.228.628.545.248.7
GSVA-7B (Xia et al., 2024)50.753.347.844.847.440.647.748.6
GLaMM (Rasheed et al., 2024)30.832.030.028.829.627.232.535.0
M2SA-7B71.373.367.261.865.355.862.063.6
LISA-Llama2-13B (Lai et al., 2023)33.232.632.427.729.926.744.047.1
M2SA-Llama2-13B72.075.668.062.367.156.165.465.8
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.261, + 0.825, + 0.317 + ], + "angle": 0, + "content": "Table 5: Multi-granularity referring expression segmentation results on RefCOCom (Wang et al., 2023). For a fair comparison with previous methods, the mIoU metrics are adopted. Part denotes part-only evaluation, and Obj & Part denotes multi-granularity evaluation. The best results are highlighted in bold." + }, + { + "type": "table", + "bbox": [ + 0.28, + 0.322, + 0.723, + 0.453 + ], + "angle": 0, + "content": "
MethodsvaltestAtestB
PartObj & PartPartObj & PartPartObj & Part
SeqTR (Zhu et al., 2022)13.928.212.122.818.134.7
CRIS (Wang et al., 2022)10.625.410.121.212.930.0
LAVT (Yang et al., 2022)15.329.913.224.418.735.5
X-Decoder (Zou et al., 2023)16.229.513.623.620.333.8
SEEM (Zou et al., 2024)16.129.413.623.420.433.9
UniRES (Wang et al., 2023)19.634.316.427.825.241.7
LISA-7B (Lai et al., 2023)21.334.318.528.625.740.1
GSVA-7B (Xia et al., 2024)11.423.19.219.216.828.2
GLaMM (Rasheed et al., 2024)21.435.318.629.526.941.1
M²SA-7B22.435.519.930.127.141.4
LISA-Llama2-13B (Lai et al., 2023)22.135.219.429.727.241.6
M²SA-Llama2-13B24.537.321.931.928.542.7
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.495, + 0.825, + 0.539 + ], + "angle": 0, + "content": "GSVA, and GLaMM. As shown in Tab. 4, \\(\\mathbf{M}^2\\mathbf{SA}\\) significantly outperforms these methods, showcasing its ability to reason about multiple objects simultaneously and effectively leverage its multi [SEG] tokens for diverse and intricate queries." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.544, + 0.827, + 0.63 + ], + "angle": 0, + "content": "Additionally, we evaluate \\(\\mathbf{M}^2\\mathrm{SA}\\) on RefCOCOM, a multi-granularity referring segmentation dataset. As demonstrated in Tab. 5, \\(\\mathbf{M}^2\\mathrm{SA}\\) surpasses existing methods in this task, though the performance improvement is less pronounced. This is likely because the MMR dataset does not include the person class, which constitutes a significant portion of the categories in RefCOCOM. These results emphasize the versatility and effectiveness of \\(\\mathbf{M}^2\\mathrm{SA}\\) in addressing complex, real-world scenarios, extending well beyond simple single-target segmentation tasks." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.661, + 0.321, + 0.677 + ], + "angle": 0, + "content": "6 CONCLUSION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.7, + 0.827, + 0.84 + ], + "angle": 0, + "content": "This paper addresses the limitations of current reasoning segmentation datasets, which often overlook multi-target or part-level reasoning. To resolve these issues, we introduce the Multi-target and Multi-granularity Reasoning (MMR) dataset, providing 194K comprehensive question-answer pairs that cover multi-target, object-level, and part-level aspects, enhancing diverse and context-aware interactions. We also propose the \\(\\mathbf{M}^2\\mathbf{SA}\\) model, designed for multi-target, object-level, and part-level reasoning segmentation. 
\\(\\mathbf{M}^2\\mathbf{SA}\\) incorporates early local feature fusion and multiple [SEG] tokens, improving fine-grained visual understanding and multi-target segmentation. Experimental results show that \\(\\mathbf{M}^2\\mathbf{SA}\\) outperforms existing models on the MMR benchmark. The MMR dataset aims to drive progress in reasoning segmentation by emphasizing the importance of multi-target and part-level aspects in human-AI interactions." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.872, + 0.359, + 0.887 + ], + "angle": 0, + "content": "ACKNOWLEDGMENTS" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.909, + 0.801, + 0.926 + ], + "angle": 0, + "content": "This research has been supported by the LG Electronics Corporation. (Project No. G01230381)" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.511, + 0.96 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "title", + "bbox": [ + 0.175, + 0.103, + 0.289, + 0.118 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.127, + 0.826, + 0.17 + ], + "angle": 0, + "content": "Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.181, + 0.826, + 0.237 + ], + "angle": 0, + "content": "Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katherine Millican, Malcolm Reynolds, et al. Flamingo: a visual language model for few-shot learning. Advances in neural information processing systems, 35:23716-23736, 2022." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.249, + 0.826, + 0.291 + ], + "angle": 0, + "content": "Jinze Bai, Shuai Bai, Shusheng Yang, Shijie Wang, Sinan Tan, Peng Wang, Junyang Lin, Chang Zhou, and Jingren Zhou. Qwen-vl: A frontier large vision-language model with versatile abilities. arXiv preprint arXiv:2308.12966, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.302, + 0.826, + 0.344 + ], + "angle": 0, + "content": "Holger Caesar, Jasper Uijlings, and Vittorio Ferrari. Coco-stuff: Thing and stuff classes in context. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 1209-1218, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.356, + 0.826, + 0.385 + ], + "angle": 0, + "content": "Keqin Chen, Zhao Zhang, Weili Zeng, Richong Zhang, Feng Zhu, and Rui Zhao. Shikra: Unleashing multimodal llm's referential dialogue magic. arXiv preprint arXiv:2306.15195, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.396, + 0.826, + 0.452 + ], + "angle": 0, + "content": "Xianjie Chen, Roozbeh Mottaghi, Xiaobai Liu, Sanja Fidler, Raquel Urtasun, and Alan Yuille. Detect what you can: Detecting and representing objects using holistic models and body parts. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 1971-1978, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.464, + 0.826, + 0.52 + ], + "angle": 0, + "content": "Wei-Lin Chiang, Zhuohan Li, Zi Lin, Ying Sheng, Zhanghao Wu, Hao Zhang, Lianmin Zheng, Siyuan Zhuang, Yonghao Zhuang, Joseph E Gonzalez, et al. Vicuna: An open-source chatbot impressing gpt-4 with \\(90\\%\\) chatgpt quality. See https://vicuna.lmsys.org (accessed 14 April 2023), 2(3):6, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.531, + 0.826, + 0.574 + ], + "angle": 0, + "content": "Seokju Cho, Heeseong Shin, Sunghwan Hong, Seungjun An, Seungjun Lee, Anurag Arnab, Paul Hongsuck Seo, and Seungryong Kim. 
Cat-seg: Cost aggregation for open-vocabulary semantic segmentation. arXiv preprint arXiv:2303.11797, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.585, + 0.826, + 0.642 + ], + "angle": 0, + "content": "Marius Cordts, Mohamed Omran, Sebastian Ramos, Timo Rehfeld, Markus Enzweiler, Rodrigo Benenson, Uwe Franke, Stefan Roth, and Bernt Schiele. The cityscapes dataset for semantic urban scene understanding. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 3213-3223, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.653, + 0.826, + 0.709 + ], + "angle": 0, + "content": "Wenliang Dai, Junnan Li, Dongxu Li, Anthony Meng Huat Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale N Fung, and Steven Hoi. Instructblip: Towards general-purpose vision-language models with instruction tuning. Advances in Neural Information Processing Systems, 36, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.72, + 0.826, + 0.763 + ], + "angle": 0, + "content": "Henghui Ding, Chang Liu, Suchen Wang, and Xudong Jiang. Vision-language transformer and query generation for referring segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 16321-16330, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.774, + 0.826, + 0.817 + ], + "angle": 0, + "content": "Peng Gao, Jiaming Han, Renrui Zhang, Ziyi Lin, Shijie Geng, Aojun Zhou, Wei Zhang, Pan Lu, Conghui He, Xiangyu Yue, et al. Llama-adapter v2: Parameter-efficient visual instruction model. arXiv preprint arXiv:2304.15010, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.828, + 0.826, + 0.871 + ], + "angle": 0, + "content": "Ke Gong, Xiaodan Liang, Dongyu Zhang, Xiaohui Shen, and Liang Lin. Look into person: Self-supervised structure-sensitive learning and a new benchmark for human parsing. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 932-940, 2017." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.882, + 0.826, + 0.925 + ], + "angle": 0, + "content": "Ju He, Shuo Yang, Shaokang Yang, Adam Kortylewski, Xiaoding Yuan, Jie-Neng Chen, Shuai Liu, Cheng Yang, Qihang Yu, and Alan Yuille. Partimagenet: A large, high-quality dataset of parts. In European Conference on Computer Vision, pp. 128-145. Springer, 2022." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.127, + 0.826, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.826, + 0.147 + ], + "angle": 0, + "content": "Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. Lora: Low-rank adaptation of large language models. arXiv preprint arXiv:2106.09685, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.155, + 0.826, + 0.2 + ], + "angle": 0, + "content": "Yutao Hu, Qixiong Wang, Wenqi Shao, Enze Xie, Zhenguo Li, Jungong Han, and Ping Luo. Beyond one-to-one: Rethinking the referring image segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 4067-4077, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.209, + 0.826, + 0.267 + ], + "angle": 0, + "content": "Menglin Jia, Mengyun Shi, Mikhail Sirotenko, Yin Cui, Claire Cardie, Bharath Hariharan, Hartwig Adam, and Serge Belongie. Fashionpedia: Ontology, segmentation, and an attribute localization dataset. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part I 16, pp. 316-332. Springer, 2020." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.276, + 0.826, + 0.32 + ], + "angle": 0, + "content": "Sahar Kazemzadeh, Vicente Ordonez, Mark Matten, and Tamara Berg. Referitgame: Referring to objects in photographs of natural scenes. In Proceedings of the 2014 conference on empirical methods in natural language processing (EMNLP), pp. 787-798, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.328, + 0.826, + 0.372 + ], + "angle": 0, + "content": "Alexander Kirillov, Kaiming He, Ross Girshick, Carsten Rother, and Piotr Dollár. Panoptic segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 9404-9413, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.381, + 0.826, + 0.425 + ], + "angle": 0, + "content": "Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 4015-4026, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.434, + 0.826, + 0.464 + ], + "angle": 0, + "content": "Xin Lai, Zhuotao Tian, Yukang Chen, Yanwei Li, Yuhui Yuan, Shu Liu, and Jiaya Jia. Lisa: Reasoning segmentation via large language model. arXiv preprint arXiv:2308.00692, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.473, + 0.826, + 0.503 + ], + "angle": 0, + "content": "Jianshu Li, Jian Zhao, Yunchao Wei, Congyan Lang, Yidong Li, Terence Sim, Shuicheng Yan, and Jiashi Feng. Multiple-human parsing in the wild. arXiv preprint arXiv:1705.07206, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.512, + 0.826, + 0.556 + ], + "angle": 0, + "content": "Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. In International conference on machine learning, pp. 19730–19742. PMLR, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.565, + 0.826, + 0.609 + ], + "angle": 0, + "content": "Xiangtai Li, Shilin Xu, Yibo Yang, Guangliang Cheng, Yunhai Tong, and Dacheng Tao. Panoptic-partformer: Learning a unified model for panoptic part segmentation. In European Conference on Computer Vision, pp. 729-747. Springer, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.618, + 0.826, + 0.675 + ], + "angle": 0, + "content": "Feng Liang, Bichen Wu, Xiaoliang Dai, Kunpeng Li, Yinan Zhao, Hang Zhang, Peizhao Zhang, Peter Vajda, and Diana Marculescu. Open-vocabulary semantic segmentation with mask-adapted clip. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 7061-7070, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.684, + 0.826, + 0.727 + ], + "angle": 0, + "content": "Chang Liu, Henghui Ding, and Xudong Jiang. Gres: Generalized referring expression segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 23592-23601, 2023a." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.737, + 0.826, + 0.767 + ], + "angle": 0, + "content": "Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. Advances in neural information processing systems, 36, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.776, + 0.826, + 0.82 + ], + "angle": 0, + "content": "Zhaoyang Liu, Yinan He, Wenhai Wang, Weiyun Wang, Yi Wang, Shoufa Chen, Qinglong Zhang, Yang Yang, Qingyun Li, Jiashuo Yu, et al. Internchat: Solving vision-centric tasks by interacting with chatbots beyond language. arXiv preprint arXiv:2305.05662, 2023b." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.828, + 0.826, + 0.858 + ], + "angle": 0, + "content": "Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.868, + 0.826, + 0.924 + ], + "angle": 0, + "content": "Gen Luo, Yiyi Zhou, Xiaoshuai Sun, Liujuan Cao, Chenglin Wu, Cheng Deng, and Rongrong Ji. Multi-task collaborative network for joint referring expression comprehension and segmentation. In Proceedings of the IEEE/CVF Conference on computer vision and pattern recognition, pp. 10034-10043, 2020." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.924 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.148 + ], + "angle": 0, + "content": "Junhua Mao, Jonathan Huang, Alexander Toshev, Oana Camburu, Alan L Yuille, and Kevin Murphy. Generation and comprehension of unambiguous object descriptions. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 11-20, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.154, + 0.826, + 0.198 + ], + "angle": 0, + "content": "Panagiotis Meletis, Xiaoxiao Wen, Chenyang Lu, Daan de Geus, and Gijs Dubbelman. Cityscapes-panoptic-parts and Pascal-panoptic-parts datasets for scene understanding. arXiv preprint arXiv:2004.07944, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.205, + 0.826, + 0.263 + ], + "angle": 0, + "content": "Umberto Michieli, Edoardo Borsato, Luca Rossi, and Pietro Zanuttigh. Gmnet: Graph matching network for large scale part semantic segmentation in the wild. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part VIII 16, pp. 397-414. Springer, 2020." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.27, + 0.826, + 0.327 + ], + "angle": 0, + "content": "Kaichun Mo, Shilin Zhu, Angel X Chang, Li Yi, Subarna Tripathi, Leonidas J Guibas, and Hao Su. Partnet: A large-scale benchmark for fine-grained and hierarchical part-level 3d object understanding. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 909-918, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.335, + 0.826, + 0.379 + ], + "angle": 0, + "content": "Gerhard Neuhold, Tobias Ollmann, Samuel Rota Bulo, and Peter Kontschieder. The mapillary vistas dataset for semantic understanding of street scenes. In Proceedings of the IEEE international conference on computer vision, pp. 4990-4999, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.386, + 0.826, + 0.429 + ], + "angle": 0, + "content": "Tai-Yu Pan, Qing Liu, Wei-Lun Chao, and Brian Price. Towards open-world segmentation of parts. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 15392-15401, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.437, + 0.826, + 0.467 + ], + "angle": 0, + "content": "Baolin Peng, Chunyuan Li, Pengcheng He, Michel Galley, and Jianfeng Gao. Instruction tuning with gpt-4. arXiv preprint arXiv:2304.03277, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.474, + 0.826, + 0.53 + ], + "angle": 0, + "content": "Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pp. 8748-8763. PMLR, 2021." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.538, + 0.826, + 0.596 + ], + "angle": 0, + "content": "Vignesh Ramanathan, Anmol Kalia, Vladan Petrovic, Yi Wen, Baixue Zheng, Baishan Guo, Rui Wang, Aaron Marquez, Rama Kovvuri, Abhishek Kadian, et al. Paco: Parts and attributes of common objects. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 7141-7151, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.604, + 0.826, + 0.661 + ], + "angle": 0, + "content": "Hanoona Rasheed, Muhammad Maaz, Sahal Shaji, Abdelrahman Shaker, Salman Khan, Hisham Cholakkal, Rao M Anwer, Eric Xing, Ming-Hsuan Yang, and Fahad S Khan. Glamm: Pixel grounding large multimodal model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 13009-13018, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.668, + 0.826, + 0.711 + ], + "angle": 0, + "content": "Zhongwei Ren, Zhicheng Huang, Yunchao Wei, Yao Zhao, Dongmei Fu, Jiashi Feng, and Xiaojie Jin. Pixel reasoning with large multimodal model. arXiv preprint arXiv:2312.02228, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.72, + 0.826, + 0.763 + ], + "angle": 0, + "content": "Stephan R Richter, Vibhav Vineet, Stefan Roth, and Vladlen Koltun. Playing for data: Ground truth from computer games. In Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part II 14, pp. 102-118. Springer, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.771, + 0.826, + 0.8 + ], + "angle": 0, + "content": "Konstantinos I Roumeliotis and Nikolaos D Tselikas. Chatgpt and open-ai models: A preliminary review. Future Internet, 15(6):192, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.807, + 0.826, + 0.838 + ], + "angle": 0, + "content": "Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li, Carlos Guestrin, Percy Liang, and Tatsunori B Hashimoto. 
Stanford alpaca: An instruction-following llama model, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.845, + 0.826, + 0.888 + ], + "angle": 0, + "content": "Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, et al. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.896, + 0.826, + 0.925 + ], + "angle": 0, + "content": "Catherine Wah, Steve Branson, Peter Welinder, Pietro Perona, and Serge Belongie. The caltech-ucsd birds-200-2011 dataset. 2011." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.826, + 0.147 + ], + "angle": 0, + "content": "Wenxuan Wang, Tongtian Yue, Yisi Zhang, Longteng Guo, Xingjian He, Xinlong Wang, and Jing Liu. Unveiling parts beyond objects: Towards finer-granularity referring expression segmentation. arXiv preprint arXiv:2312.08007, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.155, + 0.826, + 0.2 + ], + "angle": 0, + "content": "Zhaoqing Wang, Yu Lu, Qiang Li, Xunqiang Tao, Yandong Guo, Mingming Gong, and Tongliang Liu. Cris: Clip-driven referring image segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 11686-11695, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.207, + 0.825, + 0.251 + ], + "angle": 0, + "content": "Meng Wei, Xiaoyu Yue, Wenwei Zhang, Shu Kong, Xihui Liu, and Jiangmiao Pang. 
Ov-parts: Towards open-vocabulary part segmentation. Advances in Neural Information Processing Systems, 36, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.259, + 0.825, + 0.303 + ], + "angle": 0, + "content": "Zhuofan Xia, Dongchen Han, Yizeng Han, Xuran Pan, Shiji Song, and Gao Huang. Gsva: Generalized segmentation via multimodal large language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 3858-3869, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.311, + 0.827, + 0.356 + ], + "angle": 0, + "content": "Jilan Xu, Junlin Hou, Yuejie Zhang, Rui Feng, Yi Wang, Yu Qiao, and Weidi Xie. Learning open-vocabulary semantic segmentation models from natural language supervision. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 2935-2944, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.363, + 0.825, + 0.405 + ], + "angle": 0, + "content": "Lu Yang, Qing Song, Zhihui Wang, and Ming Jiang. Parsing r-cnn for instance-level human analysis. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 364-373, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.415, + 0.825, + 0.459 + ], + "angle": 0, + "content": "Zhao Yang, Jiaqi Wang, Yansong Tang, Kai Chen, Hengshuang Zhao, and Philip HS Torr. Lavt: Language-aware vision transformer for referring image segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 18155-18165, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.467, + 0.825, + 0.511 + ], + "angle": 0, + "content": "Haoxuan You, Haotian Zhang, Zhe Gan, Xianzhi Du, Bowen Zhang, Zirui Wang, Liangliang Cao, Shih-Fu Chang, and Yinfei Yang. Ferret: Refer and ground anything anywhere at any granularity. arXiv preprint arXiv:2310.07704, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.519, + 0.825, + 0.562 + ], + "angle": 0, + "content": "Renrui Zhang, Jiaming Han, Chris Liu, Peng Gao, Aojun Zhou, Xiangfei Hu, Shilin Yan, Pan Lu, Hongsheng Li, and Yu Qiao. Llama-adapter: Efficient fine-tuning of language models with zero-init attention. arXiv preprint arXiv:2303.16199, 2023a." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.57, + 0.825, + 0.613 + ], + "angle": 0, + "content": "Shilong Zhang, Peize Sun, Shoufa Chen, Min Xiao, Wenqi Shao, Wenwei Zhang, Kai Chen, and Ping Luo. Gpt4roi: Instruction tuning large language model on region-of-interest. arXiv preprint arXiv:2307.03601, 2023b." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.622, + 0.825, + 0.666 + ], + "angle": 0, + "content": "Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen, Christopher Dewan, Mona Diab, Xian Li, Xi Victoria Lin, et al. Opt: Open pre-trained transformer language models. arXiv preprint arXiv:2205.01068, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.674, + 0.825, + 0.718 + ], + "angle": 0, + "content": "Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zi Lin, Zhuohan Li, Dacheng Li, Eric Xing, et al. Judging llm-as-a-judge with mt-bench and chatbot arena. Advances in Neural Information Processing Systems, 36, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.726, + 0.825, + 0.77 + ], + "angle": 0, + "content": "Shuai Zheng, Fan Yang, M Hadi Kiapour, and Robinson Piramuthu. Modanet: A large-scale street fashion dataset with polygon annotations. In Proceedings of the 26th ACM international conference on Multimedia, pp. 1670-1678, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.778, + 0.825, + 0.821 + ], + "angle": 0, + "content": "Bolei Zhou, Hang Zhao, Xavier Puig, Tete Xiao, Sanja Fidler, Adela Barriuso, and Antonio Torralba. Semantic understanding of scenes through the ade20k dataset. 
International Journal of Computer Vision, 127:302-321, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.829, + 0.825, + 0.874 + ], + "angle": 0, + "content": "Tianfei Zhou, Wenguan Wang, Si Liu, Yi Yang, and Luc Van Gool. Differentiable multi-granularity human representation learning for instance-aware human semantic parsing. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 1622-1631, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.882, + 0.825, + 0.926 + ], + "angle": 0, + "content": "Chaoyang Zhu, Yiyi Zhou, Yunhang Shen, Gen Luo, Xingjia Pan, Mingbao Lin, Chao Chen, Liujuan Cao, Xiaoshuai Sun, and Rongrong Ji. Seqtr: A simple yet universal network for visual grounding. In European Conference on Computer Vision, pp. 598-615. Springer, 2022." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.827, + 0.926 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.147 + ], + "angle": 0, + "content": "Deyao Zhu, Jun Chen, Xiaoqian Shen, Xiang Li, and Mohamed Elhoseiny. Minigpt-4: Enhancing vision-language understanding with advanced large language models. arXiv preprint arXiv:2304.10592, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.155, + 0.826, + 0.211 + ], + "angle": 0, + "content": "Xueyan Zou, Zi-Yi Dou, Jianwei Yang, Zhe Gan, Linjie Li, Chunyuan Li, Xiyang Dai, Harkirat Behl, Jianfeng Wang, Lu Yuan, et al. Generalized decoding for pixel, image, and language. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 15116-15127, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.221, + 0.826, + 0.264 + ], + "angle": 0, + "content": "Xueyan Zou, Jianwei Yang, Hao Zhang, Feng Li, Linjie Li, Jianfeng Wang, Lijuan Wang, Jianfeng Gao, and Yong Jae Lee. Segment everything everywhere all at once. Advances in Neural Information Processing Systems, 36, 2024." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.264 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.103, + 0.3, + 0.119 + ], + "angle": 0, + "content": "A APPENDIX" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.135, + 0.304, + 0.149 + ], + "angle": 0, + "content": "A.1 LIMITATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.162, + 0.825, + 0.26 + ], + "angle": 0, + "content": "While PACO-LVIS provides diverse and comprehensive object-part mask annotations for common objects, it lacks information on the human class and its parts. Consequently, our question-answer pairs generated based on PACO-LVIS do not consider reasoning about human class and its parts, which is a drawback. Therefore, there is a need for future dataset expansion to include a wider range of objects and parts that exist in real-world environments. Additionally, although we carefully design the prompts to ensure the diversity and quality of the dataset, the content of the question-answer pairs is inherently dependent on the pre-trained knowledge of ChatGPT." 
+ }, + { + "type": "title", + "bbox": [ + 0.173, + 0.278, + 0.347, + 0.292 + ], + "angle": 0, + "content": "A.2 ETHICS CONCERN" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.304, + 0.825, + 0.389 + ], + "angle": 0, + "content": "The MMR dataset is constructed based on the publicly available PACO-LVIS dataset (Ramanathan et al., 2023), which helps mitigate privacy concerns. As the objects and parts within the images are already annotated, we only add text question-answer pairs, ensuring that potential privacy issues remain minimal. These question-answer pairs are generated using the ChatGPT/GPT-4V API (Achiam et al., 2023). While there is a risk of bias from the training data of the ChatGPT/GPT-4V API, we have implemented a thorough data filtering process to remove any ethically problematic content." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.406, + 0.282, + 0.42 + ], + "angle": 0, + "content": "A.3 LICENSE" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.432, + 0.825, + 0.545 + ], + "angle": 0, + "content": "We utilize the released code from LISA (Lai et al., 2023) for the baseline model code construction. Since LISA follows Apache License 2.0, our code is also licensed under Apache License 2.0. Additionally, the PACO-LVIS dataset is licensed under a Creative Commons Attribution 4.0 (CC BY 4.0) license. Consequently, our MMR dataset is also licensed under Creative Commons Attribution 4.0 (CC BY 4.0). To download the PACO-LVIS dataset (Ramanathan et al., 2023), we utilize author-released code under the MIT license. We use ChatGPT/GPT-4V API (Achiam et al., 2023) developed by OpenAI to generate the question-answer pairs in the MMR dataset. Specific licensing information for the ChatGPT/GPT-4V API model is proprietary to OpenAI." 
+ }, + { + "type": "title", + "bbox": [ + 0.173, + 0.562, + 0.515, + 0.576 + ], + "angle": 0, + "content": "A.4 THE SPECIFIC DETAILS OF CHATGPT API" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.588, + 0.825, + 0.618 + ], + "angle": 0, + "content": "The specific command to use the ChatGPT API (Achiam et al., 2023) for generating question-answer pairs in MMR is as follows:" + }, + { + "type": "code", + "bbox": [ + 0.37, + 0.637, + 0.631, + 0.732 + ], + "angle": 0, + "content": "response = open aiCompletion.create \n( \n model=\"gpt-4-vision-preview\", \n messages=prompt, \n temperature=0.7, \n max_tokens=850, \n)" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.759, + 0.825, + 0.789 + ], + "angle": 0, + "content": "Figure 5: To generate question-answer pairs in MMR dataset, we use gpt-4-vision-preview model. For the hyper-parameters, we set the temperature to 0.7 and max_tokens to 850." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.815, + 0.402, + 0.829 + ], + "angle": 0, + "content": "A.5 PROMPTS AND EXAMPLES" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.84, + 0.825, + 0.926 + ], + "angle": 0, + "content": "General MMR Dataset The MMR dataset fundamentally includes multi-target (both objects and parts) answers to each question. In this section, we discuss the full prompt not covered in the main manuscript. Fig. 6 illustrates the prompt used to generate the train, validation, and test datasets. Both text and image prompts are input into GPT-4V (Achiam et al., 2023), resulting in the creation of question-answer pairs that encompass various information about objects and parts. As shown in Fig. 2, the output includes a global caption and question-answer pairs for the image. 
The" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.103, + 0.825, + 0.161 + ], + "angle": 0, + "content": "\"You are an AI visual assistant capable of analyzing a single image. You receive the specific object locations and part locations within the image, along with detailed coordinates. These coordinates are in the form of bounding boxes, represented as (x1, y1, x2, y2). These values correspond to the top left x, top left y, bottom right x, and bottom right y. The height and width of the image you receive are 427 and 640, respectively. Additionally, there may be multiple objects of the same category in the image. To resolve this ambiguity, we use \"object_number\" such as \"person_1\" and \"person_2\" to differentiate between objects of the same category. If a region is a part of an object, the category name is described as \"object's part\", like \"person's body\" and \"bus's wheel\". 
The category names and bounding box coordinates of objects and parts are as follow:" + }, + { + "type": "image", + "bbox": [ + 0.469, + 0.169, + 0.773, + 0.327 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.167, + 0.348, + 0.329 + ], + "angle": 0, + "content": "```c\n```\nbottle_1 [459.07, 0.0, 603.49, 315.78];\nbottle_1's label [460, 105, 500, 282];\nbottle_1's neck [470, 0, 593, 62];\nbottle_1's shoulder [461, 56, 603, 103];\nbottle_1's body [460, 94, 604, 291];\nbottle_1's base [463, 287, 596, 316];\nbottle_2 [296.85, 1.15, 416.19, 242.2];\nbottle_2's base [307, 220, 400, 241];\nbottle_2's label [300, 4, 413, 176];\nbottle_2's body [307, 172, 403, 231];\nknife_1 [204.79, 2.4, 238.63, 226.53];\nknife_1's blade [213, 121, 237, 226];\nknife_1's handle [205, 2, 239, 126];\nknife_2 [304.84, 320.65, 615.34, 427.0\nknife_2's blade [305, 321, 601, 422];\nknife_2's handle [529, 399, 616, 426];" + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.34, + 0.822, + 0.406 + ], + "angle": 0, + "content": "You first need to create a global caption for the image without given information. A global caption should summarize the content of the image within a maximum of two sentences. The format for the global caption strictly follows: \"Global caption: GLOBAL_CAPTION_FOR_the_IMAGE. What you need to do next is create question-answers-pairs using the information of objects and parts given above. However, when the corresponding object and part name appear in the answers, \"name [coordinates]\" is used as the given information above without changing its form. The goal of generating the question-answers-pair is to use the provided information about objects and object's parts, create a plausible and challenging question about the image, and provide the answer in detail for the image reasoning segmentation. 
The content of the question must address one of the following two:" + }, + { + "type": "text", + "bbox": [ + 0.177, + 0.406, + 0.61, + 0.415 + ], + "angle": 0, + "content": "1) the relationship between parts within the image or the relationship between a part and an object." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.415, + 0.429, + 0.424 + ], + "angle": 0, + "content": "2) the function or the general information about the parts." + }, + { + "type": "list", + "bbox": [ + 0.177, + 0.406, + 0.61, + 0.424 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.432, + 0.822, + 0.496 + ], + "angle": 0, + "content": "The question should be implicit and require commonsense reasoning, rather than explicitly mentioning the names of the object and part. In other words, it's important to make the question challenging by not directly including visual content details. The answer should include multiple object's parts. You must build at least 3 rounds of natural question-answer pairs, and if there is sufficient information create up to 5 rounds of question-answer pairs. In addition, please follow the format strictly: The order must be attached to the questions and answers like Question 1: and Answer 1.: In the answer, the coordinates referring to the target part or object must be attached to the object name or part name in the format: object_1[x1, y1, x2, y2] and object_1's part [x1, y1, x2, y2]. Do not use other format such as \"a part of object_1\". Here are some additional requirements about generated question and answers:" + }, + { + "type": "text", + "bbox": [ + 0.177, + 0.497, + 0.765, + 0.507 + ], + "angle": 0, + "content": "1. Do not mention that the information source is provided in text description. Always answer as if you are directly looking at the image." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.507, + 0.652, + 0.515 + ], + "angle": 0, + "content": "2. Do not ask the question you are not confident to answer. 
Only include question that have definite answer." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.515, + 0.535, + 0.524 + ], + "angle": 0, + "content": "3. Do not mention the coordinates of a part and an object directly in the question." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.524, + 0.71, + 0.534 + ], + "angle": 0, + "content": "4. Make the questions and answers concise and easy to understand, avoiding overly complex and ambiguous sentences." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.534, + 0.558, + 0.543 + ], + "angle": 0, + "content": "5. The question should describe a complete activity, a function, or general information." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.543, + 0.801, + 0.561 + ], + "angle": 0, + "content": "6. The answer to the generated question should include at least two object's parts and explicitly describe the names of the part and the object. Implied other potential parts is strictly prohibited." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.561, + 0.805, + 0.579 + ], + "angle": 0, + "content": "7. Even if the image includes the real people and the brand name, or is not associated with the mentioned information, make sure to still create the question-answer pairs." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.579, + 0.817, + 0.606 + ], + "angle": 0, + "content": "8. Avoid using incorrectly formatted object names or part names, such as located at [coordinates] or a part [object_1's part [coordinates]]. In other words, use it as it appears in the object and part information given above. ### For example: shoe_1's outsole [42, 332, 62, 336], not an outsole [shoe_1's outsole [42, 332, 62, 336]]." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.606, + 0.64, + 0.616 + ], + "angle": 0, + "content": "9. All generated answers must include the given object or part information, without changing the format." 
+ }, + { + "type": "list", + "bbox": [ + 0.177, + 0.497, + 0.817, + 0.616 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.188, + 0.643, + 0.808, + 0.658 + ], + "angle": 0, + "content": "Figure 6: The text and image prompt used in our data creation for MMR dataset with GPT-4V." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.688, + 0.825, + 0.716 + ], + "angle": 0, + "content": "segmentation mask information for the objects or parts mentioned in the answers is sourced from PACO-LVIS (Ramanathan et al., 2023) to create new annotations." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.736, + 0.827, + 0.848 + ], + "angle": 0, + "content": "Part-only MMR Test Dataset The MMR dataset includes a substantial amount of information on parts to enhance part-level recognition, which has been overlooked in existing reasoning segmentation datasets. Consequently, we create a part-level test dataset to evaluate part-level recognition separately. Using the text and image prompts shown in Fig. 7, we generate a part-only test dataset from 2000 images with extensive part-level information from PACO-LVIS annotations. As shown in Fig. 8, the output includes a global caption and question-answer pairs for the image. The segmentation mask information for the parts mentioned in the answers is sourced from the PACO-LVIS test dataset to create new annotations." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.868, + 0.827, + 0.926 + ], + "angle": 0, + "content": "Object-only MMR Test Dataset To evaluate recognition separately for object-level, we create an MMR test dataset that includes only information on objects. We generate an object-only test dataset using the text and image prompts shown in Fig. 9, selecting 2000 images with minimal part-level information. As shown in Fig. 
10, the output includes a global caption and question-answer pairs for" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.238, + 0.821, + 0.294 + ], + "angle": 0, + "content": "\"You are an AI visual assistant capable of analyzing a single image. You receive the specific object's part locations within the image, along with detailed coordinates. These coordinates are in the form of bounding boxes, represented as (x1, y1, x2, y2). These values correspond to the top left x, top left y, bottom right x, and bottom right y. The height and width of the image you receive are 428 and 640, respectively. Additionally, there may be multiple objects of the same category in the image. To resolve this ambiguity, we use \"object_number\" such as \"person_1\" and \"person_2\" to differentiate between objects of the same category. If a region is a part of an object, the category name is described as \"object's part\", like \"person's body\" and \"bus's wheel\". 
The category names and bounding box coordinates of parts are as follow:" + }, + { + "type": "image", + "bbox": [ + 0.495, + 0.298, + 0.756, + 0.432 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.3, + 0.337, + 0.421 + ], + "angle": 0, + "content": "```\n```\ndog_1's eye [235, 67, 291, 100];\ndog_1's ear [324, 36, 426, 145];\ndog_1's nose [184, 98, 212, 127];\ndog_1's teeth [245, 146, 285, 171];\ndog_1's head [169, 20, 427, 202];\ndog_1's foot [337, 204, 510, 407];\ndog_1's leg [212, 95, 542, 356];\ndog_1's body [243, 20, 503, 328];\nbowl_1's rim [143, 298, 369, 378];\nbowl_1's inner_body [150, 302, 361,\nbowl_1's bottom [194, 362, 308, 376\nbowl_1's body [153, 351, 354, 422];" + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.437, + 0.819, + 0.492 + ], + "angle": 0, + "content": "You first need to create a global caption for the image without given information. A global caption should summarize the content of the image within a maximum of two sentences. The format for the global caption strictly follows: \"Global caption: GLOBAL_CAPTION_FOR_the_IMAGE. What you need to do next is create question-answers-pairs using the information of object's parts given above. However, when the corresponding object's part name appear in the answers, \"name [coordinates]\" is used as the given information above without changing its form. The goal of generating the question-answers-pair is to use the provided information about object's parts, create a plausible and challenging question about the image, and provide the answer in detail for the image reasoning segmentation. The content of the question must address one of the following two:" + }, + { + "type": "text", + "bbox": [ + 0.177, + 0.494, + 0.437, + 0.507 + ], + "angle": 0, + "content": "1) the relationship between different parts within the image. 2) the function or the general information about the parts." 
+ }, + { + "type": "text", + "bbox": [ + 0.175, + 0.518, + 0.816, + 0.572 + ], + "angle": 0, + "content": "The question should be implicit and require commonsense reasoning, rather than explicitly mentioning the names of the object and part. In other words, it's important to make the question challenging by not directly including visual content details. The answer should include multiple object's parts. You must build at least 3 rounds of natural question-answer pairs, and if there is sufficient information create up to 5 rounds of question-answer pairs. In addition, please follow the format strictly: The order must be attached to the questions and answers like Question 1: and Answer 1.: In the answer, the coordinates referring to the target part must be attached to the part name in the format: object_1's part [x1, y1, x2, y2]. Do not use other format such as \"a part of object_1. Here are some additional requirements about generated question and answers:" + }, + { + "type": "text", + "bbox": [ + 0.177, + 0.573, + 0.753, + 0.582 + ], + "angle": 0, + "content": "1. Do not mention that the information source is provided in text description. Always answer as if you are directly looking at the image." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.583, + 0.642, + 0.59 + ], + "angle": 0, + "content": "2. Do not ask the question you are not confident to answer. Only include question that have definite answer." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.591, + 0.528, + 0.6 + ], + "angle": 0, + "content": "3. Do not mention the coordinates of a part and an object directly in the question." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.601, + 0.7, + 0.609 + ], + "angle": 0, + "content": "4. Make the questions and answers concise and easy to understand, avoiding overly complex and ambiguous sentences." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.61, + 0.55, + 0.617 + ], + "angle": 0, + "content": "5. 
The question should describe a complete activity, a function, or general information." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.618, + 0.79, + 0.636 + ], + "angle": 0, + "content": "6. The answer to the generated question should include at least two object's parts and explicitly describe the names of the part. Implied other potential parts is strictly prohibited." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.637, + 0.809, + 0.654 + ], + "angle": 0, + "content": "7. Even if the image includes the real people and the brand name, or is not associated with the mentioned information, make sure to still create the question-answer pairs." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.654, + 0.81, + 0.679 + ], + "angle": 0, + "content": "8. Avoid using incorrectly formatted part names, such as located at [coordinates] or a part [object_1's part [coordinates]]. In other words, use it as it appears in the part information given above. ## For example: shoe_1's outsole [42, 332, 62, 336], not an outsole [shoe_1's outsole [42, 332, 62, 336]]." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.681, + 0.584, + 0.69 + ], + "angle": 0, + "content": "9. All generated answers must include the given part information, without changing the format." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.691, + 0.795, + 0.708 + ], + "angle": 0, + "content": "10. When creating questions, ask only questions about the object's parts given above without directly mentioning the part name in the question. Please keep in mind that other parts should not dominate the answer." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.709, + 0.817, + 0.717 + ], + "angle": 0, + "content": "11. If the number of object's parts given for an image is large enough, create a question so that each round's answer includes different object's parts." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.718, + 0.607, + 0.726 + ], + "angle": 0, + "content": "12. 
Do not create questions that are answered by parts other than the part information given above." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.727, + 0.539, + 0.735 + ], + "angle": 0, + "content": "13. If that part doesn't directly answer the question, do not mention it in the answer." + }, + { + "type": "list", + "bbox": [ + 0.177, + 0.573, + 0.817, + 0.735 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.172, + 0.767, + 0.825, + 0.794 + ], + "angle": 0, + "content": "Figure 7: The text and image prompt used in our data creation for the part-only MMR test dataset with GPT-4V." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.133 + ], + "angle": 0, + "content": "the image. The segmentation mask information for the objects mentioned in the answers is sourced from the PACO-LVIS test dataset to create new annotations." + }, + { + "type": "image", + "bbox": [ + 0.177, + 0.146, + 0.825, + 0.314 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.179, + 0.315, + 0.525, + 0.326 + ], + "angle": 0, + "content": "Caption: A dog is sitting next to a bowl, possibly after a drink of water or a meal." + }, + { + "type": "image_footnote", + "bbox": [ + 0.179, + 0.332, + 0.534, + 0.342 + ], + "angle": 0, + "content": "Question1: What features of the animal suggest that it may have just had a drink?" + }, + { + "type": "image_footnote", + "bbox": [ + 0.179, + 0.342, + 0.808, + 0.361 + ], + "angle": 0, + "content": "Answer1: The dog 1's nose [184, 98, 212, 127] seems moist, and this often indicates the dog has just been drinking. 
In addition, the presence of a bowl 1's rim [143, 298, 369, 378] next to the dog suggests that it could have contained water or food that the dog might have consumed." + }, + { + "type": "image_footnote", + "bbox": [ + 0.179, + 0.368, + 0.609, + 0.379 + ], + "angle": 0, + "content": "Question2: Considering the animal's current position, which part seems to be supporting its weight?" + }, + { + "type": "image_footnote", + "bbox": [ + 0.179, + 0.379, + 0.798, + 0.397 + ], + "angle": 0, + "content": "Answer2: The dog_1's leg [212, 95, 542, 356] and the dog_1's foot [337, 204, 510, 407] are likely bearing the most weight, considering the dog's sitting position adjacent to the bowl_1's body [153, 351, 354, 422]." + }, + { + "type": "image_footnote", + "bbox": [ + 0.179, + 0.404, + 0.661, + 0.414 + ], + "angle": 0, + "content": "Question3: If the animal were to listen for approaching sounds, which physical feature would be most engaged?" + }, + { + "type": "image_footnote", + "bbox": [ + 0.179, + 0.414, + 0.807, + 0.424 + ], + "angle": 0, + "content": "Answer3: The dog_1's ear [324, 36, 426, 145] would be most engaged in detecting sounds as ears are responsible for auditory perception in dogs." + }, + { + "type": "list", + "bbox": [ + 0.179, + 0.315, + 0.808, + 0.424 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.448, + 0.825, + 0.492 + ], + "angle": 0, + "content": "Figure 8: An example from the part-only MMR test dataset generated through the prompt in Fig. 7. This example includes information of some object's parts. The left and right pictures show the original image and part-level segmentation masks, respectively." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.523, + 0.322, + 0.536 + ], + "angle": 0, + "content": "A.6 DATA FORMAT" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.549, + 0.825, + 0.579 + ], + "angle": 0, + "content": "The MMR dataset is given in JSON format. 
The JSON file for each instance is organized as shown in Fig. 11." + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.606, + 0.825, + 0.65 + ], + "angle": 0, + "content": "Table 6: The effect of multiple [SEG] Tokens and Early Local Feature Fusion in \\(\\mathbf{M}^2\\mathbf{SA}-7\\mathbf{B}\\) on MMR benchmark. Obj & Part, Obj, and Part denote multi-granularity, object-only, and part-only evaluation settings." + }, + { + "type": "table", + "bbox": [ + 0.177, + 0.665, + 0.825, + 0.737 + ], + "angle": 0, + "content": "
multiple [SEG] TokensEarly Local Feature Fusionvaltest
Obj & PartObjPartObj & Part
gIoUcIoUgIoUcIoUgIoUcIoUgIoUcIoU
××19.431.634.741.88.013.119.527.1
×26.047.739.555.411.725.228.445.2
27.948.541.055.613.527.031.046.8
" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.772, + 0.78, + 0.8 + ], + "angle": 0, + "content": "A.7 EFFECTIVENESS OF THE MULTIPLE [SEG] TAXENS AND EARLY LOCAL FEATURE FUSION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.813, + 0.827, + 0.926 + ], + "angle": 0, + "content": "We conduct an ablation study to verify the effectiveness of the multiple [SEG] tokens and Early Local Feature Fusion proposed in \\(\\mathbf{M}^2\\mathbf{SA}\\). Tab. 6 demonstrates that merely adding multiple [SEG] tokens results in significant performance improvements in MMR evaluation benchmarks. This finding suggests that using single [SEG] tokens in the LISA is inadequate to fully capture the segmentation capability. Moreover, performance improvements are evident when Early Local Feature Fusion is incorporated. Notably, there is a substantial performance enhancement in the part-only evaluation setting of the MMR test set. This improvement likely arises because Early Layer features contain local detail information (e.g., edges or boundaries), which aids in part and fine-level segmentation." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.262, + 0.813, + 0.311 + ], + "angle": 0, + "content": "\"You are an AI visual assistant capable of analyzing a single image. You receive the specific object locations within the image, along with detailed coordinates. These coordinates are in the form of bounding boxes, represented as (x1, y1, x2, y2). These values correspond to the top left x, top left y, bottom right x, and bottom right y. The height and width of the image you receive are 333 and 500, respectively. Additionally, there may be multiple objects of the same category in the image. 
To resolve this ambiguity, we use \"object_number\" such as \"person_1\" and \"person_2\" to differentiate between objects of the same category. The category names and bounding box coordinates of objects are as follow:" + }, + { + "type": "image", + "bbox": [ + 0.538, + 0.316, + 0.755, + 0.427 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.177, + 0.363, + 0.357, + 0.392 + ], + "angle": 0, + "content": "mirror_1 [304.9, 35.42, 476.09, 146.49]; \npillow_1 [169.86, 180.73, 221.21, 229.7]; \npillow_2 [370.81, 175.9, 436.81, 231.85];" + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.436, + 0.82, + 0.491 + ], + "angle": 0, + "content": "You first need to create a global caption for the image without given information. A global caption should summarize the content of the image within a maximum of two sentences. The format for the global caption strictly follows: \"Global caption: GLOBAL_CAPTION_FOR_the_IMAGE. What you need to do next is create question-answers-pairs using the information of objects given above. However, when the corresponding object name appear in the answers, \"name [coordinates]\" is used as the given information above without changing its form. The goal of generating the question-answers-pair is to use the provided information about objects, create a plausible and challenging question about the image, and provide the answer in detail for the image reasoning segmentation. The content of the question must address one of the following two:" + }, + { + "type": "text", + "bbox": [ + 0.177, + 0.491, + 0.44, + 0.51 + ], + "angle": 0, + "content": "1) the relationship between objects within the image. \n2) the function or the general information about the objects." + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.518, + 0.818, + 0.575 + ], + "angle": 0, + "content": "The question should be implicit and require commonsense reasoning, rather than explicitly mentioning the names of the object. 
In other words, it's important to make the question challenging by not directly including visual content details. Some of the answers of the rounds should include multiple different types of objects. You must build at least 3 rounds of natural question-answer pairs, and if there is sufficient information create up to 5 rounds of question-answer pairs. In addition, please follow the format strictly: The order must be attached to the questions and answers like Question 1: and Answer 1.: In the answer, the coordinates referring to the target object must be attached to the object name in the format: object_1[x1, y1, x2, y2]. Here are some additional requirements about generated question and answers:" + }, + { + "type": "text", + "bbox": [ + 0.177, + 0.582, + 0.761, + 0.592 + ], + "angle": 0, + "content": "1. Do not mention that the information source is provided in text description. Always answer as if you are directly looking at the image." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.592, + 0.648, + 0.601 + ], + "angle": 0, + "content": "2. Do not ask the question you are not confident to answer. Only include question that have definite answer." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.601, + 0.483, + 0.61 + ], + "angle": 0, + "content": "3. Do not mention the coordinates of an object directly in the question." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.61, + 0.706, + 0.619 + ], + "angle": 0, + "content": "4. Make the questions and answers concise and easy to understand, avoiding overly complex and ambiguous sentences." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.619, + 0.555, + 0.628 + ], + "angle": 0, + "content": "5. The question should describe a complete activity, a function, or general information." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.628, + 0.82, + 0.646 + ], + "angle": 0, + "content": "6. The answer to the generated question should include at least two objects and explicitly describe the names of the object. 
Implied other potential objects is strictly prohibited." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.646, + 0.818, + 0.664 + ], + "angle": 0, + "content": "7. Even if the image includes the real people and the brand name, or is not associated with the mentioned information, make sure to still create the question-answer pairs." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.664, + 0.818, + 0.683 + ], + "angle": 0, + "content": "8. Avoid using incorrectly formatted object names, such as located at [coordinates] or an object_1 [object_1 [coordinates]]. In other words, use it as it appears in the object information given above." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.683, + 0.597, + 0.692 + ], + "angle": 0, + "content": "9. All generated answers must include the given object information, without changing the format." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.692, + 0.818, + 0.71 + ], + "angle": 0, + "content": "10. When creating questions, ask only questions about the objects given above without directly mentioning the object name in the question. Please keep in mind that other objects should not dominate the answer." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.71, + 0.775, + 0.718 + ], + "angle": 0, + "content": "11. If the number of objects given for an image is large enough, create a question so that each round's answer includes different objects.\"" + }, + { + "type": "list", + "bbox": [ + 0.177, + 0.582, + 0.82, + 0.718 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.172, + 0.737, + 0.825, + 0.765 + ], + "angle": 0, + "content": "Figure 9: The text and image prompt used in our data creation for the object-only MMR test dataset with GPT-4V." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "image", + "bbox": [ + 0.177, + 0.131, + 0.825, + 0.299 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.179, + 0.3, + 0.612, + 0.31 + ], + "angle": 0, + "content": "Caption: A cozy living room interior with a large mirror on the wall and decorative pillows on furniture." + }, + { + "type": "text", + "bbox": [ + 0.179, + 0.318, + 0.611, + 0.327 + ], + "angle": 0, + "content": "Question1: Where might someone rest their head while sitting on the furniture to gain extra comfort?" + }, + { + "type": "text", + "bbox": [ + 0.179, + 0.327, + 0.81, + 0.345 + ], + "angle": 0, + "content": "Anr: a t r comfort while sitting on the furniture." + }, + { + "type": "text", + "bbox": [ + 0.179, + 0.353, + 0.62, + 0.363 + ], + "angle": 0, + "content": "Question2: In what part of the room could someone check their appearance before leaving the house?" + }, + { + "type": "text", + "bbox": [ + 0.179, + 0.363, + 0.717, + 0.372 + ], + "angle": 0, + "content": "Answer2: Someone could check their appearance in the mirror_1 [304.9, 35.42, 476.09, 146.49], which is located on the wall." + }, + { + "type": "text", + "bbox": [ + 0.179, + 0.38, + 0.625, + 0.389 + ], + "angle": 0, + "content": "Question3: If a person were to rearrange the decorative cushions, which items would they be handling?" + }, + { + "type": "text", + "bbox": [ + 0.179, + 0.39, + 0.791, + 0.408 + ], + "angle": 0, + "content": "Answer3: If a person were to rearrange the decorative cushions, they would be handling pillow_1 [169.86, 180.73, 221.21, 229.7] and pillow_2 [370.81, 175.9, 436.81, 231.85]." 
+ }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.433, + 0.825, + 0.477 + ], + "angle": 0, + "content": "Figure 10: An example from the object-only MMR test dataset generated through the prompt in Fig. 9. This example includes information of objects. The left and right pictures show the original image and object-level segmentation masks, respectively." + }, + { + "type": "image", + "bbox": [ + 0.177, + 0.546, + 0.825, + 0.856 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.391, + 0.877, + 0.607, + 0.894 + ], + "angle": 0, + "content": "Figure 11: MMR dataset format" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "table_caption", + "bbox": [ + 0.187, + 0.114, + 0.809, + 0.13 + ], + "angle": 0, + "content": "Table 7: Comparison of computational complexity on LISA, GSVA, and GLaMM, and \\( {\\mathrm{M}}^{2}\\mathrm{{SA}} \\) ." + }, + { + "type": "table", + "bbox": [ + 0.234, + 0.133, + 0.766, + 0.238 + ], + "angle": 0, + "content": "
MethodsGPU Memory Usage (GB)TFLOPs
LISA-7B (Lai et al., 2023)30.5832.59
GSVA-7B (Xia et al., 2024)30.39203.77
GLaMM (Rasheed et al., 2024)17.14349.28
M2SA-7B30.6032.62
LISA-Llama2-13B (Lai et al., 2023)55.2056.64
M2SA-Llama2-13B55.2356.67
" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.27, + 0.333, + 0.284 + ], + "angle": 0, + "content": "A.8 TRAINING TIME" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.296, + 0.825, + 0.327 + ], + "angle": 0, + "content": "The training takes approximately 40 hours for the \\(\\mathbf{M}^2\\mathrm{SA - 7B}\\) and about 52 hours for the \\(\\mathbf{M}^2\\mathrm{SA - }\\) Llama2-13B, respectively." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.343, + 0.441, + 0.358 + ], + "angle": 0, + "content": "A.9 COMPUTATIONAL COMPLEXITY" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.37, + 0.827, + 0.496 + ], + "angle": 0, + "content": "We aim to compare the computational complexity of the proposed \\(\\mathbf{M}^2\\mathrm{SA}\\) with LISA, GSVA, and GLaMM. For this comparison, we measure GPU memory usage and TFLOPs. As shown in Tab. 7, while the addition of Early Local Feature Fusion and multiple [SEG] tokens leads to a slight increase in GPU memory usage and TFLOPs, \\(\\mathbf{M}^2\\mathrm{SA}\\) demonstrates a significant improvement in handling multiple targets and fine-grained part-level segmentation compared to LISA. However, despite these performance improvements, there is still room for enhancement from the perspective of computational efficiency. Since \\(\\mathbf{M}^2\\mathrm{SA}\\) is built upon both MLLM and SAM, it requires substantial memory resources. Future research could focus on optimizing the efficiency of the mask decoder, which predicts the final mask by integrating vision and language information." + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.52, + 0.825, + 0.564 + ], + "angle": 0, + "content": "Table 8: Multi-object referring segmentation results on GTAV and Cityscapes validation sets. We adopt mIoU metric for comparison. We evaluate the zero-shot performance of LISA, GSVA, GLaMM, and \\(\\mathbf{M}^2\\mathbf{SA}\\). The best results are highlighted in bold." 
+ }, + { + "type": "table", + "bbox": [ + 0.269, + 0.567, + 0.731, + 0.672 + ], + "angle": 0, + "content": "
MethodsGTAV-valCityscapes-val
LISA-7B (Lai et al., 2023)3.76.1
GSVA-7B (Xia et al., 2024)15.714.6
GLaMM (Rasheed et al., 2024)12.612.6
M2SA-7B35.141.3
LISA-Llama2-13B (Lai et al., 2023)2.43.4
M2SA-Llama2-13B38.244.0
" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.704, + 0.485, + 0.718 + ], + "angle": 0, + "content": "A.10 GENERALIZATION ON UNSEEN DATA" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.73, + 0.827, + 0.926 + ], + "angle": 0, + "content": "To assess \\(\\mathbf{M}^2\\mathrm{SA}\\)'s generalization to unseen data, we conduct additional experiments. Although OVPARTS (Wei et al., 2024) was recently proposed for open-vocabulary part-level segmentation using Pascal-Part (Chen et al., 2014) and ADE20K (Zhou et al., 2019), both datasets were used during training. Therefore, we evaluate the model's generalization performance using semantic segmentation datasets from driving scenes, specifically Cityscapes (Cordts et al., 2016) and GTAV (Richter et al., 2016), which were not used during training and pose a more challenging test environment. Since these datasets lack part-level mask annotations, we focus on evaluating multi-target object cases. Furthermore, we curate custom text prompts using predefined category names as they do not provide corresponding text queries. For each query, we randomly select 4 to 6 object categories from an image and create prompts such as \"Can you segment the class 1, class 2, ..., and class n?\" The model generates masks for the specified objects, and we compute the mIoU score to compare its performance with LISA. As shown in Tab. 8, \\(\\mathbf{M}^2\\mathrm{SA}\\) performs robustly even on datasets from entirely different domains. Notably, while the existing methods struggle with multi-target cases, \\(\\mathbf{M}^2\\mathrm{SA}\\) handles them effectively. 
This demonstrates that the use of multiple [SEG] tokens, combined" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.961 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.113, + 0.825, + 0.141 + ], + "angle": 0, + "content": "Table 9: Comparison between LISA-7B (Lai et al., 2023) trained on MMR dataset and LISA-7B trained on ReasonSeg (Lai et al., 2023). We measure the performance on ReasonSeg validation set" + }, + { + "type": "table", + "bbox": [ + 0.356, + 0.145, + 0.644, + 0.193 + ], + "angle": 0, + "content": "
MethodsgIoUcIoU
LISA-7B w/ ReasonSeg44.446.0
LISA-7B w/ MMR49.955.6
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.224, + 0.825, + 0.253 + ], + "angle": 0, + "content": "with early local feature fusion, enables \\(\\mathbf{M}^2\\mathbf{SA}\\) to generalize well to unseen domains by improving its ability to manage multi-target cases and fine-grained segmentation tasks." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.27, + 0.398, + 0.284 + ], + "angle": 0, + "content": "A.11 MMR AND REASONSEG" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.296, + 0.825, + 0.395 + ], + "angle": 0, + "content": "To validate the comprehensiveness and effectiveness of the MMR dataset, we conduct a comparative evaluation with ReasonSeg using the LISA-7B model. Specifically, we train the model in two configurations: one using ReasonSeg and the other using MMR instead of ReasonSeg. As shown in Tab. 9, the model trained on MMR shows superior performance on the ReasonSeg validation set than the model trained on ReasonSeg. This improvement highlights the comprehensiveness of the MMR dataset. By incorporating multi-target and part-level annotations alongside object-level data, MMR provides a more robust knowledge for addressing complex reasoning segmentation tasks." + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.418, + 0.825, + 0.474 + ], + "angle": 0, + "content": "Table 10: Performance of M2SA on frequently appearing and infrequently appearing object categories. From the total of 75 categories, question-answer pairs containing the top 10 most frequent (upper) and bottom 10 least frequent (lower) categories are extracted to construct the upper and lower subsets, respectively." + }, + { + "type": "table", + "bbox": [ + 0.274, + 0.478, + 0.73, + 0.539 + ], + "angle": 0, + "content": "
MethodsMMR test
Obj-only (total)Obj-only (upper)Obj-only (lower)
gIoUcIoUgIoUcIoUgIoUcIoU
M2SA-7B41.055.641.054.839.439.7
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.575, + 0.825, + 0.632 + ], + "angle": 0, + "content": "Table 11: Performance of M2SA on frequently appearing and infrequently appearing part categories. From the total of 445 categories, question-answer pairs containing the top 10 most frequent (upper) and bottom 10 least frequent (lower) categories are extracted to construct the upper and lower subsets, respectively." + }, + { + "type": "table", + "bbox": [ + 0.274, + 0.636, + 0.729, + 0.695 + ], + "angle": 0, + "content": "
MethodsMMR test
Part-only (total)Part-only (upper)Part-only (lower)
gIoUcIoUgIoUcIoUgIoUcIoU
M2SA-7B13.527.012.824.813.328.1
" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.724, + 0.614, + 0.739 + ], + "angle": 0, + "content": "A.12 ANALYSIS OF THE LONG-TAIL PHENOMENON IN M2SA" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.75, + 0.826, + 0.862 + ], + "angle": 0, + "content": "To investigate whether \\(\\mathbf{M}^2\\mathrm{SA}\\) trained on the MMR dataset exhibits a long-tail phenomenon, we evaluate its performance on frequently and infrequently occurring object and part categories. To this end, we construct subsets of the MMR test set by isolating question-answer pairs based on category frequency. Specifically, we extract the top 10 most frequent (upper) and bottom 10 least frequent (lower) categories for both object-only and part-only test sets. This results in four subsets: object-only (upper: 10/75), object-only (lower: 10/75), part-only (upper: 10/445), and part-only (lower: 10/445). The MMR dataset includes a total of 75 object categories and 445 part categories, respectively. The performance comparison is shown in Tab. 10 and Tab. 11." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.869, + 0.825, + 0.926 + ], + "angle": 0, + "content": "For the object-only dataset, \\(\\mathbf{M}^2\\mathbf{SA}\\)'s performance on frequently occurring (upper) object categories closely aligns with its overall performance across all object categories (gIoU: 41.0, cIoU: 54.8 vs. gIoU: 41.0, cIoU: 55.6). However, for infrequent object categories (lower), the performance declines, with cIoU dropping from 55.6 to 39.7 and gIoU from 41.0 to 39.4. 
In contrast, for the" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "23" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.162 + ], + "angle": 0, + "content": "part-only dataset, \\(\\mathbf{M}^2\\mathrm{SA}\\) demonstrates consistent performance across both frequent and infrequent categories. The gIoU scores are 12.8 (upper), 13.3 (lower), and 13.5 (overall), while the cIoU scores are 24.8 (upper), 28.1 (lower), and 27.0 (overall). These findings suggest that \\(\\mathbf{M}^2\\mathrm{SA}\\) is less sensitive to the long-tail distribution in part categories than in object categories." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.167, + 0.825, + 0.239 + ], + "angle": 0, + "content": "This analysis highlights the strengths and limitations of \\(\\mathbf{M}^2\\mathrm{SA}\\) when addressing long-tail distributions. While \\(\\mathbf{M}^2\\mathrm{SA}\\) demonstrates robust performance across frequent and infrequent part categories, its reduced performance on infrequent object categories indicates potential areas for improvement. Future work could explore strategies to mitigate the impact of long-tail distributions in object categories while preserving its strengths in part-level reasoning tasks." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.255, + 0.393, + 0.27 + ], + "angle": 0, + "content": "A.13 QUALITATIVE RESULTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.28, + 0.825, + 0.296 + ], + "angle": 0, + "content": "Qualitative results of \\(\\mathbf{M}^2\\mathbf{SA}\\) on the MMR benchmark are visualized in Fig. 12, Fig. 13, and Fig. 14." 
+ }, + { + "type": "image", + "bbox": [ + 0.184, + 0.314, + 0.816, + 0.521 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.189, + 0.535, + 0.242, + 0.576 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.251, + 0.538, + 0.72, + 0.577 + ], + "angle": 0, + "content": "Question: If someone wants to send an e-mail, which equipments on the desk would they be utilizing?" + }, + { + "type": "image", + "bbox": [ + 0.197, + 0.596, + 0.242, + 0.64 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.255, + 0.599, + 0.787, + 0.657 + ], + "angle": 0, + "content": "Answer: They would be utilizing the laptop_computer for typing and viewing the screen, and the mouse for navigating and interacting with the computer." + }, + { + "type": "image_caption", + "bbox": [ + 0.312, + 0.682, + 0.685, + 0.697 + ], + "angle": 0, + "content": "Figure 12: Qualitative result of \\(\\mathbf{M}^2\\mathrm{SA}\\) on MMR test set." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "24" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "image_caption", + "bbox": [ + 0.308, + 0.117, + 0.378, + 0.137 + ], + "angle": 0, + "content": "Image" + }, + { + "type": "image", + "bbox": [ + 0.184, + 0.141, + 0.816, + 0.326 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.194, + 0.339, + 0.241, + 0.379 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.251, + 0.34, + 0.807, + 0.38 + ], + "angle": 0, + "content": "Question: Where could someone sit while waiting for transportation, and which part provides support for their back?" 
+ }, + { + "type": "image", + "bbox": [ + 0.197, + 0.399, + 0.241, + 0.442 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.255, + 0.4, + 0.761, + 0.44 + ], + "angle": 0, + "content": "Answer: The bench's seat provides a place to sit, and the bench's back offers support for the back" + }, + { + "type": "image_caption", + "bbox": [ + 0.312, + 0.484, + 0.685, + 0.5 + ], + "angle": 0, + "content": "Figure 13: Qualitative result of \\(\\mathbf{M}^2\\mathbf{SA}\\) on MMR test set." + }, + { + "type": "image_caption", + "bbox": [ + 0.308, + 0.531, + 0.377, + 0.551 + ], + "angle": 0, + "content": "Image" + }, + { + "type": "image", + "bbox": [ + 0.184, + 0.558, + 0.816, + 0.743 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.194, + 0.753, + 0.241, + 0.793 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.251, + 0.754, + 0.783, + 0.793 + ], + "angle": 0, + "content": "Question: If I need to check how much time is left before my meal is ready, which part of this appliance should I look at?" + }, + { + "type": "image", + "bbox": [ + 0.198, + 0.813, + 0.241, + 0.857 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.255, + 0.815, + 0.7, + 0.854 + ], + "angle": 0, + "content": "Answer: You should look at the microwave_oven's time_display to check the remaining time." + }, + { + "type": "image_caption", + "bbox": [ + 0.312, + 0.898, + 0.685, + 0.914 + ], + "angle": 0, + "content": "Figure 14: Qualitative result of \\(\\mathbf{M}^2\\mathrm{SA}\\) on MMR test set." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "25" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.105, + 0.471, + 0.119 + ], + "angle": 0, + "content": "A.14 ADDITIONAL EXAMPLES OF MMR" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.13, + 0.828, + 0.16 + ], + "angle": 0, + "content": "To facilitate a quick and intuitive understanding of the MMR dataset's characteristics, we present additional examples in Figure 15." + }, + { + "type": "image", + "bbox": [ + 0.306, + 0.182, + 0.688, + 0.294 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.235, + 0.295, + 0.581, + 0.304 + ], + "angle": 0, + "content": "Global Caption: A laptop is opened and set on a table next to a computer mouse, suggesting a typical workspace setup." + }, + { + "type": "image_footnote", + "bbox": [ + 0.235, + 0.307, + 0.564, + 0.316 + ], + "angle": 0, + "content": "Question1: If one were to begin typing a document, which two areas of this device would they interact with first?" + }, + { + "type": "image_footnote", + "bbox": [ + 0.235, + 0.316, + 0.747, + 0.329 + ], + "angle": 0, + "content": "Answer: They would primarily interact with the laptop_computer_1's keyboard [195, 276, 418, 325] to type and laptop_computer_1's touchpad [113, 290, 231, 312] to navigate within the document." + }, + { + "type": "image_footnote", + "bbox": [ + 0.235, + 0.334, + 0.498, + 0.342 + ], + "angle": 0, + "content": "Question2: Where can one find the manufacturer's branding on the devices pictured here?" 
+ }, + { + "type": "image_footnote", + "bbox": [ + 0.235, + 0.342, + 0.756, + 0.35 + ], + "angle": 0, + "content": "Answer2: The manufacturer's branding can be found on the laptop_computer_1's logo [354, 281, 370, 288] and on the mouse_(computer_equipment)_1's logo [314, 403, 345, 416]." + }, + { + "type": "image_footnote", + "bbox": [ + 0.235, + 0.354, + 0.613, + 0.362 + ], + "angle": 0, + "content": "Question3: To move the cursor on the screen without touching the laptop, which part of the computer equipment would one use?" + }, + { + "type": "image_footnote", + "bbox": [ + 0.235, + 0.362, + 0.76, + 0.375 + ], + "angle": 0, + "content": "Answer3: One would use the mouse_(computer_equipment)_1's body [260, 379, 516, 477] along with either the mouse_(computer_equipment)_1's left button [413, 380, 480, 401] or mouse_(computer_equipment)_1's right button [451, 393, 519, 429] to click and interact with the cursor on the screen." + }, + { + "type": "image_footnote", + "bbox": [ + 0.235, + 0.38, + 0.563, + 0.388 + ], + "angle": 0, + "content": "Question4: After finishing work and deciding to pack up, which two parts of the laptop would come into contact?" + }, + { + "type": "image_footnote", + "bbox": [ + 0.235, + 0.388, + 0.72, + 0.395 + ], + "angle": 0, + "content": "Answer4: When closing the laptop, laptop_computer_1's screen [295, 34, 510, 305] would come into contact with laptop_computer_1's base-panel [77, 271, 479, 352]." + }, + { + "type": "list", + "bbox": [ + 0.235, + 0.295, + 0.76, + 0.395 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.306, + 0.417, + 0.688, + 0.529 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.239, + 0.53, + 0.595, + 0.538 + ], + "angle": 0, + "content": "Global Caption: A plate with a slice of quiche and a side of home fries is ready to be eaten, with a knife resting on the side." 
+ }, + { + "type": "image_footnote", + "bbox": [ + 0.239, + 0.544, + 0.59, + 0.551 + ], + "angle": 0, + "content": "Question1: During a meal, what would you typically use to cut a portion of food and how is it structured for ease of use?" + }, + { + "type": "image_footnote", + "bbox": [ + 0.239, + 0.551, + 0.754, + 0.565 + ], + "angle": 0, + "content": "Answer 1: You would typically use a knife_1 [10.27, 86.49, 258.23, 115.61] to cut a portion of food. It is structured with a knife_1's blade [10, 92, 150, 115] for slicing through food and a knife_1's handle [150, 87, 254, 109] to provide a comfortable grip for handling." + }, + { + "type": "list", + "bbox": [ + 0.239, + 0.544, + 0.754, + 0.565 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.239, + 0.569, + 0.605, + 0.576 + ], + "angle": 0, + "content": "Question2: If I wanted to contain a main dish and sides separately on a table, what items could effectively serve this purpose?" + }, + { + "type": "image_footnote", + "bbox": [ + 0.239, + 0.577, + 0.752, + 0.597 + ], + "angle": 0, + "content": "Answer2: To contain a main dish and sides separately, you could use plate_1[33.38, 74.25, 640.0, 480.0], which has a plate_1's inner_wall [33, 75, 639, 479] to hold the food and prevent it from spilling, and a separate plate_2[0.0, 23.86, 145.25, 200.7] with plate_2's inner_wall [0, 28, 141, 190] and plate_2's rim [0, 24, 145, 201] to hold another portion, like sides or appetizers." + }, + { + "type": "image_footnote", + "bbox": [ + 0.239, + 0.603, + 0.53, + 0.61 + ], + "angle": 0, + "content": "Question3: What part of the tableware should one be cautious of to avoid spills while serving food?" 
+ }, + { + "type": "image_footnote", + "bbox": [ + 0.239, + 0.61, + 0.754, + 0.623 + ], + "angle": 0, + "content": "Answer 3: One should be cautious of the plate_1's inner_wall [33, 75, 639, 479] of a plate_1 [33.38, 74.25, 640.0, 480.0] and the plate_2's inner_wall [0, 28, 141, 190] of a plate_2 [0.0, 23.86, 145.25, 200.7] to avoid spills, as these parts help to contain the food within the boundaries of the plates." + }, + { + "type": "list", + "bbox": [ + 0.239, + 0.569, + 0.754, + 0.623 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.286, + 0.642, + 0.713, + 0.754 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.243, + 0.755, + 0.412, + 0.763 + ], + "angle": 0, + "content": "Global Caption: A dog wearing a hat is resting on a pillow." + }, + { + "type": "image_footnote", + "bbox": [ + 0.243, + 0.769, + 0.568, + 0.776 + ], + "angle": 0, + "content": "Question1: Where would this animal most likely register scents and how would it express alertness or curiosity?" + }, + { + "type": "image_footnote", + "bbox": [ + 0.243, + 0.777, + 0.75, + 0.789 + ], + "angle": 0, + "content": "Answer: This animal would most likely register scents using its dog_1's nose [175, 206, 221, 246], and express alertness or curiosity by adjusting the position of its dog_1's ear [329, 101, 398, 212] and dog_1's head [175, 92, 397, 280]." + }, + { + "type": "image_footnote", + "bbox": [ + 0.243, + 0.795, + 0.482, + 0.803 + ], + "angle": 0, + "content": "Question2: Can you describe the area that supports the dog while it's lying down?" + }, + { + "type": "image_footnote", + "bbox": [ + 0.243, + 0.803, + 0.75, + 0.816 + ], + "angle": 0, + "content": "Answer 2: The area that supports the dog while it's lying down is [218, 202, 514, 374], particularly emphasized where the dog_1's leg [174, 326, 520, 397] and dog_1's foot [146, 373, 331, 426] make contact with the pillow_1 [5.32, 268.85, 632.27, 427.0]." 
+ }, + { + "type": "image_footnote", + "bbox": [ + 0.243, + 0.821, + 0.568, + 0.828 + ], + "angle": 0, + "content": "Question3: When this canine looks around its environment, what are the primary features involved in its vision?" + }, + { + "type": "image_footnote", + "bbox": [ + 0.243, + 0.828, + 0.751, + 0.841 + ], + "angle": 0, + "content": "Answer3: The primary features involved in this canine's vision when looking around its environment are the dog 1's eye [201, 145, 294, 177] and the movement of the dog 1's head [175, 92, 397, 280]." + }, + { + "type": "image_footnote", + "bbox": [ + 0.243, + 0.848, + 0.59, + 0.855 + ], + "angle": 0, + "content": "Question4: In the case of this dog getting up from its resting position, which parts would engage initially to lift its body?" + }, + { + "type": "image_footnote", + "bbox": [ + 0.243, + 0.855, + 0.754, + 0.868 + ], + "angle": 0, + "content": "Answer: To get up from its resting position, the dog would initially engage its dog_1's leg [174, 326, 520, 397] and dog_1's foot [146, 373, 331, 426] to lift its dog_1's body [218, 202, 514, 374]." + }, + { + "type": "list", + "bbox": [ + 0.243, + 0.755, + 0.754, + 0.868 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.333, + 0.899, + 0.664, + 0.915 + ], + "angle": 0, + "content": "Figure 15: Additional Examples of MMR dataset." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "26" + } + ] +] \ No newline at end of file diff --git a/data/2025/2503_13xxx/2503.13881/f8ea68e7-ed15-4c1e-a85c-7872bf8b0c7c_origin.pdf b/data/2025/2503_13xxx/2503.13881/f8ea68e7-ed15-4c1e-a85c-7872bf8b0c7c_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..40f96df4c1253990e13bf6692f5c6233236579b0 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13881/f8ea68e7-ed15-4c1e-a85c-7872bf8b0c7c_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f2718eca6c16b815d6dbad7aa078ee4e15f3f0f30007c423530b7846d9f98a5c +size 13625230 diff --git a/data/2025/2503_13xxx/2503.13881/full.md b/data/2025/2503_13xxx/2503.13881/full.md new file mode 100644 index 0000000000000000000000000000000000000000..68a9a13c38c0f80a136f5522fc18edd59b55ac1c --- /dev/null +++ b/data/2025/2503_13xxx/2503.13881/full.md @@ -0,0 +1,578 @@ +# MMR: A LARGE-SCALE BENCHMARK DATASET FOR MULTI-TARGET AND MULTI-GRANULARITY REASONING SEGMENTATION + +Donggon Jang* Yucheol Cho* Suin Lee Taehyeon Kim Dae-Shik Kim† +Department of Electrical Engineering, KAIST + +{jdg900,yc_cho,suinlee,rlaxogus0814,daeshik}@kaist.ac.kr + +# ABSTRACT + +The fusion of Large Language Models (LLMs) with vision models is pioneering new possibilities in user-interactive vision-language tasks. A notable application is reasoning segmentation, where models generate pixel-level segmentation masks by comprehending implicit meanings in human instructions. However, seamless human-AI interaction demands more than just object-level recognition; it requires understanding both objects and the functions of their detailed parts, particularly in multi-target scenarios. For example, when instructing a robot to "turn on the TV", there could be various ways to accomplish this command. 
Recognizing multiple objects capable of turning on the TV, such as the TV itself or a remote control (multi-target), provides more flexible options and aids in finding the optimized scenario. Furthermore, understanding specific parts of these objects, like the TV's button or the remote's button (part-level), is important for completing the action. Unfortunately, current reasoning segmentation datasets predominantly focus on a single target object-level reasoning, which limits the detailed recognition of an object's parts in multi-target contexts. To address this gap, we construct a large-scale dataset called Multi-target and Multi-granularity Reasoning (MMR). MMR comprises 194K complex and implicit instructions that consider multi-target, object-level, and part-level aspects, based on pre-existing image-mask sets. This dataset supports diverse and context-aware interactions by hierarchically providing object and part information. Moreover, we propose a straightforward yet effective framework for multi-target, object-level, and part-level reasoning segmentation. Experimental results on MMR show that the proposed method can reason effectively in multi-target and multi-granularity scenarios, while the existing reasoning segmentation model still has room for improvement. The dataset is available at https://github.com/jdg900/MMR. + +# 1 INTRODUCTION + +Human-machine interaction is a key focus in AI for real-world applications, driving interest in multimodal perception models that integrate vision and language modalities. The model perceives the context within the image related to explicit text query inputs and predicts pixel-level masks or bounding boxes accordingly. For example, Open Vocabulary Segmentation (OVS) (Liang et al., 2023; Cho et al., 2023; Xu et al., 2023), leveraging models like CLIP (Radford et al., 2021), generates segmentation masks from open-set text categories. 
Similarly, Referring Expression Segmentation (RES) (Wang et al., 2023; Hu et al., 2023; Liu et al., 2023a; Yang et al., 2022) predicts the segmentation mask corresponding to the objects referenced by the text input within the image. However, these models encounter challenges with implicit and complex text queries, limiting their effectiveness in real-world scenarios. + +The emergence of Large Language Models (LLMs) (Zheng et al., 2024; Roumeliotis & Tselikas, 2023; Achiam et al., 2023; Zhang et al., 2023a) offers promising solutions to this challenge. Recent + +studies (Bai et al., 2023; Li et al., 2023; Liu et al., 2024; Zhu et al., 2023; Zhang et al., 2023b; Chen et al., 2023; You et al., 2023) have witnessed that multimodal LLMs with superior reasoning capabilities can effectively perform vision tasks when given implicit text inputs. However, current multimodal LLMs primarily provide information corresponding to images or regions in text form, lacking pixel-level mask generation. + +To address these limitations, LISA (Lai et al., 2023) introduces reasoning segmentation. Unlike previous tasks that rely on explicit text (e.g., "steak"), reasoning segmentation handles implicit queries that require intricate reasoning or world knowledge (e.g., "the food with most protein"), by combining LLMs with the Segment Anything Model (SAM) (Kirillov et al., 2023) that has robust mask generation capabilities. LISA also introduces ReasonSeg, a benchmark dataset for reasoning segmentation. ReasonSeg consists of 1,218 image-instruction pairs, each containing implicit text question-answer pairs that involve complex reasoning for each image. Nevertheless, ReasonSeg has two limitations: 1) It does not adequately address scenarios involving multiple targets, and 2) it primarily focuses on object-level reasoning, treating part-level targets ambiguously. 
Although the recently proposed MUSE dataset by PixelLM (Ren et al., 2023) addresses multi-target object-level reasoning, it does not consider part-level reasoning. These observations underscore that current datasets for reasoning segmentation overlook the complexities of multiple targets and part-level scenarios, concentrating instead solely on object-level reasoning. This limitation restricts more advanced functionalities in reasoning segmentation. + +In this paper, we introduce a Multi-target and Multi-granularity Reasoning segmentation (MMR) dataset to overcome these limitations, which covers both multiple targets and fine-grained part-level reasoning. We collect image and mask annotations from the publicly available PACO-LVIS dataset (Ramanathan et al., 2023). These annotations include class names and bounding box information of objects and parts. Then, inspired by LLaVA (Liu et al., 2024), we generate intricate question-answer pairs using the GPT-4V API (Achiam et al., 2023). Through this, the MMR dataset contains a vast collection of 194K complex and implicit instructions for comprehensive reasoning segmentation. A distinguishing characteristic of the proposed MMR dataset is its ability to handle multiple objects and diverse parts in the question-answer pairs. This diverse granularity enables models to reason and comprehend complex questions about both multiple target objects and their parts within a single query, providing more meaningful and high-quality masks. + +Moreover, we propose a simple yet effective model, Multi-target and Multi-granularity Segmentation Assistant (M²SA), for multi-target, object-level, and part-level reasoning segmentation. The M²SA model incorporates an early local feature fusion and multiple [SEG] tokens, which enables the model to enhance fine-grained visual understanding and consider multi-target segmentation. 
Experimental results on benchmarks, such as MMR, single-target referring expression segmentation datasets, and a multi-granularity referring expression segmentation dataset, demonstrate that M²SA outperforms existing state-of-the-art methods. We believe that our dataset and model serve as a valuable resource for potential applications in real-world reasoning segmentation tasks, offering enhanced versatility and robustness. + +Our contributions are summarized as follows: + +- We construct the MMR dataset, which includes 194K complex and implicit question pairs for multi-target, object-level, and part-level reasoning segmentation. This dataset facilitates advanced reasoning segmentation tasks in open-world scenarios. +- We propose $\mathbf{M}^2\mathbf{SA}$ for multi-target, object-level, and part-level reasoning segmentation. It incorporates an early local feature fusion and multiple [SEG] tokens to improve fine-grained visual understanding and segment multiple targets. +- Experimental results on MMR and other benchmarks show that $\mathbf{M}^2\mathbf{SA}$ outperforms state-of-the-art methods, validating the effectiveness of its components. + +# 2 RELATED WORK + +Multimodal Large Language Models Recent advancements (Peng et al., 2023; Taori et al., 2023; Touvron et al., 2023; Zhang et al., 2022) in multimodal Large Language Models (LLMs) have greatly improved the integration between language models and vision tasks by comprehensively understanding and recognizing multiple modalities. Recently proposed models such as BLIP-2 (Li et al., 2023), + +Flamingo (Alayrac et al., 2022), MiniGPT-4 (Zhu et al., 2023), llama-adapter (Gao et al., 2023; Zhang et al., 2023a), LLaVA (Liu et al., 2024), InstructBLIP (Dai et al., 2024), InternGPT (Liu et al., 2023b), and QwenVL (Bai et al., 2023) have shown superiority at multimodal tasks such as visual question-answering and captioning, leveraging the multimodal understanding capability of LLMs. 
While these methods have demonstrated improved performance in vision-language tasks through instructional tuning, they only provide the text output about the visual target and focus on a holistic understanding of global information in the image. Therefore, their applicability is limited in tasks requiring finer-grained understanding at the pixel level. + +Reasoning Segmentation The task of reasoning segmentation, introduced by LISA (Lai et al., 2023), is understanding implicit text instruction and providing a corresponding mask for the answer. This task is more challenging and important than the referring expression segmentation task which deals with explicit and simple text queries. For instance, when a user wants to segment a pepper in an image, handling an implicit query like 'the food with a spicy taste' instead of a direct reference such as 'the pepper' is significant for improving human-AI interaction. To tackle this, LISA introduces ReasonSeg, a benchmark containing implicit text queries that require complex reasoning for each image. Recently, PixelLM (Ren et al., 2023), has addressed the limitation of ReasonSeg which considers only a single target in a query text. PixelLM constructs MUSE, a new dataset with multiple target objects in the text instructions. However, both studies are still limited to object-level reasoning segmentation. Methods such as GSVA (Xia et al., 2024) and GLaMM (Rasheed et al., 2024) have also been proposed, but they focus on frameworks for object-level reasoning segmentation rather than introducing new datasets. In this paper, we extend these existing tasks and propose a new benchmark dataset that considers both part-level and object-level reasoning. + +Part-level Segmentation Recent research (Li et al., 2022; Kirillov et al., 2019; Michieli et al., 2020; Zhou et al., 2021; Pan et al., 2023) has delved into a fine-grained understanding of objects at the part-level. 
For the part-level visual understanding, datasets with detailed annotations for each part are required. To this end, some initial studies (Gong et al., 2017; Li et al., 2017; Yang et al., 2019; Wah et al., 2011; Jia et al., 2020; Zheng et al., 2018) have introduced datasets with part-level masks on specific domains, such as human body parts (Gong et al., 2017; Li et al., 2017; Yang et al., 2019), bird parts (Wah et al., 2011), and fashion cloth parts (Jia et al., 2020; Zheng et al., 2018). Moreover, recognizing the need for annotations on general objects, some approaches (Chen et al., 2014; Mo et al., 2019; He et al., 2022; Zhou et al., 2019; Meletis et al., 2020; Ramanathan et al., 2023; Wei et al., 2024) have extended the existing object-level datasets by including more fine-grained annotations. Furthermore, there has been an attempt (Wang et al., 2023) to extend the previous Referring Expression Segmentation (RES) task to provide part-level segmentation masks matching explicit text queries. In line with this effort, our work introduces a new dataset that includes multiple target parts and diverse implicit text queries for multi-granularity reasoning segmentation. + +# 3 MMR DATASET + +Current publicly available datasets for reasoning segmentation primarily emphasize object-level reasoning. Consequently, Multimodal Large Language Models (MLLMs) often struggle with questions that involve multiple targets or require reasoning at both the object- and part-levels. To address these limitations, we introduce the Multi-target and Multi-granularity Reasoning (MMR) dataset. MMR includes multi-target, object-level, and part-level reasoning scenarios. This dataset comprises images and masks from the publicly available PACO dataset (Ramanathan et al., 2023), supplemented with implicit and complex question-answer pairs generated by the GPT-API (Achiam et al., 2023). 
Unlike existing datasets, MMR includes large-scale question-answer pairs that consider multiple target cases and require reasoning at both the object- and part-levels, enhancing its versatility and applicability. In the following sections, we detail the dataset generation process (Sec. 3.1), describe the data filtering process (Sec. 3.2), provide a statistical analysis of MMR (Sec. 3.3), and highlight its distinctiveness compared to existing datasets (Sec. 3.4). + +# 3.1 DATA GENERATION + +To generate a multi-target, object-level, and part-level reasoning segmentation dataset, we leverage the PACO-LVIS dataset (Ramanathan et al., 2023). PACO-LVIS includes 456 object-specific part + +![](images/e98e400479af6f6c5ba0198fac9f6a5a8eabd87b74f6f8c3931fa032e5554076.jpg) +Figure 1: The prompt used in our data creation process with GPT-4V. + +classes across 75 object categories, offering 502K part-level masks and bounding boxes annotated across 273K object-level masks and bounding boxes. By utilizing these comprehensive images and multi-granularity mask annotations, we can reduce annotation costs while ensuring detailed and accurate segmentation data. To create intricate and implicit question-answer pairs for multiple target and multi-granularity reasoning, we employ a GPT-assisted data generation scheme similar to LLaVA (Liu et al., 2024). Specifically, we adopt GPT-4V API which has robust visual understanding capabilities. Fig. 1 illustrates the entire data generation process. + +To guide the GPT-4V API effectively, we carefully craft prompts that include GPT role, object and part information, task prompts, and requirements. GPT role defines the persona of the GPT-4V API, informing it about the context and objectives of the data generation process. Object & part information provides comprehensive annotations, such as object and part names within the image and their corresponding bounding box coordinates. 
Task prompt informs the GPT-4V API about the task definition and considerations for generating question-answer pairs. Requirements set the rules and patterns that the GPT-4V API should follow when generating question-answer pairs (e.g., "questions should avoid direct mention of coordinates of objects or parts" or "Q&A pairs should contain multiple objects or parts"). Please see the Appendix A.5 for the detailed prompt. + +The GPT-4V-assisted data generation follows a two-step process: 1) Global Caption Generation: GPT-4V API first generates a global caption based on the image to foster a deep understanding of its context. 2) Question-Answer Pair Generation: Leveraging this global caption along with object and part information, GPT-4V autonomously crafts multi-target, multi-granularity question-answer pairs. Carefully designed prompts and a two-step generation process enable GPT-4V to deeply comprehend image context and generate contextually relevant question-answer pairs. + +# 3.2 DATA FILTERING + +Despite meticulously crafted prompts for guiding GPT-4V, occasional deviations from established rules result in the generation of subpar question-answer pairs. These deviations include questions that reveal explicit target coordinates or provide overly direct hints, as well as answers that offer irrelevant information or omit essential details. To enhance the reliability of the question-answer pairs in our dataset, a rigorous filtering process is essential. Therefore, we engage four skilled human inspectors to review the dataset according to strict criteria: + +- Logicality and Reasoning: Questions should avoid explicit target coordinates or strong hints. Non-compliant questions and their corresponding answers are removed. For example, a question like "Which part of this animal [coordinates] uses its sense of smell?" would be excluded. 
+- Coherence and Relevance: Answers lacking essential target information or containing irrelevant details are corrected for precision and relevance. This includes cases where answers mention objects or parts not provided in the annotations. + +Figure 2: An example from the MMR dataset generated through our data creation process. The left and right pictures show the object- and part-level segmentation masks, respectively. +![](images/3a43e4506a5918098ff444f068e5cb49842f6b8b3de45cc8ae4635bc58eb33b9.jpg) +Caption: A knife is inserted vertically into a sandwich on a cutting board, with another knife lying beside it and bottles in the background. + +![](images/dba0b0f92114ba306981b6085c8e195b4cca087c05a93e961e6e03f14f63fb7c.jpg) +Question1: What item on the table is designed to be held at one end while the other end is meant for cutting through food? Answer1: The knife_1 [204.79, 2.4, 238.63, 226.53] is designed with a knife_1's handle [205, 2, 239, 126] to be held while the knife_1's blade [213, 121, 237, 226] is meant for cutting through food. +Question2: Which object on the table appears to be in the process of being used to keep a sandwich upright? Answer2: The knife_2 [304.84, 320.65, 615.34, 427.0] with its knife_2's blade [305, 321, 601, 422] inserted into the sandwich is being used to keep it upright. +Question3: If I wanted to read the product information on a container in view, which part should I look at? Answer3: To read the product information, you should look at the bottle_1's label [460, 105, 500, 282] or the bottle_2's label [300, 4, 413, 176]. +Question4: Which objects in the scene are meant to contain liquids, and what part of them touches the surface they rest on? Answer4: The objects meant to contain liquids are bottle_1[459.07, 0.0, 603.49, 315.78] and bottle_2[296.85, 1.15, 416.19, 242.2]. The part that touches the surface they rest on is bottle_1's base [463, 287, 596, 316] and bottle_2's base [307, 220, 400, 241]. 
+ +- Clarity and Precision: Questions and answers should be clear, concise, and free of ambiguity. For example, ill-defined data, such as asking about the function of an object or part from a segmentation perspective, is removed (e.g., "What is the function of object_1?"). Answers should provide precise information that directly addresses the question without causing confusion. + +Originally, 222K question-answer pairs are generated. Of these, $12.6\%$ are filtered out through a review process conducted by the four inspectors, resulting in the final MMR dataset. Since dataset generation is a key contribution to our work, each inspector thoroughly reviews the entire set of 222K question-answer pairs. To minimize human error, we only filter out question-answer pairs flagged by two or more inspectors. This meticulous filtering regimen ensures the integrity and trustworthiness of the MMR dataset. An example of the generated question-answer pairs is illustrated in Fig. 2. + +# 3.3 DATA STATISTICS + +The MMR dataset includes 194,398 intricate and implicit question-answer pairs with 57,643 corresponding images and masks selected from PACO-LVIS. The entire dataset is split into distinct sets for training (154,127 pairs), validation (8,194 pairs), and test (32,077 pairs). Moreover, the test set is further categorized into three subsets: object-only, part-only, and mixed sets, providing a benchmark for evaluating multi-granularity reasoning segmentation capabilities. + +Additionally, our dataset inherits a rich coverage of 75 object categories and 445 part categories from PACO-LVIS, enhancing its diversity and utility. We delve into the frequency distribution per object and part category across question-answer pairs. Fig. 3 (b) and (d) provide a comprehensive overview of the number of questions per object category and part category, respectively. 
The results show that our dataset encompasses a wide range of categories, ensuring that the question-answer pairs are not biased toward specific categories and exhibit a high level of diversity. Furthermore, the word clouds illustrated in Fig. 3 (a) and (c) highlight the prevalent head object and part categories, respectively. These word clouds demonstrate that our question-answer pairs are grounded in common and general objects and their associated parts. Fig. 3 (e) presents statistics on the number of targets in each question-answer pair. On average, there are 1.8 targets per answer, with the maximum number of targets in a single pair being 16. This demonstrates that our dataset can consider multiple targets in an image and cover diverse target reasoning. To evaluate the comprehensiveness of both objects and parts in the proposed dataset, we compare their occurrences within the total question-answer pairs. As depicted in Fig. 3 (f), there are 114,704 descriptions for objects and 226,869 for parts, maintaining a ratio of approximately 1:2. This ratio is reasonable because objects typically + +![](images/6af68df24ea5fe2bcd2e8a1c6b52f8c4a4dbe4f613d06f3921cae1f077f251c6.jpg) +(a) + +![](images/49d95cca8a99e7183736adb72e5992742a6708b40215ddab704ed4836b67c487.jpg) +(c) + +![](images/75402cb6ca335d4d37dec42055f59df83bb9e5047c9952f863c51e27ad68564e.jpg) +(e) + +![](images/27ebdc79105d81dce6346da94f8ea4a8348668259ea47a09d9ab995347bce18b.jpg) +Figure 3: Statistics of the proposed MMR dataset. (a) the word cloud for the object categories, (b) the number of objects per each object category in questions (log scale), (c) the word cloud for the part categories, (d) the number of parts per each part category in questions (log scale), (e) the distribution of target count in answers, and (f) the total number of expressions of objects and parts. 
+ +![](images/727c972b90b1ecc04fd743aed83f08e577ab1f60f02b3b714179cc2aabd64bfc.jpg) + +![](images/78c36daf8e08ee87adcae76de957465e2efddd4d69ab427ee4a382fa5cb691f7.jpg) + +Table 1: Comparison among several reasoning segmentation datasets, including ReasonSeg (Lai et al., 2023), MUSE (Ren et al., 2023), and the proposed MMR. Here, part-level is an expression that refers to various parts of an object that appear in the image. + +
DatasetsObject-levelPart-levelMulti-target# of Q&A pairsGPT models
ReasonSeg×1.2KGPT-3.5
MUSE×214KGPT-4V
MMR194KGPT-4V
+ +consist of multiple parts. Therefore, it reflects a balanced distribution, contributing to the dataset's comprehensiveness and facilitating multi-granularity knowledge understanding. + +# 3.4 COMPARISON WITH EXISTING REASONING SEGMENTATION DATASETS + +Tab. 1 presents a comparative overview of existing reasoning segmentation datasets and the proposed MMR dataset. As observed, MMR offers several notable advantages over existing datasets. + +First, MMR contains 194K question-answer pairs, comparable to MUSE (Ren et al., 2023), and far exceeds ReasonSeg (Lai et al., 2023) which has only 1,218 question-answer pairs primarily designed for validation and testing purposes. This extensive scale facilitates both training and evaluation for reasoning segmentation. + +Second, MMR supports question-answer pairs covering multi-target and multi-granularity (object-level and part-level) visual reasoning. Although MUSE includes multi-target instances, its coverage is limited to object-level reasoning. This lack of part-level detail reduces its effectiveness in fine-grained visual tasks. Part-level reasoning in MMR enables a more comprehensive understanding of visual contexts and hierarchical relationships between parts and objects. While ReasonSeg appears to include part-level reasoning, ReasonSeg often has ambiguous boundaries between objects and their parts because it doesn't specify which object a part belongs to. For instance, in a scene with a "car" and a "tire", ReasonSeg considers the "tire" as part of the "car", even if the tire is not attached. In contrast, MMR clearly distinguishes the boundaries between objects and their parts by specifying hierarchy like which object a part belongs to based on their spatial context. Additionally, unlike ReasonSeg, MMR distinguishes multiple objects of the same class within a single image at the instance level. For example, ReasonSeg might group all buses in a scene under a single "Bus" label. 
On the other hand, MMR treats them as distinct entities like "Bus_1," "Bus_2", etc. Also, ReasonSeg treats all screens simply as "screen," whereas MMR would specify "laptop_1's screen," "laptop_2's screen," and so forth. This allows MMR to handle objects or parts of the same class separately by considering their spatial context within the image. + +Third, MMR leverages the advanced visual understanding capabilities of GPT-4V for question-answer generation. GPT-4V receives the image along with information such as class names and bounding boxes of objects and parts, enabling detailed and contextually accurate question-answer generation. In comparison, ReasonSeg generates questions using the language-specialized GPT-3.5 and pre-trained image tagging models, which do not fully capture the visual context, leading to less relevant question-answer pairs with the image. + +In summary, MMR provides a substantial improvement over ReasonSeg and MUSE by including large-scale, multi-target, and multi-granularity question-answer pairs. It strengthens real-world applicability, making it a valuable asset for advancing research in reasoning-based segmentation tasks. + +# 4 BASELINE FRAMEWORK + +We propose a novel baseline framework for multi-target and multi-granularity reasoning segmentation, $\mathbf{M}^2\mathbf{SA}$ . $\mathbf{M}^2\mathbf{SA}$ enhances the LISA framework with two key features: 1) Early Local Feature Fusion and 2) multiple [SEG] tokens. For Early Local Feature Fusion, we extract local features from the early layer of the SAM's vision encoder, which contains fine-grained details such as image edges and boundaries. These local features are fused with the global semantic context features from the last layer of SAM's vi- + +![](images/cb1cb6ee8f4d1046e00e0441d898b982ead7bd659cd7608fd7812d75c403cf50.jpg) +Figure 4: The overview of $\mathbf{M}^2\mathrm{SA}$ framework. + +sion encoder for more informative visual features in the mask decoder. 
Multiple [SEG] tokens overcome the LISA framework's limitation of a single [SEG] token, which struggles to segment multiple targets simultaneously. To overcome this, we propose utilizing multiple [SEG] tokens. In our MMR dataset, we append a [SEG] token to each target object and part in the answer annotations (e.g., "When closing the laptop, laptop computer's screen [SEG] would come into contact with laptop computer's base panel [SEG]."). This approach enables the model to predict separate [SEG] tokens for each target, reducing ambiguity among multiple targets. + +Model Architecture Fig. 4 presents the overall architecture of the proposed $\mathbf{M}^2\mathbf{SA}$ framework, which integrates two core components: Segment Anything Model (SAM) (Kirillov et al., 2023) and Multimodal Large Language Model (MLLM), specifically LLaVA (Liu et al., 2024). SAM module consists of SAM Vision Encoder $(E)$ and SAM Mask Decoder $(D)$ , while the MLLM comprises CLIP Vision Encoder $(I)$ , vision-to-text projector $(\psi)$ , and Large Language Model $(F)$ . The image $x_{img} \in R^{h \times w \times 3}$ is fed into the SAM Vision Encoder $(E)$ , which generates global context features $v_g = E(x_{img}) \in R^{h/16 \times w/16 \times c}$ and early local features $v_l = E_l(x_{img}) \in R^{h/16 \times w/16 \times c'}$ . To align the channel dimensions of $v_l$ with $v_g$ , the early local features $v_l$ are passed through two convolution layers, resulting in refined features $\hat{v}_l \in R^{h/16 \times w/16 \times c}$ . $v_g$ and $\hat{v}_l$ are then summed to obtain visual features $v_{seg} \in R^{h/16 \times w/16 \times c}$ for segmentation. Simultaneously, the image $x_{img}$ is input into the CLIP Vision Encoder $(I)$ , producing visual token embeddings $f_{img} = \psi(I(x_{img})) \in R^{N_{img} \times d}$ , which are mapped to the LLM input space using the vision-to-text projector $\psi$ . 
In parallel, the text queries $x_{txt}$ are tokenized by the $F$ 's tokenizer, producing text token embeddings $f_{txt} \in R^{N_{txt} \times d}$ . The visual token embeddings $f_{img}$ and text token embeddings $f_{txt}$ are concatenated and processed by LLM $F$ , resulting in output response $\hat{y}_{txt} = F(\text{concat}(f_{img}, f_{txt}))$ . $\hat{y}_{txt}$ contains the textual response to the text query and special [SEG] tokens that correspond to each target entity to be segmented. These multiple [SEG] token embeddings are extracted and projected into SAM's prompt space via the projector $\phi$ , resulting in embeddings $f_{seg} = \phi(\hat{y}_{txt}[SEG]) \in R^{N_{seg} \times c}$ . Finally, the SAM Mask Decoder $(D)$ takes the visual features $v_{seg}$ and the multiple [SEG] token embeddings + +$f_{seg}$ as input to generate the segmentation mask $\hat{M} = D(\text{concat}(v_{seg}, f_{seg}))$ , which identifies the target regions in the image corresponding to the text queries. + +**Optimization** Our model is trained end-to-end through two sources of supervision. For the text generation, we compute auto-regressive cross-entropy loss $L_{txt}$ between the text output $\hat{y}_{txt}$ and the ground-truth text answer $y_{txt}$ . For the high-quality segmentation mask generation, the mask loss $L_{mask}$ is calculated between the output mask $\hat{M}$ and the ground-truth mask $M$ . The mask loss $L_{mask}$ is a weighted sum of per-pixel binary cross-entropy loss $L_{bce}$ and a DICE loss $L_{dice}$ , determined by $\lambda_{bce}$ and $\lambda_{dice}$ . The overall loss $L$ is formulated as follows: + +$$ +L = L _ {t x t} + L _ {m a s k}, +$$ + +$$ +L _ {m a s k} = \lambda_ {b c e} L _ {b c e} + \lambda_ {d i c e} L _ {d i c e}, \tag {1} +$$ + +where $\lambda_{bce}$ and $\lambda_{dice}$ are set to 0.5 and 2.0, respectively. 
+ +# 5 EXPERIMENT + +# 5.1 EXPERIMENTAL SETUP + +Implementation Details We use pre-trained LLaVA-7B (Liu et al., 2024) and LLaVA-Llama2-13B with CLIP-ViT-L/14 (Radford et al., 2021) and Vicuna-7B (Chiang et al., 2023)/Llama2-13B (Touvron et al., 2023) to form Multimodal Large Language Model (MLLM). We adopt the pre-trained SAM-ViT-H (Kirillov et al., 2023) for the segmentation model. For CLIP-ViT-L/14, input image $x_{img}$ is resized to $224 \times 224 \times 3$ and processed with a patch size of 14, resulting in $N_{img} = 256$ . LLM dimensions $d$ are set to 4096 and 5120 for Vicuna-7B and Llama2-13B. For SAM-ViT-H, $c$ and $c'$ are 256 and 1280, respectively. Efficient fine-tuning of the MLLM is facilitated using LoRA (Hu et al., 2021). The trainable components in $\mathbf{M}^2\mathbf{SA}$ include the SAM Mask Decoder $D$ , the projector $\phi$ , two convolution layers, the LoRA adapter in MLLM, and the token embeddings. We use features from the 8th layer in the SAM Vision Encoder $E$ for early layer feature fusion. Our model is trained for 10 epochs, with each epoch consisting of 5,000 steps. We employ the AdamW (Loshchilov & Hutter, 2017) optimizer with a learning rate of 0.0003 and set gradient accumulation to 10 steps per update. Additionally, we use WarmupDecayLR as the learning rate scheduler. The learning rate is linearly decayed after 100 steps. The batch size and LoRA rank are set to 2 and 8, respectively. All experiments are conducted using 4 NVIDIA RTX A6000 GPUs. The results reported in the paper are the average values obtained from experiments conducted with 3 different random seeds. 
+ +Datasets For model training, we adopt the mixed training dataset composition scheme proposed by LISA (Lai et al., 2023), comprising four types: semantic segmentation datasets (ADE20K (Zhou et al., 2019), COCO-Stuff (Caesar et al., 2018), Mapillary (Neuhold et al., 2017), PACO-LVIS (Ramanathan et al., 2023), and PASCAL-Part (Chen et al., 2014)), referring expression segmentation datasets (RefCOCO (Kazemzadeh et al., 2014), RefCOCO+ (Kazemzadeh et al., 2014), RefCOCOg (Mao et al., 2016), and RefCLEF (Kazemzadeh et al., 2014)), a visual question answering dataset (LLaVA-Instruct-150K (Liu et al., 2024)), and the proposed MMR dataset for multi-target and multi-granularity reasoning segmentation. We sample the data from the mixed training dataset in a ratio of 2:9:2:6, where 2 represents semantic segmentation datasets, 9 represents referring expression segmentation datasets, 2 represents the visual question answering dataset, and 6 represents the proposed MMR dataset. + +Baseline Methods To validate the effectiveness of the $\mathbf{M}^2\mathbf{SA}$ for a multi-target and multi-granularity reasoning segmentation task, we adopt LISA (Lai et al., 2023), GSVA (Xia et al., 2024), and GLaMM (Rasheed et al., 2024) along with their variants. The pre-trained models refer to those trained solely on their respective datasets. In contrast, the variant models referred to as $\mathrm{model}_{tr}$ , are trained from scratch on a mixed training set that includes the MMR dataset. Due to issues with the publicly available code from the PixelLM, we exclude PixelLM from the baseline methods to ensure reliable and consistent comparison results. For a Multi-granularity Referring Expression Segmentation (MRES) task, we additionally adopt the class RES models (Yang et al., 2022; Liu et al., 2023a; Wang et al., 2023; 2022) and the general models (Zhu et al., 2022; Zou et al., 2023; 2024). + +Table 2: Results on MMR benchmark. The gIoU and cIoU metrics are reported for the comparison. 
Obj & Part, Obj, and Part denote multi-granularity, object-only, and part-only evaluation settings. The best results are highlighted in bold. + +
Methodsvaltest
Obj & PartObjPartObj & Part
gIoUcIoUgIoUcIoUgIoUcIoUgIoUcIoU
LISA-7B (Lai et al., 2023)13.818.323.525.16.67.914.517.9
LISA-7Btr19.431.634.741.88.013.119.527.1
GSVA-7B (Xia et al., 2024)14.625.126.434.36.011.615.524.8
GSVA-7Btr19.838.930.241.18.018.621.234.5
GLaMM (Rasheed et al., 2024)12.619.223.731.93.96.413.318.7
GLaMMtr26.947.140.354.212.125.530.345.0
M2SA-7B27.848.641.055.613.527.030.946.8
LISA-Llama2-13B (Lai et al., 2023)15.420.026.127.97.48.416.119.8
LISA-Llama2-13Btr22.333.440.245.210.716.423.029.2
M2SA-Llama2-13B28.449.142.357.613.627.231.647.6
+ +Table 3: Referring expression segmentation results on RefCOCO, RefCOCO+ (Kazemzadeh et al., 2014) and RefCOCOg (Mao et al., 2016) among $\mathbf{M}^2\mathbf{SA}$ and existing methods. For a fair comparison with previous methods, the cIoU metrics are adopted. The best results are highlighted in bold. + +
MethodsRefCOCORefCOCO+RefCOCOg
valtestAtestBvaltestAtestBval(U)test(U)
MCN (Luo et al., 2020)62.464.259.750.655.544.749.249.4
VLT (Ding et al., 2021)67.570.565.256.361.050.155.057.0
CRIS (Wang et al., 2022)70.573.266.162.368.153.759.960.4
LAVT (Yang et al., 2022)72.775.868.862.168.455.161.262.1
ReLA (Liu et al., 2023a)73.876.570.266.071.057.765.066.0
X-Decoder (Zou et al., 2023)------64.6-
SEEM (Zou et al., 2024)------65.7-
LISA-7B (Lai et al., 2023)74.176.571.162.467.456.566.468.5
GSVA-7B (Xia et al., 2024)76.477.472.864.567.758.671.172.0
GLaMM (Rasheed et al., 2024)79.583.276.972.678.764.674.274.9
M2SA-7B74.076.869.763.167.256.167.068.3
LISA-Llama2-13B (Lai et al., 2023)73.677.370.563.268.257.067.068.4
M2SA-Llama2-13B74.677.671.064.068.157.669.069.3
+ +Evaluation Metrics Following the implementation of the referring expression segmentation works, we adopt gIoU and cIoU scores to assess the quality of the segmentation mask. gIoU denotes the mean IoU for each mask, whereas cIoU is computed by the cumulative intersection area over the cumulative union area across the entire dataset. Given that cIoU may exhibit bias towards large-area objects, gIoU is preferable for evaluating part regions. + +# 5.2 RESULTS ON BENCHMARK DATASETS + +Comparison on MMR Tab. 2 compares $\mathbf{M}^2\mathrm{SA}$ and the baseline models in a multi-target and multi-granularity reasoning segmentation task (MMR dataset). The pre-trained models perform poorly on the proposed MMR dataset, particularly struggling with the part-only set due to its lack of detailed part-level understanding. Conversely, $\mathrm{LISA}_{tr}$ , $\mathrm{GSVA}_{tr}$ , and $\mathrm{GLaMM}_{tr}$ , trained using the proposed MMR dataset, exhibit superior performance as they acquire both object-level and part-level knowledge. However, its ability to handle multi-target and fine-detail reasoning remains limited. In contrast, the proposed $\mathbf{M}^2\mathrm{SA}$ shows highly competitive performance, effectively managing multi-target scenarios and fine-detail tasks, thus showcasing its strength in comprehensive reasoning segmentation. Qualitative results are provided in the Appendix A.13. + +Comparison on Referring Expression Segmentation Task Tab. 3 presents the single-target object-level RefCOCO series dataset results. While $\mathbf{M}^2\mathbf{SA}$ achieves commendable performance, it is important to note that single-target referring expression segmentation is a relatively simple task, involving explicit queries that focus on identifying a single object. The true strength of $\mathbf{M}^2\mathbf{SA}$ lies in its ability to excel in more complex and challenging tasks, such as multi-target referring expression segmentation and multi-granularity referring segmentation. 
To evaluate its performance on multi-target referring expression segmentation, we curate text queries for multi-target objects using annotation information from the RefCOCO-series datasets. Each query is constructed by randomly selecting 4 to 6 object categories from each image and generating text prompts like "Can you segment the class 1, class 2, ..., and class n?" We then compare $\mathbf{M}^2\mathbf{SA}$ 's performance against LISA, + +Table 4: Multi-referring expression segmentation results. We adopt the cIoU metric for comparison. The best results are highlighted in bold. + +
MethodsMulti-RefCOCOMulti-RefCOCO+Multi-RefCOCOg
valtestAtestBvaltestAtestBval(U)test(U)
LISA-7B (Lai et al., 2023)34.032.736.428.228.628.545.248.7
GSVA-7B (Xia et al., 2024)50.753.347.844.847.440.647.748.6
GLaMM (Rasheed et al., 2024)30.832.030.028.829.627.232.535.0
M2SA-7B71.373.367.261.865.355.862.063.6
LISA-Llama2-13B (Lai et al., 2023)33.232.632.427.729.926.744.047.1
M2SA-Llama2-13B72.075.668.062.367.156.165.465.8
+ +Table 5: Multi-granularity referring expression segmentation results on RefCOCom (Wang et al., 2023). For a fair comparison with previous methods, the mIoU metrics are adopted. Part denotes part-only evaluation, and Obj & Part denotes multi-granularity evaluation. The best results are highlighted in bold. + +
MethodsvaltestAtestB
PartObj & PartPartObj & PartPartObj & Part
SeqTR (Zhu et al., 2022)13.928.212.122.818.134.7
CRIS (Wang et al., 2022)10.625.410.121.212.930.0
LAVT (Yang et al., 2022)15.329.913.224.418.735.5
X-Decoder (Zou et al., 2023)16.229.513.623.620.333.8
SEEM (Zou et al., 2024)16.129.413.623.420.433.9
UniRES (Wang et al., 2023)19.634.316.427.825.241.7
LISA-7B (Lai et al., 2023)21.334.318.528.625.740.1
GSVA-7B (Xia et al., 2024)11.423.19.219.216.828.2
GLaMM (Rasheed et al., 2024)21.435.318.629.526.941.1
M²SA-7B22.435.519.930.127.141.4
LISA-Llama2-13B (Lai et al., 2023)22.135.219.429.727.241.6
M²SA-Llama2-13B24.537.321.931.928.542.7
+ +GSVA, and GLaMM. As shown in Tab. 4, $\mathbf{M}^2\mathbf{SA}$ significantly outperforms these methods, showcasing its ability to reason about multiple objects simultaneously and effectively leverage its multi [SEG] tokens for diverse and intricate queries. + +Additionally, we evaluate $\mathbf{M}^2\mathrm{SA}$ on RefCOCOM, a multi-granularity referring segmentation dataset. As demonstrated in Tab. 5, $\mathbf{M}^2\mathrm{SA}$ surpasses existing methods in this task, though the performance improvement is less pronounced. This is likely because the MMR dataset does not include the person class, which constitutes a significant portion of the categories in RefCOCOM. These results emphasize the versatility and effectiveness of $\mathbf{M}^2\mathrm{SA}$ in addressing complex, real-world scenarios, extending well beyond simple single-target segmentation tasks. + +# 6 CONCLUSION + +This paper addresses the limitations of current reasoning segmentation datasets, which often overlook multi-target or part-level reasoning. To resolve these issues, we introduce the Multi-target and Multi-granularity Reasoning (MMR) dataset, providing 194K comprehensive question-answer pairs that cover multi-target, object-level, and part-level aspects, enhancing diverse and context-aware interactions. We also propose the $\mathbf{M}^2\mathbf{SA}$ model, designed for multi-target, object-level, and part-level reasoning segmentation. $\mathbf{M}^2\mathbf{SA}$ incorporates early local feature fusion and multiple [SEG] tokens, improving fine-grained visual understanding and multi-target segmentation. Experimental results show that $\mathbf{M}^2\mathbf{SA}$ outperforms existing models on the MMR benchmark. The MMR dataset aims to drive progress in reasoning segmentation by emphasizing the importance of multi-target and part-level aspects in human-AI interactions. + +# ACKNOWLEDGMENTS + +This research has been supported by the LG Electronics Corporation. (Project No. 
G01230381) + +# REFERENCES + +Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023. +Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katherine Millican, Malcolm Reynolds, et al. Flamingo: a visual language model for few-shot learning. Advances in neural information processing systems, 35:23716-23736, 2022. +Jinze Bai, Shuai Bai, Shusheng Yang, Shijie Wang, Sinan Tan, Peng Wang, Junyang Lin, Chang Zhou, and Jingren Zhou. Qwen-vl: A frontier large vision-language model with versatile abilities. arXiv preprint arXiv:2308.12966, 2023. +Holger Caesar, Jasper Uijlings, and Vittorio Ferrari. Coco-stuff: Thing and stuff classes in context. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 1209-1218, 2018. +Keqin Chen, Zhao Zhang, Weili Zeng, Richong Zhang, Feng Zhu, and Rui Zhao. Shikra: Unleashing multimodal llm's referential dialogue magic. arXiv preprint arXiv:2306.15195, 2023. +Xianjie Chen, Roozbeh Mottaghi, Xiaobai Liu, Sanja Fidler, Raquel Urtasun, and Alan Yuille. Detect what you can: Detecting and representing objects using holistic models and body parts. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 1971-1978, 2014. +Wei-Lin Chiang, Zhuohan Li, Zi Lin, Ying Sheng, Zhanghao Wu, Hao Zhang, Lianmin Zheng, Siyuan Zhuang, Yonghao Zhuang, Joseph E Gonzalez, et al. Vicuna: An open-source chatbot impressing gpt-4 with $90\%$ chatgpt quality. See https://vicuna.lmsys.org (accessed 14 April 2023), 2(3):6, 2023. +Seokju Cho, Heeseong Shin, Sunghwan Hong, Seungjun An, Seungjun Lee, Anurag Arnab, Paul Hongsuck Seo, and Seungryong Kim. Cat-seg: Cost aggregation for open-vocabulary semantic segmentation. arXiv preprint arXiv:2303.11797, 2023. 
+Marius Cordts, Mohamed Omran, Sebastian Ramos, Timo Rehfeld, Markus Enzweiler, Rodrigo Benenson, Uwe Franke, Stefan Roth, and Bernt Schiele. The cityscapes dataset for semantic urban scene understanding. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 3213-3223, 2016. +Wenliang Dai, Junnan Li, Dongxu Li, Anthony Meng Huat Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale N Fung, and Steven Hoi. Instructclip: Towards general-purpose vision-language models with instruction tuning. Advances in Neural Information Processing Systems, 36, 2024. +Henghui Ding, Chang Liu, Suchen Wang, and Xudong Jiang. Vision-language transformer and query generation for referring segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 16321-16330, 2021. +Peng Gao, Jiaming Han, Renrui Zhang, Ziyi Lin, Shijie Geng, Aojun Zhou, Wei Zhang, Pan Lu, Conghui He, Xiangyu Yue, et al. Llama-adapter v2: Parameter-efficient visual instruction model. arXiv preprint arXiv:2304.15010, 2023. +Ke Gong, Xiaodan Liang, Dongyu Zhang, Xiaohui Shen, and Liang Lin. Look into person: Self-supervised structure-sensitive learning and a new benchmark for human parsing. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 932-940, 2017. +Ju He, Shuo Yang, Shaokang Yang, Adam Kortylewski, Xiaoding Yuan, Jie-Neng Chen, Shuai Liu, Cheng Yang, Qihang Yu, and Alan Yuille. Partimagenet: A large, high-quality dataset of parts. In European Conference on Computer Vision, pp. 128-145. Springer, 2022. + +Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. Lora: Low-rank adaptation of large language models. arXiv preprint arXiv:2106.09685, 2021. +Yutao Hu, Qixiong Wang, Wenqi Shao, Enze Xie, Zhenguo Li, Jungong Han, and Ping Luo. Beyond one-to-one: Rethinking the referring image segmentation. 
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 4067-4077, 2023. +Menglin Jia, Mengyun Shi, Mikhail Sirotenko, Yin Cui, Claire Cardie, Bharath Hariharan, Hartwig Adam, and Serge Belongie. Fashionpedia: Ontology, segmentation, and an attribute localization dataset. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part I 16, pp. 316-332. Springer, 2020. +Sahar Kazemzadeh, Vicente Ordonez, Mark Matten, and Tamara Berg. Referitgame: Referring to objects in photographs of natural scenes. In Proceedings of the 2014 conference on empirical methods in natural language processing (EMNLP), pp. 787-798, 2014. +Alexander Kirillov, Kaiming He, Ross Girshick, Carsten Rother, and Piotr Dólar. Panoptic segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 9404-9413, 2019. +Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 4015-4026, 2023. +Xin Lai, Zhuotao Tian, Yukang Chen, Yanwei Li, Yuhui Yuan, Shu Liu, and Jiaya Jia. Lisa: Reasoning segmentation via large language model. arXiv preprint arXiv:2308.00692, 2023. +Jianshu Li, Jian Zhao, Yunchao Wei, Congyan Lang, Yidong Li, Terence Sim, Shuicheng Yan, and Jiashi Feng. Multiple-human parsing in the wild. arXiv preprint arXiv:1705.07206, 2017. +Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. In International conference on machine learning, pp. 19730–19742. PMLR, 2023. +Xiangtai Li, Shilin Xu, Yibo Yang, Guangliang Cheng, Yunhai Tong, and Dacheng Tao. Panoptic-partformer: Learning a unified model for panoptic part segmentation. In European Conference on Computer Vision, pp. 729-747. 
Springer, 2022. +Feng Liang, Bichen Wu, Xiaoliang Dai, Kunpeng Li, Yinan Zhao, Hang Zhang, Peizhao Zhang, Peter Vajda, and Diana Marculescu. Open-vocabulary semantic segmentation with mask-adapted clip. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 7061-7070, 2023. +Chang Liu, Henghui Ding, and Xudong Jiang. Gres: Generalized referring expression segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 23592-23601, 2023a. +Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. Advances in neural information processing systems, 36, 2024. +Zhaoyang Liu, Yinan He, Wenhai Wang, Weiyun Wang, Yi Wang, Shoufa Chen, Qinglong Zhang, Yang Yang, Qingyun Li, Jiashuo Yu, et al. Internchat: Solving vision-centric tasks by interacting with chatbots beyond language. arXiv preprint arXiv:2305.05662, 2023b. +Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017. +Gen Luo, Yiyi Zhou, Xiaoshuai Sun, Liujuan Cao, Chenglin Wu, Cheng Deng, and Rongrong Ji. Multi-task collaborative network for joint referring expression comprehension and segmentation. In Proceedings of the IEEE/CVF Conference on computer vision and pattern recognition, pp. 10034-10043, 2020. + +Junhua Mao, Jonathan Huang, Alexander Toshev, Oana Camburu, Alan L Yuille, and Kevin Murphy. Generation and comprehension of unambiguous object descriptions. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 11-20, 2016. +Panagiotis Meletis, Xiaoxiao Wen, Chenyang Lu, Daan de Geus, and Gijs Dubbelman. Cityscapes-panoptic-parts and Pascal-panoptic-parts datasets for scene understanding. arXiv preprint arXiv:2004.07944, 2020. +Umberto Michieli, Edoardo Borsato, Luca Rossi, and Pietro Zanuttigh. Gmnet: Graph matching network for large scale part semantic segmentation in the wild. 
In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part VIII 16, pp. 397-414. Springer, 2020. +Kaichun Mo, Shilin Zhu, Angel X Chang, Li Yi, Subarna Tripathi, Leonidas J Guibas, and Hao Su. Partnet: A large-scale benchmark for fine-grained and hierarchical part-level 3d object understanding. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 909-918, 2019. +Gerhard Neuhold, Tobias Ollmann, Samuel Rota Bulo, and Peter Kontschieder. The mapillary vistas dataset for semantic understanding of street scenes. In Proceedings of the IEEE international conference on computer vision, pp. 4990-4999, 2017. +Tai-Yu Pan, Qing Liu, Wei-Lun Chao, and Brian Price. Towards open-world segmentation of parts. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 15392-15401, 2023. +Baolin Peng, Chunyuan Li, Pengcheng He, Michel Galley, and Jianfeng Gao. Instruction tuning with gpt-4. arXiv preprint arXiv:2304.03277, 2023. +Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pp. 8748-8763. PMLR, 2021. +Vignesh Ramanathan, Anmol Kalia, Vladan Petrovic, Yi Wen, Baixue Zheng, Baishan Guo, Rui Wang, Aaron Marquez, Rama Kovvuri, Abhishek Kadian, et al. Paco: Parts and attributes of common objects. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 7141-7151, 2023. +Hanoona Rasheed, Muhammad Maaz, Sahal Shaji, Abdelrahman Shaker, Salman Khan, Hisham Cholakkal, Rao M Anwer, Eric Xing, Ming-Hsuan Yang, and Fahad S Khan. Glamm: Pixel grounding large multimodal model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 13009-13018, 2024. 
+Zhongwei Ren, Zhicheng Huang, Yunchao Wei, Yao Zhao, Dongmei Fu, Jiashi Feng, and Xiaojie Jin. Pixel reasoning with large multimodal model. arXiv preprint arXiv:2312.02228, 2023. +Stephan R Richter, Vibhav Vineet, Stefan Roth, and Vladlen Koltun. Playing for data: Ground truth from computer games. In Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part II 14, pp. 102-118. Springer, 2016. +Konstantinos I Roumeliotis and Nikolaos D Tselikas. Chatgpt and open-ai models: A preliminary review. Future Internet, 15(6):192, 2023. +Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li, Carlos Guestrin, Percy Liang, and Tatsunori B Hashimoto. Stanford alpaca: An instruction-following llama model, 2023. +Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, et al. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971, 2023. +Catherine Wah, Steve Branson, Peter Welinder, Pietro Perona, and Serge Belongie. The caltech-ucsd birds-200-2011 dataset. 2011. + +Wenxuan Wang, Tongtian Yue, Yisi Zhang, Longteng Guo, Xingjian He, Xinlong Wang, and Jing Liu. Unveiling parts beyond objects: Towards finer-granularity referring expression segmentation. arXiv preprint arXiv:2312.08007, 2023. +Zhaoqing Wang, Yu Lu, Qiang Li, Xunqiang Tao, Yandong Guo, Mingming Gong, and Tongliang Liu. Cris: Clip-driven referring image segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 11686-11695, 2022. +Meng Wei, Xiaoyu Yue, Wenwei Zhang, Shu Kong, Xihui Liu, and Jiangmiao Pang. Ov-parts: Towards open-vocabulary part segmentation. Advances in Neural Information Processing Systems, 36, 2024. +Zhuofan Xia, Dongchen Han, Yizeng Han, Xuran Pan, Shiji Song, and Gao Huang. Gsva: Generalized segmentation via multimodal large language models. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 3858-3869, 2024. +Jilan Xu, Junlin Hou, Yuejie Zhang, Rui Feng, Yi Wang, Yu Qiao, and Weidi Xie. Learning open-vocabulary semantic segmentation models from natural language supervision. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 2935-2944, 2023. +Lu Yang, Qing Song, Zhihui Wang, and Ming Jiang. Parsing r-cnn for instance-level human analysis. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 364-373, 2019. +Zhao Yang, Jiaqi Wang, Yansong Tang, Kai Chen, Hengshuang Zhao, and Philip HS Torr. Lavt: Language-aware vision transformer for referring image segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 18155-18165, 2022. +Haoxuan You, Haotian Zhang, Zhe Gan, Xianzhi Du, Bowen Zhang, Zirui Wang, Liangliang Cao, Shih-Fu Chang, and Yinfei Yang. Ferret: Refer and ground anything anywhere at any granularity. arXiv preprint arXiv:2310.07704, 2023. +Renrui Zhang, Jiaming Han, Chris Liu, Peng Gao, Aojun Zhou, Xiangfei Hu, Shilin Yan, Pan Lu, Hongsheng Li, and Yu Qiao. Llama-adapter: Efficient fine-tuning of language models with zero-init attention. arXiv preprint arXiv:2303.16199, 2023a. +Shilong Zhang, Peize Sun, Shoufa Chen, Min Xiao, Wenqi Shao, Wenwei Zhang, Kai Chen, and Ping Luo. Gpt4roi: Instruction tuning large language model on region-of-interest. arXiv preprint arXiv:2307.03601, 2023b. +Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen, Christopher Dewan, Mona Diab, Xian Li, Xi Victoria Lin, et al. Opt: Open pre-trained transformer language models. arXiv preprint arXiv:2205.01068, 2022. +Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zi Lin, Zhuohan Li, Dacheng Li, Eric Xing, et al. Judging llm-as-a-judge with mt-bench and chatbot arena. 
Advances in Neural Information Processing Systems, 36, 2024. +Shuai Zheng, Fan Yang, M Hadi Kiapour, and Robinson Piramuthu. Modanet: A large-scale street fashion dataset with polygon annotations. In Proceedings of the 26th ACM international conference on Multimedia, pp. 1670-1678, 2018. +Bolei Zhou, Hang Zhao, Xavier Puig, Tete Xiao, Sanja Fidler, Adela Barriuso, and Antonio Torralba. Semantic understanding of scenes through the ade20k dataset. International Journal of Computer Vision, 127:302-321, 2019. +Tianfei Zhou, Wenguan Wang, Si Liu, Yi Yang, and Luc Van Gool. Differentiable multi-granularity human representation learning for instance-aware human semantic parsing. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 1622-1631, 2021. +Chaoyang Zhu, Yiyi Zhou, Yunhang Shen, Gen Luo, Xingjia Pan, Mingbao Lin, Chao Chen, Liujuan Cao, Xiaoshuai Sun, and Rongrong Ji. Seqtr: A simple yet universal network for visual grounding. In European Conference on Computer Vision, pp. 598-615. Springer, 2022. + +Deyao Zhu, Jun Chen, Xiaoqian Shen, Xiang Li, and Mohamed Elhoseiny. Minigpt-4: Enhancing vision-language understanding with advanced large language models. arXiv preprint arXiv:2304.10592, 2023. +Xueyan Zou, Zi-Yi Dou, Jianwei Yang, Zhe Gan, Linjie Li, Chunyuan Li, Xiyang Dai, Harkirat Behl, Jianfeng Wang, Lu Yuan, et al. Generalized decoding for pixel, image, and language. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 15116-15127, 2023. +Xueyan Zou, Jianwei Yang, Hao Zhang, Feng Li, Linjie Li, Jianfeng Wang, Lijuan Wang, Jianfeng Gao, and Yong Jae Lee. Segment everything everywhere all at once. Advances in Neural Information Processing Systems, 36, 2024. + +# A APPENDIX + +# A.1 LIMITATION + +While PACO-LVIS provides diverse and comprehensive object-part mask annotations for common objects, it lacks information on the human class and its parts. 
Consequently, our question-answer pairs generated based on PACO-LVIS do not consider reasoning about human class and its parts, which is a drawback. Therefore, there is a need for future dataset expansion to include a wider range of objects and parts that exist in real-world environments. Additionally, although we carefully design the prompts to ensure the diversity and quality of the dataset, the content of the question-answer pairs is inherently dependent on the pre-trained knowledge of ChatGPT. + +# A.2 ETHICS CONCERN + +The MMR dataset is constructed based on the publicly available PACO-LVIS dataset (Ramanathan et al., 2023), which helps mitigate privacy concerns. As the objects and parts within the images are already annotated, we only add text question-answer pairs, ensuring that potential privacy issues remain minimal. These question-answer pairs are generated using the ChatGPT/GPT-4V API (Achiam et al., 2023). While there is a risk of bias from the training data of the ChatGPT/GPT-4V API, we have implemented a thorough data filtering process to remove any ethically problematic content. + +# A.3 LICENSE + +We utilize the released code from LISA (Lai et al., 2023) for the baseline model code construction. Since LISA follows Apache License 2.0, our code is also licensed under Apache License 2.0. Additionally, the PACO-LVIS dataset is licensed under a Creative Commons Attribution 4.0 (CC BY 4.0) license. Consequently, our MMR dataset is also licensed under Creative Commons Attribution 4.0 (CC BY 4.0). To download the PACO-LVIS dataset (Ramanathan et al., 2023), we utilize author-released code under the MIT license. We use ChatGPT/GPT-4V API (Achiam et al., 2023) developed by OpenAI to generate the question-answer pairs in the MMR dataset. Specific licensing information for the ChatGPT/GPT-4V API model is proprietary to OpenAI. 
+ 

# A.4 THE SPECIFIC DETAILS OF CHATGPT API 

The specific command to use the ChatGPT API (Achiam et al., 2023) for generating question-answer pairs in MMR is as follows: 

```python
response = openai.ChatCompletion.create(
    model="gpt-4-vision-preview",
    messages=prompt,
    temperature=0.7,
    max_tokens=850,
)
```

Figure 5: To generate question-answer pairs in MMR dataset, we use gpt-4-vision-preview model. For the hyper-parameters, we set the temperature to 0.7 and max_tokens to 850. 

# A.5 PROMPTS AND EXAMPLES 

General MMR Dataset The MMR dataset fundamentally includes multi-target (both objects and parts) answers to each question. In this section, we discuss the full prompt not covered in the main manuscript. Fig. 6 illustrates the prompt used to generate the train, validation, and test datasets. Both text and image prompts are input into GPT-4V (Achiam et al., 2023), resulting in the creation of question-answer pairs that encompass various information about objects and parts. As shown in Fig. 2, the output includes a global caption and question-answer pairs for the image. The 

"You are an AI visual assistant capable of analyzing a single image. You receive the specific object locations and part locations within the image, along with detailed coordinates. These coordinates are in the form of bounding boxes, represented as (x1, y1, x2, y2). These values correspond to the top left x, top left y, bottom right x, and bottom right y. The height and width of the image you receive are 427 and 640, respectively. Additionally, there may be multiple objects of the same category in the image. To resolve this ambiguity, we use "object_number" such as "person_1" and "person_2" to differentiate between objects of the same category. If a region is a part of an object, the category name is described as "object's part", like "person's body" and "bus's wheel". 
The category names and bounding box coordinates of objects and parts are as follow: + +![](images/24150db4c2490cc27e374eb463bb6d334ecf8dcc379e7e6b81cb2cbbca7a6563.jpg) + +```c +``` +bottle_1 [459.07, 0.0, 603.49, 315.78]; +bottle_1's label [460, 105, 500, 282]; +bottle_1's neck [470, 0, 593, 62]; +bottle_1's shoulder [461, 56, 603, 103]; +bottle_1's body [460, 94, 604, 291]; +bottle_1's base [463, 287, 596, 316]; +bottle_2 [296.85, 1.15, 416.19, 242.2]; +bottle_2's base [307, 220, 400, 241]; +bottle_2's label [300, 4, 413, 176]; +bottle_2's body [307, 172, 403, 231]; +knife_1 [204.79, 2.4, 238.63, 226.53]; +knife_1's blade [213, 121, 237, 226]; +knife_1's handle [205, 2, 239, 126]; +knife_2 [304.84, 320.65, 615.34, 427.0 +knife_2's blade [305, 321, 601, 422]; +knife_2's handle [529, 399, 616, 426]; + +You first need to create a global caption for the image without given information. A global caption should summarize the content of the image within a maximum of two sentences. The format for the global caption strictly follows: "Global caption: GLOBAL_CAPTION_FOR_the_IMAGE. What you need to do next is create question-answers-pairs using the information of objects and parts given above. However, when the corresponding object and part name appear in the answers, "name [coordinates]" is used as the given information above without changing its form. The goal of generating the question-answers-pair is to use the provided information about objects and object's parts, create a plausible and challenging question about the image, and provide the answer in detail for the image reasoning segmentation. The content of the question must address one of the following two: + +1) the relationship between parts within the image or the relationship between a part and an object. +2) the function or the general information about the parts. + +The question should be implicit and require commonsense reasoning, rather than explicitly mentioning the names of the object and part. 
In other words, it's important to make the question challenging by not directly including visual content details. The answer should include multiple object's parts. You must build at least 3 rounds of natural question-answer pairs, and if there is sufficient information create up to 5 rounds of question-answer pairs. In addition, please follow the format strictly: The order must be attached to the questions and answers like Question 1: and Answer 1.: In the answer, the coordinates referring to the target part or object must be attached to the object name or part name in the format: object_1[x1, y1, x2, y2] and object_1's part [x1, y1, x2, y2]. Do not use other format such as "a part of object_1". Here are some additional requirements about generated question and answers: + +1. Do not mention that the information source is provided in text description. Always answer as if you are directly looking at the image. +2. Do not ask the question you are not confident to answer. Only include question that have definite answer. +3. Do not mention the coordinates of a part and an object directly in the question. +4. Make the questions and answers concise and easy to understand, avoiding overly complex and ambiguous sentences. +5. The question should describe a complete activity, a function, or general information. +6. The answer to the generated question should include at least two object's parts and explicitly describe the names of the part and the object. Implied other potential parts is strictly prohibited. +7. Even if the image includes the real people and the brand name, or is not associated with the mentioned information, make sure to still create the question-answer pairs. +8. Avoid using incorrectly formatted object names or part names, such as located at [coordinates] or a part [object_1's part [coordinates]]. In other words, use it as it appears in the object and part information given above. 
### For example: shoe_1's outsole [42, 332, 62, 336], not an outsole [shoe_1's outsole [42, 332, 62, 336]]. +9. All generated answers must include the given object or part information, without changing the format. + +# Figure 6: The text and image prompt used in our data creation for MMR dataset with GPT-4V. + +segmentation mask information for the objects or parts mentioned in the answers is sourced from PACO-LVIS (Ramanathan et al., 2023) to create new annotations. + +Part-only MMR Test Dataset The MMR dataset includes a substantial amount of information on parts to enhance part-level recognition, which has been overlooked in existing reasoning segmentation datasets. Consequently, we create a part-level test dataset to evaluate part-level recognition separately. Using the text and image prompts shown in Fig. 7, we generate a part-only test dataset from 2000 images with extensive part-level information from PACO-LVIS annotations. As shown in Fig. 8, the output includes a global caption and question-answer pairs for the image. The segmentation mask information for the parts mentioned in the answers is sourced from the PACO-LVIS test dataset to create new annotations. + +Object-only MMR Test Dataset To evaluate recognition separately for object-level, we create an MMR test dataset that includes only information on objects. We generate an object-only test dataset using the text and image prompts shown in Fig. 9, selecting 2000 images with minimal part-level information. As shown in Fig. 10, the output includes a global caption and question-answer pairs for + +"You are an AI visual assistant capable of analyzing a single image. You receive the specific object's part locations within the image, along with detailed coordinates. These coordinates are in the form of bounding boxes, represented as (x1, y1, x2, y2). These values correspond to the top left x, top left y, bottom right x, and bottom right y. 
The height and width of the image you receive are 428 and 640, respectively. Additionally, there may be multiple objects of the same category in the image. To resolve this ambiguity, we use "object_number" such as "person_1" and "person_2" to differentiate between objects of the same category. If a region is a part of an object, the category name is described as "object's part", like "person's body" and "bus's wheel". The category names and bounding box coordinates of parts are as follow: + +![](images/25f739dd3a4a947b306e9cb4eb83141e8bb97b0f0b8399766bf64388dd9abc7a.jpg) +Figure 7: The text and image prompt used in our data creation for the part-only MMR test dataset with GPT-4V. + +``` +``` +dog_1's eye [235, 67, 291, 100]; +dog_1's ear [324, 36, 426, 145]; +dog_1's nose [184, 98, 212, 127]; +dog_1's teeth [245, 146, 285, 171]; +dog_1's head [169, 20, 427, 202]; +dog_1's foot [337, 204, 510, 407]; +dog_1's leg [212, 95, 542, 356]; +dog_1's body [243, 20, 503, 328]; +bowl_1's rim [143, 298, 369, 378]; +bowl_1's inner_body [150, 302, 361, +bowl_1's bottom [194, 362, 308, 376 +bowl_1's body [153, 351, 354, 422]; + +You first need to create a global caption for the image without given information. A global caption should summarize the content of the image within a maximum of two sentences. The format for the global caption strictly follows: "Global caption: GLOBAL_CAPTION_FOR_the_IMAGE. What you need to do next is create question-answers-pairs using the information of object's parts given above. However, when the corresponding object's part name appear in the answers, "name [coordinates]" is used as the given information above without changing its form. The goal of generating the question-answers-pair is to use the provided information about object's parts, create a plausible and challenging question about the image, and provide the answer in detail for the image reasoning segmentation. 
The content of the question must address one of the following two: + +1) the relationship between different parts within the image. 2) the function or the general information about the parts. + +The question should be implicit and require commonsense reasoning, rather than explicitly mentioning the names of the object and part. In other words, it's important to make the question challenging by not directly including visual content details. The answer should include multiple object's parts. You must build at least 3 rounds of natural question-answer pairs, and if there is sufficient information create up to 5 rounds of question-answer pairs. In addition, please follow the format strictly: The order must be attached to the questions and answers like Question 1: and Answer 1.: In the answer, the coordinates referring to the target part must be attached to the part name in the format: object_1's part [x1, y1, x2, y2]. Do not use other format such as "a part of object_1. Here are some additional requirements about generated question and answers: + +1. Do not mention that the information source is provided in text description. Always answer as if you are directly looking at the image. +2. Do not ask the question you are not confident to answer. Only include question that have definite answer. +3. Do not mention the coordinates of a part and an object directly in the question. +4. Make the questions and answers concise and easy to understand, avoiding overly complex and ambiguous sentences. +5. The question should describe a complete activity, a function, or general information. +6. The answer to the generated question should include at least two object's parts and explicitly describe the names of the part. Implied other potential parts is strictly prohibited. +7. Even if the image includes the real people and the brand name, or is not associated with the mentioned information, make sure to still create the question-answer pairs. +8. 
Avoid using incorrectly formatted part names, such as located at [coordinates] or a part [object_1's part [coordinates]]. In other words, use it as it appears in the part information given above. ## For example: shoe_1's outsole [42, 332, 62, 336], not an outsole [shoe_1's outsole [42, 332, 62, 336]]. +9. All generated answers must include the given part information, without changing the format. +10. When creating questions, ask only questions about the object's parts given above without directly mentioning the part name in the question. Please keep in mind that other parts should not dominate the answer. +11. If the number of object's parts given for an image is large enough, create a question so that each round's answer includes different object's parts. +12. Do not create questions that are answered by parts other than the part information given above. +13. If that part doesn't directly answer the question, do not mention it in the answer. + +the image. The segmentation mask information for the objects mentioned in the answers is sourced from the PACO-LVIS test dataset to create new annotations. + +Figure 8: An example from the part-only MMR test dataset generated through the prompt in Fig. 7. This example includes information of some object's parts. The left and right pictures show the original image and part-level segmentation masks, respectively. +![](images/5ec30dc5b184ebbebb46389d3aa5a525ef1b4b56476504c52bca311256093ad4.jpg) +Caption: A dog is sitting next to a bowl, possibly after a drink of water or a meal. +Question1: What features of the animal suggest that it may have just had a drink? +Answer1: The dog 1's nose [184, 98, 212, 127] seems moist, and this often indicates the dog has just been drinking. In addition, the presence of a bowl 1's rim [143, 298, 369, 378] next to the dog suggests that it could have contained water or food that the dog might have consumed. 
+Question2: Considering the animal's current position, which part seems to be supporting its weight? +Answer2: The dog_1's leg [212, 95, 542, 356] and the dog_1's foot [337, 204, 510, 407] are likely bearing the most weight, considering the dog's sitting position adjacent to the bowl_1's body [153, 351, 354, 422]. +Question3: If the animal were to listen for approaching sounds, which physical feature would be most engaged? +Answer3: The dog_1's ear [324, 36, 426, 145] would be most engaged in detecting sounds as ears are responsible for auditory perception in dogs. + +# A.6 DATA FORMAT + +The MMR dataset is given in JSON format. The JSON file for each instance is organized as shown in Fig. 11. + +Table 6: The effect of multiple [SEG] Tokens and Early Local Feature Fusion in $\mathbf{M}^2\mathbf{SA}-7\mathbf{B}$ on MMR benchmark. Obj & Part, Obj, and Part denote multi-granularity, object-only, and part-only evaluation settings. + +
multiple [SEG] TokensEarly Local Feature Fusionvaltest
Obj & PartObjPartObj & Part
gIoUcIoUgIoUcIoUgIoUcIoUgIoUcIoU
××19.431.634.741.88.013.119.527.1
×26.047.739.555.411.725.228.445.2
27.948.541.055.613.527.031.046.8
+ 

# A.7 EFFECTIVENESS OF THE MULTIPLE [SEG] TOKENS AND EARLY LOCAL FEATURE FUSION 

We conduct an ablation study to verify the effectiveness of the multiple [SEG] tokens and Early Local Feature Fusion proposed in $\mathbf{M}^2\mathbf{SA}$ . Tab. 6 demonstrates that merely adding multiple [SEG] tokens results in significant performance improvements in MMR evaluation benchmarks. This finding suggests that using single [SEG] tokens in the LISA is inadequate to fully capture the segmentation capability. Moreover, performance improvements are evident when Early Local Feature Fusion is incorporated. Notably, there is a substantial performance enhancement in the part-only evaluation setting of the MMR test set. This improvement likely arises because Early Layer features contain local detail information (e.g., edges or boundaries), which aids in part and fine-level segmentation. 

"You are an AI visual assistant capable of analyzing a single image. You receive the specific object locations within the image, along with detailed coordinates. These coordinates are in the form of bounding boxes, represented as (x1, y1, x2, y2). These values correspond to the top left x, top left y, bottom right x, and bottom right y. The height and width of the image you receive are 333 and 500, respectively. Additionally, there may be multiple objects of the same category in the image. To resolve this ambiguity, we use "object_number" such as "person_1" and "person_2" to differentiate between objects of the same category. The category names and bounding box coordinates of objects are as follow: 

![](images/7cb98bd825e0ff0d51981b9af1c68c0d0b8b01824220fdb5bb54d96a5b1df5e3.jpg) 
Figure 9: The text and image prompt used in our data creation for the object-only MMR test dataset with GPT-4V. 
+ +mirror_1 [304.9, 35.42, 476.09, 146.49]; +pillow_1 [169.86, 180.73, 221.21, 229.7]; +pillow_2 [370.81, 175.9, 436.81, 231.85]; + +You first need to create a global caption for the image without given information. A global caption should summarize the content of the image within a maximum of two sentences. The format for the global caption strictly follows: "Global caption: GLOBAL_CAPTION_FOR_the_IMAGE. What you need to do next is create question-answers-pairs using the information of objects given above. However, when the corresponding object name appear in the answers, "name [coordinates]" is used as the given information above without changing its form. The goal of generating the question-answers-pair is to use the provided information about objects, create a plausible and challenging question about the image, and provide the answer in detail for the image reasoning segmentation. The content of the question must address one of the following two: + +1) the relationship between objects within the image. +2) the function or the general information about the objects. + +The question should be implicit and require commonsense reasoning, rather than explicitly mentioning the names of the object. In other words, it's important to make the question challenging by not directly including visual content details. Some of the answers of the rounds should include multiple different types of objects. You must build at least 3 rounds of natural question-answer pairs, and if there is sufficient information create up to 5 rounds of question-answer pairs. In addition, please follow the format strictly: The order must be attached to the questions and answers like Question 1: and Answer 1.: In the answer, the coordinates referring to the target object must be attached to the object name in the format: object_1[x1, y1, x2, y2]. Here are some additional requirements about generated question and answers: + +1. 
Do not mention that the information source is provided in text description. Always answer as if you are directly looking at the image. +2. Do not ask the question you are not confident to answer. Only include question that have definite answer. +3. Do not mention the coordinates of an object directly in the question. +4. Make the questions and answers concise and easy to understand, avoiding overly complex and ambiguous sentences. +5. The question should describe a complete activity, a function, or general information. +6. The answer to the generated question should include at least two objects and explicitly describe the names of the object. Implied other potential objects is strictly prohibited. +7. Even if the image includes the real people and the brand name, or is not associated with the mentioned information, make sure to still create the question-answer pairs. +8. Avoid using incorrectly formatted object names, such as located at [coordinates] or an object_1 [object_1 [coordinates]]. In other words, use it as it appears in the object information given above. +9. All generated answers must include the given object information, without changing the format. +10. When creating questions, ask only questions about the objects given above without directly mentioning the object name in the question. Please keep in mind that other objects should not dominate the answer. +11. If the number of objects given for an image is large enough, create a question so that each round's answer includes different objects." + +![](images/9fe2a7877d3f23e47deec1f7c697a3c4f4090e80d25a2f9730e1cac4cb517f93.jpg) +Figure 10: An example from the object-only MMR test dataset generated through the prompt in Fig. 9. This example includes information of objects. The left and right pictures show the original image and object-level segmentation masks, respectively. + +Caption: A cozy living room interior with a large mirror on the wall and decorative pillows on furniture. 
+ 

Question1: Where might someone rest their head while sitting on the furniture to gain extra comfort? 

Answer1: They could rest their head on pillow_1 [169.86, 180.73, 221.21, 229.7] or pillow_2 [370.81, 175.9, 436.81, 231.85] to gain extra comfort while sitting on the furniture. 

Question2: In what part of the room could someone check their appearance before leaving the house? 

Answer2: Someone could check their appearance in the mirror_1 [304.9, 35.42, 476.09, 146.49], which is located on the wall. 

Question3: If a person were to rearrange the decorative cushions, which items would they be handling? 

Answer3: If a person were to rearrange the decorative cushions, they would be handling pillow_1 [169.86, 180.73, 221.21, 229.7] and pillow_2 [370.81, 175.9, 436.81, 231.85]. 

![](images/d77ec1a7075ac8a0007df97381c66350ab54740ba78c5a9a039a6d9e2c909c36.jpg) 
Figure 11: MMR dataset format 

Table 7: Comparison of computational complexity on LISA, GSVA, and GLaMM, and ${\mathrm{M}}^{2}\mathrm{{SA}}$ . 
MethodsGPU Memory Usage (GB)TFLOPs
LISA-7B (Lai et al., 2023)30.5832.59
GSVA-7B (Xia et al., 2024)30.39203.77
GLaMM (Rasheed et al., 2024)17.14349.28
M2SA-7B30.6032.62
LISA-Llama2-13B (Lai et al., 2023)55.2056.64
M2SA-Llama2-13B55.2356.67
+ +# A.8 TRAINING TIME + +The training takes approximately 40 hours for the $\mathbf{M}^2\mathrm{SA - 7B}$ and about 52 hours for the $\mathbf{M}^2\mathrm{SA - }$ Llama2-13B, respectively. + +# A.9 COMPUTATIONAL COMPLEXITY + +We aim to compare the computational complexity of the proposed $\mathbf{M}^2\mathrm{SA}$ with LISA, GSVA, and GLaMM. For this comparison, we measure GPU memory usage and TFLOPs. As shown in Tab. 7, while the addition of Early Local Feature Fusion and multiple [SEG] tokens leads to a slight increase in GPU memory usage and TFLOPs, $\mathbf{M}^2\mathrm{SA}$ demonstrates a significant improvement in handling multiple targets and fine-grained part-level segmentation compared to LISA. However, despite these performance improvements, there is still room for enhancement from the perspective of computational efficiency. Since $\mathbf{M}^2\mathrm{SA}$ is built upon both MLLM and SAM, it requires substantial memory resources. Future research could focus on optimizing the efficiency of the mask decoder, which predicts the final mask by integrating vision and language information. + +Table 8: Multi-object referring segmentation results on GTAV and Cityscapes validation sets. We adopt mIoU metric for comparison. We evaluate the zero-shot performance of LISA, GSVA, GLaMM, and $\mathbf{M}^2\mathbf{SA}$ . The best results are highlighted in bold. + +
MethodsGTAV-valCityscapes-val
LISA-7B (Lai et al., 2023)3.76.1
GSVA-7B (Xia et al., 2024)15.714.6
GLaMM (Rasheed et al., 2024)12.612.6
M2SA-7B35.141.3
LISA-Llama2-13B (Lai et al., 2023)2.43.4
M2SA-Llama2-13B38.244.0
+ +# A.10 GENERALIZATION ON UNSEEN DATA + +To assess $\mathbf{M}^2\mathrm{SA}$ 's generalization to unseen data, we conduct additional experiments. Although OVPARTS (Wei et al., 2024) was recently proposed for open-vocabulary part-level segmentation using Pascal-Part (Chen et al., 2014) and ADE20K (Zhou et al., 2019), both datasets were used during training. Therefore, we evaluate the model's generalization performance using semantic segmentation datasets from driving scenes, specifically Cityscapes (Cordts et al., 2016) and GTAV (Richter et al., 2016), which were not used during training and pose a more challenging test environment. Since these datasets lack part-level mask annotations, we focus on evaluating multi-target object cases. Furthermore, we curate custom text prompts using predefined category names as they do not provide corresponding text queries. For each query, we randomly select 4 to 6 object categories from an image and create prompts such as "Can you segment the class 1, class 2, ..., and class n?" The model generates masks for the specified objects, and we compute the mIoU score to compare its performance with LISA. As shown in Tab. 8, $\mathbf{M}^2\mathrm{SA}$ performs robustly even on datasets from entirely different domains. Notably, while the existing methods struggle with multi-target cases, $\mathbf{M}^2\mathrm{SA}$ handles them effectively. This demonstrates that the use of multiple [SEG] tokens, combined + +Table 9: Comparison between LISA-7B (Lai et al., 2023) trained on MMR dataset and LISA-7B trained on ReasonSeg (Lai et al., 2023). We measure the performance on ReasonSeg validation set + +
MethodsgIoUcIoU
LISA-7B w/ ReasonSeg44.446.0
LISA-7B w/ MMR49.955.6
+ +with early local feature fusion, enables $\mathbf{M}^2\mathbf{SA}$ to generalize well to unseen domains by improving its ability to manage multi-target cases and fine-grained segmentation tasks. + +# A.11 MMR AND REASONSEG + +To validate the comprehensiveness and effectiveness of the MMR dataset, we conduct a comparative evaluation with ReasonSeg using the LISA-7B model. Specifically, we train the model in two configurations: one using ReasonSeg and the other using MMR instead of ReasonSeg. As shown in Tab. 9, the model trained on MMR shows superior performance on the ReasonSeg validation set than the model trained on ReasonSeg. This improvement highlights the comprehensiveness of the MMR dataset. By incorporating multi-target and part-level annotations alongside object-level data, MMR provides a more robust knowledge for addressing complex reasoning segmentation tasks. + +Table 10: Performance of M2SA on frequently appearing and infrequently appearing object categories. From the total of 75 categories, question-answer pairs containing the top 10 most frequent (upper) and bottom 10 least frequent (lower) categories are extracted to construct the upper and lower subsets, respectively. + +
MethodsMMR test
Obj-only (total)Obj-only (upper)Obj-only (lower)
gIoUcIoUgIoUcIoUgIoUcIoU
M2SA-7B41.055.641.054.839.439.7
+ +Table 11: Performance of M2SA on frequently appearing and infrequently appearing part categories. From the total of 445 categories, question-answer pairs containing the top 10 most frequent (upper) and bottom 10 least frequent (lower) categories are extracted to construct the upper and lower subsets, respectively. + +
MethodsMMR test
Part-only (total)Part-only (upper)Part-only (lower)
gIoUcIoUgIoUcIoUgIoUcIoU
M2SA-7B13.527.012.824.813.328.1
+ +# A.12 ANALYSIS OF THE LONG-TAIL PHENOMENON IN M2SA + +To investigate whether $\mathbf{M}^2\mathrm{SA}$ trained on the MMR dataset exhibits a long-tail phenomenon, we evaluate its performance on frequently and infrequently occurring object and part categories. To this end, we construct subsets of the MMR test set by isolating question-answer pairs based on category frequency. Specifically, we extract the top 10 most frequent (upper) and bottom 10 least frequent (lower) categories for both object-only and part-only test sets. This results in four subsets: object-only (upper: 10/75), object-only (lower: 10/75), part-only (upper: 10/445), and part-only (lower: 10/445). The MMR dataset includes a total of 75 object categories and 445 part categories, respectively. The performance comparison is shown in Tab. 10 and Tab. 11. + +For the object-only dataset, $\mathbf{M}^2\mathbf{SA}$ 's performance on frequently occurring (upper) object categories closely aligns with its overall performance across all object categories (gIoU: 41.0, cIoU: 54.8 vs. gIoU: 41.0, cIoU: 55.6). However, for infrequent object categories (lower), the performance declines, with cIoU dropping from 55.6 to 39.7 and gIoU from 41.0 to 39.4. In contrast, for the + +part-only dataset, $\mathbf{M}^2\mathrm{SA}$ demonstrates consistent performance across both frequent and infrequent categories. The gIoU scores are 12.8 (upper), 13.3 (lower), and 13.5 (overall), while the cIoU scores are 24.8 (upper), 28.1 (lower), and 27.0 (overall). These findings suggest that $\mathbf{M}^2\mathrm{SA}$ is less sensitive to the long-tail distribution in part categories than in object categories. + +This analysis highlights the strengths and limitations of $\mathbf{M}^2\mathrm{SA}$ when addressing long-tail distributions. 
While $\mathbf{M}^2\mathrm{SA}$ demonstrates robust performance across frequent and infrequent part categories, its reduced performance on infrequent object categories indicates potential areas for improvement. Future work could explore strategies to mitigate the impact of long-tail distributions in object categories while preserving its strengths in part-level reasoning tasks. + +# A.13 QUALITATIVE RESULTS + +Qualitative results of $\mathbf{M}^2\mathbf{SA}$ on the MMR benchmark are visualized in Fig. 12, Fig. 13, and Fig. 14. + +![](images/780bb26b58d04d0ce06307e4a065a037cc9ae0f76ca4642cc1999d365355bba6.jpg) +Figure 12: Qualitative result of $\mathbf{M}^2\mathrm{SA}$ on MMR test set. + +![](images/7b400df9ac2094828eb2b7cbbbccba0f1161001ec9e031962516e85228822e7f.jpg) + +Question: If someone wants to send an e-mail, which equipments on the desk would they be utilizing? + +![](images/f87b1455be084fe49fafc094a3ab25add745daccf0b46690442fe1cde225d8f2.jpg) + +Answer: They would be utilizing the laptop_computer for typing and viewing the screen, and the mouse for navigating and interacting with the computer. + +![](images/51f912496d4858a857465483c94ca2327c9526cb16cccd5e558cb4d25ef277b2.jpg) +Image + +![](images/73ce8ea7b46495d2a5ef432c1dd123d83b993d3b8e847c67c8b4cf55e1719b32.jpg) +Figure 13: Qualitative result of $\mathbf{M}^2\mathbf{SA}$ on MMR test set. + +Question: Where could someone sit while waiting for transportation, and which part provides support for their back? + +![](images/d836da9c1793237c660f8e73f4567737cbd798e73a218a7c0fe2eef6f9998711.jpg) + +Answer: The bench's seat provides a place to sit, and the bench's back offers support for the back + +![](images/9bd09c601ae16a1d155426840c2c9f0a133d5256b0e47e2df2f60bdb65dae879.jpg) +Image + +![](images/58908b471a9e25e9ab603d4d78941437acd61ebd7534794de643b0305ce333e3.jpg) +Figure 14: Qualitative result of $\mathbf{M}^2\mathrm{SA}$ on MMR test set. 
+ +Question: If I need to check how much time is left before my meal is ready, which part of this appliance should I look at? + +![](images/1b1e851165818a2027e98d3a46c169232b0f133a88469a1ed9ccbad48b6bfcef.jpg) + +Answer: You should look at the microwave_oven's time_display to check the remaining time. + +# A.14 ADDITIONAL EXAMPLES OF MMR + +To facilitate a quick and intuitive understanding of the MMR dataset's characteristics, we present additional examples in Figure 15. + +![](images/e2251577118d3f5db074751bf55d95dcfbb091ce7441c2b75eb649453c5695a9.jpg) +Global Caption: A laptop is opened and set on a table next to a computer mouse, suggesting a typical workspace setup. +Question1: If one were to begin typing a document, which two areas of this device would they interact with first? +Answer: They would primarily interact with the laptop_computer_1's keyboard [195, 276, 418, 325] to type and laptop_computer_1's touchpad [113, 290, 231, 312] to navigate within the document. +Question2: Where can one find the manufacturer's branding on the devices pictured here? +Answer2: The manufacturer's branding can be found on the laptop_computer_1's logo [354, 281, 370, 288] and on the mouse_(computer_equipment)_1's logo [314, 403, 345, 416]. + +Figure 15: Additional Examples of MMR dataset. +![](images/158ac0b1214dc77ea85da060158af18dc79beb3e86b9df0e817e0f798e34d4dd.jpg) +Question3: To move the cursor on the screen without touching the laptop, which part of the computer equipment would one use? +Answer3: One would use the mouse (.computer_equipment) 1's body [260, 379, 516, 477] along with either the mouse (.computer_equipment) 1's left button [413, 380, 480, 401] or mouse (.computer_equipment) 1's right button [451, 393, 519, 429] to click and interact with the cursor on the screen. +Question4: After finishing work and deciding to pack up, which two parts of the laptop would come into contact? 
+Answer4: When closing the laptop, laptop_computer_1's screen [295, 34, 510, 305] would come into contact with laptop_computer_1's base-panel [77, 271, 479, 352]. +Global Caption: A plate with a slice of quiche and a side of home fries is ready to be eaten, with a knife resting on the side. +Question1: During a meal, what would you typically use to cut a portion of food and how is it structured for ease of use? +Answer 1: You would typically use a knife_1 [10.27, 86.49, 258.23, 115.61] to cut a portion of food. It is structured with a knife_1's blade [10, 92, 150, 115] for slicing through food and a knife_1's handle [150, 87, 254, 109] to provide a comfortable grip for handling. +Question2: If I wanted to contain a main dish and sides separately on a table, what items could effectively serve this purpose? + +![](images/12341a737e9db676387e93334d996b5f288beff9471119476b53f4bc40c8980c.jpg) +Answer2: To contain a main dish and sides separately, you could use plate_1[33.38, 74.25, 640.0, 480.0], which has a plate_1's inner_wall [33, 75, 639, 479] to hold the food and prevent it from spilling, and a separate plate_2[0.0, 23.86, 145.25, 200.7] with plate_2's inner_wall [0, 28, 141, 190] and plate_2's rim [0, 24, 145, 201] to hold another portion, like sides or appetizers. +Question3: What part of the tableware should one be cautious of to avoid spills while serving food? +Answer: 3: One should be cautious of the plate_1's inner_wall [33, 75, 639, 479] of a plate_1 [33, 38, 74, 25, 640, 480,0] and the plate_2's inner_wall [0, 28, 141, 190] of a plate_2 [0.0, 23.86, 145, 25, 200.7] to avoid spillps, as these parts help to contain the food within the boundaries of the plates. +Global Caption: A dog wearing a hat is resting on a pillow. +Question1: Where would this animal most likely register scents and how would it express alertness or curiosity? 
+Answer: This animal would most likely register scents using its dog_1's nose [175, 206, 221, 246], and express alertness or curiosity by adjusting the position of its dog_1's ear [329, 101, 398, 212] and dog_1's head [175, 92, 397, 280]. +Question2: Can you describe the area that supports the dog while it's lying down? +Answer 2: The area that supports the dog while it's lying down is [218, 202, 514, 374], particularly emphasized where the dog_1's leg [174, 326, 520, 397] and dog_1's foot [146, 373, 331, 426] make contact with the pillow_1 [5.32, 268.85, 632.27, 427.0]. +Question3: When this canine looks around its environment, what are the primary features involved in its vision? +Answer3: The primary features involved in this canine's vision when looking around its environment are the dog 1's eye [201, 145, 294, 177] and the movement of the dog 1's head [175, 92, 397, 280]. +Question4: In the case of this dog getting up from its resting position, which parts would engage initially to lift its body? +Answer: To get up from its resting position, the dog would initially engage its dog_1's leg [174, 326, 520, 397] and dog_1's foot [146, 373, 331, 426] to lift its dog_1's body [218, 202, 514, 374]. 
\ No newline at end of file diff --git a/data/2025/2503_13xxx/2503.13881/images/00c67b8bab549d0d741c5a3bfe54aac53625989b1e503526d215c1f6bddaf286.jpg b/data/2025/2503_13xxx/2503.13881/images/00c67b8bab549d0d741c5a3bfe54aac53625989b1e503526d215c1f6bddaf286.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1209850891924ffe0ba2fcb230a8fab0542fd59d --- /dev/null +++ b/data/2025/2503_13xxx/2503.13881/images/00c67b8bab549d0d741c5a3bfe54aac53625989b1e503526d215c1f6bddaf286.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3b385d1486b2e6989b60bb1f72f5fa77eb93e2e3134c0d23ecb331965a439db8 +size 51743 diff --git a/data/2025/2503_13xxx/2503.13881/images/07decf95af415a3da631f490c7ade3a9745b2bb0391c6ea66e340caaa63f3043.jpg b/data/2025/2503_13xxx/2503.13881/images/07decf95af415a3da631f490c7ade3a9745b2bb0391c6ea66e340caaa63f3043.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bd17c2c2f8e02ed7c9a2038b19631d183a13848c --- /dev/null +++ b/data/2025/2503_13xxx/2503.13881/images/07decf95af415a3da631f490c7ade3a9745b2bb0391c6ea66e340caaa63f3043.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4987873ecba8b39b683fff6a5b49e44e58ec1015e1400346717ee17d41a5fa99 +size 12988 diff --git a/data/2025/2503_13xxx/2503.13881/images/0889fdd67ae440737f6ceb95d6e3c377a2dd148baa0b5268b372c0db34ac76ac.jpg b/data/2025/2503_13xxx/2503.13881/images/0889fdd67ae440737f6ceb95d6e3c377a2dd148baa0b5268b372c0db34ac76ac.jpg new file mode 100644 index 0000000000000000000000000000000000000000..11664018e245318a9865d85b24b0fe9296ff635b --- /dev/null +++ b/data/2025/2503_13xxx/2503.13881/images/0889fdd67ae440737f6ceb95d6e3c377a2dd148baa0b5268b372c0db34ac76ac.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cc13e0a075ab7a8161298b00042287840ac1d01b73f9eac2a12c513adaad160d +size 51242 diff --git 
a/data/2025/2503_13xxx/2503.13881/images/12341a737e9db676387e93334d996b5f288beff9471119476b53f4bc40c8980c.jpg b/data/2025/2503_13xxx/2503.13881/images/12341a737e9db676387e93334d996b5f288beff9471119476b53f4bc40c8980c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..57f76d802872045b01a5d82116e573905b3d3eb7 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13881/images/12341a737e9db676387e93334d996b5f288beff9471119476b53f4bc40c8980c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0242ca12e87bda502e248ee758d22f8e7a4993714d7b42ec498455dabbf6e0f9 +size 25511 diff --git a/data/2025/2503_13xxx/2503.13881/images/158ac0b1214dc77ea85da060158af18dc79beb3e86b9df0e817e0f798e34d4dd.jpg b/data/2025/2503_13xxx/2503.13881/images/158ac0b1214dc77ea85da060158af18dc79beb3e86b9df0e817e0f798e34d4dd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..412e8ee5c4df16ec766d7ff62ae3b03c7b914980 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13881/images/158ac0b1214dc77ea85da060158af18dc79beb3e86b9df0e817e0f798e34d4dd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d435ec9bb2d991b705ee344deb377f7113ea9f7980ef4be656905beed3259a94 +size 36016 diff --git a/data/2025/2503_13xxx/2503.13881/images/1b1e851165818a2027e98d3a46c169232b0f133a88469a1ed9ccbad48b6bfcef.jpg b/data/2025/2503_13xxx/2503.13881/images/1b1e851165818a2027e98d3a46c169232b0f133a88469a1ed9ccbad48b6bfcef.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7ba4a45571c7454ba7616dd8bdf5f0d010f1780d --- /dev/null +++ b/data/2025/2503_13xxx/2503.13881/images/1b1e851165818a2027e98d3a46c169232b0f133a88469a1ed9ccbad48b6bfcef.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b368e88206fab1a6b9857bd08edc0163358bef04e89cd72d2b56e20b01ec7f3 +size 2525 diff --git a/data/2025/2503_13xxx/2503.13881/images/1bc0cfe22f88aa17d1c7853a7780b317f2a15b12f7e2ba9be4caab3220821938.jpg 
b/data/2025/2503_13xxx/2503.13881/images/1bc0cfe22f88aa17d1c7853a7780b317f2a15b12f7e2ba9be4caab3220821938.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8d3d02c27b17a59d04cda2ca0dda1d596cd23c73 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13881/images/1bc0cfe22f88aa17d1c7853a7780b317f2a15b12f7e2ba9be4caab3220821938.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a2a4ce8b827d22b00a6305cdd835c2bc7c1c1b6bd58a6917de69c06e130b7b25 +size 35890 diff --git a/data/2025/2503_13xxx/2503.13881/images/1c6932d80c07652a78d886872a4dd1afc5ca8809e1a16475f4ada3ac643a9465.jpg b/data/2025/2503_13xxx/2503.13881/images/1c6932d80c07652a78d886872a4dd1afc5ca8809e1a16475f4ada3ac643a9465.jpg new file mode 100644 index 0000000000000000000000000000000000000000..08841c7279e13a090d1614e19d0d2f297209c16f --- /dev/null +++ b/data/2025/2503_13xxx/2503.13881/images/1c6932d80c07652a78d886872a4dd1afc5ca8809e1a16475f4ada3ac643a9465.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:538c14ebdb157b55a3120a031a966bcd14912e8ef1fb4fa32a4309281e4743c2 +size 23534 diff --git a/data/2025/2503_13xxx/2503.13881/images/24150db4c2490cc27e374eb463bb6d334ecf8dcc379e7e6b81cb2cbbca7a6563.jpg b/data/2025/2503_13xxx/2503.13881/images/24150db4c2490cc27e374eb463bb6d334ecf8dcc379e7e6b81cb2cbbca7a6563.jpg new file mode 100644 index 0000000000000000000000000000000000000000..54190dbe74a0f6498c6616a08a6b7478a88e11b5 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13881/images/24150db4c2490cc27e374eb463bb6d334ecf8dcc379e7e6b81cb2cbbca7a6563.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b84b911e8ba8352b74980a2883fef42cb3d016d0dd25064afd5527723008719 +size 26244 diff --git a/data/2025/2503_13xxx/2503.13881/images/25f739dd3a4a947b306e9cb4eb83141e8bb97b0f0b8399766bf64388dd9abc7a.jpg b/data/2025/2503_13xxx/2503.13881/images/25f739dd3a4a947b306e9cb4eb83141e8bb97b0f0b8399766bf64388dd9abc7a.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..6dd950569920afc4be11c32474536c056ea700f8 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13881/images/25f739dd3a4a947b306e9cb4eb83141e8bb97b0f0b8399766bf64388dd9abc7a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:279ae265bb6010bf21fd334dbb0da8006c21ad98e65a33a331c072a4e94068ab +size 24522 diff --git a/data/2025/2503_13xxx/2503.13881/images/27ebdc79105d81dce6346da94f8ea4a8348668259ea47a09d9ab995347bce18b.jpg b/data/2025/2503_13xxx/2503.13881/images/27ebdc79105d81dce6346da94f8ea4a8348668259ea47a09d9ab995347bce18b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..855c4112d2a06bbcc216ae41efe9e3033b37a3dc --- /dev/null +++ b/data/2025/2503_13xxx/2503.13881/images/27ebdc79105d81dce6346da94f8ea4a8348668259ea47a09d9ab995347bce18b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c836adc8eea7bc6002f1be9ab5e10f85f2806140d3021be644d960d9e5d420bf +size 8028 diff --git a/data/2025/2503_13xxx/2503.13881/images/2891d190457a4dd79a09496ba11c532cfcfbb64d41c1b972f5a26fc652a11bf0.jpg b/data/2025/2503_13xxx/2503.13881/images/2891d190457a4dd79a09496ba11c532cfcfbb64d41c1b972f5a26fc652a11bf0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..53ea1b77eeea6fd2c4ec60428ad91a570287eb13 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13881/images/2891d190457a4dd79a09496ba11c532cfcfbb64d41c1b972f5a26fc652a11bf0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:909335e27b513bea696260b4058c22bb2c6a42cad21d391fadbee1e0ca00c02b +size 5092 diff --git a/data/2025/2503_13xxx/2503.13881/images/28ee61eabb716ada21fdfdfa49acf467f7748bdb90d86f1e4ef1c603b2d049a3.jpg b/data/2025/2503_13xxx/2503.13881/images/28ee61eabb716ada21fdfdfa49acf467f7748bdb90d86f1e4ef1c603b2d049a3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4029b97747dc11db725c2c457c910e794654f0da --- /dev/null +++ 
b/data/2025/2503_13xxx/2503.13881/images/28ee61eabb716ada21fdfdfa49acf467f7748bdb90d86f1e4ef1c603b2d049a3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a9eecff96ab216b675457bf1c5892e2772f17a5c55dfcf2bc77e7d9cc9910ce3 +size 19736 diff --git a/data/2025/2503_13xxx/2503.13881/images/29ddc08ffe7a3ed20803202d06a14a5728b0071d5ae94c19106e4e5dc3234b80.jpg b/data/2025/2503_13xxx/2503.13881/images/29ddc08ffe7a3ed20803202d06a14a5728b0071d5ae94c19106e4e5dc3234b80.jpg new file mode 100644 index 0000000000000000000000000000000000000000..826bd68b08264ad812db83ab266fd2bc4e91e9ee --- /dev/null +++ b/data/2025/2503_13xxx/2503.13881/images/29ddc08ffe7a3ed20803202d06a14a5728b0071d5ae94c19106e4e5dc3234b80.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:932107b559915de2a6d1d62e8b7487d40c9fc876dd280c755867fc1bee8a9a85 +size 31270 diff --git a/data/2025/2503_13xxx/2503.13881/images/2e464580e9e1dd1464f9ef076f2348b3b1cdbe0b94c2b7f536ca87733c66910c.jpg b/data/2025/2503_13xxx/2503.13881/images/2e464580e9e1dd1464f9ef076f2348b3b1cdbe0b94c2b7f536ca87733c66910c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..797c114fcea8a47d80b355bbb9667e4090c356ca --- /dev/null +++ b/data/2025/2503_13xxx/2503.13881/images/2e464580e9e1dd1464f9ef076f2348b3b1cdbe0b94c2b7f536ca87733c66910c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b5fbd4713cb0096a50cf9de39a8de3722a165dc0cc205e3adf9019808b51e41f +size 37226 diff --git a/data/2025/2503_13xxx/2503.13881/images/3216553d07de328f5daf032f72cf7a31f3d843795076dd6ec93c97a4adca5f81.jpg b/data/2025/2503_13xxx/2503.13881/images/3216553d07de328f5daf032f72cf7a31f3d843795076dd6ec93c97a4adca5f81.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b6f375f013666771724b67100e2c288e624dbe94 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13881/images/3216553d07de328f5daf032f72cf7a31f3d843795076dd6ec93c97a4adca5f81.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:28731aa5fb25d6e818fa45732fb0bad7fd7cee4dc8df1fe658e0048ce736c04b +size 24893 diff --git a/data/2025/2503_13xxx/2503.13881/images/3a43e4506a5918098ff444f068e5cb49842f6b8b3de45cc8ae4635bc58eb33b9.jpg b/data/2025/2503_13xxx/2503.13881/images/3a43e4506a5918098ff444f068e5cb49842f6b8b3de45cc8ae4635bc58eb33b9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1a5e2850d934d1a13dc498b97a145f6d2ad55636 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13881/images/3a43e4506a5918098ff444f068e5cb49842f6b8b3de45cc8ae4635bc58eb33b9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ddc3950b4d00a4772989cacbe25fdeb60cefae412d494afdef1ac110b9bd2432 +size 18636 diff --git a/data/2025/2503_13xxx/2503.13881/images/3d73614a9c442d59351a50ed39e49728c94da6cb8d0b2dfbb31559ef802203d8.jpg b/data/2025/2503_13xxx/2503.13881/images/3d73614a9c442d59351a50ed39e49728c94da6cb8d0b2dfbb31559ef802203d8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3a4f1cac2a348601a49be6da5443fba446a2b9e6 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13881/images/3d73614a9c442d59351a50ed39e49728c94da6cb8d0b2dfbb31559ef802203d8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c14bd10768ebe66ec5178aeeba93a53545d0e369d27fbcb961bf82f6cb67ec8c +size 40340 diff --git a/data/2025/2503_13xxx/2503.13881/images/49d95cca8a99e7183736adb72e5992742a6708b40215ddab704ed4836b67c487.jpg b/data/2025/2503_13xxx/2503.13881/images/49d95cca8a99e7183736adb72e5992742a6708b40215ddab704ed4836b67c487.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0f94a08b407c2b9299e2fc0493eac31450bb3990 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13881/images/49d95cca8a99e7183736adb72e5992742a6708b40215ddab704ed4836b67c487.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:21b4519c5939bb64d789cd78fc870f4ee9aec0eaa9d8f3dd26283ef86f2f2639 +size 11010 diff --git 
a/data/2025/2503_13xxx/2503.13881/images/51f912496d4858a857465483c94ca2327c9526cb16cccd5e558cb4d25ef277b2.jpg b/data/2025/2503_13xxx/2503.13881/images/51f912496d4858a857465483c94ca2327c9526cb16cccd5e558cb4d25ef277b2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2e91fa033b4756c34ce8660ec6a5656ecee83e9e --- /dev/null +++ b/data/2025/2503_13xxx/2503.13881/images/51f912496d4858a857465483c94ca2327c9526cb16cccd5e558cb4d25ef277b2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3a095bbcdfbe1d5917a8ecfac4fbd71a17da8a265b77b27712f0651c5513a5ac +size 65484 diff --git a/data/2025/2503_13xxx/2503.13881/images/58908b471a9e25e9ab603d4d78941437acd61ebd7534794de643b0305ce333e3.jpg b/data/2025/2503_13xxx/2503.13881/images/58908b471a9e25e9ab603d4d78941437acd61ebd7534794de643b0305ce333e3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3cbbe927af9287f4d763f64b34732252cfa3d473 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13881/images/58908b471a9e25e9ab603d4d78941437acd61ebd7534794de643b0305ce333e3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:da934c27198232bda47d5f10f0dfd00a1015f6d27e44110fda78e2ed97f4abc4 +size 1625 diff --git a/data/2025/2503_13xxx/2503.13881/images/5ec30dc5b184ebbebb46389d3aa5a525ef1b4b56476504c52bca311256093ad4.jpg b/data/2025/2503_13xxx/2503.13881/images/5ec30dc5b184ebbebb46389d3aa5a525ef1b4b56476504c52bca311256093ad4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a660f76c081f377ed20f833131beffd5119c15cc --- /dev/null +++ b/data/2025/2503_13xxx/2503.13881/images/5ec30dc5b184ebbebb46389d3aa5a525ef1b4b56476504c52bca311256093ad4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d3b75fa3426888422271300603b322271892b7e78a64e41a5a12604aad4945c6 +size 73124 diff --git a/data/2025/2503_13xxx/2503.13881/images/6af68df24ea5fe2bcd2e8a1c6b52f8c4a4dbe4f613d06f3921cae1f077f251c6.jpg 
b/data/2025/2503_13xxx/2503.13881/images/6af68df24ea5fe2bcd2e8a1c6b52f8c4a4dbe4f613d06f3921cae1f077f251c6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..74b003a8abe0e0e8a75991304f2faa72d1d24f9f --- /dev/null +++ b/data/2025/2503_13xxx/2503.13881/images/6af68df24ea5fe2bcd2e8a1c6b52f8c4a4dbe4f613d06f3921cae1f077f251c6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1d4431dc186fd855d857040033ec26cdc7aab445e6c1e963dafe4628c0f7a879 +size 8841 diff --git a/data/2025/2503_13xxx/2503.13881/images/727c972b90b1ecc04fd743aed83f08e577ab1f60f02b3b714179cc2aabd64bfc.jpg b/data/2025/2503_13xxx/2503.13881/images/727c972b90b1ecc04fd743aed83f08e577ab1f60f02b3b714179cc2aabd64bfc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d3c95fe94e497e77cc42efc809ef93bb7ad4503b --- /dev/null +++ b/data/2025/2503_13xxx/2503.13881/images/727c972b90b1ecc04fd743aed83f08e577ab1f60f02b3b714179cc2aabd64bfc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dcc2b880f4a7158f45774239d91e0a3ee17350efece5ea9b6d568ae1a129385f +size 8174 diff --git a/data/2025/2503_13xxx/2503.13881/images/73ce8ea7b46495d2a5ef432c1dd123d83b993d3b8e847c67c8b4cf55e1719b32.jpg b/data/2025/2503_13xxx/2503.13881/images/73ce8ea7b46495d2a5ef432c1dd123d83b993d3b8e847c67c8b4cf55e1719b32.jpg new file mode 100644 index 0000000000000000000000000000000000000000..043947781db798fe2a722ccb8270be2b26877f5a --- /dev/null +++ b/data/2025/2503_13xxx/2503.13881/images/73ce8ea7b46495d2a5ef432c1dd123d83b993d3b8e847c67c8b4cf55e1719b32.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fe7e3264ae84438f51029617d81516a730e5ef8bfab0ee073b74ec524f54fb67 +size 1611 diff --git a/data/2025/2503_13xxx/2503.13881/images/75402cb6ca335d4d37dec42055f59df83bb9e5047c9952f863c51e27ad68564e.jpg b/data/2025/2503_13xxx/2503.13881/images/75402cb6ca335d4d37dec42055f59df83bb9e5047c9952f863c51e27ad68564e.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..0f19cb429bd01077962b4295f3f1cbbb8c1dd610 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13881/images/75402cb6ca335d4d37dec42055f59df83bb9e5047c9952f863c51e27ad68564e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a181aaac0c915bd1422db0af4fabeb4c9fb33e2907ad27f915bb4b7c476709fb +size 8021 diff --git a/data/2025/2503_13xxx/2503.13881/images/780bb26b58d04d0ce06307e4a065a037cc9ae0f76ca4642cc1999d365355bba6.jpg b/data/2025/2503_13xxx/2503.13881/images/780bb26b58d04d0ce06307e4a065a037cc9ae0f76ca4642cc1999d365355bba6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d355467d07a904aa8343efd97fece1701eecad9f --- /dev/null +++ b/data/2025/2503_13xxx/2503.13881/images/780bb26b58d04d0ce06307e4a065a037cc9ae0f76ca4642cc1999d365355bba6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cd225e17cb09c2665460d015e06e449f6b77c722ba1dd55c28c1c2c72739229a +size 41959 diff --git a/data/2025/2503_13xxx/2503.13881/images/78c36daf8e08ee87adcae76de957465e2efddd4d69ab427ee4a382fa5cb691f7.jpg b/data/2025/2503_13xxx/2503.13881/images/78c36daf8e08ee87adcae76de957465e2efddd4d69ab427ee4a382fa5cb691f7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f230ce59fd509f401ee163442a1fb99bfb904db8 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13881/images/78c36daf8e08ee87adcae76de957465e2efddd4d69ab427ee4a382fa5cb691f7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:558da6d7557c5863ecba5915cd54db2ce6763b8808169241b953224cbd7bfb87 +size 6014 diff --git a/data/2025/2503_13xxx/2503.13881/images/7b400df9ac2094828eb2b7cbbbccba0f1161001ec9e031962516e85228822e7f.jpg b/data/2025/2503_13xxx/2503.13881/images/7b400df9ac2094828eb2b7cbbbccba0f1161001ec9e031962516e85228822e7f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c1759d82c998df06f6f169a0a5bbf3359c266117 --- /dev/null +++ 
b/data/2025/2503_13xxx/2503.13881/images/7b400df9ac2094828eb2b7cbbbccba0f1161001ec9e031962516e85228822e7f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dff0366538aa31c5f6ca0a90e7dd9f485125509800b926e22f6ef4794806188e +size 1729 diff --git a/data/2025/2503_13xxx/2503.13881/images/7cb98bd825e0ff0d51981b9af1c68c0d0b8b01824220fdb5bb54d96a5b1df5e3.jpg b/data/2025/2503_13xxx/2503.13881/images/7cb98bd825e0ff0d51981b9af1c68c0d0b8b01824220fdb5bb54d96a5b1df5e3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..08dd28bcc7f8ee998b3ebad6d1a52ba8ce2a257c --- /dev/null +++ b/data/2025/2503_13xxx/2503.13881/images/7cb98bd825e0ff0d51981b9af1c68c0d0b8b01824220fdb5bb54d96a5b1df5e3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:72ecbdda640bc65f3dd32ea8d8d0f2024980ee2763ff42cb325b627fb29680b7 +size 15241 diff --git a/data/2025/2503_13xxx/2503.13881/images/955d59c28fc9ac1c5a98ff89a36a38fbe9c2e717d2f8675830a553efaa127b29.jpg b/data/2025/2503_13xxx/2503.13881/images/955d59c28fc9ac1c5a98ff89a36a38fbe9c2e717d2f8675830a553efaa127b29.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dfbac50f66d447fba5853fd6b6ecf1e2da5a92a9 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13881/images/955d59c28fc9ac1c5a98ff89a36a38fbe9c2e717d2f8675830a553efaa127b29.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fdac864b22c55a70f13f02110012abed43c637490407fd622e01c59e7634853e +size 58631 diff --git a/data/2025/2503_13xxx/2503.13881/images/9bd09c601ae16a1d155426840c2c9f0a133d5256b0e47e2df2f60bdb65dae879.jpg b/data/2025/2503_13xxx/2503.13881/images/9bd09c601ae16a1d155426840c2c9f0a133d5256b0e47e2df2f60bdb65dae879.jpg new file mode 100644 index 0000000000000000000000000000000000000000..532bb8fd5c04110eff7dd3d738ce4a0fcdc4cf46 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13881/images/9bd09c601ae16a1d155426840c2c9f0a133d5256b0e47e2df2f60bdb65dae879.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:1b4d498ab279db9ff33d3eba3b4de37987294331dc82358c4b5e866f4a230c1c +size 47466 diff --git a/data/2025/2503_13xxx/2503.13881/images/9fe2a7877d3f23e47deec1f7c697a3c4f4090e80d25a2f9730e1cac4cb517f93.jpg b/data/2025/2503_13xxx/2503.13881/images/9fe2a7877d3f23e47deec1f7c697a3c4f4090e80d25a2f9730e1cac4cb517f93.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2a074994abde28935ba9aff8061c16e7f12ab1b8 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13881/images/9fe2a7877d3f23e47deec1f7c697a3c4f4090e80d25a2f9730e1cac4cb517f93.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ec66057141c665ab2b1aa95981c3f61432422578ec0e7c9a2c2cb6981dbedd9 +size 56055 diff --git a/data/2025/2503_13xxx/2503.13881/images/cae0895c9435db0c29a00c4b9a3c4973988233cf1548b60d72d8d6cede55a31a.jpg b/data/2025/2503_13xxx/2503.13881/images/cae0895c9435db0c29a00c4b9a3c4973988233cf1548b60d72d8d6cede55a31a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1e95c31b5ea1fc55590e6d2a71793cd2a8cca5ef --- /dev/null +++ b/data/2025/2503_13xxx/2503.13881/images/cae0895c9435db0c29a00c4b9a3c4973988233cf1548b60d72d8d6cede55a31a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:400da1621ae66053b547a5a12e2a1e9b9aa660685977173c8788a12f56d49e5c +size 2315 diff --git a/data/2025/2503_13xxx/2503.13881/images/cb1cb6ee8f4d1046e00e0441d898b982ead7bd659cd7608fd7812d75c403cf50.jpg b/data/2025/2503_13xxx/2503.13881/images/cb1cb6ee8f4d1046e00e0441d898b982ead7bd659cd7608fd7812d75c403cf50.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1cffb78d3ebb7d89d07fed7cff5c797d8eeb3d82 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13881/images/cb1cb6ee8f4d1046e00e0441d898b982ead7bd659cd7608fd7812d75c403cf50.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2165202d009c37a98078f6352e7ee9b76e445dc8462759efe41e90e6cdde6be0 +size 50038 diff --git 
a/data/2025/2503_13xxx/2503.13881/images/d77ec1a7075ac8a0007df97381c66350ab54740ba78c5a9a039a6d9e2c909c36.jpg b/data/2025/2503_13xxx/2503.13881/images/d77ec1a7075ac8a0007df97381c66350ab54740ba78c5a9a039a6d9e2c909c36.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0395598dbd57115abafc36343dcd7a13f3ea6317 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13881/images/d77ec1a7075ac8a0007df97381c66350ab54740ba78c5a9a039a6d9e2c909c36.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:651a10987f631e5bf27677ab49a6f3cbbf2a8bfea48c960ef468cea6506b6320 +size 89897 diff --git a/data/2025/2503_13xxx/2503.13881/images/d836da9c1793237c660f8e73f4567737cbd798e73a218a7c0fe2eef6f9998711.jpg b/data/2025/2503_13xxx/2503.13881/images/d836da9c1793237c660f8e73f4567737cbd798e73a218a7c0fe2eef6f9998711.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1716e9ec1373afd03cb04cb8affdd7e31a9224bc --- /dev/null +++ b/data/2025/2503_13xxx/2503.13881/images/d836da9c1793237c660f8e73f4567737cbd798e73a218a7c0fe2eef6f9998711.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ee7cb1a1ad0fd505166a3cb43ccd608a872dc51912fa5566bdde618138dc5ba2 +size 2490 diff --git a/data/2025/2503_13xxx/2503.13881/images/dba0b0f92114ba306981b6085c8e195b4cca087c05a93e961e6e03f14f63fb7c.jpg b/data/2025/2503_13xxx/2503.13881/images/dba0b0f92114ba306981b6085c8e195b4cca087c05a93e961e6e03f14f63fb7c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..35401cde7fc303abc6e16b56be8a81a0c661fd27 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13881/images/dba0b0f92114ba306981b6085c8e195b4cca087c05a93e961e6e03f14f63fb7c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d7c87828c8854af86373a080b41b4a529cbbd49173e3a9642f72d4e155695df7 +size 24276 diff --git a/data/2025/2503_13xxx/2503.13881/images/e2251577118d3f5db074751bf55d95dcfbb091ce7441c2b75eb649453c5695a9.jpg 
b/data/2025/2503_13xxx/2503.13881/images/e2251577118d3f5db074751bf55d95dcfbb091ce7441c2b75eb649453c5695a9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..40ed7009e7a7a623436e1783373a92d08b17ae27 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13881/images/e2251577118d3f5db074751bf55d95dcfbb091ce7441c2b75eb649453c5695a9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:200beb845d6642ebc9aee87a442dd7add4210527ad315b7bbeb73b4bf714e0da +size 22453 diff --git a/data/2025/2503_13xxx/2503.13881/images/e98e400479af6f6c5ba0198fac9f6a5a8eabd87b74f6f8c3931fa032e5554076.jpg b/data/2025/2503_13xxx/2503.13881/images/e98e400479af6f6c5ba0198fac9f6a5a8eabd87b74f6f8c3931fa032e5554076.jpg new file mode 100644 index 0000000000000000000000000000000000000000..88eb2646b3300c07d2419e4c459c6fbff418cb77 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13881/images/e98e400479af6f6c5ba0198fac9f6a5a8eabd87b74f6f8c3931fa032e5554076.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:54a419d7c12958bde5996628cc939554bc94bfeb0e17366031564026d0d9f28d +size 65042 diff --git a/data/2025/2503_13xxx/2503.13881/images/f87b1455be084fe49fafc094a3ab25add745daccf0b46690442fe1cde225d8f2.jpg b/data/2025/2503_13xxx/2503.13881/images/f87b1455be084fe49fafc094a3ab25add745daccf0b46690442fe1cde225d8f2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..502a5ecaaa259a9ef2c174de51b9e4f887ac1305 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13881/images/f87b1455be084fe49fafc094a3ab25add745daccf0b46690442fe1cde225d8f2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:37b96fe8e96454b9dac758fb6c2eca70288d8b4e4771103ff8803368136ef36d +size 2512 diff --git a/data/2025/2503_13xxx/2503.13881/layout.json b/data/2025/2503_13xxx/2503.13881/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..d9beac438d2b567d14dcc6343b122e52c1bcd175 --- /dev/null +++ 
b/data/2025/2503_13xxx/2503.13881/layout.json @@ -0,0 +1,16106 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 105, + 79, + 504, + 138 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 79, + 504, + 138 + ], + "spans": [ + { + "bbox": [ + 105, + 79, + 504, + 138 + ], + "type": "text", + "content": "MMR: A LARGE-SCALE BENCHMARK DATASET FOR MULTI-TARGET AND MULTI-GRANULARITY REASONING SEGMENTATION" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 110, + 154, + 422, + 177 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 154, + 422, + 177 + ], + "spans": [ + { + "bbox": [ + 110, + 154, + 422, + 177 + ], + "type": "text", + "content": "Donggon Jang* Yucheol Cho* Suin Lee Taehyeon Kim Dae-Shik Kim† \nDepartment of Electrical Engineering, KAIST" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 110, + 177, + 444, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 177, + 444, + 190 + ], + "spans": [ + { + "bbox": [ + 110, + 177, + 444, + 190 + ], + "type": "text", + "content": "{jdg900,yc_cho,suinlee,rlaxogus0814,daeshik}@kaist.ac.kr" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 276, + 217, + 335, + 229 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 217, + 335, + 229 + ], + "spans": [ + { + "bbox": [ + 276, + 217, + 335, + 229 + ], + "type": "text", + "content": "ABSTRACT" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 140, + 242, + 471, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 242, + 471, + 518 + ], + "spans": [ + { + "bbox": [ + 140, + 242, + 471, + 518 + ], + "type": "text", + "content": "The fusion of Large Language Models (LLMs) with vision models is pioneering new possibilities in user-interactive vision-language tasks. A notable application is reasoning segmentation, where models generate pixel-level segmentation masks by comprehending implicit meanings in human instructions. 
However, seamless human-AI interaction demands more than just object-level recognition; it requires understanding both objects and the functions of their detailed parts, particularly in multi-target scenarios. For example, when instructing a robot to \"turn on the TV\", there could be various ways to accomplish this command. Recognizing multiple objects capable of turning on the TV, such as the TV itself or a remote control (multi-target), provides more flexible options and aids in finding the optimized scenario. Furthermore, understanding specific parts of these objects, like the TV's button or the remote's button (part-level), is important for completing the action. Unfortunately, current reasoning segmentation datasets predominantly focus on a single target object-level reasoning, which limits the detailed recognition of an object's parts in multi-target contexts. To address this gap, we construct a large-scale dataset called Multi-target and Multi-granularity Reasoning (MMR). MMR comprises 194K complex and implicit instructions that consider multi-target, object-level, and part-level aspects, based on pre-existing image-mask sets. This dataset supports diverse and context-aware interactions by hierarchically providing object and part information. Moreover, we propose a straightforward yet effective framework for multi-target, object-level, and part-level reasoning segmentation. Experimental results on MMR show that the proposed method can reason effectively in multi-target and multi-granularity scenarios, while the existing reasoning segmentation model still has room for improvement. The dataset is available at https://github.com/jdg900/MMR." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 539, + 206, + 551 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 539, + 206, + 551 + ], + "spans": [ + { + "bbox": [ + 106, + 539, + 206, + 551 + ], + "type": "text", + "content": "1 INTRODUCTION" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 564, + 506, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 564, + 506, + 675 + ], + "spans": [ + { + "bbox": [ + 104, + 564, + 506, + 675 + ], + "type": "text", + "content": "Human-machine interaction is a key focus in AI for real-world applications, driving interest in multimodal perception models that integrate vision and language modalities. The model perceives the context within the image related to explicit text query inputs and predicts pixel-level masks or bounding boxes accordingly. For example, Open Vocabulary Segmentation (OVS) (Liang et al., 2023; Cho et al., 2023; Xu et al., 2023), leveraging models like CLIP (Radford et al., 2021), generates segmentation masks from open-set text categories. Similarly, Referring Expression Segmentation (RES) (Wang et al., 2023; Hu et al., 2023; Liu et al., 2023a; Yang et al., 2022) predicts the segmentation mask corresponding to the objects referenced by the text input within the image. However, these models encounter challenges with implicit and complex text queries, limiting their effectiveness in real-world scenarios." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 679, + 504, + 703 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 679, + 504, + 703 + ], + "spans": [ + { + "bbox": [ + 104, + 679, + 504, + 703 + ], + "type": "text", + "content": "The emergence of Large Language Models (LLMs) (Zheng et al., 2024; Roumeliotis & Tselikas, 2023; Achiam et al., 2023; Zhang et al., 2023a) offers promising solutions to this challenge. 
Recent" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 710, + 206, + 731 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 710, + 206, + 731 + ], + "spans": [ + { + "bbox": [ + 116, + 710, + 206, + 731 + ], + "type": "text", + "content": "*Equal Contribution †Corresponding Author" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 14, + 205, + 35, + 559 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 205, + 35, + 559 + ], + "spans": [ + { + "bbox": [ + 14, + 205, + 35, + 559 + ], + "type": "text", + "content": "arXiv:2503.13881v1 [cs.CV] 18 Mar 2025" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 138 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 138 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 138 + ], + "type": "text", + "content": "studies (Bai et al., 2023; Li et al., 2023; Liu et al., 2024; Zhu et al., 2023; Zhang et al., 2023b; Chen et al., 2023; You et al., 2023) have witnessed that multimodal LLMs with superior reasoning capabilities can effectively perform vision tasks when given implicit text inputs. 
However, current multimodal LLMs primarily provide information corresponding to images or regions in text form, lacking pixel-level mask generation." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 143, + 506, + 299 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 143, + 506, + 299 + ], + "spans": [ + { + "bbox": [ + 104, + 143, + 506, + 299 + ], + "type": "text", + "content": "To address these limitations, LISA (Lai et al., 2023) introduces reasoning segmentation. Unlike previous tasks that rely on explicit text (e.g., \"steak\"), reasoning segmentation handles implicit queries that require intricate reasoning or world knowledge (e.g., \"the food with most protein\"), by combining LLMs with the Segment Anything Model (SAM) (Kirillov et al., 2023) that has robust mask generation capabilities. LISA also introduces ReasonSeg, a benchmark dataset for reasoning segmentation. ReasonSeg consists of 1,218 image-instruction pairs, each containing implicit text question-answer pairs that involve complex reasoning for each image. Nevertheless, ReasonSeg has two limitations: 1) It does not adequately address scenarios involving multiple targets, and 2) it primarily focuses on object-level reasoning, treating part-level targets ambiguously. Although the recently proposed MUSE dataset by PixelLM (Ren et al., 2023) addresses multi-target object-level reasoning, it does not consider part-level reasoning. These observations underscore that current datasets for reasoning segmentation overlook the complexities of multiple targets and part-level scenarios, concentrating instead solely on object-level reasoning. This limitation restricts more advanced functionalities in reasoning segmentation." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 302, + 504, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 302, + 504, + 425 + ], + "spans": [ + { + "bbox": [ + 104, + 302, + 504, + 425 + ], + "type": "text", + "content": "In this paper, we introduce a Multi-target and Multi-granularity Reasoning segmentation (MMR) dataset to overcome these limitations, which covers both multiple targets and fine-grained part-level reasoning. We collect image and mask annotations from the publicly available PACO-LVIS dataset (Ramanathan et al., 2023). These annotations include class names and bounding box information of objects and parts. Then, inspired by LLaVA (Liu et al., 2024), we generate intricate question-answer pairs using the GPT-4V API (Achiam et al., 2023). Through this, the MMR dataset contains a vast collection of 194K complex and implicit instructions for comprehensive reasoning segmentation. A distinguishing characteristic of the proposed MMR dataset is its ability to handle multiple objects and diverse parts in the question-answer pairs. This diverse granularity enables models to reason and comprehend complex questions about both multiple target objects and their parts within a single query, providing more meaningful and high-quality masks." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 429, + 506, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 429, + 506, + 529 + ], + "spans": [ + { + "bbox": [ + 104, + 429, + 506, + 529 + ], + "type": "text", + "content": "Moreover, we propose a simple yet effective model, Multi-target and Multi-granularity Segmentation Assistant (M²SA), for multi-target, object-level, and part-level reasoning segmentation. The M²SA model incorporates an early local feature fusion and multiple [SEG] tokens, which enables the model to enhance fine-grained visual understanding and consider multi-target segmentation. 
Experimental results on benchmarks, such as MMR, single-target referring expression segmentation datasets, and a multi-granularity referring expression segmentation dataset, demonstrate that M²SA outperforms existing state-of-the-art methods. We believe that our dataset and model serve as a valuable resource for potential applications in real-world reasoning segmentation tasks, offering enhanced versatility and robustness." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 533, + 290, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 533, + 290, + 545 + ], + "spans": [ + { + "bbox": [ + 104, + 533, + 290, + 545 + ], + "type": "text", + "content": "Our contributions are summarized as follows:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 553, + 503, + 646 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 132, + 553, + 503, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 553, + 503, + 586 + ], + "spans": [ + { + "bbox": [ + 132, + 553, + 503, + 586 + ], + "type": "text", + "content": "- We construct the MMR dataset, which includes 194K complex and implicit question pairs for multi-target, object-level, and part-level reasoning segmentation. This dataset facilitates advanced reasoning segmentation tasks in open-world scenarios." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 590, + 503, + 621 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 590, + 503, + 621 + ], + "spans": [ + { + "bbox": [ + 132, + 590, + 503, + 621 + ], + "type": "text", + "content": "- We propose " + }, + { + "bbox": [ + 132, + 590, + 503, + 621 + ], + "type": "inline_equation", + "content": "\\mathbf{M}^2\\mathbf{SA}" + }, + { + "bbox": [ + 132, + 590, + 503, + 621 + ], + "type": "text", + "content": " for multi-target, object-level, and part-level reasoning segmentation. 
It incorporates an early local feature fusion and multiple [SEG] tokens to improve fine-grained visual understanding and segment multiple targets." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 625, + 503, + 646 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 625, + 503, + 646 + ], + "spans": [ + { + "bbox": [ + 132, + 625, + 503, + 646 + ], + "type": "text", + "content": "- Experimental results on MMR and other benchmarks show that " + }, + { + "bbox": [ + 132, + 625, + 503, + 646 + ], + "type": "inline_equation", + "content": "\\mathbf{M}^2\\mathbf{SA}" + }, + { + "bbox": [ + 132, + 625, + 503, + 646 + ], + "type": "text", + "content": " outperforms state-of-the-art methods, validating the effectiveness of its components." + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 663, + 211, + 675 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 663, + 211, + 675 + ], + "spans": [ + { + "bbox": [ + 105, + 663, + 211, + 675 + ], + "type": "text", + "content": "2 RELATED WORK" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "content": "Multimodal Large Language Models Recent advancements (Peng et al., 2023; Taori et al., 2023; Touvron et al., 2023; Zhang et al., 2022) in multimodal Large Language Models (LLMs) have greatly improved the integration between language models and vision tasks by comprehensively understanding and recognizing multiple modalities. 
Recently proposed models such as BLIP-2 (Li et al., 2023)," + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 172 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 172 + ], + "type": "text", + "content": "Flamingo (Alayrac et al., 2022), MiniGPT-4 (Zhu et al., 2023), llama-adapter (Gao et al., 2023; Zhang et al., 2023a), LLaVA (Liu et al., 2024), InstructBLIP (Dai et al., 2024), InternGPT (Liu et al., 2023b), and QwenVL (Bai et al., 2023) have shown superiority at multimodal tasks such as visual question-answering and captioning, leveraging the multimodal understanding capability of LLMs. While these methods have demonstrated improved performance in vision-language tasks through instructional tuning, they only provide the text output about the visual target and focus on a holistic understanding of global information in the image. Therefore, their applicability is limited in tasks requiring finer-grained understanding at the pixel level." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 182, + 506, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 182, + 506, + 338 + ], + "spans": [ + { + "bbox": [ + 104, + 182, + 506, + 338 + ], + "type": "text", + "content": "Reasoning Segmentation The task of reasoning segmentation, introduced by LISA (Lai et al., 2023), is understanding implicit text instruction and providing a corresponding mask for the answer. This task is more challenging and important than the referring expression segmentation task which deals with explicit and simple text queries. For instance, when a user wants to segment a pepper in an image, handling an implicit query like 'the food with a spicy taste' instead of a direct reference such as 'the pepper' is significant for improving human-AI interaction. To tackle this, LISA introduces ReasonSeg, a benchmark containing implicit text queries that require complex reasoning for each image. Recently, PixelLM (Ren et al., 2023), has addressed the limitation of ReasonSeg which considers only a single target in a query text. PixelLM constructs MUSE, a new dataset with multiple target objects in the text instructions. However, both studies are still limited to object-level reasoning segmentation. Methods such as GSVA (Xia et al., 2024) and GLaMM (Rasheed et al., 2024) have also been proposed, but they focus on frameworks for object-level reasoning segmentation rather than introducing new datasets. In this paper, we extend these existing tasks and propose a new benchmark dataset that considers both part-level and object-level reasoning." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 348, + 506, + 504 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 348, + 506, + 504 + ], + "spans": [ + { + "bbox": [ + 104, + 348, + 506, + 504 + ], + "type": "text", + "content": "Part-level Segmentation Recent research (Li et al., 2022; Kirillov et al., 2019; Michieli et al., 2020; Zhou et al., 2021; Pan et al., 2023) has delved into a fine-grained understanding of objects at the part-level. For the part-level visual understanding, datasets with detailed annotations for each part are required. To this end, some initial studies (Gong et al., 2017; Li et al., 2017; Yang et al., 2019; Wah et al., 2011; Jia et al., 2020; Zheng et al., 2018) have introduced datasets with part-level masks on specific domains, such as human body parts (Gong et al., 2017; Li et al., 2017; Yang et al., 2019), bird parts (Wah et al., 2011), and fashion cloth parts (Jia et al., 2020; Zheng et al., 2018). Moreover, recognizing the need for annotations on general objects, some approaches (Chen et al., 2014; Mo et al., 2019; He et al., 2022; Zhou et al., 2019; Meletis et al., 2020; Ramanathan et al., 2023; Wei et al., 2024) have extended the existing object-level datasets by including more fine-grained annotations. Furthermore, there has been an attempt (Wang et al., 2023) to extend the previous Referring Expression Segmentation (RES) task to provide part-level segmentation masks matching explicit text queries. In line with this effort, our work introduces a new dataset that includes multiple target parts and diverse implicit text queries for multi-granularity reasoning segmentation." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 518, + 209, + 532 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 518, + 209, + 532 + ], + "spans": [ + { + "bbox": [ + 105, + 518, + 209, + 532 + ], + "type": "text", + "content": "3 MMR DATASET" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 544, + 506, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 544, + 506, + 677 + ], + "spans": [ + { + "bbox": [ + 104, + 544, + 506, + 677 + ], + "type": "text", + "content": "Current publicly available datasets for reasoning segmentation primarily emphasize object-level reasoning. Consequently, Multimodal Large Language Models (MLLMs) often struggle with questions that involve multiple targets or require reasoning at both the object- and part-levels. To address these limitations, we introduce the Multi-target and Multi-granularity Reasoning (MMR) dataset. MMR includes multi-target, object-level, and part-level reasoning scenarios. This dataset comprises images and masks from the publicly available PACO dataset (Ramanathan et al., 2023), supplemented with implicit and complex question-answer pairs generated by the GPT-API (Achiam et al., 2023). Unlike existing datasets, MMR includes large-scale question-answer pairs that consider multiple target cases and require reasoning at both the object- and part-levels, enhancing its versatility and applicability. In the following sections, we detail the dataset generation process (Sec. 3.1), describe the data filtering process (Sec. 3.2), provide a statistical analysis of MMR (Sec. 3.3), and highlight its distinctiveness compared to existing datasets (Sec. 3.4)." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 689, + 216, + 700 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 689, + 216, + 700 + ], + "spans": [ + { + "bbox": [ + 105, + 689, + 216, + 700 + ], + "type": "text", + "content": "3.1 DATA GENERATION" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 709, + 504, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 504, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 504, + 734 + ], + "type": "text", + "content": "To generate a multi-target, object-level, and part-level reasoning segmentation dataset, we leverage the PACO-LVIS dataset (Ramanathan et al., 2023). PACO-LVIS includes 456 object-specific part" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 153, + 82, + 459, + 240 + ], + "blocks": [ + { + "bbox": [ + 153, + 82, + 459, + 240 + ], + "lines": [ + { + "bbox": [ + 153, + 82, + 459, + 240 + ], + "spans": [ + { + "bbox": [ + 153, + 82, + 459, + 240 + ], + "type": "image", + "image_path": "e98e400479af6f6c5ba0198fac9f6a5a8eabd87b74f6f8c3931fa032e5554076.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 164, + 258, + 444, + 270 + ], + "lines": [ + { + "bbox": [ + 
164, + 258, + 444, + 270 + ], + "spans": [ + { + "bbox": [ + 164, + 258, + 444, + 270 + ], + "type": "text", + "content": "Figure 1: The prompt used in our data creation process with GPT-4V." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 288, + 504, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 288, + 504, + 365 + ], + "spans": [ + { + "bbox": [ + 104, + 288, + 504, + 365 + ], + "type": "text", + "content": "classes across 75 object categories, offering 502K part-level masks and bounding boxes annotated across 273K object-level masks and bounding boxes. By utilizing these comprehensive images and multi-granularity mask annotations, we can reduce annotation costs while ensuring detailed and accurate segmentation data. To create intricate and implicit question-answer pairs for multiple target and multi-granularity reasoning, we employ a GPT-assisted data generation scheme similar to LLaVA (Liu et al., 2024). Specifically, we adopt GPT-4V API which has robust visual understanding capabilities. Fig. 1 illustrates the entire data generation process." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 371, + 504, + 469 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 371, + 504, + 469 + ], + "spans": [ + { + "bbox": [ + 104, + 371, + 504, + 469 + ], + "type": "text", + "content": "To guide the GPT-4V API effectively, we carefully craft prompts that include GPT role, object and part information, task prompts, and requirements. GPT role defines the persona of the GPT-4V API, informing it about the context and objectives of the data generation process. Object & part information provides comprehensive annotations, such as object and part names within the image and their corresponding bounding box coordinates. Task prompt informs the GPT-4V API about the task definition and considerations for generating question-answer pairs. 
Requirements set the rules and patterns that the GPT-4V API should follow when generating question-answer pairs (e.g., \"questions should avoid direct mention of coordinates of objects or parts\" or \"Q&A pairs should contain multiple objects or parts\"). Please see the Appendix A.5 for the detailed prompt." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 475, + 504, + 542 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 475, + 504, + 542 + ], + "spans": [ + { + "bbox": [ + 104, + 475, + 504, + 542 + ], + "type": "text", + "content": "The GPT-4V-assisted data generation follows a two-step process: 1) Global Caption Generation: GPT-4V API first generates a global caption based on the image to foster a deep understanding of its context. 2) Question-Answer Pair Generation: Leveraging this global caption along with object and part information, GPT-4V autonomously crafts multi-target, multi-granularity question-answer pairs. Carefully designed prompts and a two-step generation process enable GPT-4V to deeply comprehend image context and generate contextually relevant question-answer pairs." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 555, + 204, + 565 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 555, + 204, + 565 + ], + "spans": [ + { + "bbox": [ + 105, + 555, + 204, + 565 + ], + "type": "text", + "content": "3.2 DATA FILTERING" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 575, + 504, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 575, + 504, + 641 + ], + "spans": [ + { + "bbox": [ + 104, + 575, + 504, + 641 + ], + "type": "text", + "content": "Despite meticulously crafted prompts for guiding GPT-4V, occasional deviations from established rules result in the generation of subpar question-answer pairs. 
These deviations include questions that reveal explicit target coordinates or provide overly direct hints, as well as answers that offer irrelevant information or omit essential details. To enhance the reliability of the question-answer pairs in our dataset, a rigorous filtering process is essential. Therefore, we engage four skilled human inspectors to review the dataset according to strict criteria:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 651, + 504, + 732 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 132, + 651, + 504, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 651, + 504, + 694 + ], + "spans": [ + { + "bbox": [ + 132, + 651, + 504, + 694 + ], + "type": "text", + "content": "- Logicality and Reasoning: Questions should avoid explicit target coordinates or strong hints. Non-compliant questions and their corresponding answers are removed. For example, a question like \"Which part of this animal [coordinates] uses its sense of smell?\" would be excluded." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 698, + 504, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 698, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 132, + 698, + 504, + 732 + ], + "type": "text", + "content": "- Coherence and Relevance: Answers lacking essential target information or containing irrelevant details are corrected for precision and relevance. This includes cases where answers mention objects or parts not provided in the annotations." 
+ } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 147, + 80, + 299, + 186 + ], + "blocks": [ + { + "bbox": [ + 147, + 80, + 299, + 186 + ], + "lines": [ + { + "bbox": [ + 147, + 80, + 299, + 186 + ], + "spans": [ + { + "bbox": [ + 147, + 80, + 299, + 186 + ], + "type": "image", + "image_path": "3a43e4506a5918098ff444f068e5cb49842f6b8b3de45cc8ae4635bc58eb33b9.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 147, + 186, + 434, + 194 + ], + "lines": [ + { + "bbox": [ + 147, + 186, + 434, + 194 + ], + "spans": [ + { + "bbox": [ + 147, + 186, + 434, + 194 + ], + "type": "text", + "content": "Caption: A knife is inserted vertically into a sandwich on a cutting board, with another knife lying beside it and bottles in the background." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 104, + 298, + 504, + 321 + ], + "lines": [ + { + "bbox": [ + 104, + 298, + 504, + 321 + ], + "spans": [ + { + "bbox": [ + 104, + 298, + 504, + 321 + ], + "type": "text", + "content": "Figure 2: An example from the MMR dataset generated through our data creation process. The left and right pictures show the object- and part-level segmentation masks, respectively." 
+ } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 300, + 80, + 462, + 187 + ], + "blocks": [ + { + "bbox": [ + 300, + 80, + 462, + 187 + ], + "lines": [ + { + "bbox": [ + 300, + 80, + 462, + 187 + ], + "spans": [ + { + "bbox": [ + 300, + 80, + 462, + 187 + ], + "type": "image", + "image_path": "dba0b0f92114ba306981b6085c8e195b4cca087c05a93e961e6e03f14f63fb7c.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 147, + 198, + 459, + 217 + ], + "lines": [ + { + "bbox": [ + 147, + 198, + 459, + 217 + ], + "spans": [ + { + "bbox": [ + 147, + 198, + 459, + 217 + ], + "type": "text", + "content": "Question1: What item on the table is designed to be held at one end while the other end is meant for cutting through food? Answer1: The knife_1 [204.79, 2.4, 238.63, 226.53] is designed with a knife_1's handle [205, 2, 239, 126] to be held while the knife_1's blade [213, 121, 237, 226] is meant for cutting through food." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 147, + 220, + 461, + 239 + ], + "lines": [ + { + "bbox": [ + 147, + 220, + 461, + 239 + ], + "spans": [ + { + "bbox": [ + 147, + 220, + 461, + 239 + ], + "type": "text", + "content": "Question2: Which object on the table appears to be in the process of being used to keep a sandwich upright? Answer2: The knife_2 [304.84, 320.65, 615.34, 427.0] with its knife_2's blade [305, 321, 601, 422] inserted into the sandwich is being used to keep it upright." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 147, + 243, + 447, + 257 + ], + "lines": [ + { + "bbox": [ + 147, + 243, + 447, + 257 + ], + "spans": [ + { + "bbox": [ + 147, + 243, + 447, + 257 + ], + "type": "text", + "content": "Question3: If I wanted to read the product information on a container in view, which part should I look at? 
Answer3: To read the product information, you should look at the bottle_1's label [460, 105, 500, 282] or the bottle_2's label [300, 4, 413, 176]." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 147, + 260, + 454, + 279 + ], + "lines": [ + { + "bbox": [ + 147, + 260, + 454, + 279 + ], + "spans": [ + { + "bbox": [ + 147, + 260, + 454, + 279 + ], + "type": "text", + "content": "Question4: Which objects in the scene are meant to contain liquids, and what part of them touches the surface they rest on? Answer4: The objects meant to contain liquids are bottle_1[459.07, 0.0, 603.49, 315.78] and bottle_2[296.85, 1.15, 416.19, 242.2]. The part that touches the surface they rest on is bottle_1's base [463, 287, 596, 316] and bottle_2's base [307, 220, 400, 241]." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_footnote" + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 340, + 504, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 340, + 504, + 396 + ], + "spans": [ + { + "bbox": [ + 132, + 340, + 504, + 396 + ], + "type": "text", + "content": "- Clarity and Precision: Questions and answers should be clear, concise, and free of ambiguity. For example, ill-defined data, such as asking about the function of an object or part from a segmentation perspective, is removed (e.g., \"What is the function of object_1?\"). Answers should provide precise information that directly addresses the question without causing confusion." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 406, + 504, + 473 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 406, + 504, + 473 + ], + "spans": [ + { + "bbox": [ + 104, + 406, + 504, + 473 + ], + "type": "text", + "content": "Originally, 222K question-answer pairs are generated. 
Of these, " + }, + { + "bbox": [ + 104, + 406, + 504, + 473 + ], + "type": "inline_equation", + "content": "12.6\\%" + }, + { + "bbox": [ + 104, + 406, + 504, + 473 + ], + "type": "text", + "content": " are filtered out through a review process conducted by the four inspectors, resulting in the final MMR dataset. Since dataset generation is a key contribution to our work, each inspector thoroughly reviews the entire set of 222K question-answer pairs. To minimize human error, we only filter out question-answer pairs flagged by two or more inspectors. This meticulous filtering regimen ensures the integrity and trustworthiness of the MMR dataset. An example of the generated question-answer pairs is illustrated in Fig. 2." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 486, + 208, + 497 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 486, + 208, + 497 + ], + "spans": [ + { + "bbox": [ + 105, + 486, + 208, + 497 + ], + "type": "text", + "content": "3.3 DATA STATISTICS" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 506, + 504, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 506, + 504, + 562 + ], + "spans": [ + { + "bbox": [ + 104, + 506, + 504, + 562 + ], + "type": "text", + "content": "The MMR dataset includes 194,398 intricate and implicit question-answer pairs with 57,643 corresponding images and masks selected from PACO-LVIS. The entire dataset is split into distinct sets for training (154,127 pairs), validation (8,194 pairs), and test (32,077 pairs). Moreover, the test set is further categorized into three subsets: object-only, part-only, and mixed sets, providing a benchmark for evaluating multi-granularity reasoning segmentation capabilities." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 567, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 567, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 567, + 506, + 733 + ], + "type": "text", + "content": "Additionally, our dataset inherits a rich coverage of 75 object categories and 445 part categories from PACO-LVIS, enhancing its diversity and utility. We delve into the frequency distribution per object and part category across question-answer pairs. Fig. 3 (b) and (d) provide a comprehensive overview of the number of questions per object category and part category, respectively. The results show that our dataset encompasses a wide range of categories, ensuring that the question-answer pairs are not biased toward specific categories and exhibit a high level of diversity. Furthermore, the word clouds illustrated in Fig. 3 (a) and (c) highlight the prevalent head object and part categories, respectively. These word clouds demonstrate that our question-answer pairs are grounded in common and general objects and their associated parts. Fig. 3 (e) presents statistics on the number of targets in each question-answer pair. On average, there are 1.8 targets per answer, with the maximum number of targets in a single pair being 16. This demonstrates that our dataset can consider multiple targets in an image and cover diverse target reasoning. To evaluate the comprehensiveness of both objects and parts in the proposed dataset, we compare their occurrences within the total question-answer pairs. As depicted in Fig. 3 (f), there are 114,704 descriptions for objects and 226,869 for parts, maintaining a ratio of approximately 1:2. 
This ratio is reasonable because objects typically" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 176, + 86, + 244, + 152 + ], + "blocks": [ + { + "bbox": [ + 157, + 87, + 166, + 95 + ], + "lines": [ + { + "bbox": [ + 157, + 87, + 166, + 95 + ], + "spans": [ + { + "bbox": [ + 157, + 87, + 166, + 95 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 176, + 86, + 244, + 152 + ], + "lines": [ + { + "bbox": [ + 176, + 86, + 244, + 152 + ], + "spans": [ + { + "bbox": [ + 176, + 86, + 244, + 152 + ], + "type": "image", + "image_path": "6af68df24ea5fe2bcd2e8a1c6b52f8c4a4dbe4f613d06f3921cae1f077f251c6.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 277, + 82, + 348, + 152 + ], + "blocks": [ + { + "bbox": [ + 258, + 87, + 267, + 95 + ], + "lines": [ + { + "bbox": [ + 258, + 87, + 267, + 95 + ], + "spans": [ + { + "bbox": [ + 258, + 87, + 267, + 95 + ], + "type": "text", + "content": "(c)" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 277, + 82, + 348, + 152 + ], + "lines": [ + { + "bbox": [ + 277, + 82, + 348, + 152 + ], + "spans": [ + { + 
"bbox": [ + 277, + 82, + 348, + 152 + ], + "type": "image", + "image_path": "49d95cca8a99e7183736adb72e5992742a6708b40215ddab704ed4836b67c487.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 365, + 89, + 453, + 154 + ], + "blocks": [ + { + "bbox": [ + 358, + 88, + 367, + 95 + ], + "lines": [ + { + "bbox": [ + 358, + 88, + 367, + 95 + ], + "spans": [ + { + "bbox": [ + 358, + 88, + 367, + 95 + ], + "type": "text", + "content": "(e)" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 365, + 89, + 453, + 154 + ], + "lines": [ + { + "bbox": [ + 365, + 89, + 453, + 154 + ], + "spans": [ + { + "bbox": [ + 365, + 89, + 453, + 154 + ], + "type": "image", + "image_path": "75402cb6ca335d4d37dec42055f59df83bb9e5047c9952f863c51e27ad68564e.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 157, + 163, + 249, + 232 + ], + "blocks": [ + { + "bbox": [ + 157, + 163, + 249, + 232 + ], + "lines": [ + { + "bbox": [ + 157, + 163, + 249, + 232 + ], + "spans": [ + { + "bbox": [ + 157, + 163, + 249, + 232 + ], + "type": "image", + "image_path": "27ebdc79105d81dce6346da94f8ea4a8348668259ea47a09d9ab995347bce18b.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 249, + 504, + 295 + ], + "lines": [ + { + "bbox": [ + 104, + 249, + 504, + 295 + ], + "spans": [ + { + "bbox": [ + 104, + 249, + 504, + 295 + ], + "type": "text", + "content": "Figure 3: Statistics of the proposed MMR dataset. 
(a) the word cloud for the object categories, (b) the number of objects per each object category in questions (log scale), (c) the word cloud for the part categories, (d) the number of parts per each part category in questions (log scale), (e) the distribution of target count in answers, and (f) the total number of expressions of objects and parts." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 258, + 163, + 351, + 232 + ], + "blocks": [ + { + "bbox": [ + 258, + 163, + 351, + 232 + ], + "lines": [ + { + "bbox": [ + 258, + 163, + 351, + 232 + ], + "spans": [ + { + "bbox": [ + 258, + 163, + 351, + 232 + ], + "type": "image", + "image_path": "727c972b90b1ecc04fd743aed83f08e577ab1f60f02b3b714179cc2aabd64bfc.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 359, + 163, + 453, + 232 + ], + "blocks": [ + { + "bbox": [ + 359, + 163, + 453, + 232 + ], + "lines": [ + { + "bbox": [ + 359, + 163, + 453, + 232 + ], + "spans": [ + { + "bbox": [ + 359, + 163, + 453, + 232 + ], + "type": "image", + "image_path": "78c36daf8e08ee87adcae76de957465e2efddd4d69ab427ee4a382fa5cb691f7.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 155, + 352, + 455, + 392 + ], + "blocks": [ + { + "bbox": [ + 104, + 316, + 504, + 349 + ], + "lines": [ + { + "bbox": [ + 104, + 316, + 504, + 349 + ], + "spans": [ + { + "bbox": [ + 104, + 316, + 504, + 349 + ], + "type": "text", + "content": "Table 1: Comparison among several reasoning segmentation datasets, including ReasonSeg (Lai et al., 2023), MUSE (Ren et al., 2023), and the proposed MMR. Here, part-level is an expression that refers to various parts of an object that appear in the image." 
+ } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 155, + 352, + 455, + 392 + ], + "lines": [ + { + "bbox": [ + 155, + 352, + 455, + 392 + ], + "spans": [ + { + "bbox": [ + 155, + 352, + 455, + 392 + ], + "type": "table", + "html": "
DatasetsObject-levelPart-levelMulti-target# of Q&A pairsGPT models
ReasonSeg×1.2KGPT-3.5
MUSE×214KGPT-4V
MMR194KGPT-4V
", + "image_path": "28ee61eabb716ada21fdfdfa49acf467f7748bdb90d86f1e4ef1c603b2d049a3.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_body" + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 418, + 504, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 418, + 504, + 441 + ], + "spans": [ + { + "bbox": [ + 104, + 418, + 504, + 441 + ], + "type": "text", + "content": "consist of multiple parts. Therefore, it reflects a balanced distribution, contributing to the dataset's comprehensiveness and facilitating multi-granularity knowledge understanding." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 457, + 430, + 468 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 457, + 430, + 468 + ], + "spans": [ + { + "bbox": [ + 105, + 457, + 430, + 468 + ], + "type": "text", + "content": "3.4 COMPARISON WITH EXISTING REASONING SEGMENTATION DATASETS" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 479, + 504, + 501 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 479, + 504, + 501 + ], + "spans": [ + { + "bbox": [ + 104, + 479, + 504, + 501 + ], + "type": "text", + "content": "Tab. 1 presents a comparative overview of existing reasoning segmentation datasets and the proposed MMR dataset. As observed, MMR offers several notable advantages over existing datasets." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 506, + 504, + 551 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 506, + 504, + 551 + ], + "spans": [ + { + "bbox": [ + 104, + 506, + 504, + 551 + ], + "type": "text", + "content": "First, MMR contains 194K question-answer pairs, comparable to MUSE (Ren et al., 2023), and far exceeds ReasonSeg (Lai et al., 2023) which has only 1,218 question-answer pairs primarily designed for validation and testing purposes. This extensive scale facilitates both training and evaluation for reasoning segmentation." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 556, + 506, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 556, + 506, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 556, + 506, + 732 + ], + "type": "text", + "content": "Second, MMR supports question-answer pairs covering multi-target and multi-granularity (object-level and part-level) visual reasoning. Although MUSE includes multi-target instances, its coverage is limited to object-level reasoning. This lack of part-level detail reduces its effectiveness in fine-grained visual tasks. Part-level reasoning in MMR enables a more comprehensive understanding of visual contexts and hierarchical relationships between parts and objects. While ReasonSeg appears to include part-level reasoning, ReasonSeg often has ambiguous boundaries between objects and their parts because it doesn't specify which object a part belongs to. For instance, in a scene with a \"car\" and a \"tire\", ReasonSeg considers the \"tire\" as part of the \"car\", even if the tire is not attached. In contrast, MMR clearly distinguishes the boundaries between objects and their parts by specifying hierarchy like which object a part belongs to based on their spatial context. Additionally, unlike ReasonSeg, MMR distinguishes multiple objects of the same class within a single image at the instance level. For example, ReasonSeg might group all buses in a scene under a single \"Bus\" label. On the other hand, MMR treats them as distinct entities like \"Bus_1,\" \"Bus_2\", etc. Also, ReasonSeg treats all screens simply as \"screen,\" whereas MMR would specify \"laptop_1's screen,\" \"laptop_2's screen,\" and so forth. This allows MMR to handle objects or parts of the same class separately by considering their spatial context within the image." 
+ } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 149 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 149 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 149 + ], + "type": "text", + "content": "Third, MMR leverages the advanced visual understanding capabilities of GPT-4V for question-answer generation. GPT-4V receives the image along with information such as class names and bounding boxes of objects and parts, enabling detailed and contextually accurate question-answer generation. In comparison, ReasonSeg generates questions using the language-specialized GPT-3.5 and pre-trained image tagging models, which do not fully capture the visual context, leading to less relevant question-answer pairs with the image." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 154, + 504, + 189 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 154, + 504, + 189 + ], + "spans": [ + { + "bbox": [ + 104, + 154, + 504, + 189 + ], + "type": "text", + "content": "In summary, MMR provides a substantial improvement over ReasonSeg and MUSE by including large-scale, multi-target, and multi-granularity question-answer pairs. 
It strengthens real-world applicability, making it a valuable asset for advancing research in reasoning-based segmentation tasks." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 208, + 243, + 220 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 208, + 243, + 220 + ], + "spans": [ + { + "bbox": [ + 105, + 208, + 243, + 220 + ], + "type": "text", + "content": "4 BASELINE FRAMEWOK" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 236, + 238, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 236, + 238, + 422 + ], + "spans": [ + { + "bbox": [ + 104, + 236, + 238, + 422 + ], + "type": "text", + "content": "We propose a novel baseline framework for multi-target and multi-granularity reasoning segmentation, " + }, + { + "bbox": [ + 104, + 236, + 238, + 422 + ], + "type": "inline_equation", + "content": "\\mathbf{M}^2\\mathbf{SA}" + }, + { + "bbox": [ + 104, + 236, + 238, + 422 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 104, + 236, + 238, + 422 + ], + "type": "inline_equation", + "content": "\\mathbf{M}^2\\mathbf{SA}" + }, + { + "bbox": [ + 104, + 236, + 238, + 422 + ], + "type": "text", + "content": " enhances the LISA framework with two key features: 1) Early Local Feature Fusion and 2) multiple [SEG] tokens. For Early Local Feature Fusion, we extract local features from the early layer of the SAM's vision encoder, which contains fine-grained details such as image edges and boundaries. 
These local features are fused with the global semantic context features from the last layer of SAM's vi" + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 246, + 257, + 506, + 387 + ], + "blocks": [ + { + "bbox": [ + 246, + 257, + 506, + 387 + ], + "lines": [ + { + "bbox": [ + 246, + 257, + 506, + 387 + ], + "spans": [ + { + "bbox": [ + 246, + 257, + 506, + 387 + ], + "type": "image", + "image_path": "cb1cb6ee8f4d1046e00e0441d898b982ead7bd659cd7608fd7812d75c403cf50.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 282, + 400, + 465, + 412 + ], + "lines": [ + { + "bbox": [ + 282, + 400, + 465, + 412 + ], + "spans": [ + { + "bbox": [ + 282, + 400, + 465, + 412 + ], + "type": "text", + "content": "Figure 4: The overview of " + }, + { + "bbox": [ + 282, + 400, + 465, + 412 + ], + "type": "inline_equation", + "content": "\\mathbf{M}^2\\mathrm{SA}" + }, + { + "bbox": [ + 282, + 400, + 465, + 412 + ], + "type": "text", + "content": " framework." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 422, + 504, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 422, + 504, + 499 + ], + "spans": [ + { + "bbox": [ + 104, + 422, + 504, + 499 + ], + "type": "text", + "content": "sion encoder for more informative visual features in the mask decoder. Multiple [SEG] tokens overcome the LISA framework's limitation of a single [SEG] token, which struggles to segment multiple targets simultaneously.. To overcome this, we propose utilizing multiple [SEG] tokens. In our MMR dataset, we append a [SEG] token to each target object and part in the answer annotations (e.g., \"When closing the laptop, laptop computer's screen [SEG] would come into contact with laptop computer's base panel [SEG].\"). This approach enables the model to predict separate [SEG] tokens for each target, reducing ambiguity among multiple targets." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "text", + "content": "Model Architecture Fig. 4 presents the overall architecture of the proposed " + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathbf{M}^2\\mathbf{SA}" + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "text", + "content": " framework, which integrates two core components: Segment Anything Model (SAM)(Kirillov et al., 2023) and Multimodal Large Language Model (MLLM), specifically LLaVA(Liu et al., 2024). SAM module consists of SAM Vision Encoder " + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "inline_equation", + "content": "(E)" + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "text", + "content": " and SAM Mask Decoder " + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "inline_equation", + "content": "(D)" + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "text", + "content": ", while the MLLM comprises CLIP Vision Encoder " + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "inline_equation", + "content": "(I)" + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "text", + "content": ", vision-to-text projector " + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "inline_equation", + "content": "(\\psi)" + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "text", + "content": ", and Large Language Model " + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "inline_equation", + "content": "(F)" + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "text", + "content": ". 
The image " + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "inline_equation", + "content": "x_{img} \\in R^{h \\times w \\times 3}" + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "text", + "content": " is fed into the SAM Vision Encoder " + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "inline_equation", + "content": "(E)" + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "text", + "content": ", which generates global context features " + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "inline_equation", + "content": "v_g = E(x_{img}) \\in R^{h/16 \\times w/16 \\times c}" + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "text", + "content": " and early local features " + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "inline_equation", + "content": "v_l = E_l(x_{img}) \\in R^{h/16 \\times w/16 \\times c'}" + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "text", + "content": ". To align the channel dimensions of " + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "inline_equation", + "content": "v_l" + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "inline_equation", + "content": "v_g" + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "text", + "content": ", the early local features " + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "inline_equation", + "content": "v_l" + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "text", + "content": " are passed through two convolution layers, resulting in refined features " + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\hat{v}_l \\in R^{h/16 \\times w/16 \\times c}" + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "text", + "content": ". 
" + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "inline_equation", + "content": "v_g" + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\hat{v}_l" + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "text", + "content": " are then summed to obtain visual features " + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "inline_equation", + "content": "v_{seg} \\in R^{h/16 \\times w/16 \\times c}" + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "text", + "content": " for segmentation. Simultaneously, the image " + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "inline_equation", + "content": "x_{img}" + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "text", + "content": " is input into the CLIP Vision Encoder " + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "inline_equation", + "content": "(I)" + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "text", + "content": ", producing visual token embeddings " + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "inline_equation", + "content": "f_{img} = \\psi(I(x_{img})) \\in R^{N_{img} \\times d}" + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "text", + "content": ", which are mapped to the LLM input space using the vision-to-text projector " + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\psi" + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "text", + "content": ". 
In parallel, the text queries " + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "inline_equation", + "content": "x_{txt}" + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "text", + "content": " are tokenized by the " + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "text", + "content": "'s tokenizer, producing text token embeddings " + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "inline_equation", + "content": "f_{txt} \\in R^{N_{txt} \\times d}" + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "text", + "content": ". The visual token embeddings " + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "inline_equation", + "content": "f_{img}" + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "text", + "content": " and text token embeddings " + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "inline_equation", + "content": "f_{txt}" + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "text", + "content": " are concatenated and processed by LLM " + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "text", + "content": ", resulting in output response " + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\hat{y}_{txt} = F(\\text{concat}(f_{img}, f_{txt}))" + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\hat{y}_{txt}" + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "text", + "content": " contains the textual response to the text query and special [SEG] tokens that correspond to each target entity to be segmented. 
These multiple [SEG] token embeddings are extracted and projected into SAM's prompt space via the projector " + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "text", + "content": ", resulting in embeddings " + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "inline_equation", + "content": "f_{seg} = \\phi(\\hat{y}_{txt}[SEG]) \\in R^{N_{seg} \\times c}" + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "text", + "content": ". Finally, the SAM Mask Decoder " + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "inline_equation", + "content": "(D)" + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "text", + "content": " takes the visual features " + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "inline_equation", + "content": "v_{seg}" + }, + { + "bbox": [ + 104, + 515, + 506, + 733 + ], + "type": "text", + "content": " and the multiple [SEG] token embeddings" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": 
"inline_equation", + "content": "f_{seg}" + }, + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "content": " as input to generate the segmentation mask " + }, + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "inline_equation", + "content": "M = D(\\text{concat}(v_{seg}, f_{seg}))" + }, + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "content": ", which identifies the target regions in the image corresponding to the text queries." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 116, + 504, + 184 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 116, + 504, + 184 + ], + "spans": [ + { + "bbox": [ + 104, + 116, + 504, + 184 + ], + "type": "text", + "content": "**Optimization** Our model is trained end-to-end through two sources of supervision. For the text generation, we compute auto-regressive cross-entropy loss " + }, + { + "bbox": [ + 104, + 116, + 504, + 184 + ], + "type": "inline_equation", + "content": "L_{txt}" + }, + { + "bbox": [ + 104, + 116, + 504, + 184 + ], + "type": "text", + "content": " between the text output " + }, + { + "bbox": [ + 104, + 116, + 504, + 184 + ], + "type": "inline_equation", + "content": "\\hat{y}_{txt}" + }, + { + "bbox": [ + 104, + 116, + 504, + 184 + ], + "type": "text", + "content": " and the ground-truth text answer " + }, + { + "bbox": [ + 104, + 116, + 504, + 184 + ], + "type": "inline_equation", + "content": "y_{txt}" + }, + { + "bbox": [ + 104, + 116, + 504, + 184 + ], + "type": "text", + "content": ". 
For the high-quality segmentation mask generation, the mask loss " + }, + { + "bbox": [ + 104, + 116, + 504, + 184 + ], + "type": "inline_equation", + "content": "L_{mask}" + }, + { + "bbox": [ + 104, + 116, + 504, + 184 + ], + "type": "text", + "content": " is calculated between the output mask " + }, + { + "bbox": [ + 104, + 116, + 504, + 184 + ], + "type": "inline_equation", + "content": "\\hat{M}" + }, + { + "bbox": [ + 104, + 116, + 504, + 184 + ], + "type": "text", + "content": " and the ground-truth mask " + }, + { + "bbox": [ + 104, + 116, + 504, + 184 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 104, + 116, + 504, + 184 + ], + "type": "text", + "content": ". The mask loss " + }, + { + "bbox": [ + 104, + 116, + 504, + 184 + ], + "type": "inline_equation", + "content": "L_{mask}" + }, + { + "bbox": [ + 104, + 116, + 504, + 184 + ], + "type": "text", + "content": " is a weighted sum of per-pixel binary cross-entropy loss " + }, + { + "bbox": [ + 104, + 116, + 504, + 184 + ], + "type": "inline_equation", + "content": "L_{bce}" + }, + { + "bbox": [ + 104, + 116, + 504, + 184 + ], + "type": "text", + "content": " and a DICE loss " + }, + { + "bbox": [ + 104, + 116, + 504, + 184 + ], + "type": "inline_equation", + "content": "L_{dice}" + }, + { + "bbox": [ + 104, + 116, + 504, + 184 + ], + "type": "text", + "content": ", determined by " + }, + { + "bbox": [ + 104, + 116, + 504, + 184 + ], + "type": "inline_equation", + "content": "\\lambda_{bce}" + }, + { + "bbox": [ + 104, + 116, + 504, + 184 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 116, + 504, + 184 + ], + "type": "inline_equation", + "content": "\\lambda_{dice}" + }, + { + "bbox": [ + 104, + 116, + 504, + 184 + ], + "type": "text", + "content": ". 
The overall loss " + }, + { + "bbox": [ + 104, + 116, + 504, + 184 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 104, + 116, + 504, + 184 + ], + "type": "text", + "content": " is formulated as follows:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 259, + 202, + 339, + 213 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 259, + 202, + 339, + 213 + ], + "spans": [ + { + "bbox": [ + 259, + 202, + 339, + 213 + ], + "type": "interline_equation", + "content": "L = L _ {t x t} + L _ {m a s k},", + "image_path": "cae0895c9435db0c29a00c4b9a3c4973988233cf1548b60d72d8d6cede55a31a.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 239, + 210, + 504, + 227 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 239, + 210, + 504, + 227 + ], + "spans": [ + { + "bbox": [ + 239, + 210, + 504, + 227 + ], + "type": "interline_equation", + "content": "L _ {m a s k} = \\lambda_ {b c e} L _ {b c e} + \\lambda_ {d i c e} L _ {d i c e}, \\tag {1}", + "image_path": "2891d190457a4dd79a09496ba11c532cfcfbb64d41c1b972f5a26fc652a11bf0.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 230, + 333, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 230, + 333, + 243 + ], + "spans": [ + { + "bbox": [ + 104, + 230, + 333, + 243 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 230, + 333, + 243 + ], + "type": "inline_equation", + "content": "\\lambda_{bce}" + }, + { + "bbox": [ + 104, + 230, + 333, + 243 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 230, + 333, + 243 + ], + "type": "inline_equation", + "content": "\\lambda_{dice}" + }, + { + "bbox": [ + 104, + 230, + 333, + 243 + ], + "type": "text", + "content": " are set to 0.5 and 2.0, respectively." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 257, + 195, + 269 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 257, + 195, + 269 + ], + "spans": [ + { + "bbox": [ + 105, + 257, + 195, + 269 + ], + "type": "text", + "content": "5 EXPERIMENT" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 281, + 230, + 293 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 281, + 230, + 293 + ], + "spans": [ + { + "bbox": [ + 105, + 281, + 230, + 293 + ], + "type": "text", + "content": "5.1 EXPERIMENTAL SETUP" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 301, + 506, + 489 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 301, + 506, + 489 + ], + "spans": [ + { + "bbox": [ + 104, + 301, + 506, + 489 + ], + "type": "text", + "content": "Implementation Details We use pre-trained LLaVA-7B (Liu et al., 2024) and LLaVA-Llama2-13B with CLIP-ViT-L/14 (Radford et al., 2021) and Vicuna-7B (Chiang et al., 2023)/Llama2-13B (Touvron et al., 2023) to form Multimodal Large Language Model (MLLM). We adopt the pre-trained SAM-ViT-H (Kirillov et al., 2023) for the segmentation model. For CLIP-ViT-L/14, input image " + }, + { + "bbox": [ + 104, + 301, + 506, + 489 + ], + "type": "inline_equation", + "content": "x_{img}" + }, + { + "bbox": [ + 104, + 301, + 506, + 489 + ], + "type": "text", + "content": " is resized to " + }, + { + "bbox": [ + 104, + 301, + 506, + 489 + ], + "type": "inline_equation", + "content": "224 \\times 224 \\times 3" + }, + { + "bbox": [ + 104, + 301, + 506, + 489 + ], + "type": "text", + "content": " and processed with a patch size of 14, resulting in " + }, + { + "bbox": [ + 104, + 301, + 506, + 489 + ], + "type": "inline_equation", + "content": "N_{img} = 256" + }, + { + "bbox": [ + 104, + 301, + 506, + 489 + ], + "type": "text", + "content": ". 
LLM dimensions " + }, + { + "bbox": [ + 104, + 301, + 506, + 489 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 301, + 506, + 489 + ], + "type": "text", + "content": " are set to 4096 and 5120 for Vicuna-7B and Llama2-13B. For SAM-ViT-H, " + }, + { + "bbox": [ + 104, + 301, + 506, + 489 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 104, + 301, + 506, + 489 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 301, + 506, + 489 + ], + "type": "inline_equation", + "content": "c'" + }, + { + "bbox": [ + 104, + 301, + 506, + 489 + ], + "type": "text", + "content": " are 256 and 1280, respectively. Efficient fine-tuning of the MLLM is facilitated using LoRA (Hu et al., 2021). The trainable components in " + }, + { + "bbox": [ + 104, + 301, + 506, + 489 + ], + "type": "inline_equation", + "content": "\\mathbf{M}^2\\mathbf{SA}" + }, + { + "bbox": [ + 104, + 301, + 506, + 489 + ], + "type": "text", + "content": " include the SAM Mask Decoder " + }, + { + "bbox": [ + 104, + 301, + 506, + 489 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 301, + 506, + 489 + ], + "type": "text", + "content": ", the projector " + }, + { + "bbox": [ + 104, + 301, + 506, + 489 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 104, + 301, + 506, + 489 + ], + "type": "text", + "content": ", two convolution layers, the LoRA adapter in MLLM, and the token embeddings. We use features from the 8th layer in the SAM Vision Encoder " + }, + { + "bbox": [ + 104, + 301, + 506, + 489 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 104, + 301, + 506, + 489 + ], + "type": "text", + "content": " for early layer feature fusion. Our model is trained for 10 epochs, with each epoch consisting of 5,000 steps. 
We employ the AdamW (Loshchilov & Hutter, 2017) optimizer with a learning rate of 0.0003 and set gradient accumulation to 10 steps per update. Additionally, we use WarmupDecayLR as the learning rate scheduler. The learning rate is linearly decayed after 100 steps. The batch size and LoRA rank are set to 2 and 8, respectively. All experiments are conducted using 4 NVIDIA RTX A6000 GPUs. The results reported in the paper are the average values obtained from experiments conducted with 3 different random seeds." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 500, + 506, + 622 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 500, + 506, + 622 + ], + "spans": [ + { + "bbox": [ + 104, + 500, + 506, + 622 + ], + "type": "text", + "content": "Datasets For model training, we adopt the mixed training dataset composition scheme proposed by LISA (Lai et al., 2023), comprising four types: semantic segmentation datasets (ADE20K (Zhou et al., 2019), COCO-Stuff (Caesar et al., 2018), Mapillary (Neuhold et al., 2017), PACO-LVIS (Ramanathan et al., 2023), and PASCAL-Part (Chen et al., 2014)), referring expression segmentation datasets (RefCOCO (Kazemzadeh et al., 2014), RefCOCO+ (Kazemzadeh et al., 2014), RefCOCOg (Mao et al., 2016), and RefCLEF (Kazemzadeh et al., 2014)), a visual question answering dataset (LLaVA-Instruct-150K (Liu et al., 2024)), and the proposed MMR dataset for multi-target and multi-granularity reasoning segmentation. We sample the data from the mixed training dataset in a ratio of 2:9:2:6, where 2 represents semantic segmentation datasets, 9 represents referring expression segmentation datasets, 2 represents the visual question answering dataset, and 6 represents the proposed MMR dataset." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 632, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 632, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 632, + 504, + 733 + ], + "type": "text", + "content": "Baseline Methods To validate the effectiveness of the " + }, + { + "bbox": [ + 104, + 632, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\mathbf{M}^2\\mathbf{SA}" + }, + { + "bbox": [ + 104, + 632, + 504, + 733 + ], + "type": "text", + "content": " for a multi-target and multi-granularity reasoning segmentation task, we adopt LISA (Lai et al., 2023), GSVA (Xia et al., 2024), and GLaMM (Rasheed et al., 2024) along with their variants. The pre-trained models refer to those trained solely on their respective datasets. In contrast, the variant models referred to as " + }, + { + "bbox": [ + 104, + 632, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\mathrm{model}_{tr}" + }, + { + "bbox": [ + 104, + 632, + 504, + 733 + ], + "type": "text", + "content": ", are trained from scratch on a mixed training set that includes the MMR dataset. Due to issues with the publicly available code from the PixelLM, we exclude PixelLM from the baseline methods to ensure reliable and consistent comparison results. For a Multi-granularity Referring Expression Segmentation (MRES) task, we additionally adopt the class RES models (Yang et al., 2022; Liu et al., 2023a; Wang et al., 2023; 2022) and the general models (Zhu et al., 2022; Zou et al., 2023; 2024)." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 171, + 126, + 441, + 223 + ], + "blocks": [ + { + "bbox": [ + 104, + 89, + 504, + 124 + ], + "lines": [ + { + "bbox": [ + 104, + 89, + 504, + 124 + ], + "spans": [ + { + "bbox": [ + 104, + 89, + 504, + 124 + ], + "type": "text", + "content": "Table 2: Results on MMR benchmark. The gIoU and cIoU metrics are reported for the comparison. Obj & Part, Obj, and Part denote multi-granularity, object-only, and part-only evaluation settings. The best results are highlighted in bold." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 171, + 126, + 441, + 223 + ], + "lines": [ + { + "bbox": [ + 171, + 126, + 441, + 223 + ], + "spans": [ + { + "bbox": [ + 171, + 126, + 441, + 223 + ], + "type": "table", + "html": "
Methodsvaltest
Obj & PartObjPartObj & Part
gIoUcIoUgIoUcIoUgIoUcIoUgIoUcIoU
LISA-7B (Lai et al., 2023)13.818.323.525.16.67.914.517.9
LISA-7Btr19.431.634.741.88.013.119.527.1
GSVA-7B (Xia et al., 2024)14.625.126.434.36.011.615.524.8
GSVA-7Btr19.838.930.241.18.018.621.234.5
GLaMM (Rasheed et al., 2024)12.619.223.731.93.96.413.318.7
GLaMMtr26.947.140.354.212.125.530.345.0
M2SA-7B27.848.641.055.613.527.030.946.8
LISA-Llama2-13B (Lai et al., 2023)15.420.026.127.97.48.416.119.8
LISA-Llama2-13Btr22.333.440.245.210.716.423.029.2
M2SA-Llama2-13B28.449.142.357.613.627.231.647.6
", + "image_path": "00c67b8bab549d0d741c5a3bfe54aac53625989b1e503526d215c1f6bddaf286.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 171, + 284, + 441, + 393 + ], + "blocks": [ + { + "bbox": [ + 104, + 247, + 504, + 282 + ], + "lines": [ + { + "bbox": [ + 104, + 247, + 504, + 282 + ], + "spans": [ + { + "bbox": [ + 104, + 247, + 504, + 282 + ], + "type": "text", + "content": "Table 3: Referring expression segmentation results on RefCOCO, RefCOCO+ (Kazemzadeh et al., 2014) and RefCOCOg (Mao et al., 2016) among " + }, + { + "bbox": [ + 104, + 247, + 504, + 282 + ], + "type": "inline_equation", + "content": "\\mathbf{M}^2\\mathbf{SA}" + }, + { + "bbox": [ + 104, + 247, + 504, + 282 + ], + "type": "text", + "content": " and existing methods. For a fair comparison with previous methods, the cIoU metrics are adopted. The best results are highlighted in bold." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 171, + 284, + 441, + 393 + ], + "lines": [ + { + "bbox": [ + 171, + 284, + 441, + 393 + ], + "spans": [ + { + "bbox": [ + 171, + 284, + 441, + 393 + ], + "type": "table", + "html": "
MethodsRefCOCORefCOCO+RefCOCOg
valtestAtestBvaltestAtestBval(U)test(U)
MCN (Luo et al., 2020)62.464.259.750.655.544.749.249.4
VLT (Ding et al., 2021)67.570.565.256.361.050.155.057.0
CRIS (Wang et al., 2022)70.573.266.162.368.153.759.960.4
LAVT (Yang et al., 2022)72.775.868.862.168.455.161.262.1
ReLA (Liu et al., 2023a)73.876.570.266.071.057.765.066.0
X-Decoder (Zou et al., 2023)------64.6-
SEEM (Zou et al., 2024)------65.7-
LISA-7B (Lai et al., 2023)74.176.571.162.467.456.566.468.5
GSVA-7B (Xia et al., 2024)76.477.472.864.567.758.671.172.0
GLaMM (Rasheed et al., 2024)79.583.276.972.678.764.674.274.9
M2SA-7B74.076.869.763.167.256.167.068.3
LISA-Llama2-13B (Lai et al., 2023)73.677.370.563.268.257.067.068.4
M2SA-Llama2-13B74.677.671.064.068.157.669.069.3
", + "image_path": "955d59c28fc9ac1c5a98ff89a36a38fbe9c2e717d2f8675830a553efaa127b29.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 418, + 504, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 418, + 504, + 475 + ], + "spans": [ + { + "bbox": [ + 104, + 418, + 504, + 475 + ], + "type": "text", + "content": "Evaluation Metrics Following the implementation of the referring expression segmentation works, we adopt gIoU and cIoU scores to assess the quality of the segmentation mask. gIoU denotes the mean IoU for each mask, whereas cIoU is computed by the cumulative intersection area over the cumulative union area across the entire dataset. Given that cIoU may exhibit bias towards large-area objects, gIoU is preferable for evaluating part regions." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 488, + 292, + 499 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 488, + 292, + 499 + ], + "spans": [ + { + "bbox": [ + 105, + 488, + 292, + 499 + ], + "type": "text", + "content": "5.2 RESULTS ON BENCHMARK DATASETS" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 509, + 506, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 509, + 506, + 609 + ], + "spans": [ + { + "bbox": [ + 104, + 509, + 506, + 609 + ], + "type": "text", + "content": "Comparison on MMR Tab. 2 compares " + }, + { + "bbox": [ + 104, + 509, + 506, + 609 + ], + "type": "inline_equation", + "content": "\\mathbf{M}^2\\mathrm{SA}" + }, + { + "bbox": [ + 104, + 509, + 506, + 609 + ], + "type": "text", + "content": " and the baseline models in a multi-target and multi-granularity reasoning segmentation task (MMR dataset). The pre-trained models perform poorly on the proposed MMR dataset, particularly struggling with the part-only set due to its lack of detailed part-level understanding. 
Conversely, " + }, + { + "bbox": [ + 104, + 509, + 506, + 609 + ], + "type": "inline_equation", + "content": "\\mathrm{LISA}_{tr}" + }, + { + "bbox": [ + 104, + 509, + 506, + 609 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 509, + 506, + 609 + ], + "type": "inline_equation", + "content": "\\mathrm{GSVA}_{tr}" + }, + { + "bbox": [ + 104, + 509, + 506, + 609 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 509, + 506, + 609 + ], + "type": "inline_equation", + "content": "\\mathrm{GLaMM}_{tr}" + }, + { + "bbox": [ + 104, + 509, + 506, + 609 + ], + "type": "text", + "content": ", trained using the proposed MMR dataset, exhibit superior performance as they acquire both object-level and part-level knowledge. However, its ability to handle multi-target and fine-detail reasoning remains limited. In contrast, the proposed " + }, + { + "bbox": [ + 104, + 509, + 506, + 609 + ], + "type": "inline_equation", + "content": "\\mathbf{M}^2\\mathrm{SA}" + }, + { + "bbox": [ + 104, + 509, + 506, + 609 + ], + "type": "text", + "content": " shows highly competitive performance, effectively managing multi-target scenarios and fine-detail tasks, thus showcasing its strength in comprehensive reasoning segmentation. Qualitative results are provided in the Appendix A.13." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": "Comparison on Referring Expression Segmentation Task Tab. 3 presents the single-target object-level RefCOCO series dataset results. 
While " + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathbf{M}^2\\mathbf{SA}" + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": " achieves commendable performance, it is important to note that single-target referring expression segmentation is a relatively simple task, involving explicit queries that focus on identifying a single object. The true strength of " + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathbf{M}^2\\mathbf{SA}" + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": " lies in its ability to excel in more complex and challenging tasks, such as multi-target referring expression segmentation and multi-granularity referring segmentation. To evaluate its performance on multi-target referring expression segmentation, we curate text queries for multi-target objects using annotation information from the RefCOCO-series datasets. 
Each query is constructed by randomly selecting 4 to 6 object categories from each image and generating text prompts like \"Can you segment the class 1, class 2, ..., and class n?\" We then compare " + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathbf{M}^2\\mathbf{SA}" + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": "'s performance against LISA," + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 171, + 114, + 442, + 175 + ], + "blocks": [ + { + "bbox": [ + 104, + 89, + 504, + 111 + ], + "lines": [ + { + "bbox": [ + 104, + 89, + 504, + 111 + ], + "spans": [ + { + "bbox": [ + 104, + 89, + 504, + 111 + ], + "type": "text", + "content": "Table 4: Multi-referring expression segmentation results. We adopt the cIoU metric for comparison. The best results are highlighted in bold." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 171, + 114, + 442, + 175 + ], + "lines": [ + { + "bbox": [ + 171, + 114, + 442, + 175 + ], + "spans": [ + { + "bbox": [ + 171, + 114, + 442, + 175 + ], + "type": "table", + "html": "
MethodsMulti-RefCOCOMulti-RefCOCO+Multi-RefCOCOg
valtestAtestBvaltestAtestBval(U)test(U)
LISA-7B (Lai et al., 2023)34.032.736.428.228.628.545.248.7
GSVA-7B (Xia et al., 2024)50.753.347.844.847.440.647.748.6
GLaMM (Rasheed et al., 2024)30.832.030.028.829.627.232.535.0
M2SA-7B71.373.367.261.865.355.862.063.6
LISA-Llama2-13B (Lai et al., 2023)33.232.632.427.729.926.744.047.1
M2SA-Llama2-13B72.075.668.062.367.156.165.465.8
", + "image_path": "1bc0cfe22f88aa17d1c7853a7780b317f2a15b12f7e2ba9be4caab3220821938.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 171, + 255, + 442, + 358 + ], + "blocks": [ + { + "bbox": [ + 104, + 206, + 504, + 251 + ], + "lines": [ + { + "bbox": [ + 104, + 206, + 504, + 251 + ], + "spans": [ + { + "bbox": [ + 104, + 206, + 504, + 251 + ], + "type": "text", + "content": "Table 5: Multi-granularity referring expression segmentation results on RefCOCom (Wang et al., 2023). For a fair comparison with previous methods, the mIoU metrics are adopted. Part denotes part-only evaluation, and Obj & Part denotes multi-granularity evaluation. The best results are highlighted in bold." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 171, + 255, + 442, + 358 + ], + "lines": [ + { + "bbox": [ + 171, + 255, + 442, + 358 + ], + "spans": [ + { + "bbox": [ + 171, + 255, + 442, + 358 + ], + "type": "table", + "html": "
MethodsvaltestAtestB
PartObj & PartPartObj & PartPartObj & Part
SeqTR (Zhu et al., 2022)13.928.212.122.818.134.7
CRIS (Wang et al., 2022)10.625.410.121.212.930.0
LAVT (Yang et al., 2022)15.329.913.224.418.735.5
X-Decoder (Zou et al., 2023)16.229.513.623.620.333.8
SEEM (Zou et al., 2024)16.129.413.623.420.433.9
UniRES (Wang et al., 2023)19.634.316.427.825.241.7
LISA-7B (Lai et al., 2023)21.334.318.528.625.740.1
GSVA-7B (Xia et al., 2024)11.423.19.219.216.828.2
GLaMM (Rasheed et al., 2024)21.435.318.629.526.941.1
M²SA-7B22.435.519.930.127.141.4
LISA-Llama2-13B (Lai et al., 2023)22.135.219.429.727.241.6
M²SA-Llama2-13B24.537.321.931.928.542.7
", + "image_path": "0889fdd67ae440737f6ceb95d6e3c377a2dd148baa0b5268b372c0db34ac76ac.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 392, + 504, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 392, + 504, + 426 + ], + "spans": [ + { + "bbox": [ + 104, + 392, + 504, + 426 + ], + "type": "text", + "content": "GSVA, and GLaMM. As shown in Tab. 4, " + }, + { + "bbox": [ + 104, + 392, + 504, + 426 + ], + "type": "inline_equation", + "content": "\\mathbf{M}^2\\mathbf{SA}" + }, + { + "bbox": [ + 104, + 392, + 504, + 426 + ], + "type": "text", + "content": " significantly outperforms these methods, showcasing its ability to reason about multiple objects simultaneously and effectively leverage its multi [SEG] tokens for diverse and intricate queries." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 430, + 506, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 430, + 506, + 498 + ], + "spans": [ + { + "bbox": [ + 104, + 430, + 506, + 498 + ], + "type": "text", + "content": "Additionally, we evaluate " + }, + { + "bbox": [ + 104, + 430, + 506, + 498 + ], + "type": "inline_equation", + "content": "\\mathbf{M}^2\\mathrm{SA}" + }, + { + "bbox": [ + 104, + 430, + 506, + 498 + ], + "type": "text", + "content": " on RefCOCOM, a multi-granularity referring segmentation dataset. As demonstrated in Tab. 5, " + }, + { + "bbox": [ + 104, + 430, + 506, + 498 + ], + "type": "inline_equation", + "content": "\\mathbf{M}^2\\mathrm{SA}" + }, + { + "bbox": [ + 104, + 430, + 506, + 498 + ], + "type": "text", + "content": " surpasses existing methods in this task, though the performance improvement is less pronounced. This is likely because the MMR dataset does not include the person class, which constitutes a significant portion of the categories in RefCOCOM. 
These results emphasize the versatility and effectiveness of " + }, + { + "bbox": [ + 104, + 430, + 506, + 498 + ], + "type": "inline_equation", + "content": "\\mathbf{M}^2\\mathrm{SA}" + }, + { + "bbox": [ + 104, + 430, + 506, + 498 + ], + "type": "text", + "content": " in addressing complex, real-world scenarios, extending well beyond simple single-target segmentation tasks." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 523, + 196, + 536 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 523, + 196, + 536 + ], + "spans": [ + { + "bbox": [ + 105, + 523, + 196, + 536 + ], + "type": "text", + "content": "6 CONCLUSION" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 554, + 506, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 554, + 506, + 665 + ], + "spans": [ + { + "bbox": [ + 104, + 554, + 506, + 665 + ], + "type": "text", + "content": "This paper addresses the limitations of current reasoning segmentation datasets, which often overlook multi-target or part-level reasoning. To resolve these issues, we introduce the Multi-target and Multi-granularity Reasoning (MMR) dataset, providing 194K comprehensive question-answer pairs that cover multi-target, object-level, and part-level aspects, enhancing diverse and context-aware interactions. We also propose the " + }, + { + "bbox": [ + 104, + 554, + 506, + 665 + ], + "type": "inline_equation", + "content": "\\mathbf{M}^2\\mathbf{SA}" + }, + { + "bbox": [ + 104, + 554, + 506, + 665 + ], + "type": "text", + "content": " model, designed for multi-target, object-level, and part-level reasoning segmentation. 
" + }, + { + "bbox": [ + 104, + 554, + 506, + 665 + ], + "type": "inline_equation", + "content": "\\mathbf{M}^2\\mathbf{SA}" + }, + { + "bbox": [ + 104, + 554, + 506, + 665 + ], + "type": "text", + "content": " incorporates early local feature fusion and multiple [SEG] tokens, improving fine-grained visual understanding and multi-target segmentation. Experimental results show that " + }, + { + "bbox": [ + 104, + 554, + 506, + 665 + ], + "type": "inline_equation", + "content": "\\mathbf{M}^2\\mathbf{SA}" + }, + { + "bbox": [ + 104, + 554, + 506, + 665 + ], + "type": "text", + "content": " outperforms existing models on the MMR benchmark. The MMR dataset aims to drive progress in reasoning segmentation by emphasizing the importance of multi-target and part-level aspects in human-AI interactions." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 690, + 219, + 702 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 690, + 219, + 702 + ], + "spans": [ + { + "bbox": [ + 105, + 690, + 219, + 702 + ], + "type": "text", + "content": "ACKNOWLEDGMENTS" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 719, + 490, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 719, + 490, + 733 + ], + "spans": [ + { + "bbox": [ + 105, + 719, + 490, + 733 + ], + "type": "text", + "content": "This research has been supported by the LG Electronics Corporation. (Project No. 
G01230381)" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 107, + 81, + 176, + 93 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 176, + 93 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 176, + 93 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 100, + 505, + 732 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 105, + 100, + 505, + 134 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 100, + 505, + 134 + ], + "spans": [ + { + "bbox": [ + 105, + 100, + 505, + 134 + ], + "type": "text", + "content": "Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 143, + 505, + 187 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 143, + 505, + 187 + ], + "spans": [ + { + "bbox": [ + 105, + 143, + 505, + 187 + ], + "type": "text", + "content": "Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katherine Millican, Malcolm Reynolds, et al. Flamingo: a visual language model for few-shot learning. Advances in neural information processing systems, 35:23716-23736, 2022." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 197, + 505, + 230 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 197, + 505, + 230 + ], + "spans": [ + { + "bbox": [ + 105, + 197, + 505, + 230 + ], + "type": "text", + "content": "Jinze Bai, Shuai Bai, Shusheng Yang, Shijie Wang, Sinan Tan, Peng Wang, Junyang Lin, Chang Zhou, and Jingren Zhou. Qwen-vl: A frontier large vision-language model with versatile abilities. arXiv preprint arXiv:2308.12966, 2023." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 239, + 505, + 272 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 239, + 505, + 272 + ], + "spans": [ + { + "bbox": [ + 105, + 239, + 505, + 272 + ], + "type": "text", + "content": "Holger Caesar, Jasper Uijlings, and Vittorio Ferrari. Coco-stuff: Thing and stuff classes in context. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 1209-1218, 2018." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 281, + 505, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 281, + 505, + 304 + ], + "spans": [ + { + "bbox": [ + 105, + 281, + 505, + 304 + ], + "type": "text", + "content": "Keqin Chen, Zhao Zhang, Weili Zeng, Richong Zhang, Feng Zhu, and Rui Zhao. Shikra: Unleashing multimodal llm's referential dialogue magic. arXiv preprint arXiv:2306.15195, 2023." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 313, + 505, + 357 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 313, + 505, + 357 + ], + "spans": [ + { + "bbox": [ + 105, + 313, + 505, + 357 + ], + "type": "text", + "content": "Xianjie Chen, Roozbeh Mottaghi, Xiaobai Liu, Sanja Fidler, Raquel Urtasun, and Alan Yuille. Detect what you can: Detecting and representing objects using holistic models and body parts. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 1971-1978, 2014." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 367, + 505, + 411 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 367, + 505, + 411 + ], + "spans": [ + { + "bbox": [ + 105, + 367, + 505, + 411 + ], + "type": "text", + "content": "Wei-Lin Chiang, Zhuohan Li, Zi Lin, Ying Sheng, Zhanghao Wu, Hao Zhang, Lianmin Zheng, Siyuan Zhuang, Yonghao Zhuang, Joseph E Gonzalez, et al. Vicuna: An open-source chatbot impressing gpt-4 with " + }, + { + "bbox": [ + 105, + 367, + 505, + 411 + ], + "type": "inline_equation", + "content": "90\\%" + }, + { + "bbox": [ + 105, + 367, + 505, + 411 + ], + "type": "text", + "content": " chatgpt quality. See https://vicuna.lmsys.org (accessed 14 April 2023), 2(3):6, 2023." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 420, + 505, + 454 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 420, + 505, + 454 + ], + "spans": [ + { + "bbox": [ + 105, + 420, + 505, + 454 + ], + "type": "text", + "content": "Seokju Cho, Heeseong Shin, Sunghwan Hong, Seungjun An, Seungjun Lee, Anurag Arnab, Paul Hongsuck Seo, and Seungryong Kim. Cat-seg: Cost aggregation for open-vocabulary semantic segmentation. arXiv preprint arXiv:2303.11797, 2023." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 463, + 505, + 508 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 463, + 505, + 508 + ], + "spans": [ + { + "bbox": [ + 105, + 463, + 505, + 508 + ], + "type": "text", + "content": "Marius Cordts, Mohamed Omran, Sebastian Ramos, Timo Rehfeld, Markus Enzweiler, Rodrigo Benenson, Uwe Franke, Stefan Roth, and Bernt Schiele. The cityscapes dataset for semantic urban scene understanding. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 3213-3223, 2016." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 517, + 505, + 561 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 517, + 505, + 561 + ], + "spans": [ + { + "bbox": [ + 105, + 517, + 505, + 561 + ], + "type": "text", + "content": "Wenliang Dai, Junnan Li, Dongxu Li, Anthony Meng Huat Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale N Fung, and Steven Hoi. Instructclip: Towards general-purpose vision-language models with instruction tuning. Advances in Neural Information Processing Systems, 36, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 570, + 505, + 604 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 570, + 505, + 604 + ], + "spans": [ + { + "bbox": [ + 105, + 570, + 505, + 604 + ], + "type": "text", + "content": "Henghui Ding, Chang Liu, Suchen Wang, and Xudong Jiang. Vision-language transformer and query generation for referring segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 16321-16330, 2021." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 613, + 505, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 613, + 505, + 647 + ], + "spans": [ + { + "bbox": [ + 105, + 613, + 505, + 647 + ], + "type": "text", + "content": "Peng Gao, Jiaming Han, Renrui Zhang, Ziyi Lin, Shijie Geng, Aojun Zhou, Wei Zhang, Pan Lu, Conghui He, Xiangyu Yue, et al. Llama-adapter v2: Parameter-efficient visual instruction model. arXiv preprint arXiv:2304.15010, 2023." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 655, + 505, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 655, + 505, + 689 + ], + "spans": [ + { + "bbox": [ + 105, + 655, + 505, + 689 + ], + "type": "text", + "content": "Ke Gong, Xiaodan Liang, Dongyu Zhang, Xiaohui Shen, and Liang Lin. Look into person: Self-supervised structure-sensitive learning and a new benchmark for human parsing. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 932-940, 2017." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 698, + 505, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 698, + 505, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 505, + 732 + ], + "type": "text", + "content": "Ju He, Shuo Yang, Shaokang Yang, Adam Kortylewski, Xiaoding Yuan, Jie-Neng Chen, Shuai Liu, Cheng Yang, Qihang Yu, and Alan Yuille. Partimagenet: A large, high-quality dataset of parts. In European Conference on Computer Vision, pp. 128-145. Springer, 2022." 
+ } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 731 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 107, + 81, + 505, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 505, + 116 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 505, + 116 + ], + "type": "text", + "content": "Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. Lora: Low-rank adaptation of large language models. arXiv preprint arXiv:2106.09685, 2021." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 122, + 505, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 122, + 505, + 158 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 505, + 158 + ], + "type": "text", + "content": "Yutao Hu, Qixiong Wang, Wenqi Shao, Enze Xie, Zhenguo Li, Jungong Han, and Ping Luo. Beyond one-to-one: Rethinking the referring image segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 4067-4077, 2023." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 165, + 505, + 211 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 165, + 505, + 211 + ], + "spans": [ + { + "bbox": [ + 105, + 165, + 505, + 211 + ], + "type": "text", + "content": "Menglin Jia, Mengyun Shi, Mikhail Sirotenko, Yin Cui, Claire Cardie, Bharath Hariharan, Hartwig Adam, and Serge Belongie. Fashionpedia: Ontology, segmentation, and an attribute localization dataset. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part I 16, pp. 316-332. Springer, 2020." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 218, + 505, + 253 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 218, + 505, + 253 + ], + "spans": [ + { + "bbox": [ + 105, + 218, + 505, + 253 + ], + "type": "text", + "content": "Sahar Kazemzadeh, Vicente Ordonez, Mark Matten, and Tamara Berg. Referitgame: Referring to objects in photographs of natural scenes. In Proceedings of the 2014 conference on empirical methods in natural language processing (EMNLP), pp. 787-798, 2014." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 259, + 505, + 294 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 259, + 505, + 294 + ], + "spans": [ + { + "bbox": [ + 105, + 259, + 505, + 294 + ], + "type": "text", + "content": "Alexander Kirillov, Kaiming He, Ross Girshick, Carsten Rother, and Piotr Dólar. Panoptic segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 9404-9413, 2019." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 301, + 505, + 336 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 301, + 505, + 336 + ], + "spans": [ + { + "bbox": [ + 105, + 301, + 505, + 336 + ], + "type": "text", + "content": "Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 4015-4026, 2023." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 343, + 505, + 367 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 343, + 505, + 367 + ], + "spans": [ + { + "bbox": [ + 105, + 343, + 505, + 367 + ], + "type": "text", + "content": "Xin Lai, Zhuotao Tian, Yukang Chen, Yanwei Li, Yuhui Yuan, Shu Liu, and Jiaya Jia. Lisa: Reasoning segmentation via large language model. arXiv preprint arXiv:2308.00692, 2023." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 374, + 505, + 398 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 374, + 505, + 398 + ], + "spans": [ + { + "bbox": [ + 105, + 374, + 505, + 398 + ], + "type": "text", + "content": "Jianshu Li, Jian Zhao, Yunchao Wei, Congyan Lang, Yidong Li, Terence Sim, Shuicheng Yan, and Jiashi Feng. Multiple-human parsing in the wild. arXiv preprint arXiv:1705.07206, 2017." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 405, + 505, + 440 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 405, + 505, + 440 + ], + "spans": [ + { + "bbox": [ + 105, + 405, + 505, + 440 + ], + "type": "text", + "content": "Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. In International conference on machine learning, pp. 19730–19742. PMLR, 2023." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 447, + 505, + 482 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 447, + 505, + 482 + ], + "spans": [ + { + "bbox": [ + 105, + 447, + 505, + 482 + ], + "type": "text", + "content": "Xiangtai Li, Shilin Xu, Yibo Yang, Guangliang Cheng, Yunhai Tong, and Dacheng Tao. Panoptic-partformer: Learning a unified model for panoptic part segmentation. In European Conference on Computer Vision, pp. 729-747. Springer, 2022." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 489, + 505, + 534 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 489, + 505, + 534 + ], + "spans": [ + { + "bbox": [ + 105, + 489, + 505, + 534 + ], + "type": "text", + "content": "Feng Liang, Bichen Wu, Xiaoliang Dai, Kunpeng Li, Yinan Zhao, Hang Zhang, Peizhao Zhang, Peter Vajda, and Diana Marculescu. Open-vocabulary semantic segmentation with mask-adapted clip. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 7061-7070, 2023." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 541, + 505, + 575 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 541, + 505, + 575 + ], + "spans": [ + { + "bbox": [ + 105, + 541, + 505, + 575 + ], + "type": "text", + "content": "Chang Liu, Henghui Ding, and Xudong Jiang. Gres: Generalized referring expression segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 23592-23601, 2023a." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 583, + 505, + 607 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 583, + 505, + 607 + ], + "spans": [ + { + "bbox": [ + 105, + 583, + 505, + 607 + ], + "type": "text", + "content": "Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. Advances in neural information processing systems, 36, 2024." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 614, + 505, + 649 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 614, + 505, + 649 + ], + "spans": [ + { + "bbox": [ + 105, + 614, + 505, + 649 + ], + "type": "text", + "content": "Zhaoyang Liu, Yinan He, Wenhai Wang, Weiyun Wang, Yi Wang, Shoufa Chen, Qinglong Zhang, Yang Yang, Qingyun Li, Jiashuo Yu, et al. Internchat: Solving vision-centric tasks by interacting with chatbots beyond language. arXiv preprint arXiv:2305.05662, 2023b." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 655, + 505, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 655, + 505, + 679 + ], + "spans": [ + { + "bbox": [ + 105, + 655, + 505, + 679 + ], + "type": "text", + "content": "Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 687, + 505, + 731 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 687, + 505, + 731 + ], + "spans": [ + { + "bbox": [ + 105, + 687, + 505, + 731 + ], + "type": "text", + "content": "Gen Luo, Yiyi Zhou, Xiaoshuai Sun, Liujuan Cao, Chenglin Wu, Cheng Deng, and Rongrong Ji. Multi-task collaborative network for joint referring expression comprehension and segmentation. In Proceedings of the IEEE/CVF Conference on computer vision and pattern recognition, pp. 10034-10043, 2020." 
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 732 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 505, + 117 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 505, + 117 + ], + "type": "text", + "content": "Junhua Mao, Jonathan Huang, Alexander Toshev, Oana Camburu, Alan L Yuille, and Kevin Murphy. Generation and comprehension of unambiguous object descriptions. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 11-20, 2016." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 121, + 505, + 156 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 121, + 505, + 156 + ], + "spans": [ + { + "bbox": [ + 105, + 121, + 505, + 156 + ], + "type": "text", + "content": "Panagiotis Meletis, Xiaoxiao Wen, Chenyang Lu, Daan de Geus, and Gijs Dubbelman. Cityscapes-panoptic-parts and Pascal-panoptic-parts datasets for scene understanding. arXiv preprint arXiv:2004.07944, 2020." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 162, + 505, + 208 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 162, + 505, + 208 + ], + "spans": [ + { + "bbox": [ + 105, + 162, + 505, + 208 + ], + "type": "text", + "content": "Umberto Michieli, Edoardo Borsato, Luca Rossi, and Pietro Zanuttigh. Gmnet: Graph matching network for large scale part semantic segmentation in the wild. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part VIII 16, pp. 397-414. Springer, 2020." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 213, + 505, + 258 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 213, + 505, + 258 + ], + "spans": [ + { + "bbox": [ + 105, + 213, + 505, + 258 + ], + "type": "text", + "content": "Kaichun Mo, Shilin Zhu, Angel X Chang, Li Yi, Subarna Tripathi, Leonidas J Guibas, and Hao Su. Partnet: A large-scale benchmark for fine-grained and hierarchical part-level 3d object understanding. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 909-918, 2019." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 265, + 505, + 300 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 265, + 505, + 300 + ], + "spans": [ + { + "bbox": [ + 105, + 265, + 505, + 300 + ], + "type": "text", + "content": "Gerhard Neuhold, Tobias Ollmann, Samuel Rota Bulo, and Peter Kontschieder. The mapillary vistas dataset for semantic understanding of street scenes. In Proceedings of the IEEE international conference on computer vision, pp. 4990-4999, 2017." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 305, + 505, + 339 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 305, + 505, + 339 + ], + "spans": [ + { + "bbox": [ + 105, + 305, + 505, + 339 + ], + "type": "text", + "content": "Tai-Yu Pan, Qing Liu, Wei-Lun Chao, and Brian Price. 
Towards open-world segmentation of parts. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 15392-15401, 2023." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 346, + 505, + 369 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 346, + 505, + 369 + ], + "spans": [ + { + "bbox": [ + 105, + 346, + 505, + 369 + ], + "type": "text", + "content": "Baolin Peng, Chunyuan Li, Pengcheng He, Michel Galley, and Jianfeng Gao. Instruction tuning with gpt-4. arXiv preprint arXiv:2304.03277, 2023." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 375, + 505, + 419 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 375, + 505, + 419 + ], + "spans": [ + { + "bbox": [ + 105, + 375, + 505, + 419 + ], + "type": "text", + "content": "Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pp. 8748-8763. PMLR, 2021." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 426, + 505, + 472 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 426, + 505, + 472 + ], + "spans": [ + { + "bbox": [ + 105, + 426, + 505, + 472 + ], + "type": "text", + "content": "Vignesh Ramanathan, Anmol Kalia, Vladan Petrovic, Yi Wen, Baixue Zheng, Baishan Guo, Rui Wang, Aaron Marquez, Rama Kovvuri, Abhishek Kadian, et al. Paco: Parts and attributes of common objects. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 7141-7151, 2023." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 478, + 505, + 523 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 478, + 505, + 523 + ], + "spans": [ + { + "bbox": [ + 105, + 478, + 505, + 523 + ], + "type": "text", + "content": "Hanoona Rasheed, Muhammad Maaz, Sahal Shaji, Abdelrahman Shaker, Salman Khan, Hisham Cholakkal, Rao M Anwer, Eric Xing, Ming-Hsuan Yang, and Fahad S Khan. Glamm: Pixel grounding large multimodal model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 13009-13018, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 529, + 505, + 563 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 529, + 505, + 563 + ], + "spans": [ + { + "bbox": [ + 105, + 529, + 505, + 563 + ], + "type": "text", + "content": "Zhongwei Ren, Zhicheng Huang, Yunchao Wei, Yao Zhao, Dongmei Fu, Jiashi Feng, and Xiaojie Jin. Pixel reasoning with large multimodal model. arXiv preprint arXiv:2312.02228, 2023." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 570, + 505, + 604 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 570, + 505, + 604 + ], + "spans": [ + { + "bbox": [ + 105, + 570, + 505, + 604 + ], + "type": "text", + "content": "Stephan R Richter, Vibhav Vineet, Stefan Roth, and Vladlen Koltun. Playing for data: Ground truth from computer games. In Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part II 14, pp. 102-118. Springer, 2016." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 610, + 505, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 610, + 505, + 633 + ], + "spans": [ + { + "bbox": [ + 105, + 610, + 505, + 633 + ], + "type": "text", + "content": "Konstantinos I Roumeliotis and Nikolaos D Tselikas. Chatgpt and open-ai models: A preliminary review. 
Future Internet, 15(6):192, 2023." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 639, + 505, + 663 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 639, + 505, + 663 + ], + "spans": [ + { + "bbox": [ + 105, + 639, + 505, + 663 + ], + "type": "text", + "content": "Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li, Carlos Guestrin, Percy Liang, and Tatsunori B Hashimoto. Stanford alpaca: An instruction-following llama model, 2023." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 669, + 505, + 703 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 669, + 505, + 703 + ], + "spans": [ + { + "bbox": [ + 105, + 669, + 505, + 703 + ], + "type": "text", + "content": "Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, et al. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971, 2023." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 709, + 505, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 709, + 505, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 709, + 505, + 732 + ], + "type": "text", + "content": "Catherine Wah, Steve Branson, Peter Welinder, Pietro Perona, and Serge Belongie. The caltech-ucsd birds-200-2011 dataset. 2011." 
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 733 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 107, + 81, + 505, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 505, + 116 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 505, + 116 + ], + "type": "text", + "content": "Wenxuan Wang, Tongtian Yue, Yisi Zhang, Longteng Guo, Xingjian He, Xinlong Wang, and Jing Liu. Unveiling parts beyond objects: Towards finer-granularity referring expression segmentation. arXiv preprint arXiv:2312.08007, 2023." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 122, + 505, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 122, + 505, + 158 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 505, + 158 + ], + "type": "text", + "content": "Zhaoqing Wang, Yu Lu, Qiang Li, Xunqiang Tao, Yandong Guo, Mingming Gong, and Tongliang Liu. Cris: Clip-driven referring image segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 11686-11695, 2022." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 163, + 504, + 198 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 163, + 504, + 198 + ], + "spans": [ + { + "bbox": [ + 105, + 163, + 504, + 198 + ], + "type": "text", + "content": "Meng Wei, Xiaoyu Yue, Wenwei Zhang, Shu Kong, Xihui Liu, and Jiangmiao Pang. Ov-parts: Towards open-vocabulary part segmentation. Advances in Neural Information Processing Systems, 36, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 205, + 504, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 205, + 504, + 239 + ], + "spans": [ + { + "bbox": [ + 105, + 205, + 504, + 239 + ], + "type": "text", + "content": "Zhuofan Xia, Dongchen Han, Yizeng Han, Xuran Pan, Shiji Song, and Gao Huang. Gsva: Generalized segmentation via multimodal large language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 3858-3869, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 246, + 506, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 246, + 506, + 281 + ], + "spans": [ + { + "bbox": [ + 105, + 246, + 506, + 281 + ], + "type": "text", + "content": "Jilan Xu, Junlin Hou, Yuejie Zhang, Rui Feng, Yi Wang, Yu Qiao, and Weidi Xie. Learning open-vocabulary semantic segmentation models from natural language supervision. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 2935-2944, 2023." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 287, + 504, + 320 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 287, + 504, + 320 + ], + "spans": [ + { + "bbox": [ + 105, + 287, + 504, + 320 + ], + "type": "text", + "content": "Lu Yang, Qing Song, Zhihui Wang, and Ming Jiang. Parsing r-cnn for instance-level human analysis. 
In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 364-373, 2019." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 328, + 504, + 363 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 328, + 504, + 363 + ], + "spans": [ + { + "bbox": [ + 105, + 328, + 504, + 363 + ], + "type": "text", + "content": "Zhao Yang, Jiaqi Wang, Yansong Tang, Kai Chen, Hengshuang Zhao, and Philip HS Torr. Lavt: Language-aware vision transformer for referring image segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 18155-18165, 2022." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 369, + 504, + 404 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 369, + 504, + 404 + ], + "spans": [ + { + "bbox": [ + 105, + 369, + 504, + 404 + ], + "type": "text", + "content": "Haoxuan You, Haotian Zhang, Zhe Gan, Xianzhi Du, Bowen Zhang, Zirui Wang, Liangliang Cao, Shih-Fu Chang, and Yinfei Yang. Ferret: Refer and ground anything anywhere at any granularity. arXiv preprint arXiv:2310.07704, 2023." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 411, + 504, + 445 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 411, + 504, + 445 + ], + "spans": [ + { + "bbox": [ + 105, + 411, + 504, + 445 + ], + "type": "text", + "content": "Renrui Zhang, Jiaming Han, Chris Liu, Peng Gao, Aojun Zhou, Xiangfei Hu, Shilin Yan, Pan Lu, Hongsheng Li, and Yu Qiao. Llama-adapter: Efficient fine-tuning of language models with zero-init attention. arXiv preprint arXiv:2303.16199, 2023a." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 451, + 504, + 485 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 451, + 504, + 485 + ], + "spans": [ + { + "bbox": [ + 105, + 451, + 504, + 485 + ], + "type": "text", + "content": "Shilong Zhang, Peize Sun, Shoufa Chen, Min Xiao, Wenqi Shao, Wenwei Zhang, Kai Chen, and Ping Luo. Gpt4roi: Instruction tuning large language model on region-of-interest. arXiv preprint arXiv:2307.03601, 2023b." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 492, + 504, + 527 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 492, + 504, + 527 + ], + "spans": [ + { + "bbox": [ + 105, + 492, + 504, + 527 + ], + "type": "text", + "content": "Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen, Christopher Dewan, Mona Diab, Xian Li, Xi Victoria Lin, et al. Opt: Open pre-trained transformer language models. arXiv preprint arXiv:2205.01068, 2022." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 533, + 504, + 568 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 533, + 504, + 568 + ], + "spans": [ + { + "bbox": [ + 105, + 533, + 504, + 568 + ], + "type": "text", + "content": "Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zi Lin, Zhuohan Li, Dacheng Li, Eric Xing, et al. Judging llm-as-a-judge with mt-bench and chatbot arena. Advances in Neural Information Processing Systems, 36, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 574, + 504, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 574, + 504, + 609 + ], + "spans": [ + { + "bbox": [ + 105, + 574, + 504, + 609 + ], + "type": "text", + "content": "Shuai Zheng, Fan Yang, M Hadi Kiapour, and Robinson Piramuthu. Modanet: A large-scale street fashion dataset with polygon annotations. 
In Proceedings of the 26th ACM international conference on Multimedia, pp. 1670-1678, 2018." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 616, + 504, + 650 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 616, + 504, + 650 + ], + "spans": [ + { + "bbox": [ + 105, + 616, + 504, + 650 + ], + "type": "text", + "content": "Bolei Zhou, Hang Zhao, Xavier Puig, Tete Xiao, Sanja Fidler, Adela Barriuso, and Antonio Torralba. Semantic understanding of scenes through the ade20k dataset. International Journal of Computer Vision, 127:302-321, 2019." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 656, + 504, + 692 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 656, + 504, + 692 + ], + "spans": [ + { + "bbox": [ + 105, + 656, + 504, + 692 + ], + "type": "text", + "content": "Tianfei Zhou, Wenguan Wang, Si Liu, Yi Yang, and Luc Van Gool. Differentiable multi-granularity human representation learning for instance-aware human semantic parsing. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 1622-1631, 2021." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 698, + 504, + 733 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 698, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 504, + 733 + ], + "type": "text", + "content": "Chaoyang Zhu, Yiyi Zhou, Yunhang Shen, Gen Luo, Xingjia Pan, Mingbao Lin, Chao Chen, Liujuan Cao, Xiaoshuai Sun, and Rongrong Ji. Seqtr: A simple yet universal network for visual grounding. In European Conference on Computer Vision, pp. 598-615. Springer, 2022." 
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 209 + ], + "type": "list", + "angle": 0, + "index": 4, + "blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 505, + 116 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 505, + 116 + ], + "type": "text", + "content": "Deyao Zhu, Jun Chen, Xiaoqian Shen, Xiang Li, and Mohamed Elhoseiny. Minigpt-4: Enhancing vision-language understanding with advanced large language models. arXiv preprint arXiv:2304.10592, 2023." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 122, + 505, + 167 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 122, + 505, + 167 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 505, + 167 + ], + "type": "text", + "content": "Xueyan Zou, Zi-Yi Dou, Jianwei Yang, Zhe Gan, Linjie Li, Chunyuan Li, Xiyang Dai, Harkirat Behl, Jianfeng Wang, Lu Yuan, et al. Generalized decoding for pixel, image, and language. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 15116-15127, 2023." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 175, + 505, + 209 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 175, + 505, + 209 + ], + "spans": [ + { + "bbox": [ + 105, + 175, + 505, + 209 + ], + "type": "text", + "content": "Xueyan Zou, Jianwei Yang, Hao Zhang, Feng Li, Linjie Li, Jianfeng Wang, Lijuan Wang, Jianfeng Gao, and Yong Jae Lee. Segment everything everywhere all at once. Advances in Neural Information Processing Systems, 36, 2024." + } + ] + } + ], + "index": 3 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 183, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 183, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 183, + 94 + ], + "type": "text", + "content": "A APPENDIX" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 106, + 186, + 118 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 106, + 186, + 118 + ], + "spans": [ + { + "bbox": [ + 105, + 106, + 186, + 118 + ], + "type": "text", + "content": "A.1 LIMITATION" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 128, + 504, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 128, + 504, + 205 + ], + "spans": [ + { + "bbox": [ 
+ 104, + 128, + 504, + 205 + ], + "type": "text", + "content": "While PACO-LVIS provides diverse and comprehensive object-part mask annotations for common objects, it lacks information on the human class and its parts. Consequently, our question-answer pairs generated based on PACO-LVIS do not consider reasoning about human class and its parts, which is a drawback. Therefore, there is a need for future dataset expansion to include a wider range of objects and parts that exist in real-world environments. Additionally, although we carefully design the prompts to ensure the diversity and quality of the dataset, the content of the question-answer pairs is inherently dependent on the pre-trained knowledge of ChatGPT." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 220, + 212, + 231 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 220, + 212, + 231 + ], + "spans": [ + { + "bbox": [ + 105, + 220, + 212, + 231 + ], + "type": "text", + "content": "A.2 ETHICS CONCERN" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 240, + 504, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 240, + 504, + 308 + ], + "spans": [ + { + "bbox": [ + 104, + 240, + 504, + 308 + ], + "type": "text", + "content": "The MMR dataset is constructed based on the publicly available PACO-LVIS dataset (Ramanathan et al., 2023), which helps mitigate privacy concerns. As the objects and parts within the images are already annotated, we only add text question-answer pairs, ensuring that potential privacy issues remain minimal. These question-answer pairs are generated using the ChatGPT/GPT-4V API (Achiam et al., 2023). While there is a risk of bias from the training data of the ChatGPT/GPT-4V API, we have implemented a thorough data filtering process to remove any ethically problematic content." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 321, + 172, + 332 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 321, + 172, + 332 + ], + "spans": [ + { + "bbox": [ + 105, + 321, + 172, + 332 + ], + "type": "text", + "content": "A.3 LICENSE" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 342, + 504, + 431 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 342, + 504, + 431 + ], + "spans": [ + { + "bbox": [ + 104, + 342, + 504, + 431 + ], + "type": "text", + "content": "We utilize the released code from LISA (Lai et al., 2023) for the baseline model code construction. Since LISA follows Apache License 2.0, our code is also licensed under Apache License 2.0. Additionally, the PACO-LVIS dataset is licensed under a Creative Commons Attribution 4.0 (CC BY 4.0) license. Consequently, our MMR dataset is also licensed under Creative Commons Attribution 4.0 (CC BY 4.0). To download the PACO-LVIS dataset (Ramanathan et al., 2023), we utilize author-released code under the MIT license. We use ChatGPT/GPT-4V API (Achiam et al., 2023) developed by OpenAI to generate the question-answer pairs in the MMR dataset. Specific licensing information for the ChatGPT/GPT-4V API model is proprietary to OpenAI." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 445, + 315, + 456 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 445, + 315, + 456 + ], + "spans": [ + { + "bbox": [ + 105, + 445, + 315, + 456 + ], + "type": "text", + "content": "A.4 THE SPECIFIC DETAILS OF CHATGPT API" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 465, + 504, + 489 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 465, + 504, + 489 + ], + "spans": [ + { + "bbox": [ + 104, + 465, + 504, + 489 + ], + "type": "text", + "content": "The specific command to use the ChatGPT API (Achiam et al., 2023) for generating question-answer pairs in MMR is as follows:" + } + ] + } + ], + "index": 9 + }, + { + "type": "code", + "bbox": [ + 226, + 504, + 386, + 579 + ], + "blocks": [ + { + "bbox": [ + 226, + 504, + 386, + 579 + ], + "lines": [ + { + "bbox": [ + 226, + 504, + 386, + 579 + ], + "spans": [ + { + "bbox": [ + 226, + 504, + 386, + 579 + ], + "type": "text", + "content": "response = open aiCompletion.create \n( \n model=\"gpt-4-vision-preview\", \n messages=prompt, \n temperature=0.7, \n max_tokens=850, \n)" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "code_body" + } + ], + "index": 10, + "sub_type": "code", + "guess_lang": "python" + }, + { + "bbox": [ + 104, + 601, + 504, + 624 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 601, + 504, + 624 + ], + "spans": [ + { + "bbox": [ + 104, + 601, + 504, + 624 + ], + "type": "text", + "content": "Figure 5: To generate question-answer pairs in MMR dataset, we use gpt-4-vision-preview model. For the hyper-parameters, we set the temperature to 0.7 and max_tokens to 850." 
+ } + ] + } + ], + "index": 11, + "type": "text" + }, + { + "bbox": [ + 105, + 645, + 246, + 656 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 645, + 246, + 656 + ], + "spans": [ + { + "bbox": [ + 105, + 645, + 246, + 656 + ], + "type": "text", + "content": "A.5 PROMPTS AND EXAMPLES" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 665, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 665, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 665, + 504, + 733 + ], + "type": "text", + "content": "General MMR Dataset The MMR dataset fundamentally includes multi-target (both objects and parts) answers to each question. In this section, we discuss the full prompt not covered in the main manuscript. Fig. 6 illustrates the prompt used to generate the train, validation, and test datasets. Both text and image prompts are input into GPT-4V (Achiam et al., 2023), resulting in the creation of question-answer pairs that encompass various information about objects and parts. As shown in Fig. 2, the output includes a global caption and question-answer pairs for the image. 
The" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 107, + 81, + 504, + 127 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 504, + 127 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 504, + 127 + ], + "type": "text", + "content": "\"You are an AI visual assistant capable of analyzing a single image. You receive the specific object locations and part locations within the image, along with detailed coordinates. These coordinates are in the form of bounding boxes, represented as (x1, y1, x2, y2). These values correspond to the top left x, top left y, bottom right x, and bottom right y. The height and width of the image you receive are 427 and 640, respectively. Additionally, there may be multiple objects of the same category in the image. To resolve this ambiguity, we use \"object_number\" such as \"person_1\" and \"person_2\" to differentiate between objects of the same category. If a region is a part of an object, the category name is described as \"object's part\", like \"person's body\" and \"bus's wheel\". 
The category names and bounding box coordinates of objects and parts are as follow:" + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 287, + 133, + 473, + 258 + ], + "blocks": [ + { + "bbox": [ + 287, + 133, + 473, + 258 + ], + "lines": [ + { + "bbox": [ + 287, + 133, + 473, + 258 + ], + "spans": [ + { + "bbox": [ + 287, + 133, + 473, + 258 + ], + "type": "image", + "image_path": "24150db4c2490cc27e374eb463bb6d334ecf8dcc379e7e6b81cb2cbbca7a6563.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 132, + 212, + 260 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 132, + 212, + 260 + ], + "spans": [ + { + "bbox": [ + 107, + 132, + 212, + 260 + ], + "type": "text", + "content": "```c\n```\nbottle_1 [459.07, 0.0, 603.49, 315.78];\nbottle_1's label [460, 105, 500, 282];\nbottle_1's neck [470, 0, 593, 62];\nbottle_1's shoulder [461, 56, 603, 103];\nbottle_1's body [460, 94, 604, 291];\nbottle_1's base [463, 287, 596, 316];\nbottle_2 [296.85, 1.15, 416.19, 242.2];\nbottle_2's base [307, 220, 400, 241];\nbottle_2's label [300, 4, 413, 176];\nbottle_2's body [307, 172, 403, 231];\nknife_1 [204.79, 2.4, 238.63, 226.53];\nknife_1's blade [213, 121, 237, 226];\nknife_1's handle [205, 2, 239, 126];\nknife_2 [304.84, 320.65, 615.34, 427.0\nknife_2's blade [305, 321, 601, 422];\nknife_2's handle [529, 399, 616, 426];" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 269, + 503, + 321 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 269, + 503, + 321 + ], + "spans": [ + { + "bbox": [ + 107, + 269, + 503, + 321 + ], + "type": "text", + "content": "You first need to create a global caption for the image without given information. A global caption should summarize the content of the image within a maximum of two sentences. The format for the global caption strictly follows: \"Global caption: GLOBAL_CAPTION_FOR_the_IMAGE. 
What you need to do next is create question-answers-pairs using the information of objects and parts given above. However, when the corresponding object and part name appear in the answers, \"name [coordinates]\" is used as the given information above without changing its form. The goal of generating the question-answers-pair is to use the provided information about objects and object's parts, create a plausible and challenging question about the image, and provide the answer in detail for the image reasoning segmentation. The content of the question must address one of the following two:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 108, + 321, + 373, + 335 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 108, + 321, + 373, + 328 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 321, + 373, + 328 + ], + "spans": [ + { + "bbox": [ + 108, + 321, + 373, + 328 + ], + "type": "text", + "content": "1) the relationship between parts within the image or the relationship between a part and an object." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 108, + 328, + 262, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 328, + 262, + 335 + ], + "spans": [ + { + "bbox": [ + 108, + 328, + 262, + 335 + ], + "type": "text", + "content": "2) the function or the general information about the parts." + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 107, + 342, + 503, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 342, + 503, + 392 + ], + "spans": [ + { + "bbox": [ + 107, + 342, + 503, + 392 + ], + "type": "text", + "content": "The question should be implicit and require commonsense reasoning, rather than explicitly mentioning the names of the object and part. In other words, it's important to make the question challenging by not directly including visual content details. 
The answer should include multiple object's parts. You must build at least 3 rounds of natural question-answer pairs, and if there is sufficient information create up to 5 rounds of question-answer pairs. In addition, please follow the format strictly: The order must be attached to the questions and answers like Question 1: and Answer 1.: In the answer, the coordinates referring to the target part or object must be attached to the object name or part name in the format: object_1[x1, y1, x2, y2] and object_1's part [x1, y1, x2, y2]. Do not use other format such as \"a part of object_1\". Here are some additional requirements about generated question and answers:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 108, + 393, + 500, + 487 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 108, + 393, + 468, + 401 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 393, + 468, + 401 + ], + "spans": [ + { + "bbox": [ + 108, + 393, + 468, + 401 + ], + "type": "text", + "content": "1. Do not mention that the information source is provided in text description. Always answer as if you are directly looking at the image." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 108, + 401, + 399, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 401, + 399, + 407 + ], + "spans": [ + { + "bbox": [ + 108, + 401, + 399, + 407 + ], + "type": "text", + "content": "2. Do not ask the question you are not confident to answer. Only include question that have definite answer." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 108, + 407, + 327, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 407, + 327, + 415 + ], + "spans": [ + { + "bbox": [ + 108, + 407, + 327, + 415 + ], + "type": "text", + "content": "3. Do not mention the coordinates of a part and an object directly in the question." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 108, + 415, + 434, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 415, + 434, + 422 + ], + "spans": [ + { + "bbox": [ + 108, + 415, + 434, + 422 + ], + "type": "text", + "content": "4. Make the questions and answers concise and easy to understand, avoiding overly complex and ambiguous sentences." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 108, + 422, + 341, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 422, + 341, + 430 + ], + "spans": [ + { + "bbox": [ + 108, + 422, + 341, + 430 + ], + "type": "text", + "content": "5. The question should describe a complete activity, a function, or general information." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 108, + 430, + 490, + 444 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 430, + 490, + 444 + ], + "spans": [ + { + "bbox": [ + 108, + 430, + 490, + 444 + ], + "type": "text", + "content": "6. The answer to the generated question should include at least two object's parts and explicitly describe the names of the part and the object. Implied other potential parts is strictly prohibited." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 108, + 444, + 492, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 444, + 492, + 458 + ], + "spans": [ + { + "bbox": [ + 108, + 444, + 492, + 458 + ], + "type": "text", + "content": "7. Even if the image includes the real people and the brand name, or is not associated with the mentioned information, make sure to still create the question-answer pairs." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 108, + 458, + 500, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 458, + 500, + 479 + ], + "spans": [ + { + "bbox": [ + 108, + 458, + 500, + 479 + ], + "type": "text", + "content": "8. 
Avoid using incorrectly formatted object names or part names, such as located at [coordinates] or a part [object_1's part [coordinates]]. In other words, use it as it appears in the object and part information given above. ### For example: shoe_1's outsole [42, 332, 62, 336], not an outsole [shoe_1's outsole [42, 332, 62, 336]]." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 108, + 479, + 391, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 479, + 391, + 487 + ], + "spans": [ + { + "bbox": [ + 108, + 479, + 391, + 487 + ], + "type": "text", + "content": "9. All generated answers must include the given object or part information, without changing the format." + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 115, + 509, + 494, + 521 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 509, + 494, + 521 + ], + "spans": [ + { + "bbox": [ + 115, + 509, + 494, + 521 + ], + "type": "text", + "content": "Figure 6: The text and image prompt used in our data creation for MMR dataset with GPT-4V." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 544, + 504, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 544, + 504, + 567 + ], + "spans": [ + { + "bbox": [ + 105, + 544, + 504, + 567 + ], + "type": "text", + "content": "segmentation mask information for the objects or parts mentioned in the answers is sourced from PACO-LVIS (Ramanathan et al., 2023) to create new annotations." 
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 104, + 582, + 506, + 671 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 582, + 506, + 671 + ], + "spans": [ + { + "bbox": [ + 104, + 582, + 506, + 671 + ], + "type": "text", + "content": "Part-only MMR Test Dataset The MMR dataset includes a substantial amount of information on parts to enhance part-level recognition, which has been overlooked in existing reasoning segmentation datasets. Consequently, we create a part-level test dataset to evaluate part-level recognition separately. Using the text and image prompts shown in Fig. 7, we generate a part-only test dataset from 2000 images with extensive part-level information from PACO-LVIS annotations. As shown in Fig. 8, the output includes a global caption and question-answer pairs for the image. The segmentation mask information for the parts mentioned in the answers is sourced from the PACO-LVIS test dataset to create new annotations." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 104, + 687, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 687, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 687, + 506, + 733 + ], + "type": "text", + "content": "Object-only MMR Test Dataset To evaluate recognition separately for object-level, we create an MMR test dataset that includes only information on objects. We generate an object-only test dataset using the text and image prompts shown in Fig. 9, selecting 2000 images with minimal part-level information. As shown in Fig. 
10, the output includes a global caption and question-answer pairs for" + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 107, + 188, + 502, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 188, + 502, + 232 + ], + "spans": [ + { + "bbox": [ + 107, + 188, + 502, + 232 + ], + "type": "text", + "content": "\"You are an AI visual assistant capable of analyzing a single image. You receive the specific object's part locations within the image, along with detailed coordinates. These coordinates are in the form of bounding boxes, represented as (x1, y1, x2, y2). These values correspond to the top left x, top left y, bottom right x, and bottom right y. The height and width of the image you receive are 428 and 640, respectively. Additionally, there may be multiple objects of the same category in the image. To resolve this ambiguity, we use \"object_number\" such as \"person_1\" and \"person_2\" to differentiate between objects of the same category. If a region is a part of an object, the category name is described as \"object's part\", like \"person's body\" and \"bus's wheel\". 
The category names and bounding box coordinates of parts are as follow:" + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 302, + 236, + 462, + 342 + ], + "blocks": [ + { + "bbox": [ + 302, + 236, + 462, + 342 + ], + "lines": [ + { + "bbox": [ + 302, + 236, + 462, + 342 + ], + "spans": [ + { + "bbox": [ + 302, + 236, + 462, + 342 + ], + "type": "image", + "image_path": "25f739dd3a4a947b306e9cb4eb83141e8bb97b0f0b8399766bf64388dd9abc7a.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 105, + 607, + 504, + 628 + ], + "lines": [ + { + "bbox": [ + 105, + 607, + 504, + 628 + ], + "spans": [ + { + "bbox": [ + 105, + 607, + 504, + 628 + ], + "type": "text", + "content": "Figure 7: The text and image prompt used in our data creation for the part-only MMR test dataset with GPT-4V." + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 237, + 206, + 333 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 237, + 206, + 333 + ], + "spans": [ + { + "bbox": [ + 107, + 237, + 206, + 333 + ], + "type": "text", + "content": "```\n```\ndog_1's eye [235, 67, 291, 100];\ndog_1's ear [324, 36, 426, 145];\ndog_1's nose [184, 98, 212, 127];\ndog_1's teeth [245, 146, 285, 171];\ndog_1's head [169, 20, 427, 202];\ndog_1's foot [337, 204, 510, 407];\ndog_1's leg [212, 95, 542, 356];\ndog_1's body [243, 20, 503, 328];\nbowl_1's rim [143, 298, 369, 378];\nbowl_1's inner_body [150, 302, 361,\nbowl_1's bottom [194, 362, 308, 376\nbowl_1's body [153, 351, 354, 422];" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 346, + 501, + 389 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 346, + 501, + 389 + ], + "spans": [ + { + "bbox": [ + 107, + 346, + 501, + 389 + ], + "type": "text", + "content": "You first need to create a global caption for the image without given information. 
A global caption should summarize the content of the image within a maximum of two sentences. The format for the global caption strictly follows: \"Global caption: GLOBAL_CAPTION_FOR_the_IMAGE. What you need to do next is create question-answers-pairs using the information of object's parts given above. However, when the corresponding object's part name appear in the answers, \"name [coordinates]\" is used as the given information above without changing its form. The goal of generating the question-answers-pair is to use the provided information about object's parts, create a plausible and challenging question about the image, and provide the answer in detail for the image reasoning segmentation. The content of the question must address one of the following two:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 108, + 391, + 267, + 401 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 391, + 267, + 401 + ], + "spans": [ + { + "bbox": [ + 108, + 391, + 267, + 401 + ], + "type": "text", + "content": "1) the relationship between different parts within the image. 2) the function or the general information about the parts." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 410, + 499, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 410, + 499, + 453 + ], + "spans": [ + { + "bbox": [ + 107, + 410, + 499, + 453 + ], + "type": "text", + "content": "The question should be implicit and require commonsense reasoning, rather than explicitly mentioning the names of the object and part. In other words, it's important to make the question challenging by not directly including visual content details. The answer should include multiple object's parts. You must build at least 3 rounds of natural question-answer pairs, and if there is sufficient information create up to 5 rounds of question-answer pairs. 
In addition, please follow the format strictly: The order must be attached to the questions and answers like Question 1: and Answer 1.: In the answer, the coordinates referring to the target part must be attached to the part name in the format: object_1's part [x1, y1, x2, y2]. Do not use other format such as \"a part of object_1. Here are some additional requirements about generated question and answers:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 108, + 453, + 500, + 582 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 108, + 453, + 460, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 453, + 460, + 460 + ], + "spans": [ + { + "bbox": [ + 108, + 453, + 460, + 460 + ], + "type": "text", + "content": "1. Do not mention that the information source is provided in text description. Always answer as if you are directly looking at the image." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 108, + 461, + 392, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 461, + 392, + 467 + ], + "spans": [ + { + "bbox": [ + 108, + 461, + 392, + 467 + ], + "type": "text", + "content": "2. Do not ask the question you are not confident to answer. Only include question that have definite answer." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 108, + 468, + 323, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 468, + 323, + 475 + ], + "spans": [ + { + "bbox": [ + 108, + 468, + 323, + 475 + ], + "type": "text", + "content": "3. Do not mention the coordinates of a part and an object directly in the question." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 108, + 475, + 428, + 482 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 475, + 428, + 482 + ], + "spans": [ + { + "bbox": [ + 108, + 475, + 428, + 482 + ], + "type": "text", + "content": "4. 
Make the questions and answers concise and easy to understand, avoiding overly complex and ambiguous sentences." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 108, + 483, + 336, + 488 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 483, + 336, + 488 + ], + "spans": [ + { + "bbox": [ + 108, + 483, + 336, + 488 + ], + "type": "text", + "content": "5. The question should describe a complete activity, a function, or general information." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 108, + 489, + 483, + 503 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 489, + 483, + 503 + ], + "spans": [ + { + "bbox": [ + 108, + 489, + 483, + 503 + ], + "type": "text", + "content": "6. The answer to the generated question should include at least two object's parts and explicitly describe the names of the part. Implied other potential parts is strictly prohibited." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 108, + 504, + 495, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 504, + 495, + 517 + ], + "spans": [ + { + "bbox": [ + 108, + 504, + 495, + 517 + ], + "type": "text", + "content": "7. Even if the image includes the real people and the brand name, or is not associated with the mentioned information, make sure to still create the question-answer pairs." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 108, + 517, + 495, + 537 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 517, + 495, + 537 + ], + "spans": [ + { + "bbox": [ + 108, + 517, + 495, + 537 + ], + "type": "text", + "content": "8. Avoid using incorrectly formatted part names, such as located at [coordinates] or a part [object_1's part [coordinates]]. In other words, use it as it appears in the part information given above. ## For example: shoe_1's outsole [42, 332, 62, 336], not an outsole [shoe_1's outsole [42, 332, 62, 336]]." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 108, + 539, + 357, + 546 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 539, + 357, + 546 + ], + "spans": [ + { + "bbox": [ + 108, + 539, + 357, + 546 + ], + "type": "text", + "content": "9. All generated answers must include the given part information, without changing the format." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 108, + 547, + 486, + 560 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 547, + 486, + 560 + ], + "spans": [ + { + "bbox": [ + 108, + 547, + 486, + 560 + ], + "type": "text", + "content": "10. When creating questions, ask only questions about the object's parts given above without directly mentioning the part name in the question. Please keep in mind that other parts should not dominate the answer." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 108, + 561, + 500, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 561, + 500, + 567 + ], + "spans": [ + { + "bbox": [ + 108, + 561, + 500, + 567 + ], + "type": "text", + "content": "11. If the number of object's parts given for an image is large enough, create a question so that each round's answer includes different object's parts." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 108, + 568, + 371, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 568, + 371, + 574 + ], + "spans": [ + { + "bbox": [ + 108, + 568, + 371, + 574 + ], + "type": "text", + "content": "12. Do not create questions that are answered by parts other than the part information given above." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 108, + 575, + 329, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 575, + 329, + 582 + ], + "spans": [ + { + "bbox": [ + 108, + 575, + 329, + 582 + ], + "type": "text", + "content": "13. 
If that part doesn't directly answer the question, do not mention it in the answer." + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "content": "the image. The segmentation mask information for the objects mentioned in the answers is sourced from the PACO-LVIS test dataset to create new annotations." + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 108, + 115, + 504, + 248 + ], + "blocks": [ + { + "bbox": [ + 108, + 115, + 504, + 248 + ], + "lines": [ + { + "bbox": [ + 108, + 115, + 504, + 248 + ], + "spans": [ + { + "bbox": [ + 108, + 115, + 504, + 248 + ], + "type": "image", + "image_path": "5ec30dc5b184ebbebb46389d3aa5a525ef1b4b56476504c52bca311256093ad4.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 109, + 249, + 321, + 258 + ], + "lines": [ + { + "bbox": [ + 109, + 249, + 321, + 258 + ], + "spans": [ + { + "bbox": [ + 109, + 249, + 321, + 258 + ], + "type": "text", + "content": "Caption: A dog is sitting next to a bowl, possibly after a drink of water or a meal." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 109, + 262, + 326, + 270 + ], + "lines": [ + { + "bbox": [ + 109, + 262, + 326, + 270 + ], + "spans": [ + { + "bbox": [ + 109, + 262, + 326, + 270 + ], + "type": "text", + "content": "Question1: What features of the animal suggest that it may have just had a drink?" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 109, + 270, + 494, + 285 + ], + "lines": [ + { + "bbox": [ + 109, + 270, + 494, + 285 + ], + "spans": [ + { + "bbox": [ + 109, + 270, + 494, + 285 + ], + "type": "text", + "content": "Answer1: The dog 1's nose [184, 98, 212, 127] seems moist, and this often indicates the dog has just been drinking. In addition, the presence of a bowl 1's rim [143, 298, 369, 378] next to the dog suggests that it could have contained water or food that the dog might have consumed." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 109, + 291, + 372, + 300 + ], + "lines": [ + { + "bbox": [ + 109, + 291, + 372, + 300 + ], + "spans": [ + { + "bbox": [ + 109, + 291, + 372, + 300 + ], + "type": "text", + "content": "Question2: Considering the animal's current position, which part seems to be supporting its weight?" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 109, + 300, + 488, + 314 + ], + "lines": [ + { + "bbox": [ + 109, + 300, + 488, + 314 + ], + "spans": [ + { + "bbox": [ + 109, + 300, + 488, + 314 + ], + "type": "text", + "content": "Answer2: The dog_1's leg [212, 95, 542, 356] and the dog_1's foot [337, 204, 510, 407] are likely bearing the most weight, considering the dog's sitting position adjacent to the bowl_1's body [153, 351, 354, 422]." 
+ } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 109, + 319, + 404, + 327 + ], + "lines": [ + { + "bbox": [ + 109, + 319, + 404, + 327 + ], + "spans": [ + { + "bbox": [ + 109, + 319, + 404, + 327 + ], + "type": "text", + "content": "Question3: If the animal were to listen for approaching sounds, which physical feature would be most engaged?" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 109, + 327, + 493, + 335 + ], + "lines": [ + { + "bbox": [ + 109, + 327, + 493, + 335 + ], + "spans": [ + { + "bbox": [ + 109, + 327, + 493, + 335 + ], + "type": "text", + "content": "Answer3: The dog_1's ear [324, 36, 426, 145] would be most engaged in detecting sounds as ears are responsible for auditory perception in dogs." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 104, + 354, + 504, + 389 + ], + "lines": [ + { + "bbox": [ + 104, + 354, + 504, + 389 + ], + "spans": [ + { + "bbox": [ + 104, + 354, + 504, + 389 + ], + "type": "text", + "content": "Figure 8: An example from the part-only MMR test dataset generated through the prompt in Fig. 7. This example includes information of some object's parts. The left and right pictures show the original image and part-level segmentation masks, respectively." 
+ } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 414, + 197, + 424 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 414, + 197, + 424 + ], + "spans": [ + { + "bbox": [ + 105, + 414, + 197, + 424 + ], + "type": "text", + "content": "A.6 DATA FORMAT" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 434, + 504, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 434, + 504, + 458 + ], + "spans": [ + { + "bbox": [ + 104, + 434, + 504, + 458 + ], + "type": "text", + "content": "The MMR dataset is given in JSON format. The JSON file for each instance is organized as shown in Fig. 11." + } + ] + } + ], + "index": 13 + }, + { + "type": "table", + "bbox": [ + 108, + 526, + 504, + 583 + ], + "blocks": [ + { + "bbox": [ + 104, + 479, + 504, + 514 + ], + "lines": [ + { + "bbox": [ + 104, + 479, + 504, + 514 + ], + "spans": [ + { + "bbox": [ + 104, + 479, + 504, + 514 + ], + "type": "text", + "content": "Table 6: The effect of multiple [SEG] Tokens and Early Local Feature Fusion in " + }, + { + "bbox": [ + 104, + 479, + 504, + 514 + ], + "type": "inline_equation", + "content": "\\mathbf{M}^2\\mathbf{SA}-7\\mathbf{B}" + }, + { + "bbox": [ + 104, + 479, + 504, + 514 + ], + "type": "text", + "content": " on MMR benchmark. Obj & Part, Obj, and Part denote multi-granularity, object-only, and part-only evaluation settings." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 108, + 526, + 504, + 583 + ], + "lines": [ + { + "bbox": [ + 108, + 526, + 504, + 583 + ], + "spans": [ + { + "bbox": [ + 108, + 526, + 504, + 583 + ], + "type": "table", + "html": "
multiple [SEG] TokensEarly Local Feature Fusionvaltest
Obj & PartObjPartObj & Part
gIoUcIoUgIoUcIoUgIoUcIoUgIoUcIoU
××19.431.634.741.88.013.119.527.1
×26.047.739.555.411.725.228.445.2
27.948.541.055.613.527.031.046.8
", + "image_path": "29ddc08ffe7a3ed20803202d06a14a5728b0071d5ae94c19106e4e5dc3234b80.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "table_body" + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 611, + 477, + 633 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 611, + 477, + 633 + ], + "spans": [ + { + "bbox": [ + 105, + 611, + 477, + 633 + ], + "type": "text", + "content": "A.7 EFFECTIVENESS OF THE MULTIPLE [SEG] TAXENS AND EARLY LOCAL FEATURE FUSION" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "type": "text", + "content": "We conduct an ablation study to verify the effectiveness of the multiple [SEG] tokens and Early Local Feature Fusion proposed in " + }, + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathbf{M}^2\\mathbf{SA}" + }, + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "type": "text", + "content": ". Tab. 6 demonstrates that merely adding multiple [SEG] tokens results in significant performance improvements in MMR evaluation benchmarks. This finding suggests that using single [SEG] tokens in the LISA is inadequate to fully capture the segmentation capability. Moreover, performance improvements are evident when Early Local Feature Fusion is incorporated. Notably, there is a substantial performance enhancement in the part-only evaluation setting of the MMR test set. This improvement likely arises because Early Layer features contain local detail information (e.g., edges or boundaries), which aids in part and fine-level segmentation." 
+ } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 107, + 207, + 497, + 246 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 207, + 497, + 246 + ], + "spans": [ + { + "bbox": [ + 107, + 207, + 497, + 246 + ], + "type": "text", + "content": "\"You are an AI visual assistant capable of analyzing a single image. You receive the specific object locations within the image, along with detailed coordinates. These coordinates are in the form of bounding boxes, represented as (x1, y1, x2, y2). These values correspond to the top left x, top left y, bottom right x, and bottom right y. The height and width of the image you receive are 333 and 500, respectively. Additionally, there may be multiple objects of the same category in the image. To resolve this ambiguity, we use \"object_number\" such as \"person_1\" and \"person_2\" to differentiate between objects of the same category. 
The category names and bounding box coordinates of objects are as follow:" + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 329, + 250, + 462, + 338 + ], + "blocks": [ + { + "bbox": [ + 329, + 250, + 462, + 338 + ], + "lines": [ + { + "bbox": [ + 329, + 250, + 462, + 338 + ], + "spans": [ + { + "bbox": [ + 329, + 250, + 462, + 338 + ], + "type": "image", + "image_path": "7cb98bd825e0ff0d51981b9af1c68c0d0b8b01824220fdb5bb54d96a5b1df5e3.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 105, + 583, + 504, + 605 + ], + "lines": [ + { + "bbox": [ + 105, + 583, + 504, + 605 + ], + "spans": [ + { + "bbox": [ + 105, + 583, + 504, + 605 + ], + "type": "text", + "content": "Figure 9: The text and image prompt used in our data creation for the object-only MMR test dataset with GPT-4V." + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 108, + 287, + 218, + 310 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 287, + 218, + 310 + ], + "spans": [ + { + "bbox": [ + 108, + 287, + 218, + 310 + ], + "type": "text", + "content": "mirror_1 [304.9, 35.42, 476.09, 146.49]; \npillow_1 [169.86, 180.73, 221.21, 229.7]; \npillow_2 [370.81, 175.9, 436.81, 231.85];" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 345, + 501, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 345, + 501, + 388 + ], + "spans": [ + { + "bbox": [ + 107, + 345, + 501, + 388 + ], + "type": "text", + "content": "You first need to create a global caption for the image without given information. A global caption should summarize the content of the image within a maximum of two sentences. The format for the global caption strictly follows: \"Global caption: GLOBAL_CAPTION_FOR_the_IMAGE. What you need to do next is create question-answers-pairs using the information of objects given above. 
However, when the corresponding object name appear in the answers, \"name [coordinates]\" is used as the given information above without changing its form. The goal of generating the question-answers-pair is to use the provided information about objects, create a plausible and challenging question about the image, and provide the answer in detail for the image reasoning segmentation. The content of the question must address one of the following two:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 108, + 388, + 269, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 388, + 269, + 403 + ], + "spans": [ + { + "bbox": [ + 108, + 388, + 269, + 403 + ], + "type": "text", + "content": "1) the relationship between objects within the image. \n2) the function or the general information about the objects." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 410, + 500, + 455 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 410, + 500, + 455 + ], + "spans": [ + { + "bbox": [ + 107, + 410, + 500, + 455 + ], + "type": "text", + "content": "The question should be implicit and require commonsense reasoning, rather than explicitly mentioning the names of the object. In other words, it's important to make the question challenging by not directly including visual content details. Some of the answers of the rounds should include multiple different types of objects. You must build at least 3 rounds of natural question-answer pairs, and if there is sufficient information create up to 5 rounds of question-answer pairs. In addition, please follow the format strictly: The order must be attached to the questions and answers like Question 1: and Answer 1.: In the answer, the coordinates referring to the target object must be attached to the object name in the format: object_1[x1, y1, x2, y2]. 
Here are some additional requirements about generated question and answers:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 108, + 460, + 501, + 568 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 108, + 460, + 465, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 460, + 465, + 468 + ], + "spans": [ + { + "bbox": [ + 108, + 460, + 465, + 468 + ], + "type": "text", + "content": "1. Do not mention that the information source is provided in text description. Always answer as if you are directly looking at the image." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 108, + 468, + 396, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 468, + 396, + 475 + ], + "spans": [ + { + "bbox": [ + 108, + 468, + 396, + 475 + ], + "type": "text", + "content": "2. Do not ask the question you are not confident to answer. Only include question that have definite answer." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 108, + 475, + 295, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 475, + 295, + 483 + ], + "spans": [ + { + "bbox": [ + 108, + 475, + 295, + 483 + ], + "type": "text", + "content": "3. Do not mention the coordinates of an object directly in the question." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 108, + 483, + 432, + 490 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 483, + 432, + 490 + ], + "spans": [ + { + "bbox": [ + 108, + 483, + 432, + 490 + ], + "type": "text", + "content": "4. Make the questions and answers concise and easy to understand, avoiding overly complex and ambiguous sentences." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 108, + 490, + 339, + 497 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 490, + 339, + 497 + ], + "spans": [ + { + "bbox": [ + 108, + 490, + 339, + 497 + ], + "type": "text", + "content": "5. 
The question should describe a complete activity, a function, or general information." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 108, + 497, + 501, + 511 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 497, + 501, + 511 + ], + "spans": [ + { + "bbox": [ + 108, + 497, + 501, + 511 + ], + "type": "text", + "content": "6. The answer to the generated question should include at least two objects and explicitly describe the names of the object. Implied other potential objects is strictly prohibited." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 108, + 511, + 500, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 511, + 500, + 525 + ], + "spans": [ + { + "bbox": [ + 108, + 511, + 500, + 525 + ], + "type": "text", + "content": "7. Even if the image includes the real people and the brand name, or is not associated with the mentioned information, make sure to still create the question-answer pairs." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 108, + 525, + 500, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 525, + 500, + 540 + ], + "spans": [ + { + "bbox": [ + 108, + 525, + 500, + 540 + ], + "type": "text", + "content": "8. Avoid using incorrectly formatted object names, such as located at [coordinates] or an object_1 [object_1 [coordinates]]. In other words, use it as it appears in the object information given above." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 108, + 540, + 365, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 540, + 365, + 548 + ], + "spans": [ + { + "bbox": [ + 108, + 540, + 365, + 548 + ], + "type": "text", + "content": "9. All generated answers must include the given object information, without changing the format." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 108, + 548, + 500, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 548, + 500, + 562 + ], + "spans": [ + { + "bbox": [ + 108, + 548, + 500, + 562 + ], + "type": "text", + "content": "10. When creating questions, ask only questions about the objects given above without directly mentioning the object name in the question. Please keep in mind that other objects should not dominate the answer." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 108, + 562, + 474, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 562, + 474, + 568 + ], + "spans": [ + { + "bbox": [ + 108, + 562, + 474, + 568 + ], + "type": "text", + "content": "11. If the number of objects given for an image is large enough, create a question so that each round's answer includes different objects.\"" + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 108, + 103, + 504, + 236 + ], + "blocks": [ + { + "bbox": [ + 108, + 103, + 504, + 236 + ], + "lines": [ + { + "bbox": [ + 108, + 103, + 504, + 236 + ], + "spans": [ + { + "bbox": [ + 108, + 103, + 504, + 236 + ], + "type": "image", + "image_path": 
"9fe2a7877d3f23e47deec1f7c697a3c4f4090e80d25a2f9730e1cac4cb517f93.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 342, + 504, + 377 + ], + "lines": [ + { + "bbox": [ + 104, + 342, + 504, + 377 + ], + "spans": [ + { + "bbox": [ + 104, + 342, + 504, + 377 + ], + "type": "text", + "content": "Figure 10: An example from the object-only MMR test dataset generated through the prompt in Fig. 9. This example includes information of objects. The left and right pictures show the original image and object-level segmentation masks, respectively." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 109, + 237, + 374, + 245 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 237, + 374, + 245 + ], + "spans": [ + { + "bbox": [ + 109, + 237, + 374, + 245 + ], + "type": "text", + "content": "Caption: A cozy living room interior with a large mirror on the wall and decorative pillows on furniture." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 109, + 251, + 373, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 251, + 373, + 258 + ], + "spans": [ + { + "bbox": [ + 109, + 251, + 373, + 258 + ], + "type": "text", + "content": "Question1: Where might someone rest their head while sitting on the furniture to gain extra comfort?" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 109, + 258, + 495, + 273 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 258, + 495, + 273 + ], + "spans": [ + { + "bbox": [ + 109, + 258, + 495, + 273 + ], + "type": "text", + "content": "Anr: a t r comfort while sitting on the furniture." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 109, + 279, + 379, + 287 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 279, + 379, + 287 + ], + "spans": [ + { + "bbox": [ + 109, + 279, + 379, + 287 + ], + "type": "text", + "content": "Question2: In what part of the room could someone check their appearance before leaving the house?" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 109, + 287, + 438, + 294 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 287, + 438, + 294 + ], + "spans": [ + { + "bbox": [ + 109, + 287, + 438, + 294 + ], + "type": "text", + "content": "Answer2: Someone could check their appearance in the mirror_1 [304.9, 35.42, 476.09, 146.49], which is located on the wall." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 109, + 300, + 382, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 300, + 382, + 308 + ], + "spans": [ + { + "bbox": [ + 109, + 300, + 382, + 308 + ], + "type": "text", + "content": "Question3: If a person were to rearrange the decorative cushions, which items would they be handling?" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 109, + 308, + 484, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 308, + 484, + 323 + ], + "spans": [ + { + "bbox": [ + 109, + 308, + 484, + 323 + ], + "type": "text", + "content": "Answer3: If a person were to rearrange the decorative cushions, they would be handling pillow_1 [169.86, 180.73, 221.21, 229.7] and pillow_2 [370.81, 175.9, 436.81, 231.85]." 
+ } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 108, + 432, + 504, + 677 + ], + "blocks": [ + { + "bbox": [ + 108, + 432, + 504, + 677 + ], + "lines": [ + { + "bbox": [ + 108, + 432, + 504, + 677 + ], + "spans": [ + { + "bbox": [ + 108, + 432, + 504, + 677 + ], + "type": "image", + "image_path": "d77ec1a7075ac8a0007df97381c66350ab54740ba78c5a9a039a6d9e2c909c36.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 239, + 694, + 371, + 708 + ], + "lines": [ + { + "bbox": [ + 239, + 694, + 371, + 708 + ], + "spans": [ + { + "bbox": [ + 239, + 694, + 371, + 708 + ], + "type": "text", + "content": "Figure 11: MMR dataset format" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 143, + 105, + 468, + 188 + ], + "blocks": [ + { + "bbox": [ + 114, + 90, + 495, + 102 + ], + "lines": [ + { + "bbox": [ + 114, + 90, + 495, + 102 + ], + "spans": [ + { + "bbox": [ + 114, + 90, + 495, + 102 + ], + "type": "text", + "content": "Table 7: Comparison of computational complexity on LISA, GSVA, and GLaMM, and " + }, + { + "bbox": [ + 114, + 90, + 495, + 102 + ], + "type": "inline_equation", + "content": "{\\mathrm{M}}^{2}\\mathrm{{SA}}" + }, + { 
+ "bbox": [ + 114, + 90, + 495, + 102 + ], + "type": "text", + "content": " ." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 143, + 105, + 468, + 188 + ], + "lines": [ + { + "bbox": [ + 143, + 105, + 468, + 188 + ], + "spans": [ + { + "bbox": [ + 143, + 105, + 468, + 188 + ], + "type": "table", + "html": "
MethodsGPU Memory Usage (GB)TFLOPs
LISA-7B (Lai et al., 2023)30.5832.59
GSVA-7B (Xia et al., 2024)30.39203.77
GLaMM (Rasheed et al., 2024)17.14349.28
M2SA-7B30.6032.62
LISA-Llama2-13B (Lai et al., 2023)55.2056.64
M2SA-Llama2-13B55.2356.67
", + "image_path": "3d73614a9c442d59351a50ed39e49728c94da6cb8d0b2dfbb31559ef802203d8.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 213, + 203, + 224 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 213, + 203, + 224 + ], + "spans": [ + { + "bbox": [ + 105, + 213, + 203, + 224 + ], + "type": "text", + "content": "A.8 TRAINING TIME" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 234, + 504, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 234, + 504, + 258 + ], + "spans": [ + { + "bbox": [ + 104, + 234, + 504, + 258 + ], + "type": "text", + "content": "The training takes approximately 40 hours for the " + }, + { + "bbox": [ + 104, + 234, + 504, + 258 + ], + "type": "inline_equation", + "content": "\\mathbf{M}^2\\mathrm{SA - 7B}" + }, + { + "bbox": [ + 104, + 234, + 504, + 258 + ], + "type": "text", + "content": " and about 52 hours for the " + }, + { + "bbox": [ + 104, + 234, + 504, + 258 + ], + "type": "inline_equation", + "content": "\\mathbf{M}^2\\mathrm{SA - }" + }, + { + "bbox": [ + 104, + 234, + 504, + 258 + ], + "type": "text", + "content": " Llama2-13B, respectively." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 271, + 269, + 283 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 271, + 269, + 283 + ], + "spans": [ + { + "bbox": [ + 105, + 271, + 269, + 283 + ], + "type": "text", + "content": "A.9 COMPUTATIONAL COMPLEXITY" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 293, + 506, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 293, + 506, + 392 + ], + "spans": [ + { + "bbox": [ + 104, + 293, + 506, + 392 + ], + "type": "text", + "content": "We aim to compare the computational complexity of the proposed " + }, + { + "bbox": [ + 104, + 293, + 506, + 392 + ], + "type": "inline_equation", + "content": "\\mathbf{M}^2\\mathrm{SA}" + }, + { + "bbox": [ + 104, + 293, + 506, + 392 + ], + "type": "text", + "content": " with LISA, GSVA, and GLaMM. For this comparison, we measure GPU memory usage and TFLOPs. As shown in Tab. 7, while the addition of Early Local Feature Fusion and multiple [SEG] tokens leads to a slight increase in GPU memory usage and TFLOPs, " + }, + { + "bbox": [ + 104, + 293, + 506, + 392 + ], + "type": "inline_equation", + "content": "\\mathbf{M}^2\\mathrm{SA}" + }, + { + "bbox": [ + 104, + 293, + 506, + 392 + ], + "type": "text", + "content": " demonstrates a significant improvement in handling multiple targets and fine-grained part-level segmentation compared to LISA. However, despite these performance improvements, there is still room for enhancement from the perspective of computational efficiency. Since " + }, + { + "bbox": [ + 104, + 293, + 506, + 392 + ], + "type": "inline_equation", + "content": "\\mathbf{M}^2\\mathrm{SA}" + }, + { + "bbox": [ + 104, + 293, + 506, + 392 + ], + "type": "text", + "content": " is built upon both MLLM and SAM, it requires substantial memory resources. 
Future research could focus on optimizing the efficiency of the mask decoder, which predicts the final mask by integrating vision and language information." + } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 164, + 449, + 447, + 532 + ], + "blocks": [ + { + "bbox": [ + 104, + 411, + 504, + 446 + ], + "lines": [ + { + "bbox": [ + 104, + 411, + 504, + 446 + ], + "spans": [ + { + "bbox": [ + 104, + 411, + 504, + 446 + ], + "type": "text", + "content": "Table 8: Multi-object referring segmentation results on GTAV and Cityscapes validation sets. We adopt mIoU metric for comparison. We evaluate the zero-shot performance of LISA, GSVA, GLaMM, and " + }, + { + "bbox": [ + 104, + 411, + 504, + 446 + ], + "type": "inline_equation", + "content": "\\mathbf{M}^2\\mathbf{SA}" + }, + { + "bbox": [ + 104, + 411, + 504, + 446 + ], + "type": "text", + "content": ". The best results are highlighted in bold." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 164, + 449, + 447, + 532 + ], + "lines": [ + { + "bbox": [ + 164, + 449, + 447, + 532 + ], + "spans": [ + { + "bbox": [ + 164, + 449, + 447, + 532 + ], + "type": "table", + "html": "
MethodsGTAV-valCityscapes-val
LISA-7B (Lai et al., 2023)3.76.1
GSVA-7B (Xia et al., 2024)15.714.6
GLaMM (Rasheed et al., 2024)12.612.6
M2SA-7B35.141.3
LISA-Llama2-13B (Lai et al., 2023)2.43.4
M2SA-Llama2-13B38.244.0
", + "image_path": "2e464580e9e1dd1464f9ef076f2348b3b1cdbe0b94c2b7f536ca87733c66910c.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 557, + 296, + 568 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 557, + 296, + 568 + ], + "spans": [ + { + "bbox": [ + 105, + 557, + 296, + 568 + ], + "type": "text", + "content": "A.10 GENERALIZATION ON UNSEEN DATA" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 578, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 578, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 578, + 506, + 733 + ], + "type": "text", + "content": "To assess " + }, + { + "bbox": [ + 104, + 578, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathbf{M}^2\\mathrm{SA}" + }, + { + "bbox": [ + 104, + 578, + 506, + 733 + ], + "type": "text", + "content": "'s generalization to unseen data, we conduct additional experiments. Although OVPARTS (Wei et al., 2024) was recently proposed for open-vocabulary part-level segmentation using Pascal-Part (Chen et al., 2014) and ADE20K (Zhou et al., 2019), both datasets were used during training. Therefore, we evaluate the model's generalization performance using semantic segmentation datasets from driving scenes, specifically Cityscapes (Cordts et al., 2016) and GTAV (Richter et al., 2016), which were not used during training and pose a more challenging test environment. Since these datasets lack part-level mask annotations, we focus on evaluating multi-target object cases. Furthermore, we curate custom text prompts using predefined category names as they do not provide corresponding text queries. 
For each query, we randomly select 4 to 6 object categories from an image and create prompts such as \"Can you segment the class 1, class 2, ..., and class n?\" The model generates masks for the specified objects, and we compute the mIoU score to compare its performance with LISA. As shown in Tab. 8, " + }, + { + "bbox": [ + 104, + 578, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathbf{M}^2\\mathrm{SA}" + }, + { + "bbox": [ + 104, + 578, + 506, + 733 + ], + "type": "text", + "content": " performs robustly even on datasets from entirely different domains. Notably, while the existing methods struggle with multi-target cases, " + }, + { + "bbox": [ + 104, + 578, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathbf{M}^2\\mathrm{SA}" + }, + { + "bbox": [ + 104, + 578, + 506, + 733 + ], + "type": "text", + "content": " handles them effectively. This demonstrates that the use of multiple [SEG] tokens, combined" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 217, + 114, + 394, + 152 + ], + "blocks": [ + { + "bbox": [ + 104, + 89, + 504, + 111 + ], + "lines": [ + { + "bbox": [ + 104, + 89, + 504, + 111 + ], + "spans": [ + { + "bbox": [ + 104, + 89, + 504, + 111 + ], + "type": "text", + "content": "Table 9: Comparison 
between LISA-7B (Lai et al., 2023) trained on MMR dataset and LISA-7B trained on ReasonSeg (Lai et al., 2023). We measure the performance on ReasonSeg validation set" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 217, + 114, + 394, + 152 + ], + "lines": [ + { + "bbox": [ + 217, + 114, + 394, + 152 + ], + "spans": [ + { + "bbox": [ + 217, + 114, + 394, + 152 + ], + "type": "table", + "html": "
MethodsgIoUcIoU
LISA-7B w/ ReasonSeg44.446.0
LISA-7B w/ MMR49.955.6
", + "image_path": "07decf95af415a3da631f490c7ade3a9745b2bb0391c6ea66e340caaa63f3043.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 177, + 504, + 200 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 177, + 504, + 200 + ], + "spans": [ + { + "bbox": [ + 104, + 177, + 504, + 200 + ], + "type": "text", + "content": "with early local feature fusion, enables " + }, + { + "bbox": [ + 104, + 177, + 504, + 200 + ], + "type": "inline_equation", + "content": "\\mathbf{M}^2\\mathbf{SA}" + }, + { + "bbox": [ + 104, + 177, + 504, + 200 + ], + "type": "text", + "content": " to generalize well to unseen domains by improving its ability to manage multi-target cases and fine-grained segmentation tasks." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 213, + 243, + 224 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 213, + 243, + 224 + ], + "spans": [ + { + "bbox": [ + 105, + 213, + 243, + 224 + ], + "type": "text", + "content": "A.11 MMR AND REASONSEG" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 234, + 504, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 234, + 504, + 312 + ], + "spans": [ + { + "bbox": [ + 104, + 234, + 504, + 312 + ], + "type": "text", + "content": "To validate the comprehensiveness and effectiveness of the MMR dataset, we conduct a comparative evaluation with ReasonSeg using the LISA-7B model. Specifically, we train the model in two configurations: one using ReasonSeg and the other using MMR instead of ReasonSeg. As shown in Tab. 9, the model trained on MMR shows superior performance on the ReasonSeg validation set than the model trained on ReasonSeg. This improvement highlights the comprehensiveness of the MMR dataset. 
By incorporating multi-target and part-level annotations alongside object-level data, MMR provides a more robust knowledge for addressing complex reasoning segmentation tasks." + } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 167, + 378, + 446, + 426 + ], + "blocks": [ + { + "bbox": [ + 104, + 331, + 504, + 375 + ], + "lines": [ + { + "bbox": [ + 104, + 331, + 504, + 375 + ], + "spans": [ + { + "bbox": [ + 104, + 331, + 504, + 375 + ], + "type": "text", + "content": "Table 10: Performance of M2SA on frequently appearing and infrequently appearing object categories. From the total of 75 categories, question-answer pairs containing the top 10 most frequent (upper) and bottom 10 least frequent (lower) categories are extracted to construct the upper and lower subsets, respectively." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 167, + 378, + 446, + 426 + ], + "lines": [ + { + "bbox": [ + 167, + 378, + 446, + 426 + ], + "spans": [ + { + "bbox": [ + 167, + 378, + 446, + 426 + ], + "type": "table", + "html": "
MethodsMMR test
Obj-only (total)Obj-only (upper)Obj-only (lower)
gIoUcIoUgIoUcIoUgIoUcIoU
M2SA-7B41.055.641.054.839.439.7
", + "image_path": "1c6932d80c07652a78d886872a4dd1afc5ca8809e1a16475f4ada3ac643a9465.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 167, + 503, + 446, + 550 + ], + "blocks": [ + { + "bbox": [ + 104, + 455, + 504, + 500 + ], + "lines": [ + { + "bbox": [ + 104, + 455, + 504, + 500 + ], + "spans": [ + { + "bbox": [ + 104, + 455, + 504, + 500 + ], + "type": "text", + "content": "Table 11: Performance of M2SA on frequently appearing and infrequently appearing part categories. From the total of 445 categories, question-answer pairs containing the top 10 most frequent (upper) and bottom 10 least frequent (lower) categories are extracted to construct the upper and lower subsets, respectively." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 167, + 503, + 446, + 550 + ], + "lines": [ + { + "bbox": [ + 167, + 503, + 446, + 550 + ], + "spans": [ + { + "bbox": [ + 167, + 503, + 446, + 550 + ], + "type": "table", + "html": "
MethodsMMR test
Part-only (total)Part-only (upper)Part-only (lower)
gIoUcIoUgIoUcIoUgIoUcIoU
M2SA-7B13.527.012.824.813.328.1
", + "image_path": "3216553d07de328f5daf032f72cf7a31f3d843795076dd6ec93c97a4adca5f81.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 573, + 375, + 585 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 573, + 375, + 585 + ], + "spans": [ + { + "bbox": [ + 105, + 573, + 375, + 585 + ], + "type": "text", + "content": "A.12 ANALYSIS OF THE LONG-TAIL PHENOMENON IN M2SA" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 594, + 505, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 594, + 505, + 682 + ], + "spans": [ + { + "bbox": [ + 104, + 594, + 505, + 682 + ], + "type": "text", + "content": "To investigate whether " + }, + { + "bbox": [ + 104, + 594, + 505, + 682 + ], + "type": "inline_equation", + "content": "\\mathbf{M}^2\\mathrm{SA}" + }, + { + "bbox": [ + 104, + 594, + 505, + 682 + ], + "type": "text", + "content": " trained on the MMR dataset exhibits a long-tail phenomenon, we evaluate its performance on frequently and infrequently occurring object and part categories. To this end, we construct subsets of the MMR test set by isolating question-answer pairs based on category frequency. Specifically, we extract the top 10 most frequent (upper) and bottom 10 least frequent (lower) categories for both object-only and part-only test sets. This results in four subsets: object-only (upper: 10/75), object-only (lower: 10/75), part-only (upper: 10/445), and part-only (lower: 10/445). The MMR dataset includes a total of 75 object categories and 445 part categories, respectively. The performance comparison is shown in Tab. 10 and Tab. 11." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 688, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 688, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 688, + 504, + 733 + ], + "type": "text", + "content": "For the object-only dataset, " + }, + { + "bbox": [ + 104, + 688, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\mathbf{M}^2\\mathbf{SA}" + }, + { + "bbox": [ + 104, + 688, + 504, + 733 + ], + "type": "text", + "content": "'s performance on frequently occurring (upper) object categories closely aligns with its overall performance across all object categories (gIoU: 41.0, cIoU: 54.8 vs. gIoU: 41.0, cIoU: 55.6). However, for infrequent object categories (lower), the performance declines, with cIoU dropping from 55.6 to 39.7 and gIoU from 41.0 to 39.4. In contrast, for the" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "text", + "content": "part-only dataset, " + }, + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "inline_equation", + "content": "\\mathbf{M}^2\\mathrm{SA}" + }, + { + "bbox": [ 
+ 104, + 82, + 504, + 128 + ], + "type": "text", + "content": " demonstrates consistent performance across both frequent and infrequent categories. The gIoU scores are 12.8 (upper), 13.3 (lower), and 13.5 (overall), while the cIoU scores are 24.8 (upper), 28.1 (lower), and 27.0 (overall). These findings suggest that " + }, + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "inline_equation", + "content": "\\mathbf{M}^2\\mathrm{SA}" + }, + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "text", + "content": " is less sensitive to the long-tail distribution in part categories than in object categories." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 132, + 504, + 189 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 132, + 504, + 189 + ], + "spans": [ + { + "bbox": [ + 104, + 132, + 504, + 189 + ], + "type": "text", + "content": "This analysis highlights the strengths and limitations of " + }, + { + "bbox": [ + 104, + 132, + 504, + 189 + ], + "type": "inline_equation", + "content": "\\mathbf{M}^2\\mathrm{SA}" + }, + { + "bbox": [ + 104, + 132, + 504, + 189 + ], + "type": "text", + "content": " when addressing long-tail distributions. While " + }, + { + "bbox": [ + 104, + 132, + 504, + 189 + ], + "type": "inline_equation", + "content": "\\mathbf{M}^2\\mathrm{SA}" + }, + { + "bbox": [ + 104, + 132, + 504, + 189 + ], + "type": "text", + "content": " demonstrates robust performance across frequent and infrequent part categories, its reduced performance on infrequent object categories indicates potential areas for improvement. Future work could explore strategies to mitigate the impact of long-tail distributions in object categories while preserving its strengths in part-level reasoning tasks." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 201, + 240, + 213 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 201, + 240, + 213 + ], + "spans": [ + { + "bbox": [ + 105, + 201, + 240, + 213 + ], + "type": "text", + "content": "A.13 QUALITATIVE RESULTS" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 221, + 504, + 234 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 221, + 504, + 234 + ], + "spans": [ + { + "bbox": [ + 104, + 221, + 504, + 234 + ], + "type": "text", + "content": "Qualitative results of " + }, + { + "bbox": [ + 104, + 221, + 504, + 234 + ], + "type": "inline_equation", + "content": "\\mathbf{M}^2\\mathbf{SA}" + }, + { + "bbox": [ + 104, + 221, + 504, + 234 + ], + "type": "text", + "content": " on the MMR benchmark are visualized in Fig. 12, Fig. 13, and Fig. 14." + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 112, + 248, + 499, + 412 + ], + "blocks": [ + { + "bbox": [ + 112, + 248, + 499, + 412 + ], + "lines": [ + { + "bbox": [ + 112, + 248, + 499, + 412 + ], + "spans": [ + { + "bbox": [ + 112, + 248, + 499, + 412 + ], + "type": "image", + "image_path": "780bb26b58d04d0ce06307e4a065a037cc9ae0f76ca4642cc1999d365355bba6.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 190, + 540, + 419, + 552 + ], + "lines": [ + { + "bbox": [ + 190, + 540, + 419, + 552 + ], + "spans": [ + { + "bbox": [ + 190, + 540, + 419, + 552 + ], + "type": "text", + "content": "Figure 12: Qualitative result of " + }, + { + "bbox": [ + 190, + 540, + 419, + 552 + ], + "type": "inline_equation", + "content": "\\mathbf{M}^2\\mathrm{SA}" + }, + { + "bbox": [ + 190, + 540, + 419, + 552 + ], + "type": "text", + "content": " on MMR test set." 
+ } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 115, + 423, + 148, + 456 + ], + "blocks": [ + { + "bbox": [ + 115, + 423, + 148, + 456 + ], + "lines": [ + { + "bbox": [ + 115, + 423, + 148, + 456 + ], + "spans": [ + { + "bbox": [ + 115, + 423, + 148, + 456 + ], + "type": "image", + "image_path": "7b400df9ac2094828eb2b7cbbbccba0f1161001ec9e031962516e85228822e7f.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 153, + 426, + 440, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 153, + 426, + 440, + 456 + ], + "spans": [ + { + "bbox": [ + 153, + 426, + 440, + 456 + ], + "type": "text", + "content": "Question: If someone wants to send an e-mail, which equipments on the desk would they be utilizing?" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 120, + 472, + 148, + 506 + ], + "blocks": [ + { + "bbox": [ + 120, + 472, + 148, + 506 + ], + "lines": [ + { + "bbox": [ + 120, + 472, + 148, + 506 + ], + "spans": [ + { + "bbox": [ + 120, + 472, + 148, + 506 + ], + "type": "image", + "image_path": "f87b1455be084fe49fafc094a3ab25add745daccf0b46690442fe1cde225d8f2.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 156, + 474, + 481, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 474, + 481, + 520 + ], + "spans": [ + { + "bbox": [ + 156, + 474, + 481, + 520 + ], + "type": "text", + "content": "Answer: They would be utilizing the laptop_computer for typing and viewing the screen, and the mouse for navigating and interacting with the computer." 
+ } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 112, + 111, + 499, + 258 + ], + "blocks": [ + { + "bbox": [ + 188, + 92, + 231, + 108 + ], + "lines": [ + { + "bbox": [ + 188, + 92, + 231, + 108 + ], + "spans": [ + { + "bbox": [ + 188, + 92, + 231, + 108 + ], + "type": "text", + "content": "Image" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 112, + 111, + 499, + 258 + ], + "lines": [ + { + "bbox": [ + 112, + 111, + 499, + 258 + ], + "spans": [ + { + "bbox": [ + 112, + 111, + 499, + 258 + ], + "type": "image", + "image_path": "51f912496d4858a857465483c94ca2327c9526cb16cccd5e558cb4d25ef277b2.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 118, + 268, + 147, + 300 + ], + "blocks": [ + { + "bbox": [ + 118, + 268, + 147, + 300 + ], + "lines": [ + { + "bbox": [ + 118, + 268, + 147, + 300 + ], + "spans": [ + { + "bbox": [ + 118, + 268, + 147, + 300 + ], + "type": "image", + "image_path": "73ce8ea7b46495d2a5ef432c1dd123d83b993d3b8e847c67c8b4cf55e1719b32.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 190, + 383, + 419, + 396 + ], + "lines": [ + { + "bbox": [ + 190, + 383, + 
419, + 396 + ], + "spans": [ + { + "bbox": [ + 190, + 383, + 419, + 396 + ], + "type": "text", + "content": "Figure 13: Qualitative result of " + }, + { + "bbox": [ + 190, + 383, + 419, + 396 + ], + "type": "inline_equation", + "content": "\\mathbf{M}^2\\mathbf{SA}" + }, + { + "bbox": [ + 190, + 383, + 419, + 396 + ], + "type": "text", + "content": " on MMR test set." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 153, + 269, + 493, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 153, + 269, + 493, + 300 + ], + "spans": [ + { + "bbox": [ + 153, + 269, + 493, + 300 + ], + "type": "text", + "content": "Question: Where could someone sit while waiting for transportation, and which part provides support for their back?" + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 120, + 316, + 147, + 350 + ], + "blocks": [ + { + "bbox": [ + 120, + 316, + 147, + 350 + ], + "lines": [ + { + "bbox": [ + 120, + 316, + 147, + 350 + ], + "spans": [ + { + "bbox": [ + 120, + 316, + 147, + 350 + ], + "type": "image", + "image_path": "d836da9c1793237c660f8e73f4567737cbd798e73a218a7c0fe2eef6f9998711.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 156, + 316, + 465, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 316, + 465, + 348 + ], + "spans": [ + { + "bbox": [ + 156, + 316, + 465, + 348 + ], + "type": "text", + "content": "Answer: The bench's seat provides a place to sit, and the bench's back offers support for the back" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 112, + 441, + 499, + 588 + ], + "blocks": [ + { + "bbox": [ + 188, + 420, + 230, + 436 + ], + "lines": [ + { + "bbox": [ + 188, + 420, + 230, + 436 + ], + "spans": [ + { + "bbox": [ + 188, + 420, + 230, + 436 + ], + "type": "text", + "content": "Image" + } + ] + } + ], + "index": 8, + 
"angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 112, + 441, + 499, + 588 + ], + "lines": [ + { + "bbox": [ + 112, + 441, + 499, + 588 + ], + "spans": [ + { + "bbox": [ + 112, + 441, + 499, + 588 + ], + "type": "image", + "image_path": "9bd09c601ae16a1d155426840c2c9f0a133d5256b0e47e2df2f60bdb65dae879.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 118, + 596, + 147, + 628 + ], + "blocks": [ + { + "bbox": [ + 118, + 596, + 147, + 628 + ], + "lines": [ + { + "bbox": [ + 118, + 596, + 147, + 628 + ], + "spans": [ + { + "bbox": [ + 118, + 596, + 147, + 628 + ], + "type": "image", + "image_path": "58908b471a9e25e9ab603d4d78941437acd61ebd7534794de643b0305ce333e3.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 190, + 711, + 419, + 723 + ], + "lines": [ + { + "bbox": [ + 190, + 711, + 419, + 723 + ], + "spans": [ + { + "bbox": [ + 190, + 711, + 419, + 723 + ], + "type": "text", + "content": "Figure 14: Qualitative result of " + }, + { + "bbox": [ + 190, + 711, + 419, + 723 + ], + "type": "inline_equation", + "content": "\\mathbf{M}^2\\mathrm{SA}" + }, + { + "bbox": [ + 190, + 711, + 419, + 723 + ], + "type": "text", + "content": " on MMR test set." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 153, + 597, + 479, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 153, + 597, + 479, + 628 + ], + "spans": [ + { + "bbox": [ + 153, + 597, + 479, + 628 + ], + "type": "text", + "content": "Question: If I need to check how much time is left before my meal is ready, which part of this appliance should I look at?" 
+ } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 121, + 643, + 147, + 678 + ], + "blocks": [ + { + "bbox": [ + 121, + 643, + 147, + 678 + ], + "lines": [ + { + "bbox": [ + 121, + 643, + 147, + 678 + ], + "spans": [ + { + "bbox": [ + 121, + 643, + 147, + 678 + ], + "type": "image", + "image_path": "1b1e851165818a2027e98d3a46c169232b0f133a88469a1ed9ccbad48b6bfcef.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "bbox": [ + 156, + 645, + 428, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 645, + 428, + 676 + ], + "spans": [ + { + "bbox": [ + 156, + 645, + 428, + 676 + ], + "type": "text", + "content": "Answer: You should look at the microwave_oven's time_display to check the remaining time." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 83, + 288, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 83, + 288, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 83, + 288, + 94 + ], + "type": "text", + "content": "A.14 ADDITIONAL EXAMPLES OF MMR" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 102, + 506, + 126 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 102, + 506, + 126 + ], + 
"spans": [ + { + "bbox": [ + 104, + 102, + 506, + 126 + ], + "type": "text", + "content": "To facilitate a quick and intuitive understanding of the MMR dataset's characteristics, we present additional examples in Figure 15." + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 187, + 144, + 421, + 232 + ], + "blocks": [ + { + "bbox": [ + 187, + 144, + 421, + 232 + ], + "lines": [ + { + "bbox": [ + 187, + 144, + 421, + 232 + ], + "spans": [ + { + "bbox": [ + 187, + 144, + 421, + 232 + ], + "type": "image", + "image_path": "e2251577118d3f5db074751bf55d95dcfbb091ce7441c2b75eb649453c5695a9.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 143, + 233, + 355, + 240 + ], + "lines": [ + { + "bbox": [ + 143, + 233, + 355, + 240 + ], + "spans": [ + { + "bbox": [ + 143, + 233, + 355, + 240 + ], + "type": "text", + "content": "Global Caption: A laptop is opened and set on a table next to a computer mouse, suggesting a typical workspace setup." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 143, + 243, + 345, + 250 + ], + "lines": [ + { + "bbox": [ + 143, + 243, + 345, + 250 + ], + "spans": [ + { + "bbox": [ + 143, + 243, + 345, + 250 + ], + "type": "text", + "content": "Question1: If one were to begin typing a document, which two areas of this device would they interact with first?" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 143, + 250, + 457, + 260 + ], + "lines": [ + { + "bbox": [ + 143, + 250, + 457, + 260 + ], + "spans": [ + { + "bbox": [ + 143, + 250, + 457, + 260 + ], + "type": "text", + "content": "Answer: They would primarily interact with the laptop_computer_1's keyboard [195, 276, 418, 325] to type and laptop_computer_1's touchpad [113, 290, 231, 312] to navigate within the document." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 143, + 264, + 304, + 270 + ], + "lines": [ + { + "bbox": [ + 143, + 264, + 304, + 270 + ], + "spans": [ + { + "bbox": [ + 143, + 264, + 304, + 270 + ], + "type": "text", + "content": "Question2: Where can one find the manufacturer's branding on the devices pictured here?" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 143, + 270, + 462, + 277 + ], + "lines": [ + { + "bbox": [ + 143, + 270, + 462, + 277 + ], + "spans": [ + { + "bbox": [ + 143, + 270, + 462, + 277 + ], + "type": "text", + "content": "Answer2: The manufacturer's branding can be found on the laptop_computer_1's logo [354, 281, 370, 288] and on the mouse_(computer_equipment)_1's logo [314, 403, 345, 416]." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_footnote" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 187, + 330, + 421, + 418 + ], + "blocks": [ + { + "bbox": [ + 143, + 280, + 375, + 286 + ], + "lines": [ + { + "bbox": [ + 143, + 280, + 375, + 286 + ], + "spans": [ + { + "bbox": [ + 143, + 280, + 375, + 286 + ], + "type": "text", + "content": "Question3: To move the cursor on the screen without touching the laptop, which part of the computer equipment would one use?" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 143, + 286, + 465, + 297 + ], + "lines": [ + { + "bbox": [ + 143, + 286, + 465, + 297 + ], + "spans": [ + { + "bbox": [ + 143, + 286, + 465, + 297 + ], + "type": "text", + "content": "Answer3: One would use the mouse (.computer_equipment) 1's body [260, 379, 516, 477] along with either the mouse (.computer_equipment) 1's left button [413, 380, 480, 401] or mouse (.computer_equipment) 1's right button [451, 393, 519, 429] to click and interact with the cursor on the screen." 
+ } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 143, + 300, + 344, + 307 + ], + "lines": [ + { + "bbox": [ + 143, + 300, + 344, + 307 + ], + "spans": [ + { + "bbox": [ + 143, + 300, + 344, + 307 + ], + "type": "text", + "content": "Question4: After finishing work and deciding to pack up, which two parts of the laptop would come into contact?" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 143, + 307, + 440, + 312 + ], + "lines": [ + { + "bbox": [ + 143, + 307, + 440, + 312 + ], + "spans": [ + { + "bbox": [ + 143, + 307, + 440, + 312 + ], + "type": "text", + "content": "Answer4: When closing the laptop, laptop_computer_1's screen [295, 34, 510, 305] would come into contact with laptop_computer_1's base-panel [77, 271, 479, 352]." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 187, + 330, + 421, + 418 + ], + "lines": [ + { + "bbox": [ + 187, + 330, + 421, + 418 + ], + "spans": [ + { + "bbox": [ + 187, + 330, + 421, + 418 + ], + "type": "image", + "image_path": "158ac0b1214dc77ea85da060158af18dc79beb3e86b9df0e817e0f798e34d4dd.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 146, + 419, + 364, + 426 + ], + "lines": [ + { + "bbox": [ + 146, + 419, + 364, + 426 + ], + "spans": [ + { + "bbox": [ + 146, + 419, + 364, + 426 + ], + "type": "text", + "content": "Global Caption: A plate with a slice of quiche and a side of home fries is ready to be eaten, with a knife resting on the side." + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 146, + 430, + 361, + 436 + ], + "lines": [ + { + "bbox": [ + 146, + 430, + 361, + 436 + ], + "spans": [ + { + "bbox": [ + 146, + 430, + 361, + 436 + ], + "type": "text", + "content": "Question1: During a meal, what would you typically use to cut a portion of food and how is it structured for ease of use?" 
+ } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 146, + 436, + 461, + 447 + ], + "lines": [ + { + "bbox": [ + 146, + 436, + 461, + 447 + ], + "spans": [ + { + "bbox": [ + 146, + 436, + 461, + 447 + ], + "type": "text", + "content": "Answer 1: You would typically use a knife_1 [10.27, 86.49, 258.23, 115.61] to cut a portion of food. It is structured with a knife_1's blade [10, 92, 150, 115] for slicing through food and a knife_1's handle [150, 87, 254, 109] to provide a comfortable grip for handling." + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 146, + 450, + 370, + 456 + ], + "lines": [ + { + "bbox": [ + 146, + 450, + 370, + 456 + ], + "spans": [ + { + "bbox": [ + 146, + 450, + 370, + 456 + ], + "type": "text", + "content": "Question2: If I wanted to contain a main dish and sides separately on a table, what items could effectively serve this purpose?" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 203, + 712, + 406, + 724 + ], + "lines": [ + { + "bbox": [ + 203, + 712, + 406, + 724 + ], + "spans": [ + { + "bbox": [ + 203, + 712, + 406, + 724 + ], + "type": "text", + "content": "Figure 15: Additional Examples of MMR dataset." 
+ } + ] + } + ], + "index": 35, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 175, + 508, + 436, + 597 + ], + "blocks": [ + { + "bbox": [ + 146, + 456, + 460, + 472 + ], + "lines": [ + { + "bbox": [ + 146, + 456, + 460, + 472 + ], + "spans": [ + { + "bbox": [ + 146, + 456, + 460, + 472 + ], + "type": "text", + "content": "Answer2: To contain a main dish and sides separately, you could use plate_1[33.38, 74.25, 640.0, 480.0], which has a plate_1's inner_wall [33, 75, 639, 479] to hold the food and prevent it from spilling, and a separate plate_2[0.0, 23.86, 145.25, 200.7] with plate_2's inner_wall [0, 28, 141, 190] and plate_2's rim [0, 24, 145, 201] to hold another portion, like sides or appetizers." + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 146, + 477, + 324, + 483 + ], + "lines": [ + { + "bbox": [ + 146, + 477, + 324, + 483 + ], + "spans": [ + { + "bbox": [ + 146, + 477, + 324, + 483 + ], + "type": "text", + "content": "Question3: What part of the tableware should one be cautious of to avoid spills while serving food?" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 146, + 483, + 461, + 493 + ], + "lines": [ + { + "bbox": [ + 146, + 483, + 461, + 493 + ], + "spans": [ + { + "bbox": [ + 146, + 483, + 461, + 493 + ], + "type": "text", + "content": "Answer: 3: One should be cautious of the plate_1's inner_wall [33, 75, 639, 479] of a plate_1 [33, 38, 74, 25, 640, 480,0] and the plate_2's inner_wall [0, 28, 141, 190] of a plate_2 [0.0, 23.86, 145, 25, 200.7] to avoid spillps, as these parts help to contain the food within the boundaries of the plates." 
+ } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 175, + 508, + 436, + 597 + ], + "lines": [ + { + "bbox": [ + 175, + 508, + 436, + 597 + ], + "spans": [ + { + "bbox": [ + 175, + 508, + 436, + 597 + ], + "type": "image", + "image_path": "12341a737e9db676387e93334d996b5f288beff9471119476b53f4bc40c8980c.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 148, + 597, + 252, + 604 + ], + "lines": [ + { + "bbox": [ + 148, + 597, + 252, + 604 + ], + "spans": [ + { + "bbox": [ + 148, + 597, + 252, + 604 + ], + "type": "text", + "content": "Global Caption: A dog wearing a hat is resting on a pillow." + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 148, + 609, + 347, + 614 + ], + "lines": [ + { + "bbox": [ + 148, + 609, + 347, + 614 + ], + "spans": [ + { + "bbox": [ + 148, + 609, + 347, + 614 + ], + "type": "text", + "content": "Question1: Where would this animal most likely register scents and how would it express alertness or curiosity?" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 148, + 615, + 459, + 624 + ], + "lines": [ + { + "bbox": [ + 148, + 615, + 459, + 624 + ], + "spans": [ + { + "bbox": [ + 148, + 615, + 459, + 624 + ], + "type": "text", + "content": "Answer: This animal would most likely register scents using its dog_1's nose [175, 206, 221, 246], and express alertness or curiosity by adjusting the position of its dog_1's ear [329, 101, 398, 212] and dog_1's head [175, 92, 397, 280]." + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 148, + 629, + 294, + 635 + ], + "lines": [ + { + "bbox": [ + 148, + 629, + 294, + 635 + ], + "spans": [ + { + "bbox": [ + 148, + 629, + 294, + 635 + ], + "type": "text", + "content": "Question2: Can you describe the area that supports the dog while it's lying down?" 
+ } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 148, + 635, + 459, + 646 + ], + "lines": [ + { + "bbox": [ + 148, + 635, + 459, + 646 + ], + "spans": [ + { + "bbox": [ + 148, + 635, + 459, + 646 + ], + "type": "text", + "content": "Answer 2: The area that supports the dog while it's lying down is [218, 202, 514, 374], particularly emphasized where the dog_1's leg [174, 326, 520, 397] and dog_1's foot [146, 373, 331, 426] make contact with the pillow_1 [5.32, 268.85, 632.27, 427.0]." + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 148, + 650, + 347, + 655 + ], + "lines": [ + { + "bbox": [ + 148, + 650, + 347, + 655 + ], + "spans": [ + { + "bbox": [ + 148, + 650, + 347, + 655 + ], + "type": "text", + "content": "Question3: When this canine looks around its environment, what are the primary features involved in its vision?" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 148, + 655, + 459, + 666 + ], + "lines": [ + { + "bbox": [ + 148, + 655, + 459, + 666 + ], + "spans": [ + { + "bbox": [ + 148, + 655, + 459, + 666 + ], + "type": "text", + "content": "Answer3: The primary features involved in this canine's vision when looking around its environment are the dog 1's eye [201, 145, 294, 177] and the movement of the dog 1's head [175, 92, 397, 280]." + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 148, + 671, + 361, + 677 + ], + "lines": [ + { + "bbox": [ + 148, + 671, + 361, + 677 + ], + "spans": [ + { + "bbox": [ + 148, + 671, + 361, + 677 + ], + "type": "text", + "content": "Question4: In the case of this dog getting up from its resting position, which parts would engage initially to lift its body?" 
+ } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 148, + 677, + 461, + 687 + ], + "lines": [ + { + "bbox": [ + 148, + 677, + 461, + 687 + ], + "spans": [ + { + "bbox": [ + 148, + 677, + 461, + 687 + ], + "type": "text", + "content": "Answer: To get up from its resting position, the dog would initially engage its dog_1's leg [174, 326, 520, 397] and dog_1's foot [146, 373, 331, 426] to lift its dog_1's body [218, 202, 514, 374]." + } + ] + } + ], + "index": 33, + "angle": 0, + "type": "image_footnote" + } + ], + "index": 24 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "text", + "content": "26" + } + ] + } + ], + "index": 36 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 25 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2503_13xxx/2503.13933/abcf6c14-6474-4c8a-adec-45f736d3be15_content_list.json b/data/2025/2503_13xxx/2503.13933/abcf6c14-6474-4c8a-adec-45f736d3be15_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..575e5c331830a2c13d5392279aee27cf2021f9b0 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/abcf6c14-6474-4c8a-adec-45f736d3be15_content_list.json @@ -0,0 +1,3816 @@ +[ + { + "type": "text", + "text": "Tensor-decomposition-based A Priori Surrogate (TAPS) modeling for ultra large-scale simulations", + "text_level": 1, + "bbox": [ + 134, + 126, + 867, + 170 + ], + "page_idx": 0 + }, + { + 
"type": "text", + "text": "Jiachen Guo $^{a}$ , Gino Domel $^{b}$ , Chanwook Park $^{b}$ , Hantao Zhang $^{a}$ , Ozgur Can Gumus $^{b}$ , Ye Lu $^{c}$ , Gregory J. Wagner $^{b}$ , Dong Qian $^{d,e}$ , Jian Cao $^{b}$ , Thomas J.R. Hughes $^{f}$ , Wing Kam Liu $^{b,e}$", + "bbox": [ + 115, + 187, + 880, + 217 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "aTheoretical and Applied Mechanics Program, Northwestern University, 2145 Sheridan Road, Evanston, 60201, IL, USA", + "bbox": [ + 168, + 227, + 823, + 239 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{b}$ Department of Mechanical Engineering, Northwestern University, 2145 Sheridan Road, Evanston, IL, USA", + "bbox": [ + 184, + 239, + 811, + 250 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{c}$ Department of Mechanical Engineering, University of Maryland, Baltimore County, 1000 Hilltop Circle, Baltimore, 21250, MD, USA", + "bbox": [ + 136, + 250, + 860, + 261 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{d}$ Department of Mechanical Engineering, University of Texas, Dallas, 800 W. 
Campbell Road, Richardson, 75080, TX, USA", + "bbox": [ + 164, + 261, + 831, + 273 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "${}^{e}$ Co-Founders of HIDENN-AI,LLC,1801 Maple Ave,Evanston,60201,IL,USA", + "bbox": [ + 280, + 273, + 714, + 284 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "fInstitute for Computational Engineering and Sciences, The University of Texas at Austin, 201 East 24th Street, Stop", + "bbox": [ + 184, + 284, + 811, + 294 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "C0200, Austin, 78712, TX, USA", + "bbox": [ + 411, + 294, + 584, + 306 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 102, + 360, + 171, + 373 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "A data-free, predictive scientific AI model, Tensor-decomposition-based A Priori Surrogate (TAPS), is proposed for tackling ultra large-scale engineering simulations with significant speedup, memory savings, and storage gain. TAPS can effectively obtain surrogate models for high-dimensional parametric problems with equivalent zetta-scale $(10^{21})$ degrees of freedom (DoFs). TAPS achieves this by directly obtaining reduced-order models through solving governing equations with multiple independent variables such as spatial coordinates, parameters, and time. The paper first introduces an AI-enhanced finite element-type interpolation function called convolution hierarchical deep-learning neural network (C-HiDeNN) with tensor decomposition (TD). Subsequently, the generalized space-parameter-time Galerkin weak form and the corresponding matrix form are derived. Through the choice of TAPS hyperparameters, an arbitrary convergence rate can be achieved. 
To show the capabilities of this framework, TAPS is then used to simulate a large-scale additive manufacturing process as an example and achieves around 1,370x speedup, 14.8x memory savings, and 955x storage gain compared to the finite difference method with 3.46 billion spatial degrees of freedom (DoFs). As a result, the TAPS framework opens a new avenue for many challenging ultra large-scale engineering problems, such as additive manufacturing and integrated circuit design, among others.", + "bbox": [ + 100, + 380, + 895, + 567 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Keywords:", + "text_level": 1, + "bbox": [ + 102, + 574, + 178, + 587 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Predictive scientific AI, hierarchical neural network finite element interpolation, generalized Galerkin formulation for parametric PDEs, large-scale simulation, additive manufacturing", + "bbox": [ + 102, + 588, + 873, + 617 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 102, + 659, + 221, + 674 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Precision is a fundamental aspect of scientific and engineering applications, especially in advanced industries such as semiconductor manufacturing. The capability to perform accurate computational simulations for these applications is essential for advancing these fields. Precise simulations enable the optimization of design and manufacturing processes by utilizing virtual prototypes and process simulations. This reduces the need for expensive physical prototypes and tests and provides virtual prototypes in circumstances where physical ones are impractical. 
Traditional computational methods for engineering simulations, however, suffer from prohibitive computational costs when attempting to accurately predict responses across multiple length and time scales (typically done by increasing mesh resolution), making achieving high precision for large-scale problems challenging. In fact, the random-access memory (RAM) requirement can be far beyond the capability of typical workstations and may require massive parallelization on supercomputers. In other industries, such as additive manufacturing (a term encompassing all forms of 3D printing), the vast design space further exacerbates these limitations, as numerous expensive simulations are required to thoroughly explore the effects of different design parameters.", + "bbox": [ + 100, + 684, + 895, + 857 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2503.13933v1 [cs.CE] 18 Mar 2025", + "bbox": [ + 21, + 305, + 60, + 722 + ], + "page_idx": 0 + }, + { + "type": "footer", + "text": "Preprint submitted to Elsevier", + "bbox": [ + 102, + 879, + 270, + 891 + ], + "page_idx": 0 + }, + { + "type": "footer", + "text": "March 19, 2025", + "bbox": [ + 801, + 879, + 892, + 890 + ], + "page_idx": 0 + }, + { + "type": "table", + "img_path": "images/fcdf1794fb5d67c5bdbc473ff1b75e5a1a997c2b94024e8b3ffb6d00ac66f12a.jpg", + "table_caption": [ + "Table 1: Nomenclature" + ], + "table_footnote": [], + "table_body": "
VariablesDescription
uh(x)Interpolated scalar field defined inside of an element
AeNodes within element e
AsNodes within patch domain of element e
WiS,a,p,j(x)Convolution patch function at node j for i-th nodal patch with hyperparameters s, a, and p
MTotal number of modes in tensor decomposition (TD)
mIndex for mode
DTotal number of dimensions
dIndex for dimension
xIndependent variable which includes spatial variable xs, parametric variable xp and temporal variable xt
Nd(xd;ad,sd,pd)Global C-HiDeNN shape function for dimension d with dilation parameter ad, patch size sd and reproducing polynomial order pd
bSource function in laser powder bed fusion process
uTDApproximation of the solution field expressed via TD
TTime slab index for space-parameter-time problem
kThermal conductivity
ρMaterial density
cpHeat capacity
ηMaterial absorptivity
PLaser power
rStandard deviation that characterizes the width of the heat source
qHeat flux
qconvHeat flux from convection
qradHeat flux from radiation
qevapHeat flux from evaporative cooling
hconvConvection coefficient
σSBStefan-Boltzman constant
mevapMass evaporation flux
LevapHeat of evaporation
BndShape function derivative
UdSolution matrix (Rnd×M) for dimension d that contains all the modes
", + "bbox": [ + 105, + 149, + 892, + 623 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To fulfill the ever-growing challenges in predictive scientific models, data-driven surrogates, especially artificial intelligence (AI)-based models, present an alternative to conventional numerical models by significantly reducing the forward prediction time. These models can be treated as a reasonably accurate, reduced representation of real physics. Once trained properly, they can be used for fast prediction on unseen parameters [1, 2]. However, it is still uncertain whether a data-driven surrogate model can be trained to achieve the level of accuracy required in engineering design. Recently, it has been pointed out by Wolfram Research that standard AI models cannot easily fulfill the high accuracy requirement of predictive scientific tasks [3]. Furthermore, as suggested by Google Deepmind, the real potential of AI models lies in enhancing, rather than thoroughly replacing, well-established classical numerical algorithms [4]. In addition, the current standard data-driven approaches follow an offline-online scheme, where the offline stage involves a huge amount of training data, which can again be prohibitive. For problems with known physics, this data can be obtained by running multiple expensive simulations relying on standard numerical algorithms. 
In scenarios involving high-dimensional design spaces governed by parameterized partial differential equations (PDEs), such as in additive manufacturing (AM), conducting repetitive simulations with varying parameters in this offline stage becomes exceedingly expensive both in terms of computation time and data storage.", + "bbox": [ + 100, + 646, + 892, + 845 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To avoid the prohibitive offline stage, one can try to obtain a surrogate model directly from governing equations", + "bbox": [ + 127, + 846, + 892, + 860 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 492, + 879, + 504, + 889 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/ba32edcab434e8ad5e0767139009d16908e9de00ed4f1dfddd99493d69460436.jpg", + "image_caption": [ + "Figure 1: The parameterized PDE is a PDE that includes parameters $\\mathbf{x}_p$ that can vary and influence the solution $\\mathbf{u}(\\mathbf{x}_s, \\mathbf{x}_p, x_t)$ , where $\\mathbf{x}_s$ and $x_t$ are the spatial and time variables, respectively. The a priori approach directly finds a surrogate model from the governing parameterized PDE, whereas the data-driven approach has to solve the parameter-fixed PDE on sampled parameters to generate simulation data, followed by training tasks. FEM: Finite Element Method [5], C-HiDeNN: Convolution Hierarchical Deep-learning Neural Network [6], TAPS: Tensor-decomposition-based A Priori Surrogate, PINN: Physics Informed Neural Network [7]." + ], + "image_footnote": [], + "bbox": [ + 188, + 130, + 816, + 244 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "without generating any data. As shown in Fig. 1 denoted by the words \"A Priori\", this approach aims to find the surrogate model before actually \"seeing\" any data. 
For example, multilayer perceptron (MLP) architectures have been vastly used in physics-informed neural networks (PINNs) and their variations to approximate solutions to PDEs without requiring data [7, 8]. However, the results of these efforts are underwhelming, as it has been shown that PINN results have often been compared to weak baselines [9], and it is unclear if they guarantee convergence. Moreover, this method is still susceptible to high computational costs for both large-scale and high-dimensional problems [10].", + "bbox": [ + 100, + 341, + 892, + 428 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/436e24e7870b3692253c8bb03616ef37d9e6600fc07fc58cdc25ddba38c6aade.jpg", + "image_caption": [ + "Figure 2: Development history of INN [11]. Figures are borrowed from references: HiDeNN [12], HiDeNN-TD [13], C-HiDeNN [14], C-HiDeNN-TD [6]." + ], + "image_footnote": [], + "bbox": [ + 110, + 437, + 890, + 642 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Instead of developing solvers solely based on machine learning concepts, a new class of Hierarchical Deep-learning Neural Networks (HiDeNN) has been developed recently. This network architecture incorporates principles from the finite element method (FEM) to construct their architecture [15, 12]. Originally designed to advance FEM as opposed to solve parameterized PDEs, this approach significantly enhances computational accuracy and efficiency for both linear and nonlinear problems compared to standard FEM [16]. HiDeNN was then enhanced by adding an additional hidden layer in the form of a nonlinear convolutional filter, formulating a new neural network architecture named Convolutional HiDeNN (C-HiDeNN) [6, 14]. C-HiDeNN mimics the structure of the generalized finite element method but leverages machine learning to optimize its hyperparameters to further improve accuracy and efficiency. 
Arbitrary orders of convergence have been observed for C-HiDeNN despite utilizing a linear finite element mesh [14]. Although these methods offers greater accuracy with fewer DoFs, like FEM, they still encounter computational challenges such as balancing memory usage against mesh resolution, which limits their efficiency in modeling ultra large-scale and high-dimensional problems. Therefore, it becomes necessary to employ model order reduction", + "bbox": [ + 100, + 690, + 894, + 860 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 492, + 879, + 504, + 889 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "techniques to address these limitations.", + "bbox": [ + 102, + 131, + 371, + 145 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Model order reduction techniques have been widely used to tackle the ever-growing challenges from high-dimensional and large-scale problems. For example, proper generalized decomposition (PGD) [17, 18, 19] has been proposed to efficiently solve high-dimensional PDEs. Recently, tensor decomposition (TD) has been successfully leveraged within the HiDeNN framework. For example, Zhang showed that HiDeNN combined with TD (HiDeNN-TD) significantly improved the speed of HiDeNN while maintaining higher accuracy [13]. Li proposed C-HiDeNN combined with TD (C-HiDeNN-TD) for extremely large-scale nested topology optimization problems [20]. Recently, Park generalized the HiDeNN-family networks under the umbrella of Interpolating Neural Networks (INNs) and demonstrated that the network can be used for both data-driven learning and data-free (i.e., a priori) solving [11]. The development history of HiDeNN family networks and INN is summarized in Fig. 2. While INN clearly explains how to construct the network architecture, an efficient optimization scheme for solving ultra large-scale and high-dimensional problems remains underdeveloped. 
In this paper, ultra large-scale problems refer to problems on the zetta-scale $(10^{21})$ in terms of DoFs.", + "bbox": [ + 100, + 146, + 915, + 313 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The demand for high-precision engineering simulations and efficient solution schemes highlights the need for innovative modeling approaches that swiftly solve large-scale problems while optimizing the design space. This research aims to fulfill this need by developing tensor-decomposition-based A Priori Surrogate (TAPS), a data-free predictive AI model, which aims to enhance high-resolution capabilities while simultaneously optimizing computational efficiency with a minimal memory footprint, low data storage needs, and fast prediction. The proposed comprehensive framework sets a foundation for scalable, adaptable, and future-proof solutions to counter the ever-growing complexity in simulation-driven advanced industries. TAPS is particularly well-suited for engineering challenges where: 1) the finite element method and other conventional methods are unsuitable due to excessively long simulation times or high RAM and storage demands needed to achieve high accuracy, 2) the model must accommodate design parameters as inputs, or 3) fast prediction is required once the model is obtained.", + "bbox": [ + 100, + 316, + 892, + 458 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "This paper is structured as follows. We first introduce the formulation of TAPS in section 2. In section 3, we examine the numerical convergence of TAPS for both space-time (S-T) and space-parameter-time (S-P-T) problems (i.e., problems that are dependent on spatial, parametric, and temporal inputs). In section 4, TAPS is applied to large-scale additive manufacturing problems that are considered intractable with standard numerical algorithms. 
This application effectively demonstrates TAPS's capability to address all of the three identified challenges.", + "bbox": [ + 100, + 458, + 892, + 530 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2. Theory", + "text_level": 1, + "bbox": [ + 102, + 549, + 184, + 564 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.1. Review of C-HiDeNN interpolation theory", + "text_level": 1, + "bbox": [ + 102, + 573, + 426, + 588 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Leveraging the universal approximation theorem, multilayer perceptrons (MLPs) have been successfully applied as global basis functions in deep learning-based solvers [7]. However, as shown in Table 2, MLPs have a few potential caveats when approximating PDE solutions. To overcome these limitations, we leverage the Convolutional HiDeNN (C-HiDeNN) interpolation function, which leverages the merits of both locally supported finite element shape functions and the flexibility of machine learning. Note that C-HiDeNN also belongs to the INN category as shown in Fig. 2. C-HiDeNN maintains all the essential finite element approximation properties such as Kronecker delta and partition of unity [14].", + "bbox": [ + 100, + 590, + 892, + 689 + ], + "page_idx": 3 + }, + { + "type": "table", + "img_path": "images/043303db4e5c82962bde585e2e5978667fa6324d753cd893b2e7c4471a16091e.jpg", + "table_caption": [ + "Table 2: Comparison of MLP and C-HiDeNN as approximation functions of PDE solutions." + ], + "table_footnote": [], + "table_body": "
MLPC-HiDeNN
Boundary/initial conditionPenalty term in the loss function [7]Automatic satisfaction [6]
Convergence and stabilityStochastic in nature and not guaranteedShown for different PDEs [6]
Numerical integrationQuasi-Monte Carlo integration [21]Gaussian integration [22]
InterpretabilityBlack-box modelInterpretable [11]
", + "bbox": [ + 173, + 722, + 823, + 791 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We first review the C-HiDeNN formulation as illustrated in Fig. 3 (a) [14]. A scalar field $u(\\pmb{x})$ defined in each element within a domain $\\Omega_{\\pmb{x}}$ can be approximated using C-HiDeNN interpolation as:", + "bbox": [ + 102, + 804, + 892, + 833 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nu _ {e} ^ {h} (\\boldsymbol {x}) = \\sum_ {i \\in A ^ {e}} N _ {i} (\\boldsymbol {x}) \\sum_ {j \\in A _ {s} ^ {i}} \\mathcal {W} _ {s, a, p, j} ^ {i} (\\boldsymbol {x}) u _ {j} = \\sum_ {k \\in A _ {s} ^ {e}} \\widetilde {N} _ {k} (\\boldsymbol {x}; s, a, p) u _ {k} \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 304, + 840, + 892, + 872 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 492, + 879, + 504, + 889 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/26d6d373a42682554486e271777c219d82e17016041bbe53dc8f96b1cfed8012.jpg", + "image_caption": [], + "image_footnote": [ + "- $A^e$ : nodes at element $e$", + "- s: patch size, integer", + "a: dilation parameter" + ], + "bbox": [ + 189, + 134, + 763, + 187 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/788d964946bd243c8ce37085ed0bd577906f91cee22b4fe5ceb8bc3ee147c7e1.jpg", + "image_caption": [], + "image_footnote": [ + "- $A_{\\mathrm{s}}^{i}$ : patch domain at node $i$ with patch size $s$", + "- $W_{a,p,j}^{i}$ : convolution interpolant for node $j$", + "- $R_{i}(x)$ : radial basis function centered at node $i$", + "1 $G$ : moment matrix", + "1 $\\cdot x^{A_s^i}$ : nodal coordinates of nodes in $A_{s}^{i}$" + ], + "bbox": [ + 188, + 227, + 480, + 363 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/2bafb2a9d73b20b8974d709a877de33ff2d86e93482c01b687869081af82ca9e.jpg", + "image_caption": [ + "Figure 3: (a) Covolution patch in 1D C-HiDeNN shape function (b) Construction of convolution 
patch function (c) C-HiDeNN shape function as MLP with 3 hidden layers" + ], + "image_footnote": [ + "- $A_{s}^{e} = \\bigcup_{i\\in A^{e}}A_{s}^{i}$ : patch nodes of element $e$ with patch size $s$", + "- $p$ : reproducing polynomial order,integer" + ], + "bbox": [ + 482, + 225, + 806, + 417 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $u_{j}$ is the nodal value and $u_{j} = u(\\pmb{x}_{j})$ ; $N_{i}$ is the linear finite element shape function at node $j$ centered in $i$ -th nodal patch; $\\mathcal{W}_{s,a,p,j}^{i}$ is the convolution patch function at node $i$ that can be represented with a partially connected MLP as illustrated in Fig. 3 (b). The convolution patch functions are controlled by three hyperparameters: patch size $s$ that controls nodal connectivity, dilation parameter $a$ that normalizes distances between patch nodes, and reproducing order $p$ that defines types/orders of activation functions to be reproduced by the patch functions. Due to the inherent local support nature of both $N_{i}$ and $\\mathcal{W}_{s,a,p,j}^{i}$ , the C-HiDeNN shape function $\\widetilde{N}_k(\\pmb{x};s,a,p)$ is also locally supported.", + "bbox": [ + 102, + 478, + 892, + 565 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Similar to standard finite element, the approximation for the solution field can be written as:", + "bbox": [ + 127, + 564, + 749, + 577 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nu ^ {h} (\\boldsymbol {x}) = \\sum_ {k} ^ {n n o d e} \\widetilde {N} _ {k} (\\boldsymbol {x}; s _ {k}, a _ {k}, p _ {k}) u _ {k} \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 391, + 583, + 892, + 621 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $nnode$ is the total number of nodes and $k$ is the nodal index. 
It should be noted that the hyperparameters $s, a, p$ can vary across nodes since C-HiDeNN can optimize these hyperparameters like machine learning parameters, rendering an adaptable functional space without altering the number of global nodes or hidden layers. This clearly distinguishes C-HiDeNN from MLP, where the activation functions and network architectures are mostly fixed.", + "bbox": [ + 102, + 627, + 892, + 683 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The C-HiDeNN shape function $\\widetilde{N}_k(\\pmb{x})$ satisfies Kronecker-delta property at nodal positions [6] (hyperparameters $s, a, p$ are dropped for brevity):", + "bbox": [ + 102, + 683, + 890, + 711 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\widetilde {N} _ {k} \\left(\\boldsymbol {x} _ {l}\\right) = \\delta_ {k l} \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 455, + 709, + 890, + 727 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where the Kronecker delta is defined as:", + "bbox": [ + 102, + 730, + 376, + 743 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\delta_ {k l} = \\left\\{ \\begin{array}{l l} 0 & \\text {i f} k \\neq l, \\\\ 1 & \\text {i f} k = l. \\end{array} \\right. 
\\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 413, + 740, + 892, + 772 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Thus, at the Dirichlet boundary node $\\mathbf{x}_b$ where $u(\\mathbf{x}_b) = u_b$ , C-HiDeNN automatically satisfies the Dirichlet boundary condition:", + "bbox": [ + 102, + 774, + 890, + 800 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nu ^ {h} \\left(\\boldsymbol {x} _ {b}\\right) = \\sum_ {k} ^ {n n o d e} \\widetilde {N} _ {k} \\left(\\boldsymbol {x} _ {b}\\right) u _ {k} = u _ {b} \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 400, + 800, + 892, + 837 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Going forward, we will employ the C-HiDeNN shape function $\\widetilde{N}_k(\\pmb{x})$ as the locally supported basis function for the interpolation.", + "bbox": [ + 100, + 841, + 892, + 871 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 492, + 878, + 504, + 889 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "2.2. Discrete Tensor decomposition", + "text_level": 1, + "bbox": [ + 102, + 131, + 349, + 146 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Tensor decomposition is a mathematical technique used to break down a high-dimensional tensor, such as a 3D finite element solution, into a set of simpler components, making it easier to analyze, store, and process [23]. It generalizes matrix decomposition methods like singular value decomposition (SVD) to higher-order tensors.", + "bbox": [ + 100, + 149, + 892, + 192 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Consider a cubic spatial domain $\\Omega_{x}$ discretized with a regular Cartesian grid where each grid point (or node) stores a scalar value (see Fig. 4). The discrete nodal values can be represented as a 3rd order tensor $u_{JK}$ where $I = 1,..,n_1;J = 1,\\dots,n_2;K = 1,\\dots,n_3$ . 
The number of DoFs for this structured mesh is $n_1\\times n_2\\times n_3$ . When high resolution is required for the analysis, as the case in AM simulations, the number of DoFs can be extremely large. To effectively reduce the DoFs, different discrete tensor decomposition methods can be used to project the original 3rd order tensor into lower order tensors. In this paper, we focus on CANDECOMP/PARAFAC (CP) decomposition, where the higher-order tensors are approximated using a finite sum of products of 1D vectors [23]:", + "bbox": [ + 100, + 193, + 892, + 293 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nu _ {I J K} \\approx u _ {I J K} ^ {T D} = \\sum_ {m = 1} ^ {M} u _ {I m} ^ {[ 1 ]} u _ {J m} ^ {[ 2 ]} u _ {K m} ^ {[ 3 ]} \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 396, + 302, + 892, + 340 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $M$ is defined as the total number of modes in CP decomposition; $u_{lm}^{[1]}$ refers to the projected 1D vector in the first dimension and $m$ -th mode; the superscript $[d]$ represents the dimension index and $d = 1,2,3$ ; the 1st subscript $I$ is the nodal index, and the 2nd subscript $m$ refers to the modal index.", + "bbox": [ + 100, + 351, + 892, + 395 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/65f7d6cba907050260e3f59100990b220e18b3e661510dbe74314d86ce119fce.jpg", + "image_caption": [ + "(a)", + "Figure 4: (a) 3D Cartesian mesh. (b) Nodal values can be treated as a 3rd order tensor." + ], + "image_footnote": [], + "bbox": [ + 188, + 430, + 435, + 581 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/3b85eb658518d72411e3ec79246a5857219aa58d19ca589a83cb951c71802013.jpg", + "image_caption": [ + "(b)" + ], + "image_footnote": [], + "bbox": [ + 477, + 411, + 810, + 571 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "As can be seen from Eq. 
6, with CP decomposition, the total number of DoFs can be reduced from $n_1 \\times n_2 \\times n_3$ to $M \\times (n_1 + n_2 + n_3)$ . Assuming $M$ does not increase when the mesh is refined along each dimension, then the solution matrix $u_{IJK}$ will have cubic growth, whereas CP decomposition $\\sum_{m=1}^{M} u_{Im}^{[1]} u_{Jm}^{[2]} u_{Km}^{[3]}$ only exhibits linear growth, as shown in Fig. 5 (a). This reduction is paramount to making large-scale simulation achievable.", + "bbox": [ + 100, + 649, + 892, + 706 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "As an extension of the previous case, we consider $D$ dimensional general time-dependent parametric problems where the independent variables $(x_{1},x_{2},\\ldots ,x_{D})$ can be classified into 3 different categories, namely, spatial variables $\\pmb{x}_s$ , parametric variables $\\pmb{x}_p$ , and temporal variable $x_{t}$ . Spatial variables $\\pmb{x}_s$ describe the spatial coordinates of the problem. Parametric variables $\\pmb{x}_p$ can represent any PDE coefficients, initial/boundary conditions, or geometry descriptors as extra-coordinates. The temporal variable $x_{t}$ represents time. Assuming the spatial domain $\\Omega_{\\pmb{x}_s}$ is cubic, the parametric domain $\\Omega_{\\pmb{x}_p}$ is hypercubic and Cartesian grids are used for discretization, then the nodal solution to these problems can be written as a discrete $D$ -th order tensor $u_{I_1I_2,\\dots,I_D}$ . 
Similarly, CP decomposition can be used to effectively decompose higher-order tensors into a finite sum of tensor products of 1D vectors.", + "bbox": [ + 100, + 706, + 892, + 820 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nu _ {I _ {1} I _ {2}, \\dots , I _ {D}} \\approx u _ {I _ {1} I _ {2}, \\dots , I _ {D}} ^ {T D} = \\sum_ {m = 1} ^ {M} u _ {I _ {1} m} ^ {[ 1 ]} u _ {I _ {2} m} ^ {[ 2 ]} \\dots u _ {I _ {D} m} ^ {[ D ]} \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 359, + 829, + 892, + 866 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 492, + 879, + 504, + 889 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/27d2162fec1e0e4eba57ca6fdf9b4a421edbc273be308e22965966bcd12bf083.jpg", + "image_caption": [ + "(a)", + "Figure 5: Comparison of number of DoFs, (a) in terms of mesh size $n$ , (b) in terms of problem dimension $D$" + ], + "image_footnote": [], + "bbox": [ + 188, + 130, + 489, + 318 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/2356fbd6103d0f1224ab953d79b4dd1accc5dfa6c206f9f994a88a02796022c1.jpg", + "image_caption": [ + "(b)" + ], + "image_footnote": [], + "bbox": [ + 492, + 130, + 810, + 318 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "If every dimension is discretized into $n$ grid points, then a $D$ -th order tensor will have DoFs of $n^D$ , whereas CP decomposition only requires $M \\times D \\times n$ DoFs. Consequently, CP decomposition can dramatically reduce the total DoFs of general high-dimensional parametric problems, as shown in Fig. 5 (b).", + "bbox": [ + 100, + 384, + 892, + 428 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "2.3. 
TD interpolation in TAPS", + "text_level": 1, + "bbox": [ + 102, + 441, + 315, + 455 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Assume that the $D$ -th order tensor $u_{I_1I_2,\\dots,I_D}$ represents a $D$ -input one-output continuous function $u(\\pmb{x})$ measured at a Cartesian grid discretized with $I_1, I_2, \\dots, I_D$ grid points in each input dimension. The discrete tensor decomposition $u_{I_1I_2,\\dots,I_D}^{TD}$ can only approximate the function $u(\\pmb{x})$ at these grid points. In this case, how can we measure the value of the function on an arbitrary input $\\pmb{x}$ with tensor decomposition? A natural answer is using C-HiDeNN interpolation functions.", + "bbox": [ + 100, + 458, + 894, + 527 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Similar to standard finite element shape functions, for a 3D spatial problem discretized with a Cartesian grid, a 3D C-HiDeNN interpolation function can be rewritten as a tensor product of one-dimensional C-HiDeNN interpolation functions (hyperparameters $s$ , $a$ and $p$ will be dropped from now on for brevity):", + "bbox": [ + 100, + 529, + 892, + 574 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\widetilde {N} _ {k} \\left(x _ {1}, x _ {2}, x _ {3}\\right) = \\widetilde {N} _ {I} ^ {[ 1 ]} \\left(x _ {1}\\right) \\widetilde {N} _ {J} ^ {[ 2 ]} \\left(x _ {2}\\right) \\widetilde {N} _ {K} ^ {[ 3 ]} \\left(x _ {3}\\right) \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 359, + 581, + 892, + 601 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where the superscript refers to the dimension of the 1D C-HiDeNN shape function. Therefore, we can rewrite Eq. 
2 as:", + "bbox": [ + 100, + 609, + 892, + 637 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\nu ^ {h} \\left(\\boldsymbol {x} _ {s}\\right) = \\sum_ {I} \\sum_ {J} \\sum_ {K} \\widetilde {N} _ {I} ^ {[ 1 ]} \\left(x _ {1}\\right) \\widetilde {N} _ {J} ^ {[ 2 ]} \\left(x _ {2}\\right) \\widetilde {N} _ {K} ^ {[ 3 ]} \\left(x _ {3}\\right) u _ {I J K} \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 331, + 649, + 892, + 678 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where $\\boldsymbol{x}_s = [x_1, x_2, x_3]$ is the spatial variable. Plugging the CP decomposition form of the tensor $u_{IJK}^{TD}$ into Eq. 6 into Eq. 9 and rearranging the terms, we have:", + "bbox": [ + 102, + 684, + 892, + 715 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\nu ^ {T D} \\left(\\boldsymbol {x} _ {s}\\right) = \\sum_ {m = 1} ^ {M} \\left[ \\sum_ {I} \\widetilde {N} _ {I} ^ {[ 1 ]} \\left(x _ {1}\\right) u _ {I m} ^ {[ 1 ]} \\right] \\left[ \\sum_ {J} \\widetilde {N} _ {J} ^ {[ 2 ]} \\left(x _ {2}\\right) u _ {J m} ^ {[ 2 ]} \\right] \\left[ \\sum_ {K} \\widetilde {N} _ {K} ^ {[ 3 ]} \\left(x _ {3}\\right) u _ {K m} ^ {[ 3 ]} \\right] \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 270, + 724, + 892, + 762 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Eq. 10 represents the TD interpolation (with C-HiDeNN) for a 3D spatial problem. Extending this framework to a general $D$ -dimensional space-parameter-time (S-P-T) problem with independent variables defined in Eq. 
11:", + "bbox": [ + 100, + 766, + 892, + 796 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {x} = \\left( \\underbrace {x _ {1}, \\dots , x _ {S}} _ {\\text {spatial variables}}, \\underbrace {x _ {S + 1}, \\dots , x _ {P}} _ {\\text {parametric variables}}, x _ {t} \\right) \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 374, + 810, + 892, + 828 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "spatial variables parametric variables", + "bbox": [ + 413, + 829, + 594, + 840 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 492, + 879, + 502, + 889 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Then the TD interpolation to the S-P-T solution field can be written as follows:", + "bbox": [ + 102, + 131, + 638, + 146 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\nu ^ {T D} \\left(\\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, x _ {t}\\right) = \\sum_ {m = 1} ^ {M} \\underbrace {\\left[ \\sum_ {I _ {1}} \\widetilde {N} _ {I _ {1}} ^ {[ 1 ]} \\left(x _ {1}\\right) u _ {I _ {1} m} ^ {[ 1 ]} \\right] \\cdots \\left[ \\sum_ {I _ {S}} \\widetilde {N} _ {I _ {S}} ^ {[ S ]} \\left(x _ {I _ {S}}\\right) u _ {I _ {S} m} ^ {[ S ]} \\right]} _ {\\text {s p a t i a l}} \\underbrace {\\left[ \\sum_ {I _ {S + 1}} \\widetilde {N} _ {I _ {S + 1}} ^ {[ S + 1 ]} \\left(x _ {S + 1}\\right) u _ {I _ {S + 1} m} ^ {[ S + 1 ]} \\right] \\cdots \\left[ \\sum_ {P} \\widetilde {N} _ {I _ {P}} ^ {[ P ]} \\left(x _ {P}\\right) u _ {I _ {P} m} ^ {[ P ]} \\right]} _ {\\text {p a r a m e t r i c}} \\underbrace {\\left[ \\sum_ {I _ {D}} \\widetilde {N} _ {I _ {D}} ^ {[ D ]} (t) u _ {I _ {D}} ^ {[ D ]} \\right]} _ {\\text {t e m p o r a l}} \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 112, + 154, + 892, + 206 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This can be further simplified using the product notation:", + "bbox": [ + 102, + 214, + 490, + 229 + ], + "page_idx": 7 + }, + 
{ + "type": "equation", + "text": "\n$$\nu ^ {T D} \\left(\\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, x _ {t}\\right) = \\sum_ {m = 1} ^ {M} \\prod_ {d = 1} ^ {D} \\sum_ {I _ {d}} \\widetilde {N} _ {I _ {d}} ^ {[ d ]} \\left(x _ {d}\\right) u _ {I _ {d} m} ^ {[ d ]} \\tag {13}\n$$\n", + "text_format": "latex", + "bbox": [ + 357, + 237, + 892, + 278 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "where $\\widetilde{N}_{I_d}^{[d]}(x_d)$ refers to the 1D C-HiDeNN shape function in the $d$ -th dimension; $u_{I_d m}^{[d]}$ is the nodal solution for dimension $d$ and mode $m$ .", + "bbox": [ + 100, + 288, + 892, + 319 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "2.4. The General S-P-T Galerkin form of TAPS", + "text_level": 1, + "bbox": [ + 102, + 332, + 428, + 348 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Similar to FEM, TAPS adopts the weighted-sum formulation to solve PDEs. Consider a general S-P-T PDE:", + "bbox": [ + 126, + 351, + 858, + 365 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} (u (\\boldsymbol {x})) = f (\\boldsymbol {x}), \\tag {14}\n$$\n", + "text_format": "latex", + "bbox": [ + 438, + 379, + 892, + 394 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "where $\\mathcal{L}$ is the differential operator; the independent variable vector $\\pmb{x} = (x_{s}, x_{p}, x_{t})$ ; $f(\\pmb{x})$ is the forcing function. Table 3 lists different examples of operator $\\mathcal{L}$ and corresponding dependent and independent variables.", + "bbox": [ + 102, + 401, + 890, + 430 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/7caeaada43efeb0a507f555c9c0b240ec85768dcd8b9ddf7f5624cd32319ec5c.jpg", + "table_caption": [ + "Table 3: Examples for differential operators, dependent and independent variables" + ], + "table_footnote": [], + "table_body": "
PDEDifferential operator LDependent variablexsxpxt
∂2u/∂x12 + ∂2u/∂x22 + ... + ∂2u/∂x2D = f(x)∂2/∂x12 + ∂2/∂x22 + ... + ∂2/∂x2Du(x1, x2, ...,xD)--
μui, jj + (μ + λ)uj,ij + Fi = e(x12+x22+x32)μ(·)i,jj + (μ + λ)(·)j,ijui, i = 1, 2, 3(x1, x2, x3)(λ,μ)-
ρcp,du/dt + k(∂2u/∂x12 + ∂2u/∂x22 + ∂2u/∂x32) = Pe(x12+x22+x32)ρcp,du/dt + k(∂2/∂x12 + ∂2/∂x22 + ∂2/∂x32)u(x1, x2, x3)(ρ, cp, k, P)t
", + "bbox": [ + 104, + 463, + 905, + 533 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The weighted-sum residual form of the PDE with TD interpolation can be written as:", + "bbox": [ + 127, + 546, + 702, + 560 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\int_ {\\Omega} \\delta u ^ {T D} (\\boldsymbol {x}) \\left[ \\mathcal {L} \\left(u ^ {T D} (\\boldsymbol {x})\\right) - f (\\boldsymbol {x}) \\right] d \\Omega = 0 \\tag {15}\n$$\n", + "text_format": "latex", + "bbox": [ + 361, + 570, + 892, + 601 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "where $u^{TD}$ is the approximation of the solution (i.e., trial function), $\\delta u^{TD}$ is the test function, and $d\\Omega = d\\Omega_{x_s}d\\Omega_{x_p}d\\Omega_{x_t}$ .", + "bbox": [ + 102, + 605, + 890, + 620 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Depending on how $\\delta u^{TD}$ is adopted, different mathematical formulations can be obtained. If the test function resides in the same function space as the trial function, it becomes the Galerkin formulation. When the test function space differs from the trial function space, it becomes the Petrov-Galerkin formulation [22]. If the Dirac delta function is used for the test function, then Eq. 15 corresponds to the collocation method [24]. In this paper, we employ the Galerkin formulation. However, the proposed framework is versatile and can be extended to accommodate other formulations as well.", + "bbox": [ + 100, + 620, + 892, + 703 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In Eq. 12, the entire S-P-T domain is approximated using TD interpolation. However, this approach may result in a large system of equations due to the rapid increase in the number of TD modes for certain cases. For example, if the forcing function represents a moving source function in Eq. 14), this complexity may arise. 
To maintain computational efficiency, we can partition the temporal domain into a series of time slabs. As illustrated in Fig. 6(a), the S-P-T continuum is divided into S-P-T slabs $\\mathcal{T}_1,\\mathcal{T}_2,\\dots ,\\mathcal{T}_T$ . The solution within each time slab is then approximated individually using the TD interpolation.", + "bbox": [ + 100, + 706, + 892, + 790 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Between consecutive S-P-T slabs, either a continuous or discontinuous formulation can be employed. As shown in Fig. 6(b) for the continuous Galerkin scheme, the continuity of the solution in time is enforced by imposing the solution at the end of slab $\\mathcal{T}_{i-1}$ as the initial condition of $\\mathcal{T}_i$ :", + "bbox": [ + 100, + 791, + 892, + 834 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n{ } ^ { [ \\mathcal { T } + 1 ] } u ( \\boldsymbol { x } _ { s } , \\boldsymbol { x } _ { p } , 0 ) = { } ^ { [ \\mathcal { T } ] } u ( \\boldsymbol { x } _ { s } , \\boldsymbol { x } _ { p } , x _ { t } ^ { m a x } ) \\tag {16}\n$$\n", + "text_format": "latex", + "bbox": [ + 374, + 845, + 892, + 864 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 492, + 879, + 502, + 889 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Discontinuous Galerkin method can be used when a discontinuity is allowed between S-P-T slabs, as illustrated in Fig. 6(c). 
Discontinuity in time can be modeled using the jump operator $\\llbracket \\dots \\rrbracket$ [25].", + "bbox": [ + 102, + 131, + 892, + 161 + ], + "page_idx": 8 + }, + { + "type": "equation", + "text": "\n$$\n\\llbracket u \\left(\\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, t\\right) \\rrbracket = \\lim _ {\\epsilon \\rightarrow 0 ^ {+}} \\left(^ {\\mathcal {T} + 1} u \\left(\\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, \\epsilon\\right) - ^ {\\mathcal {T}} u \\left(\\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, x _ {t} ^ {\\max } - \\epsilon\\right)\\right) \\tag {17}\n$$\n", + "text_format": "latex", + "bbox": [ + 290, + 171, + 892, + 195 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/6c2d52bb0f68ce1b2b23d130b8706969e5dd55fc54afb40e93be9e7a63dfc8a8.jpg", + "image_caption": [ + "(a)" + ], + "image_footnote": [], + "bbox": [ + 146, + 214, + 408, + 337 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/2211d1927532087afa8c9ab6bf9f0e46ebf0f9fcc68e95f91179e9e07f54486f.jpg", + "image_caption": [ + "(b)", + "Figure 6: (a) Multiple S-P-T slabs along the temporal dimension. (b) Continuous Galerkin: the solution is continuous across different S-P-T slabs. (c) Discontinuous Galerkin: jumps are allowed across the slab boundaries" + ], + "image_footnote": [], + "bbox": [ + 408, + 212, + 616, + 322 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/f9df96ab1e94c3f197c125cc26bdae791e3769bfcd4aa7fddc2a762fde3a337b.jpg", + "image_caption": [ + "(c)" + ], + "image_footnote": [], + "bbox": [ + 640, + 212, + 848, + 322 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Keeping in mind that this approach can be applied generally to a range of engineering problems, we will demonstrate an example of the Galerkin formulation using a single space-parameter-time partition (S-P-T) slab in the remainder of this section. 
For illustrative purposes, the transient heat transfer equation will be utilized:", + "bbox": [ + 102, + 400, + 892, + 443 + ], + "page_idx": 8 + }, + { + "type": "equation", + "text": "\n$$\n\\rho c _ {p} \\nabla_ {x _ {t}} u - \\nabla_ {\\boldsymbol {x} _ {s}} \\cdot k \\nabla_ {\\boldsymbol {x} _ {s}} u = f (\\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, x _ {t}) \\tag {18}\n$$\n", + "text_format": "latex", + "bbox": [ + 369, + 451, + 892, + 467 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "as we focus on the example of modeling the laser powder bed fusion (LPBF) process in additive manufacturing (AM). In an LPBF simulation, we adopt the following time-dependent moving heat source function:", + "bbox": [ + 102, + 475, + 890, + 502 + ], + "page_idx": 8 + }, + { + "type": "equation", + "text": "\n$$\nf \\left(\\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, x _ {t}\\right) = \\frac {2 \\eta P}{\\pi r ^ {2} d _ {\\nu}} \\exp \\left(- \\frac {2 \\left((x - x _ {0} (t)) ^ {2} + (y - y _ {0} (t)) ^ {2}\\right)}{r ^ {2}}\\right) \\cdot \\mathbf {1} _ {\\left(x _ {3} \\geq d _ {\\nu}\\right)} \\tag {19}\n$$\n", + "text_format": "latex", + "bbox": [ + 270, + 510, + 892, + 550 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Summarizing the independent variables in Eq. 18, there are spatial variables $\\mathbf{x}_s = (x_1, x_2, x_3)$ ; parametric variables $\\mathbf{x}_p = (k, \\rho, c_p, \\eta, P, r, d_v)$ ; and a temporal variable $x_t = t$ . Among the parametric variables, $k$ is conductivity; $\\rho$ is the material density; $c_p$ is heat capacity; $\\eta$ is the material absorptivity; $P$ represents laser power; $r$ is the standard deviation that characterizes the width of the heat source; $d_v$ is the penetration depth of the heat source. In Eq. 
19, $[x_0(t), y_0(t)]$ represents the center of the moving heat source; $\\mathbf{1}_{(x_3 \\geq d_v)}$ is the indicator function where $\\mathbf{1}_{(x_3 \\geq d_v)} = 1$ if $x_3 \\geq d_v$ or $\\mathbf{1}_{(x_3 \\geq d_v)} = 0$ if $x_3 < d_v$ . Note that the discretization of the material parameters, in particular, in a random field setting, has been previously proposed by Liu et al. [26, 27].", + "bbox": [ + 102, + 557, + 892, + 655 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "As shown in the schematic below, we classify the boundary surfaces into 2 categories: the Dirichlet boundary surface $\\Gamma_{D}$ and the Neumann boundary surface $\\Gamma_{N}$ .", + "bbox": [ + 102, + 657, + 890, + 684 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "A uniform ambient temperature is used as the initial condition. The bottom of the powder bed is subject to the Dirichlet boundary condition and the Neumann boundary conditions are prescribed on the other surfaces. The initial and boundary conditions are:", + "bbox": [ + 102, + 684, + 892, + 726 + ], + "page_idx": 8 + }, + { + "type": "equation", + "text": "\n$$\nu (\\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, 0) | _ {\\Omega} = u _ {0},\n$$\n", + "text_format": "latex", + "bbox": [ + 396, + 728, + 529, + 741 + ], + "page_idx": 8 + }, + { + "type": "equation", + "text": "\n$$\n\\left. 
u \\left(\\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, x _ {t}\\right) \\right| _ {\\Gamma_ {D}} = u _ {0}, \\tag {20}\n$$\n", + "text_format": "latex", + "bbox": [ + 396, + 744, + 890, + 760 + ], + "page_idx": 8 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {n} \\cdot \\boldsymbol {q} | _ {\\Gamma_ {N}} = q _ {\\text {c o n v}} + q _ {\\text {r a d}} + q _ {\\text {e v a p}}\n$$\n", + "text_format": "latex", + "bbox": [ + 396, + 763, + 596, + 778 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "where $u_{0}$ is the ambient temperature, $q_{conv}$ accounts for free convection, $q_{rad}$ accounts for radiation, and $q_{evap}$ imposes evaporative cooling when any material surface reaches the evaporation temperature [28]. Each flux is defined as:", + "bbox": [ + 102, + 782, + 892, + 810 + ], + "page_idx": 8 + }, + { + "type": "equation", + "text": "\n$$\nq _ {c o n v} = h _ {c o n v} [ u (x, t) - u _ {0} ],\n$$\n", + "text_format": "latex", + "bbox": [ + 393, + 821, + 571, + 835 + ], + "page_idx": 8 + }, + { + "type": "equation", + "text": "\n$$\nq _ {r a d} = - \\sigma_ {S B} \\epsilon \\left(u ^ {4} \\left(\\boldsymbol {x} _ {s}, x _ {t}\\right) - u _ {0} ^ {4}\\right), \\tag {21}\n$$\n", + "text_format": "latex", + "bbox": [ + 394, + 838, + 890, + 854 + ], + "page_idx": 8 + }, + { + "type": "equation", + "text": "\n$$\nq _ {e v a p} = - m _ {e v a p} L _ {e v a p}.\n$$\n", + "text_format": "latex", + "bbox": [ + 394, + 858, + 534, + 873 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 492, + 879, + 504, + 889 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/a36972000cd774cc5ab556e0900c90f4c6eea0a3a42c11d6c7322779ba30c687.jpg", + "image_caption": [ + "Figure 7: Transient heat transfer with initial condition and boundary conditions." 
+ ], + "image_footnote": [], + "bbox": [ + 347, + 135, + 647, + 261 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "where $\\sigma_{SB}$ is the Stefan-Boltzmann constant; $\\epsilon$ is the material emissivity; $u_0$ is the ambient temperature; $h_{conv}$ is the convection coefficient of the surrounding gas, $m_{evap}$ is the mass evaporation flux and $L_{evap}$ is the heat of evaporation. In the following numerical examples, we only consider the free convection term in the Neumann boundary condition. The solution to Eq. 18 is approximated using TD interpolation function:", + "bbox": [ + 102, + 310, + 892, + 367 + ], + "page_idx": 9 + }, + { + "type": "equation", + "text": "\n$$\nu ^ {T D} \\left(\\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, x _ {t}\\right) = \\sum_ {m = 1} ^ {M} u _ {\\boldsymbol {x} _ {s}} ^ {(m)} \\left(\\boldsymbol {x} _ {s}\\right) u _ {\\boldsymbol {x} _ {p}} ^ {(m)} \\left(\\boldsymbol {x} _ {p}\\right) u _ {x _ {t}} ^ {(m)} \\left(x _ {t}\\right) \\tag {22}\n$$\n", + "text_format": "latex", + "bbox": [ + 346, + 376, + 892, + 414 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Here a general notation is employed to represent different types of components in Eq. 22. For example, the spatial component $u_{\\boldsymbol{x}_s}^{(m)}(\\boldsymbol{x}_s)$ is equivalent to $u_{x_1}^{(m)}(x_1)u_{x_2}^{(m)}(x_2)u_{x_3}^{(m)}(x_3)$ . 
The corresponding test function can be obtained using the variational principle:", + "bbox": [ + 102, + 424, + 892, + 467 + ], + "page_idx": 9 + }, + { + "type": "equation", + "text": "\n$$\n\\delta u ^ {T D} \\left(\\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, x _ {t}\\right) = \\sum_ {m = 1} ^ {M} \\left[ \\delta u _ {\\boldsymbol {x} _ {s}} ^ {(m)} \\left(\\boldsymbol {x} _ {s}\\right) u _ {\\boldsymbol {x} _ {p}} ^ {(m)} \\left(\\boldsymbol {x} _ {p}\\right) u _ {x _ {t}} ^ {(m)} \\left(x _ {t}\\right) + u _ {\\boldsymbol {x} _ {s}} ^ {(m)} \\left(\\boldsymbol {x} _ {s}\\right) \\delta u _ {\\boldsymbol {x} _ {p}} ^ {(m)} \\left(\\boldsymbol {x} _ {p}\\right) u _ {x _ {t}} ^ {(m)} \\left(x _ {t}\\right) + u _ {\\boldsymbol {x} _ {s}} ^ {(m)} \\left(\\boldsymbol {x} _ {s}\\right) u _ {\\boldsymbol {x} _ {p}} ^ {(m)} \\left(\\boldsymbol {x} _ {p}\\right) \\delta u _ {x _ {t}} ^ {(m)} \\left(x _ {t}\\right) \\right] \\tag {23}\n$$\n", + "text_format": "latex", + "bbox": [ + 139, + 487, + 892, + 526 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Plugging the trial and test functions, the S-P-T Galerkin form of Eq. 18 can be obtained by following Eq. 
15:", + "bbox": [ + 102, + 535, + 838, + 550 + ], + "page_idx": 9 + }, + { + "type": "equation", + "text": "\n$$\n\\int_ {\\Omega} \\delta u ^ {T D} \\left[ \\rho c _ {p} \\nabla_ {x _ {t}} u ^ {T D} - \\nabla_ {x _ {s}} \\cdot k \\nabla_ {x _ {s}} u ^ {T D} - f \\right] d \\Omega = 0 \\tag {24}\n$$\n", + "text_format": "latex", + "bbox": [ + 329, + 558, + 892, + 589 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Using integration by parts on the diffusion term, we get the corresponding general S-P-T Galerkin weak form in TAPS formulation:", + "bbox": [ + 102, + 593, + 890, + 620 + ], + "page_idx": 9 + }, + { + "type": "equation", + "text": "\n$$\n\\int_ {\\Omega} \\delta u ^ {T D} \\rho c _ {p} \\nabla_ {x _ {t}} u ^ {T D} d \\Omega + \\int_ {\\Omega} \\nabla_ {\\boldsymbol {x} _ {s}} \\delta u ^ {T D} \\cdot k \\nabla_ {\\boldsymbol {x} _ {s}} u ^ {T D} d \\Omega - \\int_ {\\partial \\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, t} \\delta u ^ {T D} \\boldsymbol {n} \\cdot \\boldsymbol {q} | _ {\\Gamma_ {N}} d s d \\Omega_ {\\boldsymbol {x} _ {p}} d \\Omega_ {t} - \\int_ {\\Omega} \\delta u ^ {T D} f (\\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, x _ {t}) d \\Omega = 0 \\tag {25}\n$$\n", + "text_format": "latex", + "bbox": [ + 110, + 642, + 892, + 676 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "where $q$ is the heat flux on the Neumann boundary.", + "bbox": [ + 102, + 684, + 450, + 697 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "2.5. Discretized matrix form", + "text_level": 1, + "bbox": [ + 102, + 712, + 302, + 726 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "The S-P-T Galerkin weak form shown in Eq. 25 is nonlinear in nature due to the tensor product structure in TD interpolation, necessitating efficient solution schemes. To illustrate the detailed solution approach for the general S-P-T weak form, we simplify the governing equation Eq. 
18 by considering a one-dimensional spatial problem where $x_{s} = x$ . We assume that the product of density and specific heat capacity $\\rho c_{p}$ is equal to 1. Additionally, the forcing term is solely dependent on $x$ . Therefore, the simplified governing equation for this example is given by:", + "bbox": [ + 102, + 728, + 892, + 800 + ], + "page_idx": 9 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {\\partial u}{\\partial t} - \\frac {\\partial}{\\partial x} \\left( k \\frac {\\partial u}{\\partial x} \\right) = f (x) \\tag {26}\n$$\n", + "text_format": "latex", + "bbox": [ + 420, + 809, + 892, + 838 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "subject to homogeneous boundary conditions and initial conditions. This equation has 3 independent variables ( $D = 3$ ), i.e., spatial variable $x_{s} = x_{1} = x$ , parametric variable $x_{p} = x_{2} = k$ and temporal variable $x_{t} = x_{3} = t$ . The S-P-T", + "bbox": [ + 102, + 841, + 892, + 871 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 489, + 878, + 509, + 889 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Galerkin weak form of this problem can be written as follows according to Eq. 25 (the superscripts \"TD\" for both trial and test functions are omitted for brevity).", + "bbox": [ + 100, + 131, + 892, + 161 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\n\\int_ {\\Omega} \\delta u \\nabla_ {t} u d \\Omega + \\int_ {\\Omega} \\nabla_ {x} \\delta u \\cdot k \\nabla_ {x} u d \\Omega - \\int_ {\\Omega} \\delta u f d \\Omega = 0 \\tag {27}\n$$\n", + "text_format": "latex", + "bbox": [ + 322, + 170, + 892, + 200 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "The corresponding trial and test functions can be obtained using Eqs. 
22-23:", + "bbox": [ + 102, + 204, + 621, + 219 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\nu (x, k, t) = \\sum_ {m = 1} ^ {M} u _ {x} ^ {(m)} (x) u _ {k} ^ {(m)} (k) u _ {t} ^ {(m)} (t) \\tag {28}\n$$\n", + "text_format": "latex", + "bbox": [ + 379, + 228, + 892, + 266 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\n\\delta u (x, k, t) = \\underbrace {\\sum_ {m = 1} ^ {M} \\delta u _ {x} ^ {(m)} (x) u _ {k} ^ {(m)} (k) u _ {t} ^ {(m)} (t)} _ {\\text {s p a t i a l v a r i a t i o n}} + \\underbrace {\\sum_ {m = 1} ^ {M} u _ {x} ^ {(m)} (x) \\delta u _ {k} ^ {(m)} (k) u _ {t} ^ {(m)} (t)} _ {\\text {p a r a m e t r i c v a r i a t i o n}} + \\underbrace {\\sum_ {m = 1} ^ {M} u _ {x} ^ {(m)} (x) u _ {k} ^ {(m)} (k) \\delta u _ {t} ^ {(m)} (t)} _ {\\text {t e m p o r a l v a r i a t i o n}} \\tag {29}\n$$\n", + "text_format": "latex", + "bbox": [ + 191, + 275, + 892, + 334 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "As shown in Eq. 29, the test function is further split into $D$ variational terms for a general $D$ dimensional problem (in the current example, $D = 3$ ). As an example, we first plug Eq. 28 and the spatial variation term of Eq. 29 into the Galerkin weak form in Eq. 
27 to obtain the S-P-T weak form terms corresponding to spatial variation:", + "bbox": [ + 100, + 338, + 892, + 381 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\n\\underbrace {\\int_ {\\Omega} \\sum_ {m = 1} ^ {M} \\sum_ {n = 1} ^ {M} \\left[ \\nabla \\delta u _ {x} ^ {(m)} (x) \\nabla u _ {x} ^ {(n)} (x) d x \\right] \\cdot \\left[ u _ {k} ^ {(m)} (k) k u _ {k} ^ {(n)} (k) d k \\right] \\cdot \\left[ u _ {t} ^ {(m)} (t) u _ {t} ^ {(n)} (t) d t \\right]} _ {\\text {d i f f u s i o n t e r m}} +\n$$\n", + "text_format": "latex", + "bbox": [ + 252, + 391, + 742, + 445 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\n\\underbrace {\\int_ {\\Omega} \\sum_ {m = 1} ^ {M} \\sum_ {n = 1} ^ {M} \\left[ \\delta u _ {x} ^ {(m)} (x) u _ {x} ^ {(n)} (x) d x \\right] \\cdot \\left[ u _ {k} ^ {(m)} (k) u _ {k} ^ {(n)} (k) d k \\right] \\cdot \\left[ u _ {t} ^ {(m)} (t) \\nabla_ {t} u _ {t} ^ {(n)} (t) d t \\right]} _ {\\text {t i m e d e r i v a t i v e t e r m}} - \\tag {30}\n$$\n", + "text_format": "latex", + "bbox": [ + 260, + 448, + 892, + 504 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\n\\underbrace {\\int_ {\\Omega} \\sum_ {m = 1} ^ {M} \\left[ \\delta u _ {x} ^ {(m)} (x) f (x) d x \\right] \\cdot \\left[ u _ {k} ^ {(m)} (k) d k \\right] \\cdot \\left[ u _ {t} ^ {(m)} (t) d t \\right]} _ {\\text {f o r c i n g t e r m}}\n$$\n", + "text_format": "latex", + "bbox": [ + 334, + 505, + 660, + 560 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "We use 1D C-HiDeNN shape functions to approximate each univariate function:", + "bbox": [ + 102, + 565, + 645, + 579 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\nu _ {d} ^ {(n)} (x _ {d}) = \\widetilde {N} _ {n _ {d} ^ {\\prime}} ^ {[ d ]} (x _ {d}) u _ {n _ {d} ^ {\\prime} n} ^ {[ d ]} \\quad (\\text {n o s u m o n} d)\n$$\n", + "text_format": "latex", + "bbox": [ + 359, + 602, + 623, + 623 + ], + "page_idx": 10 + }, + { + "type": 
"equation", + "text": "\n$$\n\\delta u _ {d} ^ {(m)} \\left(x _ {d}\\right) = \\widetilde {N} _ {n _ {d}} ^ {[ d ]} \\left(x _ {d}\\right) \\delta u _ {n _ {d} m} ^ {[ d ]} \\quad (\\text {n o s u m o n} d) \\tag {31}\n$$\n", + "text_format": "latex", + "bbox": [ + 339, + 625, + 892, + 646 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "where Einstein summation is used. The free index $d$ refers to dimension and $d = x,k$ or $t$ . The gradient of the interpolated variable can be computed using the shape function derivative $\\widetilde{B}_{n_d}^{[d]}(x_d) = \\frac{d\\widetilde{N}_{n_d}^{[d]}(x_d)}{dx_d}$ .", + "bbox": [ + 102, + 653, + 892, + 689 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\n\\nabla_ {x _ {d}} u _ {d} ^ {(n)} (x _ {d}) = \\widetilde {B} _ {n _ {d} ^ {\\prime}} ^ {[ d ]} (x _ {d}) u _ {n _ {d} ^ {\\prime} n} ^ {[ d ]} \\quad (\\text {n o s u m o n} d)\n$$\n", + "text_format": "latex", + "bbox": [ + 347, + 708, + 635, + 730 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\n\\nabla_ {x _ {d}} \\delta u _ {d} ^ {(m)} (x _ {d}) = \\widetilde {B} _ {n _ {d}} ^ {[ d ]} (x _ {d}) \\delta u _ {n _ {d} m} ^ {[ d ]} \\quad (\\text {n o s u m o n} d) \\tag {32}\n$$\n", + "text_format": "latex", + "bbox": [ + 327, + 732, + 890, + 752 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Plugging Eq. 31 - 32 into Eq. 
30, the diffusion term can be rewritten as:", + "bbox": [ + 102, + 760, + 589, + 774 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\n\\sum_ {m = 1} ^ {M} \\sum_ {n = 1} ^ {M} \\underbrace {\\int_ {\\Omega_ {x}} \\widetilde {B} _ {n _ {x}} (x) \\delta u _ {n _ {x} m} ^ {[ x ]} \\widetilde {B} _ {n _ {x} ^ {\\prime}} (x) u _ {n _ {x} ^ {\\prime} n} ^ {[ x ]} d x} _ {\\text {s p a t i a l t e r m}} \\cdot \\underbrace {\\int_ {\\Omega_ {k}} \\widetilde {N} _ {n _ {k}} (k) u _ {n _ {k} m} ^ {[ k ]} k \\widetilde {N} _ {n _ {k} ^ {\\prime}} (k) u _ {n _ {k} ^ {\\prime} n} ^ {[ k ]} d k} _ {\\text {p a r a m e t r i c t e r m}} \\cdot \\underbrace {\\int_ {\\Omega_ {t}} \\widetilde {N} _ {n _ {t}} (t) u _ {n _ {t} m} ^ {[ t ]} \\widetilde {N} _ {n _ {t} ^ {\\prime}} (t) u _ {n _ {t} ^ {\\prime} n} ^ {[ t ]} d t} _ {\\text {t e m p o r a l t e r m}} \\tag {33}\n$$\n", + "text_format": "latex", + "bbox": [ + 184, + 785, + 892, + 838 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "As can be readily seen from Eq. 
33, after doing 1D integration of each term, the parametric and temporal terms can be treated as coefficient matrices:", + "bbox": [ + 100, + 841, + 892, + 869 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 489, + 878, + 507, + 889 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\nC _ {m n} ^ {[ k ]} = \\underbrace {\\int_ {\\Omega_ {k}} \\widetilde {N} _ {n _ {k}} (k) u _ {n _ {k} m} ^ {[ k ]} k \\widetilde {N} _ {n _ {k} ^ {\\prime}} (k) u _ {n _ {k} ^ {\\prime} n} ^ {[ k ]} d k} _ {\\text {p a r a m e t r i c t e r m}}\n$$\n", + "text_format": "latex", + "bbox": [ + 359, + 152, + 601, + 200 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\nC _ {m n} ^ {[ t ]} = \\underbrace {\\int_ {\\Omega_ {t}} \\widetilde {N} _ {n _ {t}} (t) u _ {n _ {t} m} ^ {[ t ]} \\widetilde {N} _ {n _ {t} ^ {\\prime}} (t) u _ {n _ {t} ^ {\\prime} n} ^ {[ t ]} d t} _ {\\text {t e m p o r a l t e r m}} \\tag {34}\n$$\n", + "text_format": "latex", + "bbox": [ + 383, + 203, + 892, + 250 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "as the only free indices are $m$ and $n$ . Substituting the coefficient matrices and rearranging different terms in Eq. 33, we have:", + "bbox": [ + 102, + 261, + 892, + 288 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\n\\sum_ {m = 1} ^ {M} \\delta u _ {n _ {x} m} ^ {[ x ]} \\sum_ {n = 1} ^ {M} \\left[ \\int_ {\\Omega_ {x}} \\widetilde {B} _ {n _ {x}} (x) \\widetilde {B} _ {n _ {x} ^ {\\prime}} (x) d x \\right] \\cdot C _ {m n} ^ {[ k ]} C _ {m n} ^ {[ t ]} \\cdot u _ {n _ {x} ^ {\\prime} n} ^ {[ x ]} \\tag {35}\n$$\n", + "text_format": "latex", + "bbox": [ + 327, + 299, + 892, + 338 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Like standard FEM, we can define $\\int_{x}\\widetilde{B}_{n_x}(x)\\widetilde{B}_{n_x'}(x)dx$ as the 1D stiffness matrix $K_{n_x n_x'}^{[x]}$ of $x$ dimension in Eq. 35. 
We let $C_{mn}^{[x]} = C_{mn}^{[k]}C_{mn}^{[t]}$ with no summation on $(m,n)$ . Furthermore, let us define the following 4-th order tensor:", + "bbox": [ + 102, + 343, + 892, + 378 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\nA _ {n _ {x} n _ {x} ^ {\\prime} m n} ^ {[ x ]} = K _ {n _ {x} n _ {x} ^ {\\prime}} ^ {[ x ]} C _ {m n} ^ {[ x ]} \\tag {36}\n$$\n", + "text_format": "latex", + "bbox": [ + 430, + 388, + 892, + 409 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "where $A_{n_x n_x'mn}^{[x]}$ is a function of solution vectors $u_{n_k m}^{[k]}$ and $u_{n_t m}^{[t]}$ since the coefficient matrix $C_{mn}^{[x]}$ depends on these solution vectors as shown in Eq. 34. This dependency reflects the interconnected nature of the variables across different dimensions in the S-P-T framework, highlighting how the spatial, parameter, and temporal components influence each other through the coefficients. As a result, Eq. 33 can be further simplified as follows:", + "bbox": [ + 102, + 414, + 892, + 473 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\n\\delta u _ {n _ {x} m} ^ {[ x ]} A _ {n _ {x} n _ {x} ^ {\\prime} m n} ^ {[ x ]} u _ {n _ {x} ^ {\\prime} n} ^ {[ x ]} \\tag {37}\n$$\n", + "text_format": "latex", + "bbox": [ + 438, + 481, + 892, + 502 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "where the summation signs are neglected since $m$ and $n$ become dummy variables. 
The 4-th order tensor $A_{n_x n_x' mn}^{[x]}$ can be reshaped as a 2nd order tensor $\\mathbb{A}_{IJ}^{[x]}$ : the indices $n_x$ and $m$ are combined into a single composite index $I$ , and the indices $n_x'$ and $n$ are combined into a single composite index $J$ .", + "bbox": [ + 102, + 512, + 892, + 560 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\nA _ {n _ {x} n _ {x} ^ {\\prime} m n} ^ {[ x ]} = \\mathbb {A} _ {I J} ^ {[ x ]} \\tag {38}\n$$\n", + "text_format": "latex", + "bbox": [ + 447, + 571, + 890, + 590 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Define the following vectorization:", + "bbox": [ + 102, + 595, + 342, + 608 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\n\\delta \\mathbb {U} _ {I} ^ {[ x ]} = \\left[ \\operatorname {v e c} \\left(\\delta u _ {n _ {x} m} ^ {[ x ]}\\right) \\right] _ {I}\n$$\n", + "text_format": "latex", + "bbox": [ + 403, + 631, + 554, + 652 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {U} _ {J} ^ {[ x ]} = \\left[ \\operatorname {v e c} \\left(u _ {n _ {x} ^ {\\prime} n} ^ {[ x ]}\\right) \\right] _ {J} \\tag {39}\n$$\n", + "text_format": "latex", + "bbox": [ + 425, + 653, + 890, + 674 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "As a result, Eq. 37 is equivalent to:", + "bbox": [ + 102, + 683, + 342, + 697 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\n\\delta \\mathbb {U} ^ {[ x ] T} \\mathbb {A} ^ {[ x ]} \\mathbb {U} ^ {[ x ]} \\tag {40}\n$$\n", + "text_format": "latex", + "bbox": [ + 448, + 709, + 890, + 726 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Following the same procedure, we can obtain matrix forms corresponding to the time derivative term $\\delta \\mathbb{U}^{[x]^T}\\mathbb{B}^{[x]}\\mathbb{U}^{[x]}$ , and the forcing term $\\delta \\mathbb{U}^{[x]^T}\\mathbb{Q}^{[x]}$ for the spatial variational part of Eq. 30. 
Similar structures can also be obtained for the parametric and temporal variational parts of the test function in Eq. 29. Denoting $\\mathbb{K}^{[d]} = \\mathbb{A}^{[d]} + \\mathbb{B}^{[d]}$ , the matrix form of the generalized S-P-T Galerkin form in Eq. 27 can be written as:", + "bbox": [ + 100, + 731, + 892, + 790 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\n\\underbrace {\\delta \\mathbb {U} ^ {[ x ] ^ {T}} \\mathbb {K} ^ {[ x ]} \\mathbb {U} ^ {[ x ]} - \\delta \\mathbb {U} ^ {[ x ] ^ {T}} \\mathbb {Q} ^ {[ x ]}} _ {\\text {s p a t i a l v a r i a t i o n a l p a r t}} + \\underbrace {\\delta \\mathbb {U} ^ {[ k ] ^ {T}} \\mathbb {K} ^ {[ k ]} \\mathbb {U} ^ {[ k ]} - \\delta \\mathbb {U} ^ {[ k ] ^ {T}} \\mathbb {Q} ^ {[ k ]}} _ {\\text {p a r a m e t r i c v a r i a t i o n a l p a r t}} + \\underbrace {\\delta \\mathbb {U} ^ {[ t ] ^ {T}} \\mathbb {K} ^ {[ t ]} \\mathbb {U} ^ {[ t ]} - \\delta \\mathbb {U} ^ {[ t ] ^ {T}} \\mathbb {Q} ^ {[ t ]}} _ {\\text {t e m p o r a l v a r i a t i o n a l p a r t}} = 0 \\tag {41}\n$$\n", + "text_format": "latex", + "bbox": [ + 198, + 797, + 890, + 834 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Eq. 41 is equivalent to the following nonlinear system of equations. 
Note that the nonlinearity comes from the fact that $\\mathbb{K}^{[d]}$ is solution dependent:", + "bbox": [ + 100, + 841, + 892, + 870 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 489, + 878, + 507, + 889 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\n\\left[ \\delta \\mathbb {U} ^ {[ x ] ^ {T}}, \\delta \\mathbb {U} ^ {[ k ] ^ {T}}, \\delta \\mathbb {U} ^ {[ t ] ^ {T}} \\right] \\left\\{\\left[ \\begin{array}{c c c} \\mathbb {K} ^ {[ x ]} (\\mathbb {U} ^ {[ k ]}, \\mathbb {U} ^ {[ t ]}) & 0 & 0 \\\\ 0 & \\mathbb {K} ^ {[ k ]} (\\mathbb {U} ^ {[ x ]}, \\mathbb {U} ^ {[ t ]}) & 0 \\\\ 0 & 0 & \\mathbb {K} ^ {[ t ]} (\\mathbb {U} ^ {[ x ]}, \\mathbb {U} ^ {[ k ]}) \\end{array} \\right] \\left[ \\begin{array}{l} \\mathbb {U} ^ {[ x ]} \\\\ \\mathbb {U} ^ {[ k ]} \\\\ \\mathbb {U} ^ {[ t ]} \\end{array} \\right] - \\left[ \\begin{array}{l} \\mathbb {Q} ^ {[ x ]} (\\mathbb {U} ^ {[ k ]}, \\mathbb {U} ^ {[ t ]}) \\\\ \\mathbb {Q} ^ {[ k ]} (\\mathbb {U} ^ {[ x ]}, \\mathbb {U} ^ {[ t ]}) \\\\ \\mathbb {Q} ^ {[ t ]} (\\mathbb {U} ^ {[ x ]}, \\mathbb {U} ^ {[ k ]}) \\end{array} \\right] \\right\\} = 0 \\tag {42}\n$$\n", + "text_format": "latex", + "bbox": [ + 131, + 153, + 892, + 200 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "where we can treat the solution vector $\\left[\\mathbb{U}^{[x]^T},\\mathbb{U}^{[k]^T},\\mathbb{U}^{[t]^T}\\right]$ as generalized DoFs like standard FEM. There are many ways to solve Eq. 42. For example, standard linearization schemes such as Newton's method have been used [29]. However, this method may suffer from ill-conditioning since the mismatch of scales for different dimensions can be significant. In this paper, we use the concept of subspace iteration to efficiently approximate the solution by iterating in the subspace of the test function space until a convergence criterion is met [19]. 
A similar counterpart has been widely adopted as the gold standard in discrete tensor decomposition [23].", + "bbox": [ + 100, + 210, + 892, + 298 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "2.6. Solution scheme of TAPS: subspace iteration", + "text_level": 1, + "bbox": [ + 102, + 312, + 442, + 326 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "For subspace iteration in $d$ -th dimension, only the solution matrix $\\mathbb{U}^{[d]}$ is treated as unknown while all other functions are considered as known constants. Consequently, the variations of the univariate functions other than $d$ -th dimension will vanish. From Eq. 42, it can be seen that this will lead to a linear system of equations for the unknowns in the $d$ -th dimension. The updated solution matrix $\\mathbb{U}^{[d]}$ from this process is then used in the next subspace iteration for dimension $d + 1$ (when $d = D$ , we come back to the first dimension $d = 1$ ). The complete solution scheme for subspace iteration is shown in Algorithm 1.", + "bbox": [ + 100, + 329, + 892, + 414 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Algorithm 1 TAPS solution scheme (subspace iteration)", + "bbox": [ + 105, + 428, + 487, + 442 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "1: Initialize solution vector $\\mathbb{U}^{[x_1][0]}$ ..., $\\mathbb{U}^{[x_D][0]}$ with random values and compute $\\mathbb{K}^{[x_1][0]}$ , and $\\mathbb{Q}^{[x_1][0]}$", + "bbox": [ + 112, + 444, + 781, + 460 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "2: for iter = 0 to iter_max do", + "3: for $d = 1$ to D do", + "4: Update iteration number $\\mathcal{K} = iter\\times D + d$", + "5: Solve TD linear system $\\mathbb{K}^{[x_d][\\mathcal{K} - 1]}\\mathbb{U}^{[x_d][\\mathcal{K}]} = \\mathbb{Q}^{[x_d][\\mathcal{K} - 1]}$", + "6: Update matrices $\\mathbb{K}^{[x_{d + 1}][\\mathcal{K}]}$ and force vector $\\mathbb{Q}^{[x_{d + 
1}][\\mathcal{K}]}$", + "7: end for", + "8: Check convergence", + "9: end for" + ], + "bbox": [ + 112, + 444, + 546, + 571 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "To illustrate the details of the subspace iteration algorithm, we consider the $\\mathcal{K}$ -th subspace iteration (which is on spatial variable $x$ ). Here, we assume that the parametric and temporal solutions have been updated from the previous $(\\mathcal{K} - 1)$ -th iteration, leaving the spatial solution as unknown to be solved in $\\mathcal{K}$ -th iteration. Moreover, instead of the full variation form of the test function as in Eq. 42, we only consider the subspace $x$ of the test function by setting the parametric and temporal variational parts as 0. As a result, we have:", + "bbox": [ + 100, + 590, + 892, + 662 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {K} ^ {[ x ] [ \\mathcal {K} - 1 ]} \\left(\\mathbb {U} ^ {[ k ] [ \\mathcal {K} - 1 ]}, \\mathbb {U} ^ {[ t ] [ \\mathcal {K} - 1 ]}\\right) \\mathbb {U} ^ {[ x ] [ \\mathcal {K} ]} = \\mathbb {Q} ^ {[ x ] [ \\mathcal {K} - 1 ]} \\left(\\mathbb {U} ^ {[ k ] [ \\mathcal {K} - 1 ]}, \\mathbb {U} ^ {[ t ] [ \\mathcal {K} - 1 ]}\\right) \\tag {43}\n$$\n", + "text_format": "latex", + "bbox": [ + 265, + 673, + 890, + 694 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "which is a linear system of equations with unknown $\\mathbb{U}^{[x][\\mathcal{K}]}$ . This is a general Sylvester equation which can be solved using many efficient solution schemes [30, 31]. In this paper, sparse direct solvers based on fast diagonalization/complex Schur decomposition methods are adopted [32]. 
The computational complexity of the sparse direct solver is $O(M^3 + M^2 n_d + C_c(n_d))$ for the $d$ -th dimension subspace iteration, where $M$ is the total number of modes; $n_d$ is the number of grid points for $d$ -th dimension; $C_c(n_d)$ refers to the computational cost of the banded sparse mass/stiffness matrix for $d$ -th dimension with a shape of $(n_d \\times n_d)$ .", + "bbox": [ + 100, + 700, + 892, + 785 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Once $\\mathbb{U}^{[x][\\mathcal{K}]}$ is obtained, we then update matrix $\\mathbb{K}^{[k][\\mathcal{K}]}(\\mathbb{U}^{[x][\\mathcal{K}]},\\mathbb{U}^{[t][\\mathcal{K}]})$ and forcing vector $\\mathbb{Q}^{[k][\\mathcal{K}]}(\\mathbb{U}^{[x][\\mathcal{K}]},\\mathbb{U}^{[t][\\mathcal{K}]})$ . In the next iteration (for dimension $k$ ), we treat $\\mathbb{U}^{[k][\\mathcal{K} + 1]}$ as the only unknown. Subspace iteration will continue unless the relative change of all solution matrices (for example, $L_{2}$ norm) is within the tolerance.", + "bbox": [ + 100, + 784, + 892, + 828 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 489, + 878, + 507, + 889 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "2.7. 
Error estimates of TAPS", + "text_level": 1, + "bbox": [ + 102, + 131, + 307, + 145 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Since the TAPS solution is based on the C-HiDeNN-TD approximation and the generalized Galerkin formulation, we can have the following theoretical results on the error bounds, as demonstrated in our previous work on C-HiDeNN [6]:", + "bbox": [ + 102, + 149, + 892, + 191 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| u ^ {\\mathrm {C} - \\mathrm {H i D e N N}} - u ^ {\\mathrm {e x}} \\right\\| _ {E} \\leq \\left\\| u ^ {\\mathrm {T A P S}} - u ^ {\\mathrm {e x}} \\right\\| _ {E} \\leq \\left\\| u ^ {\\mathrm {F E M}} - u ^ {\\mathrm {e x}} \\right\\| _ {E} \\tag {44}\n$$\n", + "text_format": "latex", + "bbox": [ + 321, + 191, + 890, + 206 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "where $\\| \\cdot \\|_E$ denotes the energy norm, $u^{\\mathrm{ex}}$ denotes the exact solution, $u^{\\mathrm{C - HiDeNN}}$ denotes the solution obtained by the full C-HiDeNN method without tensor decomposition, $u^{\\mathrm{TAPS}}$ denotes the TAPS solution with a sufficient number of modes, $u^{\\mathrm{FEM}}$ denotes the FEM solution. The proof of the above results is based on the fact that the full C-HiDeNN approximation can provide a larger function space and therefore more accurate solutions than conventional FEM [6]. The subspace iteration can be considered as a local (directional) version of the Galerkin formulation and is expected to enable an optimized solution for the tensor decomposition that will converge to the Galerkin-based full C-HiDeNN method.", + "bbox": [ + 102, + 214, + 892, + 313 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "3. Results", + "text_level": 1, + "bbox": [ + 102, + 335, + 181, + 348 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "3.1. 
Convergence study for moving heat source", + "text_level": 1, + "bbox": [ + 102, + 360, + 426, + 375 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "In this section, we first analyze the convergence of the TAPS solver for a space-time (S-T) transient heat transfer problem. A single NVIDIA RTX A6000 GPU is used for all the following analyses. In Eq. 18, we let $\\rho c_{p} = 1$ , $k = 1$ , and replace the heat source as shown in Eq. 45. In this example, we have the spatial variable $x_{s} = (x,y,z)$ and the temporal variable $x_{t} = t$ .", + "bbox": [ + 102, + 378, + 892, + 434 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} f \\left(\\boldsymbol {x} _ {s}, x _ {t}\\right) = 2 (1 - 2 y ^ {2}) \\left(1 - e ^ {- 1 5 t}\\right) e ^ {- y ^ {2} - (1 0 0 t - x - 5) ^ {2}} \\\\ + 2 (1 - 2 (1 0 0 t - x - 5) ^ {2}) \\left(1 - e ^ {- 1 5 t}\\right) e ^ {- y ^ {2} - (1 0 0 t - x - 5) ^ {2}} + (1 \\tag {45} \\\\ - e ^ {- 1 5 t}) (2 0 0 x + 1 0 0 0 - 2 0 0 0 0 t) e ^ {- y ^ {2} - (1 0 0 t - x - 5) ^ {2}} \\\\ - 1 5 e ^ {- 1 5 t} e ^ {- y ^ {2} - (1 0 0 t - x - 5) ^ {2}} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 265, + 445, + 890, + 521 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "The analytical solution to the PDE is inherently non-separable.", + "bbox": [ + 102, + 531, + 529, + 545 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\nu ^ {\\mathrm {e x}} \\left(\\boldsymbol {x} _ {s}, x _ {t}\\right) = (1 - e ^ {- 1 5 t}) e ^ {- y ^ {2} - (x - 1 0 0 t - 5) ^ {2}} \\tag {46}\n$$\n", + "text_format": "latex", + "bbox": [ + 373, + 556, + 890, + 573 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "The initial and boundary conditions are:", + "bbox": [ + 102, + 585, + 376, + 598 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\nu \\left(x _ {s}, 0\\right) = 0,\n$$\n", + "text_format": "latex", + "bbox": [ + 406, + 612, + 490, + 624 + ], + 
"page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\left. u \\left(\\boldsymbol {x} _ {s}, x _ {t}\\right) \\right| _ {\\partial \\Omega} = \\left. u ^ {\\mathrm {e x}} \\left(\\boldsymbol {x} _ {s}, x _ {t}\\right) \\right| _ {\\partial \\Omega}. \\tag {47}\n$$\n", + "text_format": "latex", + "bbox": [ + 406, + 623, + 890, + 646 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "The relative $L_{2}$ norm error is defined as:", + "bbox": [ + 102, + 651, + 376, + 664 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\epsilon_ {L _ {2}} = \\frac {\\| u ^ {T D} \\left(\\boldsymbol {x} _ {s} , x _ {t}\\right) - u ^ {\\mathrm {e x}} \\left(\\boldsymbol {x} _ {s} , x _ {t}\\right) \\| _ {L _ {2} \\left(\\Omega_ {\\boldsymbol {x} _ {s}} \\otimes \\Omega_ {x _ {t}}\\right)}}{\\| u ^ {\\mathrm {e x}} \\left(\\boldsymbol {x} _ {s} , x _ {t}\\right) \\| _ {L _ {2} \\left(\\Omega_ {\\boldsymbol {x} _ {s}} \\otimes \\Omega_ {x _ {t}}\\right)}} \\tag {48}\n$$\n", + "text_format": "latex", + "bbox": [ + 359, + 675, + 890, + 709 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "First, we investigate the influence of the number of subspace iterations. As shown in Fig. 8(a), 3 iterations are enough to obtain an accurate result. Next, we investigate the convergence in terms of the number of modes. Here we compare the relative $L_{2}$ norm error for both TAPS and proper generalized decomposition (PGD) methods [17, 18]. To this aim, we use the same discretization for the space-time domain with each dimension discretized by 100 grid points, the same reproducing polynomial order $p = 1$ and convolution patch size $s = 1$ . As can be seen from Fig. 8(b), TAPS requires a much smaller number of modes than PGD. For TAPS, when the number of modes equals 25, the relative $L_{2}$ norm error decreases to $2.5 \\times 10^{-3}$ . The total solution time is 15.2 s. 
However, PGD requires 1,000 modes which takes 60.6 s solution time to reach the same level of accuracy. This is because the test function space in PGD is a subspace of TAPS [29]. Furthermore, the modal decomposition obtained from PGD is not optimal and thus requires a larger storage requirement due to the increased number of modes.", + "bbox": [ + 102, + 718, + 892, + 860 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 489, + 879, + 507, + 889 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/71b70cc86e81f29f17717ebc5e990a233cea4fd792c202ed7d02aa919c3be394.jpg", + "image_caption": [ + "(a)" + ], + "image_footnote": [], + "bbox": [ + 183, + 133, + 505, + 281 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/fc1a326bac6804be45fa1c9b3cf27c427a62e03aa83dfcf816fe3cdb17e70be1.jpg", + "image_caption": [ + "(b)", + "Figure 8: Relative L2 norm error with respect to (a) the number of subspace iterations (b) the number of modes" + ], + "image_footnote": [], + "bbox": [ + 512, + 131, + 810, + 284 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "The spatial and temporal convergence are also studied. In Fig. 9(a), the number of temporal nodes is fixed as 500, and the spatial mesh is refined. It shows the relative $L_{2}$ norm error with respect to the number of nodes along each spatial dimension. As can be readily seen from the figure, larger patch size $s$ leads to smaller error given the same reproducing polynomial orders $p$ . Moreover, we can adjust $p$ to control the spatial convergence rate. Similarly, Fig. 9(b) demonstrates the convergence rate in the temporal domain where we fix the spatial discretization as 500 along each spatial dimension. 
By adjusting $s$ and $p$ , we can obtain different temporal convergence rates.", + "bbox": [ + 100, + 347, + 892, + 432 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Finally, we refine the spatial and temporal mesh simultaneously and study the spatio-temporal convergence rate in Fig. 9(c). As can be observed from the figure, higher reproducing polynomial order $p$ will lead to a higher-order convergence rate.", + "bbox": [ + 100, + 432, + 892, + 475 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/2345a49f326f69ec67725fc081645d1b37d8869821b2514f92f47f0a0b9cc7e8.jpg", + "image_caption": [ + "(a)" + ], + "image_footnote": [], + "bbox": [ + 110, + 489, + 381, + 645 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/aa5c83a28c6329ef2d94f536ced88460656363be508afd476967c069343f0768.jpg", + "image_caption": [ + "(b)" + ], + "image_footnote": [], + "bbox": [ + 386, + 489, + 620, + 646 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/d1d0eb95867b86e555e3a95df3d5b5a14cd34d608385d9234dcdbecea9446986.jpg", + "image_caption": [ + "(c)", + "Figure 9: Relative $L_{2}$ norm error with respect to the number of grid points (a) spatial convergence (b) temporal convergence (c) spatio-temporal convergence" + ], + "image_footnote": [], + "bbox": [ + 623, + 489, + 884, + 645 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "3.2. Convergence study of $S-P-T$ problems up to equivalent zetta-scale (10 $^{21}$ ) full models", + "text_level": 1, + "bbox": [ + 102, + 720, + 705, + 734 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "In this example, we study the convergence of the TAPS solver for the time-dependent parametric heat transfer problem in a S-P-T setting. In Eq. 18, we adopt the heat source as shown in Eq. 49. 
In this example, we have spatial variable $\\boldsymbol{x}_s = (x,y,z)$ , parametric variable $\\boldsymbol{x}_p = (k,P,\\rho ,c_p)$ and temporal variable $x_{t} = t$ .", + "bbox": [ + 100, + 736, + 892, + 778 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} f \\left(\\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, x _ {t}\\right) = 1 5 \\rho^ {2} c _ {p} ^ {2} k p e ^ {- 1 5 k t} e ^ {- 2 5. 0 x ^ {2} - 2 5. 0 y ^ {2}} \\\\ + 5 0 \\rho c _ {p} k p \\left(1 - e ^ {- 1 5 k t}\\right) e ^ {- 2 5. 0 x ^ {2} - 2 5. 0 y ^ {2}} \\left[ \\left(1 - 5 0 x ^ {2}\\right) + \\left(1 - 5 0 y ^ {2}\\right) \\right] \\tag {49} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 240, + 788, + 890, + 831 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "The analytical solution to the PDE is inherently non-separable.", + "bbox": [ + 102, + 832, + 529, + 847 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\nu ^ {\\mathrm {e x}} \\left(\\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, x _ {t}\\right) = \\rho c _ {p} P \\left(1 - e ^ {- 1 5 k t}\\right) e ^ {- 2 5. 0 x ^ {2} - 2 5. 
0 y ^ {2}} \\tag {50}\n$$\n", + "text_format": "latex", + "bbox": [ + 342, + 853, + 890, + 872 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 490, + 878, + 507, + 889 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "The initial and boundary conditions are:", + "bbox": [ + 102, + 131, + 376, + 146 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\nu \\left(\\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, 0\\right) = 0\n$$\n", + "text_format": "latex", + "bbox": [ + 376, + 156, + 489, + 172 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\nu \\left(\\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, x _ {t}\\right) | _ {\\partial \\Omega} = u ^ {\\mathrm {e x}} \\left(\\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, x _ {t}\\right) | _ {\\partial \\Omega}\n$$\n", + "text_format": "latex", + "bbox": [ + 379, + 175, + 616, + 195 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "The relative $L_{2}$ norm error is defined as:", + "bbox": [ + 102, + 200, + 379, + 212 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\epsilon_ {L _ {2}} = \\frac {\\left\\| u ^ {T D} \\left(\\boldsymbol {x} _ {s} , \\boldsymbol {x} _ {p} , x _ {t}\\right) - u ^ {\\mathrm {e x}} \\left(\\boldsymbol {x} _ {s} , \\boldsymbol {x} _ {p} , x _ {t}\\right) \\right\\| _ {L _ {2} \\left(\\Omega_ {x _ {s}} \\otimes \\Omega_ {x _ {p}} \\otimes \\Omega_ {x _ {t}}\\right)}}{\\left\\| u ^ {\\mathrm {e x}} \\left(\\boldsymbol {x} _ {s} , \\boldsymbol {x} _ {p} , x _ {t}\\right) \\right\\| _ {L _ {2} \\left(\\Omega_ {x _ {s}} \\otimes \\Omega_ {x _ {p}} \\otimes \\Omega_ {x _ {t}}\\right)}} \\tag {51}\n$$\n", + "text_format": "latex", + "bbox": [ + 317, + 222, + 892, + 260 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "To study the convergence of TAPS for S-P-T problems, the number of grid points is refined simultaneously in each dimension and corresponding relative $L_{2}$ norm 
errors are computed as shown in Fig. 10. When the number of grid points in each dimension is 450, the equivalent DoFs of a full model achieves $450^{8} = 1.68 \\times 10^{21}$ . Consequently, it is equivalent to a zetta-scale $(10^{21})$ full problem. As can be seen from the figure, a larger patch size $s$ leads to a smaller error and faster convergence. A higher reproducing polynomial order $p$ also leads to a higher convergence rate. It can be noticed that the convergence rate for $p = 3$ case is smaller than expected $p + 1 = 4$ . This is attributed to the fact that the S-P-T mesh is not fine enough. However, due to the rounding error in computing the relative $L_{2}$ norm error, we can only accurately compute the error up to 450 grid points per dimension.", + "bbox": [ + 100, + 268, + 892, + 382 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/7fb0c377642dbc2df6a083a06da8df232e8bdb8c9587d121c766dfaf94864ad3.jpg", + "image_caption": [ + "Figure 10: Relative $L_{2}$ norm error with respect to the number of grid points in each dimension" + ], + "image_footnote": [], + "bbox": [ + 344, + 395, + 655, + 596 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "In summary, we have the flexibility to choose different $s$ and $p$ to control the accuracy of TAPS by directly solving the S-P-T PDE. This is different from other data-driven modeling approaches (for instance, neural networks-based data-driven methods) in two notable ways. First, unlike a black-box neural network interpolator where the accuracy of the model is not guaranteed, our method is built upon the AI-enhanced finite element method, and we can control the convergence rate by choosing suitable hyperparameters $s$ and $p$ . Second, unlike most data-driven reduced-order models for physical problems, our method directly solves the governing PDE by plugging in the TD interpolation without seeing any training data. 
As a result, we can avoid the most expensive offline data generation stage as opposed to data-driven methods.", + "bbox": [ + 100, + 636, + 894, + 749 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "3.3. Moving source with solution dependent material parameters", + "text_level": 1, + "bbox": [ + 102, + 763, + 547, + 778 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "In this section, we model moving heat sources using temperature-dependent material parameters. The solution scheme of this problem is provided in detail in Appendix A. Figure 11(a) illustrates a typical representation of temperature-dependent heat conductivity and capacity for Inconel 718 [33].", + "bbox": [ + 100, + 781, + 894, + 824 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Since the temperature dependency of $k(u)$ and $\\rho c_{p}(u)$ can be approximated using a linear relationship, we can directly rewrite $k(u(\\boldsymbol{x}_s,x_t))$ and $\\rho c_{p}(u(\\boldsymbol{x}_{s},x_{t}))$ in the TD format.", + "bbox": [ + 102, + 824, + 892, + 853 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 489, + 878, + 509, + 889 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/5e704b162ebdd448550d9186205e95bdb2ff6e27cf2d3fe5c63a122256540197.jpg", + "image_caption": [ + "(a)" + ], + "image_footnote": [], + "bbox": [ + 186, + 143, + 500, + 286 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/716c83297ce005647d99e5f23cc46e6e3024853b336292259b2f47b9f8a009d8.jpg", + "image_caption": [ + "(b)", + "Figure 11: (a) Temperature dependent material properties for Inconel 718 [33] (b) Schematic of numerical simulation, where the solution along the center line is compared for FEM and TAPS." 
+ ], + "image_footnote": [], + "bbox": [ + 526, + 177, + 811, + 280 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\nk \\left(\\boldsymbol {x} _ {s}, x _ {t}\\right) \\approx \\sum_ {m = 1} ^ {M} m _ {k} u _ {x _ {1}} ^ {(m)} \\left(x _ {1}\\right) u _ {x _ {2}} ^ {(m)} \\left(x _ {2}\\right) u _ {x _ {3}} ^ {(m)} \\left(x _ {3}\\right) u _ {x _ {t}} ^ {(m)} \\left(x _ {t}\\right) + n _ {k}\n$$\n", + "text_format": "latex", + "bbox": [ + 315, + 387, + 673, + 425 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\rho c _ {p} \\left(\\boldsymbol {x} _ {s}, x _ {t}\\right) \\approx \\sum_ {m = 1} ^ {M} m _ {c _ {p}} u _ {x _ {1}} ^ {(m)} \\left(x _ {1}\\right) u _ {x _ {2}} ^ {(m)} \\left(x _ {2}\\right) u _ {x _ {3}} ^ {(m)} \\left(x _ {3}\\right) u _ {x _ {t}} ^ {(m)} \\left(x _ {t}\\right) + n _ {c _ {p}} \\tag {52}\n$$\n", + "text_format": "latex", + "bbox": [ + 287, + 428, + 892, + 464 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "where $M$ is the decomposition modes of the TAPS solution; $m_{k} = 1.52 \\times 10^{-5} \\mathrm{~W} / (\\mathrm{mmK}^{2})$ ; $n_{k} = 5.29 \\times 10^{-3} \\mathrm{~W} / (\\mathrm{mmK})$ ; $m_{c_{p}} = 6.11 \\times 10^{-7} \\mathrm{~mm}^{-3} \\mathrm{~K}^{-2}$ ; $n_{c p} = 3.25 \\times 10^{-3} \\mathrm{~mm}^{-3} \\mathrm{~K}^{-1}$", + "bbox": [ + 102, + 476, + 892, + 506 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "The problem setup is shown in Fig. 11 (b). The spatial domain size is $10\\mathrm{mm} \\times 10\\mathrm{mm} \\times 1\\mathrm{mm}$ where homogeneous Dirichlet boundary conditions are assumed for the left and right surfaces; homogeneous Neumann boundary conditions are applied to all other surfaces. As shown in Eq. 19, a moving Gaussian source term $f(\\boldsymbol{x}_s, t)$ is applied as a volumetric source term with a radius $r = 0.5\\mathrm{mm}$ and moving velocity $500\\mathrm{mm/s}$ . 
The diameter is discretized using 10 spatial elements.", + "bbox": [ + 102, + 506, + 892, + 575 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Since there is no analytical solution available to this problem, we use implicit finite element analysis as the baseline for validation. JAX-FEM [34] is used to generate the nonlinear FEM solution. For ease of comparison, we use the same time increment as $1.60 \\times 10^{-4}$ sec for both TAPS and FEM. The solution along the center line, as shown in Fig. 11 (b) is compared. As can be seen from Fig. 12, the result of the nonlinear TAPS solver agrees well with FEM.", + "bbox": [ + 102, + 576, + 892, + 634 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "3.4. Simulation of LPBF process", + "text_level": 1, + "bbox": [ + 102, + 646, + 331, + 661 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "In this section, we use TAPS to efficiently model the laser powder bed fusion process (LPBF) in additive manufacturing. Here we only consider the free convection term in the Neumann boundary condition. The initial condition can be considered by splitting the total solution as a summation of the homogeneous part and the inhomogeneous part.", + "bbox": [ + 100, + 665, + 892, + 709 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\nu \\left(\\boldsymbol {x} _ {s}, x _ {t}\\right) = u _ {0} \\left(\\boldsymbol {x} _ {s}, x _ {t}\\right) + u _ {\\text {i n i t}} \\left(\\boldsymbol {x} _ {s}\\right) \\tag {53}\n$$\n", + "text_format": "latex", + "bbox": [ + 391, + 722, + 890, + 738 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "As a result, $u_{0}(\\pmb{x}_{s}, x_{t})$ is subject to homogeneous initial conditions. In this section, we assume Ti-6Al-4V is used as the powder bed materials. The detailed material parameters can be found in Table 4.", + "bbox": [ + 102, + 744, + 892, + 772 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "3.4.1. 
Single-track simulation", + "text_level": 1, + "bbox": [ + 102, + 785, + 310, + 800 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "In this example, we investigate the computational complexity numerically for single-track LPBF simulation with a single S-T slab, as shown in Fig. 13 (a). A single NVIDIA RTX A6000 GPU is used for all the following analyses. To ensure accuracy, the number of modes is adopted as 5 times larger than the number of time steps in the following examples. In the first case, within the S-T slab, the spatial mesh is refined uniformly along each spatial dimension", + "bbox": [ + 100, + 801, + 892, + 859 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 489, + 878, + 507, + 889 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/3d5930d344ad41e50bbdf9aa4fa400ae67373da8806b2d086f22076597db5e79.jpg", + "image_caption": [ + "Figure 12: Comparison of Nonlinear TAPS solution versus finite element solution at different times." + ], + "image_footnote": [], + "bbox": [ + 339, + 130, + 648, + 336 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/e26a208925086b40e365433211679472b0588b4faef6f759f0fae6134d0dc341.jpg", + "table_caption": [ + "Table 4: Parameters used in the simulation" + ], + "table_footnote": [], + "table_body": "
ParameterVariableValueUnits
Thermal conductivityk22.0W m-1K-1
Densityρ4.27g cm-3
Specific heat capacitycp745J kg-1K-1
Ambient temperatureT0298.15K
Heat convection coefficienthconv14.73W m-2K-1
", + "bbox": [ + 105, + 397, + 892, + 486 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "while fixing the number of temporal grid points. The computational time for each subspace iteration is plotted in Fig. 13 (b). It can be seen that TAPS has a linear growth of computational complexity when refining the spatial mesh.", + "bbox": [ + 102, + 508, + 890, + 538 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Similarly, we only refine the temporal mesh while fixing the spatial mesh in the second case and plot the computational time for each subspace iteration as in Fig. 13 (c). It can be readily observed that refining the temporal mesh has a much higher computational complexity than refining the spatial mesh. This is because increasing temporal elements will also lead to an increased number of modes $M$ . As mentioned before, the computational cost for the sparse direct solver employed is $O(M^3 + M^2 n_d + C_c(n_d))$ for the $d$ -th dimension subproblem, where $M$ represents total number of modes; $n_d$ refers to the total number of grid points in $d$ -th dimension; $C_c(n_d)$ refers to the computational cost of a banded sparse matrix with a shape of $(n_d \\times n_d)$ . Therefore, the increased number of modes leads to a cubic growth in computational time.", + "bbox": [ + 100, + 538, + 892, + 650 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/a855b7b5af10595c3b507ce75c396e8ed53ba715f08759f5c0f86d5d5ce978ec.jpg", + "table_caption": [ + "Table 5: Parameters used in the single-track simulation" + ], + "table_footnote": [], + "table_body": "
ParameterVariableValueUnits
Laser powerP200W
Laser spot size radiusr50μm
Laser scan speedV500mm s-1
Absorptivityη0.251
LengthL1.5mm
WidthW1.5mm
HeightH1.5mm
Laser penetration depthd50μm
Mesh sizeh5μm
", + "bbox": [ + 105, + 684, + 892, + 829 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 490, + 878, + 507, + 889 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/06d93feedd76d1074ae325710d3e3e673d487dd19e7ca7144df3fe686afb12f4.jpg", + "image_caption": [ + "Figure 13: (a) Single-track simulation. (b) Computational time of subspace iteration in refining the spatial mesh: linear growth. (c) Computational time of subspace iteration in refining the temporal mesh in a single space-time slab: cubic growth due to the increased number of modes" + ], + "image_footnote": [], + "bbox": [ + 147, + 133, + 329, + 281 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/4f647f9527a5dd106e3c64c9d9a5f2ac9e311427cc75722f1e8cd157bd35a51d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 346, + 131, + 601, + 284 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/fa2ca97b6f3903dd29bd21860b65642768d7856f00c975369e0e7129f9aa8323.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 603, + 130, + 847, + 282 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "3.4.2. Multi-track simulation", + "text_level": 1, + "bbox": [ + 102, + 344, + 307, + 357 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "A major challenge in simulating multiple tracks in LPBF is the substantial number of time steps needed. To circumvent the cubic growth associated with the increasing number of temporal grid points and modes for moving source problems, we can leverage multiple S-T slabs to break down the original problem with a large number of time steps into smaller slabs. Consequently, this method keeps the total number of modes required in each slab beneath a reasonable threshold, thereby optimizing computational efficiency. The detailed algorithm of simulating multiple space-time (S-T) slabs for LPBF process is shown in Appendix B. 
Using this method, we first simulate a multi-track LPBF problem and analyze how the total number of slabs influences computational cost. The detailed setup can be found in Table 6. Note that we only simulate the printing process of the final layer in this section.", + "bbox": [ + 100, + 359, + 892, + 473 + ], + "page_idx": 18 + }, + { + "type": "table", + "img_path": "images/917627eb0b2157a4d53686719880b45a818bf05e0967a467fc9e17abc0ca611b.jpg", + "table_caption": [ + "Table 6: Parameters used in the multi-track simulation" + ], + "table_footnote": [], + "table_body": "
ParameterVariableValueUnits
Laser powerP200W
Laser spot size radiusr50μm
Laser scan speedV500mm s-1
Absorptivityη0.251
LengthL1.5mm
WidthW1.5mm
HeightH1.5mm
Laser penetration depthd50μm
Hatch space sizehs50μm
Mesh sizeh5μm
", + "bbox": [ + 105, + 505, + 892, + 665 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "We use different numbers of temporal grids within each S-T slab and compare the computation cost, as shown in Fig. 14. As can be seen from the figure, when each space-time slab contains around 20 temporal grid points, the computational efficiency is optimal. Hence, choosing the optimal number of temporal elements inside each space-time slab is crucial for the overall performance of the TAPS solver for modeling LPBF process. We will adopt 20 temporal grid points per S-T slab as the default for the following multi-track LPBF simulations.", + "bbox": [ + 100, + 678, + 892, + 749 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Next, we compare the performance of TAPS versus the classical explicit finite difference method. To this aim, we use a GPU-accelerated and optimized finite difference code, GAMMA, to model the LPBF process [28]. In this example, we increase the size of the domain while maintaining all other process parameters, as shown in Table 6. The corresponding computation time, GPU memory usage, and required data storage space are plotted in Fig. 15.", + "bbox": [ + 100, + 750, + 892, + 806 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Fig. 15(a) highlights the significant speed advantage of TAPS over GAMMA, especially as the size of the simulation domain increases. GAMMA only can simulate powder bed size up to $4.5^{3}\\mathrm{mm}^{3}$ since the GPU memory can only handle up to $7.31\\times 10^{8}$ spatial DoFs. For the $4.5^{3}\\mathrm{mm}^{3}$ case, TAPS is 85 times faster than GAMMA. 
On the other hand, TAPS is able to model $100^{3}\\mathrm{mm}^{3}$ powder bed, with its speed benefits becoming even more evident for larger", + "bbox": [ + 100, + 807, + 892, + 863 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 489, + 878, + 507, + 889 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/cc72b3743dea1acce6b7f995d1ed32b295f09a088f75ac84fbe7696ee90234f1.jpg", + "image_caption": [ + "(a)" + ], + "image_footnote": [], + "bbox": [ + 226, + 130, + 480, + 304 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/13672870ba3d3e65ac32f7b556569b2691beeea4c17ceda38811f12e48a4d9bc.jpg", + "image_caption": [ + "(b)" + ], + "image_footnote": [], + "bbox": [ + 497, + 142, + 774, + 305 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/1e0837c8a03c5dd7bb6ad48fde26cc3fea68510e35b307ac270e07be256a353a.jpg", + "image_caption": [ + "Figure 14: (a) Multi-track simulation. (b) Influence of number of temporal grid points in each S-T slab on the computational cost.", + "(a)", + "Figure 15: Performance comparison of TAPS and GAMMA for powder bed with different sizes in terms of (a) computational time (b) GPU memory requirement (c) Data storage requirement for each time increment" + ], + "image_footnote": [], + "bbox": [ + 105, + 367, + 364, + 537 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/f9ccd59eb06fa06952a41ddbeabb1229514b05ff8f3cebdcc5d4513deee979da.jpg", + "image_caption": [ + "(b)" + ], + "image_footnote": [], + "bbox": [ + 364, + 367, + 615, + 537 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/b1257e053086d2dd69596cd534767d87c78a5bcb112c31e469f474d08790a53b.jpg", + "image_caption": [ + "(c)" + ], + "image_footnote": [], + "bbox": [ + 618, + 367, + 889, + 536 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "domains. Fig. 
15(b) compares the memory requirements, where GAMMA experiences fast growth due to the cubic scaling of total spatial DoFs. In contrast, TAPS benefits from TD, requiring significantly less memory. TAPS uses 13 times smaller GPU memory compared to GAMMA for the $4.5^{3}\\mathrm{mm}^{3}$ case. Additionally, TAPS can efficiently manage GPU memory usage for larger powder bed simulations by adopting different numbers of temporal grids in each S-T slab. Finally, Fig. 15(c) compares data storage needs where GAMMA's storage requirements grow cubically, whereas TAPS maintains a linear growth pattern. For the $4.5^{3}\\mathrm{mm}^{3}$ case, the data storage of GAMMA is 2,700 times larger than TAPS.", + "bbox": [ + 100, + 609, + 892, + 708 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "3.4.3. Large-scale multi-layer multi-track LPBF simulation", + "text_level": 1, + "bbox": [ + 102, + 722, + 509, + 737 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "In this section, the proposed method is used to simulate a large-scale multi-layer multi-track LPBF process. Element birth is used to model newly added layers in the process. Details on element birth can be found in Appendix C. As shown in Fig. 16 (a), the run scenario is the production of a $10\\mathrm{mm}$ cube within a $12\\mathrm{mm}$ powder bed domain. The base plate height is $2\\mathrm{mm}$ . The tool path follows the pattern shown on the top surface. Material parameters are taken from Ti-6Al-4V [35]. The detailed parameters for the simulation setup are shown in Table 7.", + "bbox": [ + 100, + 739, + 892, + 809 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "To showcase the capabilities of our approach using TAPS, we employ a fine spatial mesh for the simulation. The spatial element size is $10 \\times 10 \\times 5\\mu m^3$ . 
In classical numerical algorithms, this corresponds to $3.46 \\times 10^{9}$ spatial DoFs, which is unmanageable for typical workstations due to the prohibitive RAM requirements.", + "bbox": [ + 100, + 810, + 892, + 852 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "The simulation result is shown in Fig. 16 (b), where the temperature of the last layer is plotted. In total, it costs 60.7", + "bbox": [ + 127, + 852, + 892, + 866 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 487, + 878, + 507, + 889 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/0f42907a3d6c93d278eb81dc581820a0260002f7e28abedb7a27b81e2d7be42a.jpg", + "image_caption": [ + "(a)" + ], + "image_footnote": [], + "bbox": [ + 233, + 147, + 480, + 297 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/6dccaac9c3bd5c80948b57cdbca225315a06fe5add804187394c7973ca6c6a07.jpg", + "image_caption": [ + "(b)", + "Figure 16: (a) Problem statement: LPBF simulation. (b) Temperature solution for printing the final layer" + ], + "image_footnote": [], + "bbox": [ + 500, + 140, + 769, + 297 + ], + "page_idx": 20 + }, + { + "type": "table", + "img_path": "images/65318179eddcce82c105fefcf6d4e4fb81f40bde17ec3c84afebf28ebb9869b3.jpg", + "table_caption": [ + "Table 7: Parameters used in the large-scale LPBF simulation" + ], + "table_footnote": [], + "table_body": "
ParameterVariableValueUnits
Laser powerP200W
Laser spot size radiusr100μm
Laser scan speedV500mm s-1
Absorptivityη0.251
Laser penetration depthd50μm
Layer thicknesshl50μm
Hatch space sizehs200μm
", + "bbox": [ + 105, + 382, + 892, + 500 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "hrs to run the simulation. The maximum GPU memory usage is 8.11 GB. The final solution vector size is 1.35 GB. As a comparison, it's estimated that GAMMA will solve the same problem with the same spatial resolution in 3,485 days, with at least 120 GB GPU memory usage and 1.26 TB storage space to store the solution [35]. Consequently, TAPS achieves around 1,370 X speedup, 14.8 X memory footprint savings, and 955 X storage gain compared to the finite difference method.", + "bbox": [ + 100, + 523, + 892, + 595 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "4. Discussion", + "text_level": 1, + "bbox": [ + 102, + 615, + 205, + 629 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "In the previous sections, we have shown that TAPS tackles two drawbacks of data-driven surrogate modeling approaches which use offline data generated through direct numerical simulation (DNS). Firstly, the proposed TAPS is data-free, which means that it does not require any training data. This is of crucial importance for applications that require ultra high-resolution simulations because offline training data generation can be extremely costly. Our method circumvents expensive offline DNS data generation by directly solving the governing equation. Secondly, TAPS enables solving ultra large-scale problems with significant speedup, minimal memory requirement, and substantial storage gain as compared to standard DNS techniques.", + "bbox": [ + 100, + 640, + 894, + 739 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "The computational speed of the current method can be further improved with the state-of-the-art high-performance numerical solvers and parallel computing on multiple GPUs. Right now, the TAPS linear systems of equations are solved on CPUs which results in additional overhead. 
With more sparse direct solvers/iterative schemes becoming available on GPU, we expect a further speedup of the current program. Moreover, parallel computing using multiple GPUs can be achieved using Message Passing Interface (MPI) [36]. For ultra large-scale analysis where each dimension contains millions of nodes, an efficient iterative solver with a suitable preconditioner needs to be developed.", + "bbox": [ + 100, + 740, + 894, + 825 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Variational multiscale methods can be used to further extend the capabilities of the current method to tackle zettascale space-time problems [37, 35]. Moreover, one major computational cost for the current method originates from the increased number of decomposition modes for a large number of time steps. This can be avoided by leveraging", + "bbox": [ + 100, + 825, + 894, + 869 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 487, + 878, + 507, + 889 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "coordinate transformation techniques where the moving source can be transformed into a fixed one. As a result, we expect to greatly improve the computational performance of the current method. Irregular geometry can also be considered using immersed finite element techniques or the Solid Isotropic Material with Penalization (SIMP) method in topology optimization [20, 38, 39, 40, 41].", + "bbox": [ + 100, + 131, + 892, + 189 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 102, + 210, + 211, + 223 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "In this paper, we propose TAPS as a data-free predictive scientific AI model to simulate ultra large-scale physical problems. 
This method eliminates the traditional necessity for offline training data generation, thereby exhibiting substantial speedup, memory efficiency, and storage gain as opposed to data-driven methods, making previously unsolvable large-scale and high-dimensional problems manageable. The convergence of the TAPS solver is numerically investigated. As a demonstration of the capabilities of TAPS, we showcase the application of the TAPS solver for a multi-layer multi-track additive manufacturing problem that is intractable with classical numerical algorithms.", + "bbox": [ + 100, + 235, + 892, + 319 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "TAPS is well suited for a broad range of science or engineering problems where: 1) the finite element method and other conventional numerical methods are unsuitable due to excessively long simulation times or high RAM and storage demands needed to achieve high accuracy, 2) the model must accommodate design parameters as inputs, or 3) fast prediction is required once the model is obtained. The INN hierarchical neural network interpolants, particularly C-HiDeNN used by TAPS, demonstrate superior performance compared to other machine learning models. For the solving tasks, it has shown superior performance compared to physics-informed neural network (PINN) [7], CP-PINN [42], and Kolmogorov-Arnold Networks (KAN) [43] with orders of magnitude faster solution time, higher accuracy, and better scalability to ultra large-scale and high-dimensional PDEs [44]. INN interpolants can also be effectively used in data-driven training tasks and show better training accuracy compared to MLP, SIREN [45] and KAN [11, 44].", + "bbox": [ + 100, + 319, + 892, + 447 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "As illustrated in Fig. 17, the significance of this work in the area of predictive scientific AI models aligns with the trend in other areas in AI, such as language and vision AI models. 
The evolution of language models has seen dramatic growth, beginning with foundational models like BERT [46], followed by the GPT series [47], which expanded transformer architecture to hundreds of billions of parameters, showcasing powerful generative capabilities. In vision models, AlexNet [48] marked a breakthrough, while advancements like DIT-XL [49] and SORA [50] integrated diffusion models to handle more complex and challenging visual tasks. This trajectory of increasing scale and sophistication from its network architecture (i.e., transformer of language models and diffusion of vision models) is mirrored in predictive scientific AI where TAPS represents a significant advancement in its network architecture, INN.", + "bbox": [ + 100, + 448, + 892, + 574 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "A major critical issue in the emerging large AI models is a more sophisticated model will generally lead to a larger amount of training data, more expensive training costs, and longer inference time. The advent of DeepSeek R1 breaks this rule since it has fewer parameters, much less training cost, faster inference speed, yet still comparable accuracy compared to other state-of-the-art models due to its novel architecture and training techniques such as distillation methods [51]. For predictive scientific AI, we face even more pronounced challenges due to strict accuracy demands and the necessity for high-resolution physics for large-scale problems. As a result, the future of predictive scientific AI is still largely untapped. 
TAPS provides a promising solution to these emerging challenges by delivering a highly accurate, exceptionally fast, and memory and storage efficient scientific AI model.", + "bbox": [ + 100, + 576, + 892, + 688 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "In conclusion, the proposed TAPS computational framework offers substantial enhancements in computational efficiency, memory consumption, and storage demands for science and engineering simulations. As a result, TAPS paves a new path to address future challenges in ultra large-scale simulations pertinent to complex predictive scientific AI models.", + "bbox": [ + 100, + 689, + 892, + 746 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Appendix A. Solving nonlinear S-P-T PDEs: solution dependent material properties", + "text_level": 1, + "bbox": [ + 102, + 766, + 717, + 782 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "The algorithm 1 works for linear PDEs where the PDE coefficients remain constant. However, in many engineering applications, the PDE coefficients can be solution-dependent. For instance, material properties such as heat conductivity and heat capacity can be a function of temperature in additive manufacturing. In these cases, the PDE becomes non-linear which requires an efficient solution scheme. In this section, we solely focus on the space-time problems formulation of a nonlinear PDE. 
As a result, the product of density and heat capacity $\\rho c_{p}(u)$ and conductivity", + "bbox": [ + 100, + 791, + 892, + 865 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 487, + 878, + 510, + 890 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/5ac20a39f47685ebb9378a0c4e12e417a603d78fe820a3ac37f08ecdb86a0b5e.jpg", + "image_caption": [ + "Figure 17: Evolution of AI models for different tasks" + ], + "image_footnote": [], + "bbox": [ + 226, + 131, + 774, + 363 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "$k(u)$ are no longer temperature independent as in Eq. 18. Similar to the linear problem shown before, the generalized Galerkin weak form is used to solve this equation.", + "bbox": [ + 100, + 416, + 894, + 445 + ], + "page_idx": 22 + }, + { + "type": "equation", + "text": "\n$$\n\\int_ {\\Omega} \\delta u \\nabla_ {x _ {t}} \\left[ \\rho c _ {p} (u) u \\right] d \\Omega - \\int_ {\\Omega} \\nabla_ {x _ {s}} \\delta u \\cdot k (u) \\nabla_ {x _ {s}} u d \\Omega + \\int_ {\\partial \\Omega_ {x _ {s}} \\otimes \\Omega_ {t}} \\delta u \\boldsymbol {q} \\cdot \\boldsymbol {n} d s d \\Omega_ {x _ {t}} = \\int_ {\\Omega} \\delta u b d \\Omega \\tag {A.1}\n$$\n", + "text_format": "latex", + "bbox": [ + 203, + 453, + 892, + 487 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "where $\\mathbf{q}$ is the heat flux on the Neumann boundary. Since Eq. A.1 is a space-time integral, classical time-stepping based methods can't be directly used to update material parameters. 
Here we propose a global-local approach similar to the Large Time Increment (LATIN) method to effectively solve the above equations [52].", + "bbox": [ + 100, + 489, + 894, + 533 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/200be0b0946761ab17f9c2c59f91eed1c6bccd651b97031cd574a612f7e34b9a.jpg", + "image_caption": [ + "Figure A.18: Global-local approach for nonlinear TAPS solver" + ], + "image_footnote": [], + "bbox": [ + 379, + 546, + 613, + 700 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "As shown in Fig. A.18, we split the nonlinear problem into 2 stages, a linear global stage and a nonlinear local update stage. In the global stage, we assume the spatio-temporal $k(\\pmb{x}_s,x_t)$ and $\\rho c_{p}(\\pmb{x}_{s},x_{t})$ are known. As a result, we treat the global problem as a linear problem and obtain $u(x_{s},x_{t})$ using the previously proposed method for linear problems. After $u(\\pmb{x}_s,x_t)$ is updated in the global stage, we update $k(u)$ and $\\rho c_{p}(u)$ locally at each Gauss integration point according to material models $k(u)$ and $\\rho c_{p}(u)$ . We repeat the global-local iteration until the variation of $k(u)$ and $\\rho c_{p}(u)$ between consecutive iterations meets the convergence criteria. The algorithm is summarized in Algorithm 2:", + "bbox": [ + 100, + 740, + 894, + 827 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 487, + 878, + 507, + 889 + ], + "page_idx": 22 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [ + "Algorithm 2 Nonlinear TAPS solution scheme: PDE with solution dependent coefficients" + ], + "code_body": "1: Initialize solution matrices with random values and update $\\rho_{c_p}(\\pmb{x}_s, x_t)$ and $k(\\pmb{x}_s, x_t)$ . 
\n2: for iter $_\\gamma$ = 1 to iter $_\\gamma_{max}$ do \n3: for iter = 1 to iter $_max$ do \n4: Update $\\rho_{c_p}(\\pmb{x}_s, x_t)$ and $k(\\pmb{x}_s, x_t)$ \n5: Use Algorithm 1 to solve solution $u(\\pmb{x}_s, x_t)$ \n6: for $i = 1$ to integration points do \n7: $\\rho_{c_p}(\\pmb{x}_s, x_t) = \\rho_{c_p}[u(\\pmb{x}_s, x_t)]$ \n8: $k(\\pmb{x}_s, x_t) = k[u(\\pmb{x}_s, x_t)]$ \n9: end for \n10: Check convergence \n11: end for \n12: end for", + "bbox": [ + 112, + 147, + 892, + 319 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Appendix B. Mode compression", + "text_level": 1, + "bbox": [ + 102, + 343, + 344, + 357 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "One significant challenge in multi-track simulation in LPBF is the huge number of time steps required. It is impossible to resolve all the time steps with only a single space-time (S-T) slab. Hence, we split the whole layer scan into multiple S-T slabs and relate each S-T slab using the following equation.", + "bbox": [ + 100, + 367, + 892, + 411 + ], + "page_idx": 23 + }, + { + "type": "equation", + "text": "\n$$\n{ } ^ { [ \\mathcal { T } + 1 ] } u ( \\boldsymbol { x } _ { s } , x _ { t } ) = { } ^ { [ \\mathcal { T } ] } u ( \\boldsymbol { x } _ { s } , x _ { t } ^ { m a x } ) + { } ^ { [ \\mathcal { T } + 1 ] } u _ { 0 } ( \\boldsymbol { x } _ { s } , x _ { t } ) \\tag {B.1}\n$$\n", + "text_format": "latex", + "bbox": [ + 339, + 417, + 892, + 435 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "where $[T + 1]u(\\pmb{x}_s, x_t)$ refers to the solution at $(\\mathcal{T} + 1)$ -th space-time slab; $[T + 1]u_0(\\pmb{x}_s, x_t)$ refers to the solution of the homogeneous initial value problem of $(\\mathcal{T} + 1)$ -th space-time slab; $[T]u(\\pmb{x}_s, x_t^{max})$ is the solution of $\\mathcal{T}$ -th space-time slab at the last time increment. As can be seen from Eq. 
B.1, we impose the last time increment solution of previous space-time slab as the initial condition for the next space-time slab. This is efficiently implemented by adding the TD form of the last increment as new modes in the current space-slab solution. However, for large-scale computations requiring thousands of slabs, directly concatenating modes can result in substantial storage demands.", + "bbox": [ + 100, + 444, + 892, + 529 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "In mode compression, we aim to compress the number of modes for $\\left[{}^{\\mathcal{T}}\\right]u(\\boldsymbol{x}_s,x_t^{max})$ because of its spatial dependence and naturally low-dimensional structure. Consequently, it can be effectively decomposed using only a few modes. Denote the TD form of the last time step solution of the previous space-time slab as $\\left[{}^{\\mathcal{T}}\\right]u(\\boldsymbol{x}_s,x_t^{max})^{TD}$ , we aim to find a compact form that can be represented with much fewer number of modes $\\left[{}^{\\mathcal{T}}\\right]u(\\boldsymbol{x}_s,x_t^{max})_F^{TD}$ . For notation simplicity, we omit $x_{t}^{max}$ in the following equations. 
Consequently, the mode compression problem can be written as:", + "bbox": [ + 100, + 529, + 892, + 602 + ], + "page_idx": 23 + }, + { + "type": "equation", + "text": "\n$$\n{ } ^ { [ \\mathcal { T } ] } u ( \\boldsymbol { x } _ { s } ) _ { F } ^ { T D } - { } ^ { [ \\mathcal { T } ] } u ( \\boldsymbol { x } _ { s } ) ^ { T D } = 0 \\tag {B.2}\n$$\n", + "text_format": "latex", + "bbox": [ + 400, + 608, + 890, + 627 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "The weighted sum residual form is used to approximate $[T]u(\\pmb{x}_s,x_{t - 1})_F^{TD}$ ..", + "bbox": [ + 102, + 634, + 591, + 651 + ], + "page_idx": 23 + }, + { + "type": "equation", + "text": "\n$$\n\\int_ {\\Omega_ {x}} \\delta^ {[ \\mathcal {T} ]} u \\left(\\boldsymbol {x} _ {s}\\right) _ {F} ^ {T D} \\cdot \\left[ ^ {[ \\mathcal {T} ]} u \\left(\\boldsymbol {x} _ {s}\\right) _ {F} ^ {T D} - ^ {[ \\mathcal {T} ]} u \\left(\\boldsymbol {x} _ {s}\\right) ^ {T D} \\right] d \\boldsymbol {x} _ {s} = 0 \\tag {B.3}\n$$\n", + "text_format": "latex", + "bbox": [ + 321, + 659, + 892, + 690 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Eq. B.3 can be efficiently solved using Algorithm 1.", + "bbox": [ + 102, + 697, + 455, + 713 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Appendix C. Element birth", + "text_level": 1, + "bbox": [ + 102, + 732, + 310, + 747 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "In the LPBF process, once the printing is finished for the current layer, a new layer of powder is deposited on top of the existing layer. This necessitates modeling the new layer with additional elements. Various studies have investigated different approaches for element birth techniques. While some researchers opt for activating small sections of geometry incrementally, others apply the technique by spreading the deposition across an entire layer or multiple layers simultaneously. 
The most widely adopted approach is to activate an entire layer and then scan the heat source over it [53].", + "bbox": [ + 100, + 756, + 892, + 841 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "In TAPS, we propose a new scheme to generate new layers of elements. In this scheme, new elements are added only in the $x_{3}$ direction, since the plan dimension doesn't change in the printing process. Therefore, as opposed to", + "bbox": [ + 100, + 841, + 892, + 871 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 487, + 878, + 507, + 889 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "full-scale classical numerical methods, TAPS enables marginal overhead in generating new layers of elements with extra grid points added only in the $x_{3}$ dimension. The solution scheme for multi-layer multi-track LPBF simulation using TAPS can be summarized in Algorithm 3.", + "bbox": [ + 100, + 131, + 894, + 175 + ], + "page_idx": 24 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [ + "Algorithm 3 Multi-layer multi-track LPBF simulation using TAPS" + ], + "code_body": "1: for $n_{layer} = 1$ to $n_{layerTotal}$ do \n2: Initialize solution matrices with random values for the new layer \n3: Compute the updated stiffness matrix and force vector for the $x_{3}$ direction \n4: for $n_{track} = 1$ to $n_{tracktotal}$ do \n5: for $iter = 1$ to $iter_{max}$ do \n6: for $d = 1$ to dimension do \n7: Compute solution vectors according to Algorithm 1 or 2 \n8: end for \n9: Check convergence \n10: end for \n11: Compress modes \n12: Concatenate compressed modes to previous tracks as new modes \n13: end for \n14: Compress modes \n15: Concatenate compressed modes to previous layers as new modes \n16: end for", + "bbox": [ + 112, + 205, + 655, + 432 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 104, + 469, + 186, + 483 + ], + "page_idx": 24 + }, + { + 
"type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Zongyi Li, Nikola Kovachki, Kamyar Azizzadenesheli, Burigede Liu, Kaushik Bhattacharya, Andrew Stuart, and Anima Anandkumar. Fourier neural operator for parametric partial differential equations. arXiv preprint arXiv:2010.08895, 2020.", + "[2] Owen Huang, Sourav Saha, Jiachen Guo, and Wing Kam Liu. An introduction to kernel and operator learning methods for homogenization by self-consistent clustering analysis. Computational Mechanics, 72(1):195-219, 2023.", + "[3] Can AI Solve Science? https://writings.sthenwolfram.com/2024/03/can-ai-solve-science/. [Accessed 03-04-2025].", + "[4] A new golden age of discovery — deepmind.google. https://deepmind.google/public-policy/ai-for-science/. [Accessed 03-04-2025].", + "[5] Wing Kam Liu, Shaofan Li, and Harold S Park. Eighty years of the finite element method: Birth, evolution, and future. Archives of Computational Methods in Engineering, 29(6):4431-4453, 2022.", + "[6] Ye Lu, Hengyang Li, Lei Zhang, Chanwook Park, Satyajit Mojumder, Stefan Knapik, Zhongsheng Sang, Shaoqiang Tang, Daniel W Apley, Gregory J Wagner, et al. Convolution hierarchical deep-learning neural networks (c-hidenn): finite elements, isogeometric analysis, tensor decomposition, and beyond. Computational Mechanics, 72(2):333-362, 2023.", + "[7] Maziar Raissi, Paris Perdikaris, and George E Karniadakis. Physics-informed neural networks: A deep learning framework for solving forward and inverse problems involving nonlinear partial differential equations. Journal of Computational physics, 378:686-707, 2019.", + "[8] Enrui Zhang, Ming Dao, George Em Karniadakis, and Subra Suresh. Analyses of internal structures and defects in materials using physics-informed neural networks. Science advances, 8(7):eabk0644, 2022.", + "[9] Nick McGreavy and Ammar Hakim. Weak baselines and reporting biases lead to overoptimism in machine learning for fluid-related partial differential equations. 
Nature Machine Intelligence, 6(10):1256-1269, 2024.", + "[10] Junwoo Cho, Seungtae Nam, Hyunmo Yang, Seok-Bae Yun, Youngjoon Hong, and Eunbyung Park. Separable physics-informed neural networks. Advances in Neural Information Processing Systems, 36, 2024.", + "[11] Chanwook Park, Sourav Saha, Jiachen Guo, Hantao Zhang, Xiaoyu Xie, Miguel A Bessa, Dong Qian, Wei Chen, Gregory J Wagner, Jian Cao, et al. Engineering software 2.0 by interpolating neural networks: unifying training, solving, and calibration. arXiv preprint arXiv:2404.10296, 2024.", + "[12] Lei Zhang, Lin Cheng, Hengyang Li, Jiaying Gao, Cheng Yu, Reno Domel, Yang Yang, Shaoqiang Tang, and Wing Kam Liu. Hierarchical deep-learning neural networks: finite elements and beyond. Computational Mechanics, 67:207-230, 2021.", + "[13] Lei Zhang, Ye Lu, Shaoqiang Tang, and Wing Kam Liu. Hidenn-td: reduced-order hierarchical deep learning neural networks. Computer Methods in Applied Mechanics and Engineering, 389:114414, 2022.", + "[14] Chanwook Park, Ye Lu, Sourav Saha, Tianju Xue, Jiachen Guo, Satyajit Mojumder, Daniel W Apley, Gregory J Wagner, and Wing Kam Liu. Convolution hierarchical deep-learning neural network (c-hidenn) with graphics processing unit (gpu) acceleration. Computational Mechanics, 72(2):383-409, 2023.", + "[15] Sourav Saha, Zhengtao Gan, Lin Cheng, Jiaying Gao, Orion L Kafka, Xiaoyu Xie, Hengyang Li, Mahsa Tajdari, H Alicia Kim, and Wing Kam Liu. Hierarchical deep learning neural network (hidenn): an artificial intelligence (ai) framework for computational science and engineering. Computer Methods in Applied Mechanics and Engineering, 373:113452, 2021." + ], + "bbox": [ + 105, + 493, + 894, + 865 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 487, + 878, + 507, + 889 + ], + "page_idx": 24 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[16] Yingjian Liu, Chanwook Park, Ye Lu, Satyajit Mojumder, Wing Kam Liu, and Dong Qian. 
Hidenn-fem: a seamless machine learning approach to nonlinear finite element analysis. Computational Mechanics, 72(1):173-194, 2023.", + "[17] Francisco Chinesta, Amine Ammar, Adrien Leygue, and Roland Keunings. An overview of the proper generalized decomposition with applications in computational rheology. Journal of Non-Newtonian Fluid Mechanics, 166(11):578-592, 2011.", + "[18] Francisco Chinesta, Roland Keunings, and Adrien Leygue. The proper generalized decomposition for advanced numerical simulations: a primer. Springer Science & Business Media, 2013.", + "[19] Anthony Nouy. A priori model reduction through proper generalized decomposition for solving time-dependent partial differential equations. Computer Methods in Applied Mechanics and Engineering, 199(23-24):1603-1626, 2010.", + "[20] Hengyang Li, Stefan Knapik, Yangfan Li, Chanwook Park, Jiachen Guo, Satyajit Mojumder, Ye Lu, Wei Chen, Daniel W Apley, and Wing Kam Liu. Convolution hierarchical deep-learning neural network tensor decomposition (c-hidenn-td) for high-resolution topology optimization. Computational Mechanics, 72(2):363-382, 2023.", + "[21] Ehsan Kharazmi, Zhongqiang Zhang, and George Em Karniadakis. hp-vpinns: Variational physics-informed neural networks with domain decomposition. Computer Methods in Applied Mechanics and Engineering, 374:113547, 2021.", + "[22] Thomas JR Hughes. The finite element method: linear static and dynamic finite element analysis. Courier Corporation, 2003.", + "[23] Tamara G Kolda and Brett W Bader. Tensor decompositions and applications. SIAM review, 51(3):455-500, 2009.", + "[24] Junuthula Narasimha Reddy. An introduction to the finite element method, volume 3. McGraw-Hill New York, 2005.", + "[25] Thomas JR Hughes and Gregory M Hulbert. Space-time finite element methods for elastodynamics: formulations and error estimates. Computer methods in applied mechanics and engineering, 66(3):339-363, 1988.", + "[26] Wing Kam Liu, Ted Belytschko, and A. Mani. 
Probabilistic finite elements for nonlinear structural dynamics. Computer Methods in Applied Mechanics and Engineering, 56(1):61-81, 1986.", + "[27] Wing Kam Liu, Ted Belytschko, and A. Mani. Random field finite elements. International Journal for Numerical Methods in Engineering, 23(3):1831-1845, 1986.", + "[28] Shuheng Liao, Ashkan Golgoon, Mojtaba Mozaffar, and Jian Cao. Efficientgpu-accelerated thermomechanical solver for residual stress prediction in additive manufacturing. Computational Mechanics, 71(5):879-893, 2023.", + "[29] Amine Ammar, Bechir Mokdad, Francisco Chinesta, and Roland Keunings. A new family of solvers for some classes of multidimensional partial differential equations encountered in kinetic theory modeling of complex fluids. Journal of non-Newtonian fluid Mechanics, 139(3): 153-176, 2006.", + "[30] Abderrahman Bouhamidi and Khalide Jbilou. A note on the numerical approximate solutions for generalized sylvester matrix equations with applications. Applied Mathematics and Computation, 206(2):687-694, 2008.", + "[31] Ya-Jun Xie and Chang-Feng Ma. The scaling conjugate gradient iterative method for two types of linear matrix equations. Computers & Mathematics with Applications, 70(5):1098-1113, 2015.", + "[32] Ulrich Langer and Marco Zank. Efficient direct space-time finite element solvers for parabolic initial-boundary value problems in anisotropic sobolev spaces. SIAM Journal on Scientific Computing, 43(4):A2714-A2736, 2021.", + "[33] A Sh Agazhanov, DA Samoshkin, and Yu M Kozlovskii. Thermophysical properties of inconel 718 alloy. In Journal of Physics: Conference Series, volume 1382, page 012175. IOP Publishing, 2019.", + "[34] Tianju Xue, Shuheng Liao, Zhengtao Gan, Chanwook Park, Xiaoyu Xie, Wing Kam Liu, and Jian Cao. Jax-fem: A differentiablegpu-accelerated 3d finite element solver for automatic inverse design and mechanistic data science. Computer Physics Communications, 291: 108802, 2023.", + "[35] Joseph P Leonor and Gregory J Wagner. 
Go-melt: GPU-optimized multilevel execution of lpbf thermal simulations. Computer Methods in Applied Mechanics and Engineering, 426:116977, 2024.", + "[36] Dana Jacobsen, Julien Thibault, and Inanc Senocak. An mpi-cuda implementation for massively parallel incompressible flow computations on multi-gpu clusters. In 48th AIAA Aerospace Sciences Meeting Including the New Horizons Forum and Aerospace Exposition, page 522, 2010.", + "[37] Thomas JR Hughes, Gonzalo R Feijóo, Luca Mazzei, and Jean-Baptiste Quincy. The variational multiscale method—a paradigm for computational mechanics. Computer methods in applied mechanics and engineering, 166(1-2):3-24, 1998.", + "[38] Xingshi Wang and Lucy T Zhang. Modified immersed finite element method for fully-coupled fluid-structure interactions. Computer methods in applied mechanics and engineering, 267:150–169, 2013.", + "[39] Wing Kam Liu, Yaling Liu, David Farrell, Lucy Zhang, X Sheldon Wang, Yoshio Fukui, Neelesh Patankar, Yongjie Zhang, Chandrajit Bajaj, Junghoon Lee, et al. Immersed finite element method and its applications to biological systems. Computer methods in applied mechanics and engineering, 195(13-16):1722-1749, 2006.", + "[40] Wing Kam Liu, Do Wan Kim, and Shaoqiang Tang. Mathematical foundations of the immersed finite element method. Computational Mechanics, 39:211-222, 2007.", + "[41] Adrian M Kopacz, Woon-Hong Yeo, Jae-Hyun Chung, and Wing Kam Liu. Nanoscale sensor analysis using the immersed molecular electrokinetic finite element method. Nanoscale, 4(16):5189-5194, 2012.", + "[42] Sai Karthikeya Vemuri, Tim Büchner, Julia Niebling, and Joachim Denzler. Functional tensor decompositions for physics-informed neural networks. In International Conference on Pattern Recognition, pages 32-46. Springer, 2025.", + "[43] Ziming Liu, Yixuan Wang, Sachin Vaidya, Fabian Ruehle, James Halverson, Marin Soljačić, Thomas Y Hou, and Max Tegmark. Kan: Kolmogorov-arnold networks. 
arXiv preprint arXiv:2404.19756, 2024.", + "[44] Jiachen Guo, Xiaoyu Xie, Chanwook Park, Hantao Zhang, Matthew Politis, Gino Domel, T.J.R Hughes, and Wing Kam Liu. Interpolation neural network-tensor decomposition (inn-td): a scalable and interpretable approach for large-scale physics-based problems. arXiv preprint arXiv:2503.02041, 2025.", + "[45] Vincent Sitzmann, Julien Martel, Alexander Bergman, David Lindell, and Gordon Wetzstein. Implicit neural representations with periodic activation functions. Advances in neural information processing systems, 33:7462-7473, 2020.", + "[46] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 conference of the North American chapter of the association for computational linguistics: human" + ], + "bbox": [ + 105, + 133, + 892, + 865 + ], + "page_idx": 25 + }, + { + "type": "page_number", + "text": "26", + "bbox": [ + 489, + 879, + 507, + 889 + ], + "page_idx": 25 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "language technologies, volume 1 (long and short papers), pages 4171-4186, 2019.", + "[47] Alec Radford, Karthik Narasimhan, Tim Salimans, Ilya Sutskever, et al. Improving language understanding by generative pre-training. 2018.", + "[48] Alex Krizhevsky, Ilya Sutskever, and Geoffrey E Hinton. Imagenet classification with deep convolutional neural networks. Advances in neural information processing systems, 25, 2012.", + "[49] William Peebles and Saining Xie. Scalable diffusion models with transformers. In Proceedings of the IEEE/CVF international conference on computer vision, pages 4195-4205, 2023.", + "[50] Yixin Liu, Kai Zhang, Yuan Li, Zhiling Yan, Chujie Gao, Ruoxi Chen, Zhengqing Yuan, Yue Huang, Hanchi Sun, Jianfeng Gao, et al. Sora: A review on background, technology, limitations, and opportunities of large vision models. 
arXiv preprint arXiv:2402.17177, 2024.", + "[51] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025.", + "[52] Pierre Ladevèze. On reduced models in nonlinear solid mechanics. European Journal of Mechanics-A/Solids, 60:227-237, 2016.", + "[53] Richard J Williams, Catrin M Davies, and Paul A Hooper. A pragmatic part scale model for residual stress and distortion prediction in powder bed fusion. Additive Manufacturing, 22:416-425, 2018." + ], + "bbox": [ + 105, + 133, + 892, + 280 + ], + "page_idx": 26 + }, + { + "type": "page_number", + "text": "27", + "bbox": [ + 489, + 878, + 507, + 889 + ], + "page_idx": 26 + } +] \ No newline at end of file diff --git a/data/2025/2503_13xxx/2503.13933/abcf6c14-6474-4c8a-adec-45f736d3be15_model.json b/data/2025/2503_13xxx/2503.13933/abcf6c14-6474-4c8a-adec-45f736d3be15_model.json new file mode 100644 index 0000000000000000000000000000000000000000..af0288463ede2c60a034733a33a1f302e271c7a4 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/abcf6c14-6474-4c8a-adec-45f736d3be15_model.json @@ -0,0 +1,4863 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.306, + 0.061, + 0.724 + ], + "angle": 270, + "content": "arXiv:2503.13933v1 [cs.CE] 18 Mar 2025" + }, + { + "type": "title", + "bbox": [ + 0.135, + 0.127, + 0.868, + 0.171 + ], + "angle": 0, + "content": "Tensor-decomposition-based A Priori Surrogate (TAPS) modeling for ultra large-scale simulations" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.188, + 0.882, + 0.218 + ], + "angle": 0, + "content": "Jiachen Guo\\(^{a}\\), Gino Domel\\(^{b}\\), Chanwook Park\\(^{b}\\), Hantao Zhang\\(^{a}\\), Ozgur Can Gumus\\(^{b}\\), Ye Lu\\(^{c}\\), Gregory J. Wagner\\(^{b}\\), Dong Qian\\(^{d,e}\\), Jian Cao\\(^{b}\\), Thomas J.R. 
Hughes\\(^{f}\\), Wing Kam Liu\\(^{b,e}\\)" + }, + { + "type": "text", + "bbox": [ + 0.169, + 0.228, + 0.825, + 0.24 + ], + "angle": 0, + "content": "aTheoretical and Applied Mechanics Program, Northwestern University, 2145 Sheridan Road, Evanston, 60201, IL, USA" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.24, + 0.812, + 0.251 + ], + "angle": 0, + "content": "\\(^{b}\\)Department of Mechanical Engineering, Northwestern University, 2145 Sheridan Road, Evanston, IL, USA" + }, + { + "type": "text", + "bbox": [ + 0.137, + 0.251, + 0.862, + 0.262 + ], + "angle": 0, + "content": "\\(^{c}\\)Department of Mechanical Engineering, University of Maryland, Baltimore County, 1000 Hilltop Circle, Baltimore, 21250, MD, USA" + }, + { + "type": "text", + "bbox": [ + 0.166, + 0.262, + 0.832, + 0.274 + ], + "angle": 0, + "content": "\\(^{d}\\)Department of Mechanical Engineering, University of Texas, Dallas, 800 W. Campbell Road, Richardson, 75080, TX, USA" + }, + { + "type": "text", + "bbox": [ + 0.282, + 0.274, + 0.715, + 0.285 + ], + "angle": 0, + "content": "\\( {}^{e} \\) Co-Founders of HIDENN-AI,LLC,1801 Maple Ave,Evanston,60201,IL,USA" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.285, + 0.812, + 0.296 + ], + "angle": 0, + "content": "fInstitute for Computational Engineering and Sciences, The University of Texas at Austin, 201 East 24th Street, Stop" + }, + { + "type": "text", + "bbox": [ + 0.413, + 0.296, + 0.586, + 0.307 + ], + "angle": 0, + "content": "C0200, Austin, 78712, TX, USA" + }, + { + "type": "title", + "bbox": [ + 0.104, + 0.361, + 0.172, + 0.374 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.381, + 0.896, + 0.568 + ], + "angle": 0, + "content": "A data-free, predictive scientific AI model, Tensor-decomposition-based A Priori Surrogate (TAPS), is proposed for tackling ultra large-scale engineering simulations with significant speedup, memory savings, and storage gain. 
TAPS can effectively obtain surrogate models for high-dimensional parametric problems with equivalent zetta-scale \\((10^{21})\\) degrees of freedom (DoFs). TAPS achieves this by directly obtaining reduced-order models through solving governing equations with multiple independent variables such as spatial coordinates, parameters, and time. The paper first introduces an AI-enhanced finite element-type interpolation function called convolution hierarchical deep-learning neural network (C-HiDeNN) with tensor decomposition (TD). Subsequently, the generalized space-parameter-time Galerkin weak form and the corresponding matrix form are derived. Through the choice of TAPS hyperparameters, an arbitrary convergence rate can be achieved. To show the capabilities of this framework, TAPS is then used to simulate a large-scale additive manufacturing process as an example and achieves around 1,370x speedup, 14.8x memory savings, and 955x storage gain compared to the finite difference method with 3.46 billion spatial degrees of freedom (DoFs). As a result, the TAPS framework opens a new avenue for many challenging ultra large-scale engineering problems, such as additive manufacturing and integrated circuit design, among others." + }, + { + "type": "title", + "bbox": [ + 0.104, + 0.575, + 0.179, + 0.588 + ], + "angle": 0, + "content": "Keywords:" + }, + { + "type": "text", + "bbox": [ + 0.103, + 0.589, + 0.875, + 0.618 + ], + "angle": 0, + "content": "Predictive scientific AI, hierarchical neural network finite element interpolation, generalized Galerkin formulation for parametric PDEs, large-scale simulation, additive manufacturing" + }, + { + "type": "title", + "bbox": [ + 0.104, + 0.661, + 0.222, + 0.675 + ], + "angle": 0, + "content": "1. 
Introduction" + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.686, + 0.896, + 0.858 + ], + "angle": 0, + "content": "Precision is a fundamental aspect of scientific and engineering applications, especially in advanced industries such as semiconductor manufacturing. The capability to perform accurate computational simulations for these applications is essential for advancing these fields. Precise simulations enable the optimization of design and manufacturing processes by utilizing virtual prototypes and process simulations. This reduces the need for expensive physical prototypes and tests and provides virtual prototypes in circumstances where physical ones are impractical. Traditional computational methods for engineering simulations, however, suffer from prohibitive computational costs when attempting to accurately predict responses across multiple length and time scales (typically done by increasing mesh resolution), making achieving high precision for large-scale problems challenging. In fact, the random-access memory (RAM) requirement can be far beyond the capability of typical workstations and may require massive parallelization on supercomputers. In other industries, such as additive manufacturing (a term encompassing all forms of 3D printing), the vast design space further exacerbates these limitations, as numerous expensive simulations are required to thoroughly explore the effects of different design parameters." + }, + { + "type": "footer", + "bbox": [ + 0.104, + 0.88, + 0.272, + 0.892 + ], + "angle": 0, + "content": "Preprint submitted to Elsevier" + }, + { + "type": "footer", + "bbox": [ + 0.803, + 0.88, + 0.893, + 0.891 + ], + "angle": 0, + "content": "March 19, 2025" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.436, + 0.141, + 0.563, + 0.15 + ], + "angle": 0, + "content": "Table 1: Nomenclature" + }, + { + "type": "table", + "bbox": [ + 0.107, + 0.15, + 0.894, + 0.624 + ], + "angle": 0, + "content": "
VariablesDescription
uh(x)Interpolated scalar field defined inside of an element
AeNodes within element e
AsNodes within patch domain of element e
WiS,a,p,j(x)Convolution patch function at node j for i-th nodal patch with hyperparameters s, a, and p
MTotal number of modes in tensor decomposition (TD)
mIndex for mode
DTotal number of dimensions
dIndex for dimension
xIndependent variable which includes spatial variable xs, parametric variable xp and temporal variable xt
Nd(xd;ad,sd,pd)Global C-HiDeNN shape function for dimension d with dilation parameter ad, patch size sd and reproducing polynomial order pd
bSource function in laser powder bed fusion process
uTDApproximation of the solution field expressed via TD
TTime slab index for space-parameter-time problem
kThermal conductivity
ρMaterial density
cpHeat capacity
ηMaterial absorptivity
PLaser power
rStandard deviation that characterizes the width of the heat source
qHeat flux
qconvHeat flux from convection
qradHeat flux from radiation
qevapHeat flux from evaporative cooling
hconvConvection coefficient
σSBStefan-Boltzman constant
mevapMass evaporation flux
LevapHeat of evaporation
BndShape function derivative
UdSolution matrix (Rnd×M) for dimension d that contains all the modes
" + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.647, + 0.894, + 0.846 + ], + "angle": 0, + "content": "To fulfill the ever-growing challenges in predictive scientific models, data-driven surrogates, especially artificial intelligence (AI)-based models, present an alternative to conventional numerical models by significantly reducing the forward prediction time. These models can be treated as a reasonably accurate, reduced representation of real physics. Once trained properly, they can be used for fast prediction on unseen parameters [1, 2]. However, it is still uncertain whether a data-driven surrogate model can be trained to achieve the level of accuracy required in engineering design. Recently, it has been pointed out by Wolfram Research that standard AI models cannot easily fulfill the high accuracy requirement of predictive scientific tasks [3]. Furthermore, as suggested by Google Deepmind, the real potential of AI models lies in enhancing, rather than thoroughly replacing, well-established classical numerical algorithms [4]. In addition, the current standard data-driven approaches follow an offline-online scheme, where the offline stage involves a huge amount of training data, which can again be prohibitive. For problems with known physics, this data can be obtained by running multiple expensive simulations relying on standard numerical algorithms. In scenarios involving high-dimensional design spaces governed by parameterized partial differential equations (PDEs), such as in additive manufacturing (AM), conducting repetitive simulations with varying parameters in this offline stage becomes exceedingly expensive both in terms of computation time and data storage." 
+ }, + { + "type": "text", + "bbox": [ + 0.129, + 0.847, + 0.893, + 0.861 + ], + "angle": 0, + "content": "To avoid the prohibitive offline stage, one can try to obtain a surrogate model directly from governing equations" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.88, + 0.505, + 0.89 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.189, + 0.131, + 0.818, + 0.245 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.102, + 0.261, + 0.894, + 0.319 + ], + "angle": 0, + "content": "Figure 1: The parameterized PDE is a PDE that includes parameters \\( \\mathbf{x}_p \\) that can vary and influence the solution \\( \\mathbf{u}(\\mathbf{x}_s, \\mathbf{x}_p, x_t) \\), where \\( \\mathbf{x}_s \\) and \\( x_t \\) are the spatial and time variables, respectively. The a priori approach directly finds a surrogate model from the governing parameterized PDE, whereas the data-driven approach has to solve the parameter-fixed PDE on sampled parameters to generate simulation data, followed by training tasks. FEM: Finite Element Method [5], C-HiDeNN: Convolution Hierarchical Deep-learning Neural Network [6], TAPS: Tensor-decomposition-based A Priori Surrogate, PINN: Physics Informed Neural Network [7]." + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.342, + 0.893, + 0.429 + ], + "angle": 0, + "content": "without generating any data. As shown in Fig. 1 denoted by the words \"A Priori\", this approach aims to find the surrogate model before actually \"seeing\" any data. For example, multilayer perceptron (MLP) architectures have been vastly used in physics-informed neural networks (PINNs) and their variations to approximate solutions to PDEs without requiring data [7, 8]. However, the results of these efforts are underwhelming, as it has been shown that PINN results have often been compared to weak baselines [9], and it is unclear if they guarantee convergence. 
Moreover, this method is still susceptible to high computational costs for both large-scale and high-dimensional problems [10]." + }, + { + "type": "image", + "bbox": [ + 0.111, + 0.438, + 0.891, + 0.643 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.103, + 0.654, + 0.893, + 0.678 + ], + "angle": 0, + "content": "Figure 2: Development history of INN [11]. Figures are borrowed from references: HiDeNN [12], HiDeNN-TD [13], C-HiDeNN [14], C-HiDeNN-TD [6]." + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.691, + 0.895, + 0.862 + ], + "angle": 0, + "content": "Instead of developing solvers solely based on machine learning concepts, a new class of Hierarchical Deep-learning Neural Networks (HiDeNN) has been developed recently. This network architecture incorporates principles from the finite element method (FEM) to construct their architecture [15, 12]. Originally designed to advance FEM as opposed to solve parameterized PDEs, this approach significantly enhances computational accuracy and efficiency for both linear and nonlinear problems compared to standard FEM [16]. HiDeNN was then enhanced by adding an additional hidden layer in the form of a nonlinear convolutional filter, formulating a new neural network architecture named Convolutional HiDeNN (C-HiDeNN) [6, 14]. C-HiDeNN mimics the structure of the generalized finite element method but leverages machine learning to optimize its hyperparameters to further improve accuracy and efficiency. Arbitrary orders of convergence have been observed for C-HiDeNN despite utilizing a linear finite element mesh [14]. Although these methods offers greater accuracy with fewer DoFs, like FEM, they still encounter computational challenges such as balancing memory usage against mesh resolution, which limits their efficiency in modeling ultra large-scale and high-dimensional problems. 
Therefore, it becomes necessary to employ model order reduction" + }, + { + "type": "page_number", + "bbox": [ + 0.493, + 0.88, + 0.505, + 0.89 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.103, + 0.133, + 0.373, + 0.146 + ], + "angle": 0, + "content": "techniques to address these limitations." + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.147, + 0.916, + 0.315 + ], + "angle": 0, + "content": "Model order reduction techniques have been widely used to tackle the ever-growing challenges from high-dimensional and large-scale problems. For example, proper generalized decomposition (PGD) [17, 18, 19] has been proposed to efficiently solve high-dimensional PDEs. Recently, tensor decomposition (TD) has been successfully leveraged within the HiDeNN framework. For example, Zhang showed that HiDeNN combined with TD (HiDeNN-TD) significantly improved the speed of HiDeNN while maintaining higher accuracy [13]. Li proposed C-HiDeNN combined with TD (C-HiDeNN-TD) for extremely large-scale nested topology optimization problems [20]. Recently, Park generalized the HiDeNN-family networks under the umbrella of Interpolating Neural Networks (INNs) and demonstrated that the network can be used for both data-driven learning and data-free (i.e., a priori) solving [11]. The development history of HiDeNN family networks and INN is summarized in Fig. 2. While INN clearly explains how to construct the network architecture, an efficient optimization scheme for solving ultra large-scale and high-dimensional problems remains underdeveloped. In this paper, ultra large-scale problems refer to problems on the zetta-scale \\((10^{21})\\) in terms of DoFs." 
+ }, + { + "type": "text", + "bbox": [ + 0.102, + 0.317, + 0.894, + 0.459 + ], + "angle": 0, + "content": "The demand for high-precision engineering simulations and efficient solution schemes highlights the need for innovative modeling approaches that swiftly solve large-scale problems while optimizing the design space. This research aims to fulfill this need by developing tensor-decomposition-based A Priori Surrogate (TAPS), a data-free predictive AI model, which aims to enhance high-resolution capabilities while simultaneously optimizing computational efficiency with a minimal memory footprint, low data storage needs, and fast prediction. The proposed comprehensive framework sets a foundation for scalable, adaptable, and future-proof solutions to counter the ever-growing complexity in simulation-driven advanced industries. TAPS is particularly well-suited for engineering challenges where: 1) the finite element method and other conventional methods are unsuitable due to excessively long simulation times or high RAM and storage demands needed to achieve high accuracy, 2) the model must accommodate design parameters as inputs, or 3) fast prediction is required once the model is obtained." + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.459, + 0.894, + 0.531 + ], + "angle": 0, + "content": "This paper is structured as follows. We first introduce the formulation of TAPS in section 2. In section 3, we examine the numerical convergence of TAPS for both space-time (S-T) and space-parameter-time (S-P-T) problems (i.e., problems that are dependent on spatial, parametric, and temporal inputs). In section 4, TAPS is applied to large-scale additive manufacturing problems that are considered intractable with standard numerical algorithms. This application effectively demonstrates TAPS's capability to address all of the three identified challenges." + }, + { + "type": "title", + "bbox": [ + 0.104, + 0.55, + 0.185, + 0.565 + ], + "angle": 0, + "content": "2. 
Theory" + }, + { + "type": "title", + "bbox": [ + 0.104, + 0.574, + 0.427, + 0.589 + ], + "angle": 0, + "content": "2.1. Review of C-HiDeNN interpolation theory" + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.591, + 0.894, + 0.69 + ], + "angle": 0, + "content": "Leveraging the universal approximation theorem, multilayer perceptrons (MLPs) have been successfully applied as global basis functions in deep learning-based solvers [7]. However, as shown in Table 2, MLPs have a few potential caveats when approximating PDE solutions. To overcome these limitations, we leverage the Convolutional HiDeNN (C-HiDeNN) interpolation function, which leverages the merits of both locally supported finite element shape functions and the flexibility of machine learning. Note that C-HiDeNN also belongs to the INN category as shown in Fig. 2. C-HiDeNN maintains all the essential finite element approximation properties such as Kronecker delta and partition of unity [14]." + }, + { + "type": "table_caption", + "bbox": [ + 0.249, + 0.712, + 0.747, + 0.723 + ], + "angle": 0, + "content": "Table 2: Comparison of MLP and C-HiDeNN as approximation functions of PDE solutions." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.723, + 0.825, + 0.792 + ], + "angle": 0, + "content": "
MLPC-HiDeNN
Boundary/initial conditionPenalty term in the loss function [7]Automatic satisfaction [6]
Convergence and stabilityStochastic in nature and not guaranteedShown for different PDEs [6]
Numerical integrationQuasi-Monte Carlo integration [21]Gaussian integration [22]
InterpretabilityBlack-box modelInterpretable [11]
" + }, + { + "type": "text", + "bbox": [ + 0.103, + 0.805, + 0.893, + 0.834 + ], + "angle": 0, + "content": "We first review the C-HiDeNN formulation as illustrated in Fig. 3 (a) [14]. A scalar field \\( u(\\pmb{x}) \\) defined in each element within a domain \\( \\Omega_{\\pmb{x}} \\) can be approximated using C-HiDeNN interpolation as:" + }, + { + "type": "equation", + "bbox": [ + 0.305, + 0.841, + 0.894, + 0.873 + ], + "angle": 0, + "content": "\\[\nu _ {e} ^ {h} (\\boldsymbol {x}) = \\sum_ {i \\in A ^ {e}} N _ {i} (\\boldsymbol {x}) \\sum_ {j \\in A _ {s} ^ {i}} \\mathcal {W} _ {s, a, p, j} ^ {i} (\\boldsymbol {x}) u _ {j} = \\sum_ {k \\in A _ {s} ^ {e}} \\widetilde {N} _ {k} (\\boldsymbol {x}; s, a, p) u _ {k} \\tag {1}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.88, + 0.505, + 0.89 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.191, + 0.135, + 0.764, + 0.189 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.199, + 0.191, + 0.315, + 0.199 + ], + "angle": 0, + "content": "- \\(A^e\\): nodes at element \\(e\\)" + }, + { + "type": "image_footnote", + "bbox": [ + 0.2, + 0.2, + 0.406, + 0.208 + ], + "angle": 0, + "content": "- \\(A_{\\mathrm{s}}^{i}\\): patch domain at node \\(i\\) with patch size \\(s\\)" + }, + { + "type": "image_footnote", + "bbox": [ + 0.2, + 0.209, + 0.468, + 0.219 + ], + "angle": 0, + "content": "- \\( A_{s}^{e} = \\bigcup_{i\\in A^{e}}A_{s}^{i} \\): patch nodes of element \\( e \\) with patch size \\( s \\)" + }, + { + "type": "list", + "bbox": [ + 0.199, + 0.191, + 0.468, + 0.219 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.521, + 0.195, + 0.626, + 0.202 + ], + "angle": 0, + "content": "- s: patch size, integer" + }, + { + "type": "image_footnote", + "bbox": [ + 0.521, + 0.204, + 0.628, + 0.211 + ], + "angle": 0, + "content": "a: dilation parameter" + }, + { + "type": "image_footnote", + "bbox": 
[ + 0.521, + 0.213, + 0.712, + 0.221 + ], + "angle": 0, + "content": "- \\( p \\) : reproducing polynomial order,integer" + }, + { + "type": "list", + "bbox": [ + 0.521, + 0.195, + 0.712, + 0.221 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.189, + 0.228, + 0.482, + 0.365 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.19, + 0.375, + 0.389, + 0.386 + ], + "angle": 0, + "content": "- \\( W_{a,p,j}^{i} \\): convolution interpolant for node \\( j \\)" + }, + { + "type": "image_footnote", + "bbox": [ + 0.19, + 0.386, + 0.412, + 0.394 + ], + "angle": 0, + "content": "- \\(R_{i}(x)\\): radial basis function centered at node \\(i\\)" + }, + { + "type": "image_footnote", + "bbox": [ + 0.19, + 0.395, + 0.29, + 0.403 + ], + "angle": 0, + "content": "1 \\(G\\) : moment matrix" + }, + { + "type": "image_footnote", + "bbox": [ + 0.19, + 0.404, + 0.375, + 0.413 + ], + "angle": 0, + "content": "1 \\(\\cdot x^{A_s^i}\\) : nodal coordinates of nodes in \\(A_{s}^{i}\\)" + }, + { + "type": "list", + "bbox": [ + 0.19, + 0.375, + 0.412, + 0.413 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.484, + 0.226, + 0.808, + 0.418 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.103, + 0.435, + 0.894, + 0.46 + ], + "angle": 0, + "content": "Figure 3: (a) Covolution patch in 1D C-HiDeNN shape function (b) Construction of convolution patch function (c) C-HiDeNN shape function as MLP with 3 hidden layers" + }, + { + "type": "text", + "bbox": [ + 0.103, + 0.479, + 0.894, + 0.566 + ], + "angle": 0, + "content": "where \\( u_{j} \\) is the nodal value and \\( u_{j} = u(\\pmb{x}_{j}) \\); \\( N_{i} \\) is the linear finite element shape function at node \\( j \\) centered in \\( i \\)-th nodal patch; \\( \\mathcal{W}_{s,a,p,j}^{i} \\) is the convolution patch function at node \\( i \\) that can be represented with a partially connected MLP as illustrated 
in Fig. 3 (b). The convolution patch functions are controlled by three hyperparameters: patch size \\( s \\) that controls nodal connectivity, dilation parameter \\( a \\) that normalizes distances between patch nodes, and reproducing order \\( p \\) that defines types/orders of activation functions to be reproduced by the patch functions. Due to the inherent local support nature of both \\( N_{i} \\) and \\( \\mathcal{W}_{s,a,p,j}^{i} \\), the C-HiDeNN shape function \\( \\widetilde{N}_k(\\pmb{x};s,a,p) \\) is also locally supported." + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.565, + 0.751, + 0.579 + ], + "angle": 0, + "content": "Similar to standard finite element, the approximation for the solution field can be written as:" + }, + { + "type": "equation", + "bbox": [ + 0.392, + 0.585, + 0.893, + 0.622 + ], + "angle": 0, + "content": "\\[\nu ^ {h} (\\boldsymbol {x}) = \\sum_ {k} ^ {n n o d e} \\widetilde {N} _ {k} (\\boldsymbol {x}; s _ {k}, a _ {k}, p _ {k}) u _ {k} \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.103, + 0.628, + 0.894, + 0.684 + ], + "angle": 0, + "content": "where \\( nnode \\) is the total number of nodes and \\( k \\) is the nodal index. It should be noted that the hyperparameters \\( s, a, p \\) can vary across nodes since C-HiDeNN can optimize these hyperparameters like machine learning parameters, rendering an adaptable functional space without altering the number of global nodes or hidden layers. This clearly distinguishes C-HiDeNN from MLP, where the activation functions and network architectures are mostly fixed." 
+ }, + { + "type": "text", + "bbox": [ + 0.103, + 0.684, + 0.892, + 0.712 + ], + "angle": 0, + "content": "The C-HiDeNN shape function \\(\\widetilde{N}_k(\\pmb{x})\\) satisfies Kronecker-delta property at nodal positions [6] (hyperparameters \\(s, a, p\\) are dropped for brevity):" + }, + { + "type": "equation", + "bbox": [ + 0.456, + 0.711, + 0.892, + 0.728 + ], + "angle": 0, + "content": "\\[\n\\widetilde {N} _ {k} \\left(\\boldsymbol {x} _ {l}\\right) = \\delta_ {k l} \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.104, + 0.731, + 0.378, + 0.744 + ], + "angle": 0, + "content": "where the Kronecker delta is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.415, + 0.741, + 0.893, + 0.773 + ], + "angle": 0, + "content": "\\[\n\\delta_ {k l} = \\left\\{ \\begin{array}{l l} 0 & \\text {i f} k \\neq l, \\\\ 1 & \\text {i f} k = l. \\end{array} \\right. \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.103, + 0.775, + 0.891, + 0.801 + ], + "angle": 0, + "content": "Thus, at the Dirichlet boundary node \\( \\mathbf{x}_b \\) where \\( u(\\mathbf{x}_b) = u_b \\), C-HiDeNN automatically satisfies the Dirichlet boundary condition:" + }, + { + "type": "equation", + "bbox": [ + 0.4, + 0.801, + 0.893, + 0.838 + ], + "angle": 0, + "content": "\\[\nu ^ {h} \\left(\\boldsymbol {x} _ {b}\\right) = \\sum_ {k} ^ {n n o d e} \\widetilde {N} _ {k} \\left(\\boldsymbol {x} _ {b}\\right) u _ {k} = u _ {b} \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.842, + 0.894, + 0.872 + ], + "angle": 0, + "content": "Going forward, we will employ the C-HiDeNN shape function \\(\\widetilde{N}_k(\\pmb{x})\\) as the locally supported basis function for the interpolation." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.879, + 0.505, + 0.89 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.104, + 0.133, + 0.351, + 0.147 + ], + "angle": 0, + "content": "2.2. 
Discrete Tensor decomposition" + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.15, + 0.894, + 0.193 + ], + "angle": 0, + "content": "Tensor decomposition is a mathematical technique used to break down a high-dimensional tensor, such as a 3D finite element solution, into a set of simpler components, making it easier to analyze, store, and process [23]. It generalizes matrix decomposition methods like singular value decomposition (SVD) to higher-order tensors." + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.194, + 0.894, + 0.294 + ], + "angle": 0, + "content": "Consider a cubic spatial domain \\(\\Omega_{x}\\) discretized with a regular Cartesian grid where each grid point (or node) stores a scalar value (see Fig. 4). The discrete nodal values can be represented as a 3rd order tensor \\(u_{IJK}\\) where \\(I = 1,\\dots,n_1;J = 1,\\dots,n_2;K = 1,\\dots,n_3\\). The number of DoFs for this structured mesh is \\(n_1\\times n_2\\times n_3\\). When high resolution is required for the analysis, as is the case in AM simulations, the number of DoFs can be extremely large. To effectively reduce the DoFs, different discrete tensor decomposition methods can be used to project the original 3rd order tensor into lower order tensors. 
In this paper, we focus on CANDECOMP/PARAFAC (CP) decomposition, where the higher-order tensors are approximated using a finite sum of products of 1D vectors [23]:" + }, + { + "type": "equation", + "bbox": [ + 0.397, + 0.303, + 0.893, + 0.341 + ], + "angle": 0, + "content": "\\[\nu _ {I J K} \\approx u _ {I J K} ^ {T D} = \\sum_ {m = 1} ^ {M} u _ {I m} ^ {[ 1 ]} u _ {J m} ^ {[ 2 ]} u _ {K m} ^ {[ 3 ]} \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.353, + 0.894, + 0.397 + ], + "angle": 0, + "content": "where \\( M \\) is defined as the total number of modes in CP decomposition; \\( u_{lm}^{[1]} \\) refers to the projected 1D vector in the first dimension and \\( m \\)-th mode; the superscript \\( [d] \\) represents the dimension index and \\( d = 1,2,3 \\); the 1st subscript \\( I \\) is the nodal index, and the 2nd subscript \\( m \\) refers to the modal index." + }, + { + "type": "image", + "bbox": [ + 0.189, + 0.431, + 0.436, + 0.582 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.299, + 0.592, + 0.321, + 0.606 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.478, + 0.412, + 0.811, + 0.573 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.577, + 0.595, + 0.6, + 0.609 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image_caption", + "bbox": [ + 0.263, + 0.623, + 0.733, + 0.635 + ], + "angle": 0, + "content": "Figure 4: (a) 3D Cartesian mesh. (b) Nodal values can be treated as a 3rd order tensor." + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.65, + 0.894, + 0.707 + ], + "angle": 0, + "content": "As can be seen from Eq. 6, with CP decomposition, the total number of DoFs can be reduced from \\( n_1 \\times n_2 \\times n_3 \\) to \\( M \\times (n_1 + n_2 + n_3) \\). 
Assuming \\( M \\) does not increase when the mesh is refined along each dimension, then the solution matrix \\( u_{IJK} \\) will have cubic growth, whereas CP decomposition \\( \\sum_{m=1}^{M} u_{Im}^{[1]} u_{Jm}^{[2]} u_{Km}^{[3]} \\) only exhibits linear growth, as shown in Fig. 5 (a). This reduction is paramount to making large-scale simulation achievable." + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.707, + 0.894, + 0.821 + ], + "angle": 0, + "content": "As an extension of the previous case, we consider \\(D\\) dimensional general time-dependent parametric problems where the independent variables \\((x_{1},x_{2},\\ldots ,x_{D})\\) can be classified into 3 different categories, namely, spatial variables \\(\\pmb{x}_s\\), parametric variables \\(\\pmb{x}_p\\), and temporal variable \\(x_{t}\\). Spatial variables \\(\\pmb{x}_s\\) describe the spatial coordinates of the problem. Parametric variables \\(\\pmb{x}_p\\) can represent any PDE coefficients, initial/boundary conditions, or geometry descriptors as extra-coordinates. The temporal variable \\(x_{t}\\) represents time. Assuming the spatial domain \\(\\Omega_{\\pmb{x}_s}\\) is cubic, the parametric domain \\(\\Omega_{\\pmb{x}_p}\\) is hypercubic and Cartesian grids are used for discretization, then the nodal solution to these problems can be written as a discrete \\(D\\)-th order tensor \\(u_{I_1I_2,\\dots,I_D}\\). Similarly, CP decomposition can be used to effectively decompose higher-order tensors into a finite sum of tensor products of 1D vectors." 
+ }, + { + "type": "equation", + "bbox": [ + 0.361, + 0.831, + 0.893, + 0.868 + ], + "angle": 0, + "content": "\\[\nu _ {I _ {1} I _ {2}, \\dots , I _ {D}} \\approx u _ {I _ {1} I _ {2}, \\dots , I _ {D}} ^ {T D} = \\sum_ {m = 1} ^ {M} u _ {I _ {1} m} ^ {[ 1 ]} u _ {I _ {2} m} ^ {[ 2 ]} \\dots u _ {I _ {D} m} ^ {[ D ]} \\tag {7}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.88, + 0.505, + 0.89 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.189, + 0.131, + 0.49, + 0.319 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.355, + 0.32, + 0.378, + 0.331 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.494, + 0.131, + 0.811, + 0.319 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.673, + 0.32, + 0.696, + 0.334 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image_caption", + "bbox": [ + 0.206, + 0.349, + 0.792, + 0.362 + ], + "angle": 0, + "content": "Figure 5: Comparison of number of DoFs, (a) in terms of mesh size \\( n \\), (b) in terms of problem dimension \\( D \\)" + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.385, + 0.894, + 0.429 + ], + "angle": 0, + "content": "If every dimension is discretized into \\( n \\) grid points, then a \\( D \\)-th order tensor will have DoFs of \\( n^D \\), whereas CP decomposition only requires \\( M \\times D \\times n \\) DoFs. Consequently, CP decomposition can dramatically reduce the total DoFs of general high-dimensional parametric problems, as shown in Fig. 5 (b)." + }, + { + "type": "title", + "bbox": [ + 0.104, + 0.442, + 0.317, + 0.456 + ], + "angle": 0, + "content": "2.3. 
TD interpolation in TAPS" + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.46, + 0.895, + 0.529 + ], + "angle": 0, + "content": "Assume that the \\(D\\)-th order tensor \\(u_{I_1I_2,\\dots,I_D}\\) represents a \\(D\\)-input one-output continuous function \\(u(\\pmb{x})\\) measured at a Cartesian grid discretized with \\(I_1, I_2, \\dots, I_D\\) grid points in each input dimension. The discrete tensor decomposition \\(u_{I_1I_2,\\dots,I_D}^{TD}\\) can only approximate the function \\(u(\\pmb{x})\\) at these grid points. In this case, how can we measure the value of the function on an arbitrary input \\(\\pmb{x}\\) with tensor decomposition? A natural answer is using C-HiDeNN interpolation functions." + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.53, + 0.894, + 0.575 + ], + "angle": 0, + "content": "Similar to standard finite element shape functions, for a 3D spatial problem discretized with a Cartesian grid, a 3D C-HiDeNN interpolation function can be rewritten as a tensor product of one-dimensional C-HiDeNN interpolation functions (hyperparameters \\(s\\), \\(a\\) and \\(p\\) will be dropped from now on for brevity):" + }, + { + "type": "equation", + "bbox": [ + 0.36, + 0.582, + 0.893, + 0.602 + ], + "angle": 0, + "content": "\\[\n\\widetilde {N} _ {k} \\left(x _ {1}, x _ {2}, x _ {3}\\right) = \\widetilde {N} _ {I} ^ {[ 1 ]} \\left(x _ {1}\\right) \\widetilde {N} _ {J} ^ {[ 2 ]} \\left(x _ {2}\\right) \\widetilde {N} _ {K} ^ {[ 3 ]} \\left(x _ {3}\\right) \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.611, + 0.893, + 0.638 + ], + "angle": 0, + "content": "where the superscript refers to the dimension of the 1D C-HiDeNN shape function. Therefore, we can rewrite Eq. 
2 as:" + }, + { + "type": "equation", + "bbox": [ + 0.332, + 0.65, + 0.893, + 0.68 + ], + "angle": 0, + "content": "\\[\nu ^ {h} \\left(\\boldsymbol {x} _ {s}\\right) = \\sum_ {I} \\sum_ {J} \\sum_ {K} \\widetilde {N} _ {I} ^ {[ 1 ]} \\left(x _ {1}\\right) \\widetilde {N} _ {J} ^ {[ 2 ]} \\left(x _ {2}\\right) \\widetilde {N} _ {K} ^ {[ 3 ]} \\left(x _ {3}\\right) u _ {I J K} \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.103, + 0.686, + 0.893, + 0.716 + ], + "angle": 0, + "content": "where \\( \\boldsymbol{x}_s = [x_1, x_2, x_3] \\) is the spatial variable. Plugging the CP decomposition form of the tensor \\( u_{IJK}^{TD} \\) into Eq. 6 into Eq. 9 and rearranging the terms, we have:" + }, + { + "type": "equation", + "bbox": [ + 0.271, + 0.725, + 0.893, + 0.763 + ], + "angle": 0, + "content": "\\[\nu ^ {T D} \\left(\\boldsymbol {x} _ {s}\\right) = \\sum_ {m = 1} ^ {M} \\left[ \\sum_ {I} \\widetilde {N} _ {I} ^ {[ 1 ]} \\left(x _ {1}\\right) u _ {I m} ^ {[ 1 ]} \\right] \\left[ \\sum_ {J} \\widetilde {N} _ {J} ^ {[ 2 ]} \\left(x _ {2}\\right) u _ {J m} ^ {[ 2 ]} \\right] \\left[ \\sum_ {K} \\widetilde {N} _ {K} ^ {[ 3 ]} \\left(x _ {3}\\right) u _ {K m} ^ {[ 3 ]} \\right] \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.768, + 0.893, + 0.797 + ], + "angle": 0, + "content": "Eq. 10 represents the TD interpolation (with C-HiDeNN) for a 3D spatial problem. Extending this framework to a general \\( D \\)-dimensional space-parameter-time (S-P-T) problem with independent variables defined in Eq. 
11:" + }, + { + "type": "equation", + "bbox": [ + 0.375, + 0.812, + 0.893, + 0.829 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {x} = ( \\underbrace {x _ {1}, \\dots , x _ {S}} _ {\\text {spatial variables}} , \\underbrace {x _ {S + 1}, \\dots , x _ {P}} _ {\\text {parametric variables}} , x _ {t}) \\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.414, + 0.831, + 0.595, + 0.841 + ], + "angle": 0, + "content": "spatial variables parametric variables" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.88, + 0.504, + 0.89 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.104, + 0.132, + 0.64, + 0.147 + ], + "angle": 0, + "content": "Then the TD interpolation to the S-P-T solution field can be written as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.113, + 0.155, + 0.894, + 0.207 + ], + "angle": 0, + "content": "\\[\nu ^ {T D} \\left(\\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, x _ {t}\\right) = \\sum_ {m = 1} ^ {M} \\underbrace {\\left[ \\sum_ {I _ {1}} \\widetilde {N} _ {I _ {1}} ^ {[ 1 ]} \\left(x _ {1}\\right) u _ {I _ {1} m} ^ {[ 1 ]} \\right] \\cdots \\left[ \\sum_ {I _ {S}} \\widetilde {N} _ {I _ {S}} ^ {[ S ]} \\left(x _ {I _ {S}}\\right) u _ {I _ {S} m} ^ {[ S ]} \\right]} _ {\\text {s p a t i a l}} \\underbrace {\\left[ \\sum_ {I _ {S + 1}} \\widetilde {N} _ {I _ {S + 1}} ^ {[ S + 1 ]} \\left(x _ {S + 1}\\right) u _ {I _ {S + 1} m} ^ {[ S + 1 ]} \\right] \\cdots \\left[ \\sum_ {P} \\widetilde {N} _ {I _ {P}} ^ {[ P ]} \\left(x _ {P}\\right) u _ {I _ {P} m} ^ {[ P ]} \\right]} _ {\\text {p a r a m e t r i c}} \\underbrace {\\left[ \\sum_ {I _ {D}} \\widetilde {N} _ {I _ {D}} ^ {[ D ]} (t) u _ {I _ {D}} ^ {[ D ]} \\right]} _ {\\text {t e m p o r a l}} \\tag {12}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.104, + 0.215, + 0.492, + 0.23 + ], + "angle": 0, + "content": "This can be further simplified using the product notation:" + }, + { + "type": "equation", + "bbox": [ + 0.359, + 0.239, + 0.893, + 
0.279 + ], + "angle": 0, + "content": "\\[\nu ^ {T D} \\left(\\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, x _ {t}\\right) = \\sum_ {m = 1} ^ {M} \\prod_ {d = 1} ^ {D} \\sum_ {I _ {d}} \\widetilde {N} _ {I _ {d}} ^ {[ d ]} \\left(x _ {d}\\right) u _ {I _ {d} m} ^ {[ d ]} \\tag {13}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.29, + 0.893, + 0.32 + ], + "angle": 0, + "content": "where \\(\\widetilde{N}_{I_d}^{[d]}(x_d)\\) refers to the 1D C-HiDeNN shape function in the \\(d\\)-th dimension; \\(u_{I_d m}^{[d]}\\) is the nodal solution for dimension \\(d\\) and mode \\(m\\)." + }, + { + "type": "title", + "bbox": [ + 0.104, + 0.334, + 0.43, + 0.349 + ], + "angle": 0, + "content": "2.4. The General S-P-T Galerkin form of TAPS" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.352, + 0.86, + 0.366 + ], + "angle": 0, + "content": "Similar to FEM, TAPS adopts the weighted-sum formulation to solve PDEs. Consider a general S-P-T PDE:" + }, + { + "type": "equation", + "bbox": [ + 0.439, + 0.38, + 0.893, + 0.395 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} (u (\\boldsymbol {x})) = f (\\boldsymbol {x}), \\tag {14}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.103, + 0.402, + 0.892, + 0.431 + ], + "angle": 0, + "content": "where \\(\\mathcal{L}\\) is the differential operator; the independent variable vector \\(\\pmb{x} = (x_{s}, x_{p}, x_{t})\\); \\(f(\\pmb{x})\\) is the forcing function. Table 3 lists different examples of operator \\(\\mathcal{L}\\) and corresponding dependent and independent variables." + }, + { + "type": "table_caption", + "bbox": [ + 0.277, + 0.453, + 0.72, + 0.464 + ], + "angle": 0, + "content": "Table 3: Examples for differential operators, dependent and independent variables" + }, + { + "type": "table", + "bbox": [ + 0.105, + 0.464, + 0.907, + 0.535 + ], + "angle": 0, + "content": "
PDEDifferential operator LDependent variablexsxpxt
∂2u/∂x12 + ∂2u/∂x22 + ... + ∂2u/∂x2D = f(x)∂2/∂x12 + ∂2/∂x22 + ... + ∂2/∂x2Du(x1, x2, ...,xD)--
μui, jj + (μ + λ)uj,ij + Fi = e(x12+x22+x32)μ(·)i,jj + (μ + λ)(·)j,ijui, i = 1, 2, 3(x1, x2, x3)(λ,μ)-
ρcp,du/dt + k(∂2u/∂x12 + ∂2u/∂x22 + ∂2u/∂x32) = Pe(x12+x22+x32)ρcp,du/dt + k(∂2/∂x12 + ∂2/∂x22 + ∂2/∂x32)u(x1, x2, x3)(ρ, cp, k, P)t
" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.547, + 0.704, + 0.561 + ], + "angle": 0, + "content": "The weighted-sum residual form of the PDE with TD interpolation can be written as:" + }, + { + "type": "equation", + "bbox": [ + 0.362, + 0.571, + 0.893, + 0.602 + ], + "angle": 0, + "content": "\\[\n\\int_ {\\Omega} \\delta u ^ {T D} (\\boldsymbol {x}) \\left[ \\mathcal {L} \\left(u ^ {T D} (\\boldsymbol {x})\\right) - f (\\boldsymbol {x}) \\right] d \\Omega = 0 \\tag {15}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.103, + 0.606, + 0.891, + 0.621 + ], + "angle": 0, + "content": "where \\( u^{TD} \\) is the approximation of the solution (i.e., trial function), \\( \\delta u^{TD} \\) is the test function, and \\( d\\Omega = d\\Omega_{x_s}d\\Omega_{x_p}d\\Omega_{x_t} \\)." + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.621, + 0.894, + 0.705 + ], + "angle": 0, + "content": "Depending on how \\(\\delta u^{TD}\\) is adopted, different mathematical formulations can be obtained. If the test function resides in the same function space as the trial function, it becomes the Galerkin formulation. When the test function space differs from the trial function space, it becomes the Petrov-Galerkin formulation [22]. If the Dirac delta function is used for the test function, then Eq. 15 corresponds to the collocation method [24]. In this paper, we employ the Galerkin formulation. However, the proposed framework is versatile and can be extended to accommodate other formulations as well." + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.707, + 0.894, + 0.791 + ], + "angle": 0, + "content": "In Eq. 12, the entire S-P-T domain is approximated using TD interpolation. However, this approach may result in a large system of equations due to the rapid increase in the number of TD modes for certain cases. For example, if the forcing function represents a moving source function in Eq. 14), this complexity may arise. 
To maintain computational efficiency, we can partition the temporal domain into a series of time slabs. As illustrated in Fig. 6(a), the S-P-T continuum is divided into S-P-T slabs \\(\\mathcal{T}_1,\\mathcal{T}_2,\\dots ,\\mathcal{T}_T\\) . The solution within each time slab is then approximated individually using the TD interpolation." + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.792, + 0.894, + 0.835 + ], + "angle": 0, + "content": "Between consecutive S-P-T slabs, either a continuous or discontinuous formulation can be employed. As shown in Fig. 6(b) for the continuous Galerkin scheme, the continuity of the solution in time is enforced by imposing the solution at the end of slab \\(\\mathcal{T}_{i-1}\\) as the initial condition of \\(\\mathcal{T}_i\\):" + }, + { + "type": "equation", + "bbox": [ + 0.375, + 0.846, + 0.893, + 0.865 + ], + "angle": 0, + "content": "\\[\n{ } ^ { [ \\mathcal { T } + 1 ] } u ( \\boldsymbol { x } _ { s } , \\boldsymbol { x } _ { p } , 0 ) = { } ^ { [ \\mathcal { T } ] } u ( \\boldsymbol { x } _ { s } , \\boldsymbol { x } _ { p } , x _ { t } ^ { m a x } ) \\tag {16}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.88, + 0.504, + 0.89 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.103, + 0.132, + 0.894, + 0.162 + ], + "angle": 0, + "content": "Discontinuous Galerkin method can be used when a discontinuity is allowed between S-P-T slabs, as illustrated in Fig. 6(c). Discontinuity in time can be modeled using the jump operator \\(\\llbracket \\dots \\rrbracket\\) [25]." 
+ }, + { + "type": "equation", + "bbox": [ + 0.291, + 0.172, + 0.894, + 0.196 + ], + "angle": 0, + "content": "\\[\n\\llbracket u \\left(\\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, t\\right) \\rrbracket = \\lim _ {\\epsilon \\rightarrow 0 ^ {+}} \\left(^ {\\mathcal {T} + 1} u \\left(\\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, \\epsilon\\right) - ^ {\\mathcal {T}} u \\left(\\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, x _ {t} ^ {\\max } - \\epsilon\\right)\\right) \\tag {17}\n\\]" + }, + { + "type": "image", + "bbox": [ + 0.147, + 0.215, + 0.409, + 0.338 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.253, + 0.341, + 0.272, + 0.353 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.41, + 0.213, + 0.617, + 0.323 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.504, + 0.34, + 0.522, + 0.352 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image", + "bbox": [ + 0.641, + 0.214, + 0.85, + 0.323 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.737, + 0.341, + 0.755, + 0.352 + ], + "angle": 0, + "content": "(c)" + }, + { + "type": "image_caption", + "bbox": [ + 0.103, + 0.367, + 0.893, + 0.39 + ], + "angle": 0, + "content": "Figure 6: (a) Multiple S-P-T slabs along the temporal dimension. (b) Continuous Galerkin: the solution is continuous across different S-P-T slabs. (c) Discontinuous Galerkin: jumps are allowed across the slab boundaries" + }, + { + "type": "text", + "bbox": [ + 0.103, + 0.401, + 0.893, + 0.444 + ], + "angle": 0, + "content": "Keeping in mind that this approach can be applied generally to a range of engineering problems, we will demonstrate an example of the Galerkin formulation using a single space-parameter-time partition (S-P-T) slab in the remainder of this section. 
For illustrative purposes, the transient heat transfer equation will be utilized:" + }, + { + "type": "equation", + "bbox": [ + 0.371, + 0.452, + 0.893, + 0.468 + ], + "angle": 0, + "content": "\\[\n\\rho c _ {p} \\nabla_ {x _ {t}} u - \\nabla_ {\\boldsymbol {x} _ {s}} \\cdot k \\nabla_ {\\boldsymbol {x} _ {s}} u = f (\\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, x _ {t}) \\tag {18}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.103, + 0.476, + 0.892, + 0.504 + ], + "angle": 0, + "content": "as we focus on the example of modeling the laser powder bed fusion (LPBF) process in additive manufacturing (AM). In an LPBF simulation, we adopt the following time-dependent moving heat source function:" + }, + { + "type": "equation", + "bbox": [ + 0.271, + 0.511, + 0.893, + 0.551 + ], + "angle": 0, + "content": "\\[\nf \\left(\\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, x _ {t}\\right) = \\frac {2 \\eta P}{\\pi r ^ {2} d _ {\\nu}} \\exp \\left(- \\frac {2 \\left((x - x _ {0} (t)) ^ {2} + (y - y _ {0} (t)) ^ {2}\\right)}{r ^ {2}}\\right) \\cdot \\mathbf {1} _ {\\left(x _ {3} \\geq d _ {\\nu}\\right)} \\tag {19}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.103, + 0.558, + 0.894, + 0.656 + ], + "angle": 0, + "content": "Summarizing the independent variables in Eq. 18, there are spatial variables \\( \\mathbf{x}_s = (x_1, x_2, x_3) \\); parametric variables \\( \\mathbf{x}_p = (k, \\rho, c_p, \\eta, P, r, d_v) \\); and a temporal variable \\( x_t = t \\). Among the parametric variables, \\( k \\) is conductivity; \\( \\rho \\) is the material density; \\( c_p \\) is heat capacity; \\( \\eta \\) is the material absorptivity; \\( P \\) represents laser power; \\( r \\) is the standard deviation that characterizes the width of the heat source; \\( d_v \\) is the penetration depth of the heat source. In Eq. 
19, \\( [x_0(t), y_0(t)] \\) represents the center of the moving heat source; \\( \\mathbf{1}_{(x_3 \\geq d_v)} \\) is the indicator function where \\( \\mathbf{1}_{(x_3 \\geq d_v)} = 1 \\) if \\( x_3 \\geq d_v \\) or \\( \\mathbf{1}_{(x_3 \\geq d_v)} = 0 \\) if \\( x_3 < d_v \\). Note that the discretization of the material parameters, in particular, in a random field setting, has been previously proposed by Liu et al. [26, 27]." + }, + { + "type": "text", + "bbox": [ + 0.103, + 0.658, + 0.892, + 0.685 + ], + "angle": 0, + "content": "As shown in the schematic below, we classify the boundary surfaces into 2 categories: the Dirichlet boundary surface \\(\\Gamma_{D}\\) and the Neumann boundary surface \\(\\Gamma_{N}\\)." + }, + { + "type": "text", + "bbox": [ + 0.103, + 0.686, + 0.894, + 0.727 + ], + "angle": 0, + "content": "A uniform ambient temperature is used as the initial condition. The bottom of the powder bed is subject to the Dirichlet boundary condition and the Neumann boundary conditions are prescribed on the other surfaces. The initial and boundary conditions are:" + }, + { + "type": "equation", + "bbox": [ + 0.398, + 0.729, + 0.531, + 0.743 + ], + "angle": 0, + "content": "\\[\nu (\\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, 0) | _ {\\Omega} = u _ {0},\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.398, + 0.745, + 0.892, + 0.761 + ], + "angle": 0, + "content": "\\[\n\\left. 
u \\left(\\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, x _ {t}\\right) \\right| _ {\\Gamma_ {D}} = u _ {0}, \\tag {20}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.398, + 0.764, + 0.597, + 0.779 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {n} \\cdot \\boldsymbol {q} | _ {\\Gamma_ {N}} = q _ {\\text {c o n v}} + q _ {\\text {r a d}} + q _ {\\text {e v a p}}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.103, + 0.783, + 0.893, + 0.811 + ], + "angle": 0, + "content": "where \\( u_{0} \\) is the ambient temperature, \\( q_{conv} \\) accounts for free convection, \\( q_{rad} \\) accounts for radiation, and \\( q_{evap} \\) imposes evaporative cooling when any material surface reaches the evaporation temperature [28]. Each flux is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.394, + 0.822, + 0.573, + 0.836 + ], + "angle": 0, + "content": "\\[\nq _ {c o n v} = h _ {c o n v} [ u (x, t) - u _ {0} ],\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.395, + 0.839, + 0.892, + 0.856 + ], + "angle": 0, + "content": "\\[\nq _ {r a d} = - \\sigma_ {S B} \\epsilon \\left(u ^ {4} \\left(\\boldsymbol {x} _ {s}, x _ {t}\\right) - u _ {0} ^ {4}\\right), \\tag {21}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.395, + 0.859, + 0.536, + 0.874 + ], + "angle": 0, + "content": "\\[\nq _ {e v a p} = - m _ {e v a p} L _ {e v a p}.\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.493, + 0.88, + 0.505, + 0.89 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.349, + 0.136, + 0.648, + 0.262 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.281, + 0.275, + 0.715, + 0.287 + ], + "angle": 0, + "content": "Figure 7: Transient heat transfer with initial condition and boundary conditions." 
+ }, + { + "type": "text", + "bbox": [ + 0.103, + 0.311, + 0.894, + 0.368 + ], + "angle": 0, + "content": "where \\(\\sigma_{SB}\\) is the Stefan-Boltzmann constant; \\(\\epsilon\\) is the material emissivity; \\(u_0\\) is the ambient temperature; \\(h_{conv}\\) is the convection coefficient of the surrounding gas, \\(m_{evap}\\) is the mass evaporation flux and \\(L_{evap}\\) is the heat of evaporation. In the following numerical examples, we only consider the free convection term in the Neumann boundary condition. The solution to Eq. 18 is approximated using TD interpolation function:" + }, + { + "type": "equation", + "bbox": [ + 0.347, + 0.378, + 0.893, + 0.415 + ], + "angle": 0, + "content": "\\[\nu ^ {T D} \\left(\\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, x _ {t}\\right) = \\sum_ {m = 1} ^ {M} u _ {\\boldsymbol {x} _ {s}} ^ {(m)} \\left(\\boldsymbol {x} _ {s}\\right) u _ {\\boldsymbol {x} _ {p}} ^ {(m)} \\left(\\boldsymbol {x} _ {p}\\right) u _ {x _ {t}} ^ {(m)} \\left(x _ {t}\\right) \\tag {22}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.103, + 0.425, + 0.893, + 0.468 + ], + "angle": 0, + "content": "Here a general notation is employed to represent different types of components in Eq. 22. For example, the spatial component \\( u_{\\boldsymbol{x}_s}^{(m)}(\\boldsymbol{x}_s) \\) is equivalent to \\( u_{x_1}^{(m)}(x_1)u_{x_2}^{(m)}(x_2)u_{x_3}^{(m)}(x_3) \\). 
The corresponding test function can be obtained using the variational principle:" + }, + { + "type": "equation", + "bbox": [ + 0.14, + 0.488, + 0.893, + 0.527 + ], + "angle": 0, + "content": "\\[\n\\delta u ^ {T D} \\left(\\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, x _ {t}\\right) = \\sum_ {m = 1} ^ {M} \\left[ \\delta u _ {\\boldsymbol {x} _ {s}} ^ {(m)} \\left(\\boldsymbol {x} _ {s}\\right) u _ {\\boldsymbol {x} _ {p}} ^ {(m)} \\left(\\boldsymbol {x} _ {p}\\right) u _ {x _ {t}} ^ {(m)} \\left(x _ {t}\\right) + u _ {\\boldsymbol {x} _ {s}} ^ {(m)} \\left(\\boldsymbol {x} _ {s}\\right) \\delta u _ {\\boldsymbol {x} _ {p}} ^ {(m)} \\left(\\boldsymbol {x} _ {p}\\right) u _ {x _ {t}} ^ {(m)} \\left(x _ {t}\\right) + u _ {\\boldsymbol {x} _ {s}} ^ {(m)} \\left(\\boldsymbol {x} _ {s}\\right) u _ {\\boldsymbol {x} _ {p}} ^ {(m)} \\left(\\boldsymbol {x} _ {p}\\right) \\delta u _ {x _ {t}} ^ {(m)} \\left(x _ {t}\\right) \\right] \\tag {23}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.104, + 0.536, + 0.84, + 0.551 + ], + "angle": 0, + "content": "Plugging the trial and test functions, the S-P-T Galerkin form of Eq. 18 can be obtained by following Eq. 
15:" + }, + { + "type": "equation", + "bbox": [ + 0.33, + 0.56, + 0.893, + 0.59 + ], + "angle": 0, + "content": "\\[\n\\int_ {\\Omega} \\delta u ^ {T D} \\left[ \\rho c _ {p} \\nabla_ {x _ {t}} u ^ {T D} - \\nabla_ {x _ {s}} \\cdot k \\nabla_ {x _ {s}} u ^ {T D} - f \\right] d \\Omega = 0 \\tag {24}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.103, + 0.594, + 0.892, + 0.621 + ], + "angle": 0, + "content": "Using integration by parts on the diffusion term, we get the corresponding general S-P-T Galerkin weak form in TAPS formulation:" + }, + { + "type": "equation", + "bbox": [ + 0.112, + 0.643, + 0.893, + 0.677 + ], + "angle": 0, + "content": "\\[\n\\int_ {\\Omega} \\delta u ^ {T D} \\rho c _ {p} \\nabla_ {x _ {t}} u ^ {T D} d \\Omega + \\int_ {\\Omega} \\nabla_ {\\boldsymbol {x} _ {s}} \\delta u ^ {T D} \\cdot k \\nabla_ {\\boldsymbol {x} _ {s}} u ^ {T D} d \\Omega - \\int_ {\\partial \\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, t} \\delta u ^ {T D} \\boldsymbol {n} \\cdot \\boldsymbol {q} | _ {\\Gamma_ {N}} d s d \\Omega_ {\\boldsymbol {x} _ {p}} d \\Omega_ {t} - \\int_ {\\Omega} \\delta u ^ {T D} f (\\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, x _ {t}) d \\Omega = 0 \\tag {25}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.104, + 0.685, + 0.452, + 0.699 + ], + "angle": 0, + "content": "where \\(q\\) is the heat flux on the Neumann boundary." + }, + { + "type": "title", + "bbox": [ + 0.104, + 0.713, + 0.304, + 0.727 + ], + "angle": 0, + "content": "2.5. Discretized matrix form" + }, + { + "type": "text", + "bbox": [ + 0.103, + 0.73, + 0.894, + 0.801 + ], + "angle": 0, + "content": "The S-P-T Galerkin weak form shown in Eq. 25 is nonlinear in nature due to the tensor product structure in TD interpolation, necessitating efficient solution schemes. To illustrate the detailed solution approach for the general S-P-T weak form, we simplify the governing equation Eq. 18 by considering a one-dimensional spatial problem where \\( x_{s} = x \\). 
We assume that the product of density and specific heat capacity \\( \\rho c_{p} \\) is equal to 1. Additionally, the forcing term is solely dependent on \\( x \\). Therefore, the simplified governing equation for this example is given by:" + }, + { + "type": "equation", + "bbox": [ + 0.421, + 0.81, + 0.893, + 0.839 + ], + "angle": 0, + "content": "\\[\n\\frac {\\partial u}{\\partial t} - \\frac {\\partial}{\\partial x} \\left( k \\frac {\\partial u}{\\partial x} \\right) = f (x) \\tag {26}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.103, + 0.843, + 0.893, + 0.872 + ], + "angle": 0, + "content": "subject to homogeneous boundary conditions and initial conditions. This equation has 3 independent variables (\\(D = 3\\)), i.e., spatial variable \\(x_{s} = x_{1} = x\\), parametric variable \\(x_{p} = x_{2} = k\\) and temporal variable \\(x_{t} = x_{3} = t\\). The S-P-T" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.879, + 0.51, + 0.89 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.102, + 0.132, + 0.894, + 0.162 + ], + "angle": 0, + "content": "Galerkin weak form of this problem can be written as follows according to Eq. 25 (the superscripts \"TD\" for both trial and test functions are omitted for brevity)." + }, + { + "type": "equation", + "bbox": [ + 0.324, + 0.171, + 0.894, + 0.202 + ], + "angle": 0, + "content": "\\[\n\\int_ {\\Omega} \\delta u \\nabla_ {t} u d \\Omega + \\int_ {\\Omega} \\nabla_ {x} \\delta u \\cdot k \\nabla_ {x} u d \\Omega - \\int_ {\\Omega} \\delta u f d \\Omega = 0 \\tag {27}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.104, + 0.205, + 0.623, + 0.22 + ], + "angle": 0, + "content": "The corresponding trial and test functions can be obtained using Eqs. 
22-23:" + }, + { + "type": "equation", + "bbox": [ + 0.38, + 0.229, + 0.893, + 0.267 + ], + "angle": 0, + "content": "\\[\nu (x, k, t) = \\sum_ {m = 1} ^ {M} u _ {x} ^ {(m)} (x) u _ {k} ^ {(m)} (k) u _ {t} ^ {(m)} (t) \\tag {28}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.192, + 0.277, + 0.893, + 0.335 + ], + "angle": 0, + "content": "\\[\n\\delta u (x, k, t) = \\underbrace {\\sum_ {m = 1} ^ {M} \\delta u _ {x} ^ {(m)} (x) u _ {k} ^ {(m)} (k) u _ {t} ^ {(m)} (t)} _ {\\text {s p a t i a l v a r i a t i o n}} + \\underbrace {\\sum_ {m = 1} ^ {M} u _ {x} ^ {(m)} (x) \\delta u _ {k} ^ {(m)} (k) u _ {t} ^ {(m)} (t)} _ {\\text {p a r a m e t r i c v a r i a t i o n}} + \\underbrace {\\sum_ {m = 1} ^ {M} u _ {x} ^ {(m)} (x) u _ {k} ^ {(m)} (k) \\delta u _ {t} ^ {(m)} (t)} _ {\\text {t e m p o r a l v a r i a t i o n}} \\tag {29}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.339, + 0.894, + 0.382 + ], + "angle": 0, + "content": "As shown in Eq. 29, the test function is further split into \\( D \\) variational terms for a general \\( D \\) dimensional problem (in the current example, \\( D = 3 \\)). As an example, we first plug Eq. 28 and the spatial variation term of Eq. 29 into the Galerkin weak form in Eq. 
27 to obtain the S-P-T weak form terms corresponding to spatial variation:" + }, + { + "type": "equation", + "bbox": [ + 0.253, + 0.392, + 0.744, + 0.446 + ], + "angle": 0, + "content": "\\[\n\\underbrace {\\int_ {\\Omega} \\sum_ {m = 1} ^ {M} \\sum_ {n = 1} ^ {M} \\left[ \\nabla \\delta u _ {x} ^ {(m)} (x) \\nabla u _ {x} ^ {(n)} (x) d x \\right] \\cdot \\left[ u _ {k} ^ {(m)} (k) k u _ {k} ^ {(n)} (k) d k \\right] \\cdot \\left[ u _ {t} ^ {(m)} (t) u _ {t} ^ {(n)} (t) d t \\right]} _ {\\text {d i f f u s i o n t e r m}} +\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.261, + 0.449, + 0.893, + 0.505 + ], + "angle": 0, + "content": "\\[\n\\underbrace {\\int_ {\\Omega} \\sum_ {m = 1} ^ {M} \\sum_ {n = 1} ^ {M} \\left[ \\delta u _ {x} ^ {(m)} (x) u _ {x} ^ {(n)} (x) d x \\right] \\cdot \\left[ u _ {k} ^ {(m)} (k) u _ {k} ^ {(n)} (k) d k \\right] \\cdot \\left[ u _ {t} ^ {(m)} (t) \\nabla_ {t} u _ {t} ^ {(n)} (t) d t \\right]} _ {\\text {t i m e d e r i v a t i v e t e r m}} - \\tag {30}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.336, + 0.506, + 0.662, + 0.561 + ], + "angle": 0, + "content": "\\[\n\\underbrace {\\int_ {\\Omega} \\sum_ {m = 1} ^ {M} \\left[ \\delta u _ {x} ^ {(m)} (x) f (x) d x \\right] \\cdot \\left[ u _ {k} ^ {(m)} (k) d k \\right] \\cdot \\left[ u _ {t} ^ {(m)} (t) d t \\right]} _ {\\text {f o r c i n g t e r m}}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.104, + 0.566, + 0.647, + 0.58 + ], + "angle": 0, + "content": "We use 1D C-HiDeNN shape functions to approximate each univariate function:" + }, + { + "type": "equation", + "bbox": [ + 0.36, + 0.603, + 0.624, + 0.624 + ], + "angle": 0, + "content": "\\[\nu _ {d} ^ {(n)} (x _ {d}) = \\widetilde {N} _ {n _ {d} ^ {\\prime}} ^ {[ d ]} (x _ {d}) u _ {n _ {d} ^ {\\prime} n} ^ {[ d ]} \\quad (\\text {n o s u m o n} d)\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.34, + 0.626, + 0.893, + 0.647 + ], + "angle": 0, + "content": "\\[\n\\delta u _ {d} ^ {(m)} \\left(x _ {d}\\right) 
= \\widetilde {N} _ {n _ {d}} ^ {[ d ]} \\left(x _ {d}\\right) \\delta u _ {n _ {d} m} ^ {[ d ]} \\quad (\\text {n o s u m o n} d) \\tag {31}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.103, + 0.654, + 0.893, + 0.69 + ], + "angle": 0, + "content": "where Einstein summation is used. The free index \\(d\\) refers to dimension and \\(d = x,k\\) or \\(t\\). The gradient of the interpolated variable can be computed using the shape function derivative \\(\\widetilde{B}_{n_d}^{[d]}(x_d) = \\frac{d\\widetilde{N}_{n_d}^{[d]}(x_d)}{dx_d}\\)." + }, + { + "type": "equation", + "bbox": [ + 0.349, + 0.709, + 0.636, + 0.731 + ], + "angle": 0, + "content": "\\[\n\\nabla_ {x _ {d}} u _ {d} ^ {(n)} (x _ {d}) = \\widetilde {B} _ {n _ {d} ^ {\\prime}} ^ {[ d ]} (x _ {d}) u _ {n _ {d} ^ {\\prime} n} ^ {[ d ]} \\quad (\\text {n o s u m o n} d)\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.329, + 0.733, + 0.892, + 0.753 + ], + "angle": 0, + "content": "\\[\n\\nabla_ {x _ {d}} \\delta u _ {d} ^ {(m)} (x _ {d}) = \\widetilde {B} _ {n _ {d}} ^ {[ d ]} (x _ {d}) \\delta u _ {n _ {d} m} ^ {[ d ]} \\quad (\\text {n o s u m o n} d) \\tag {32}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.104, + 0.761, + 0.591, + 0.775 + ], + "angle": 0, + "content": "Plugging Eq. 31 - 32 into Eq. 
30, the diffusion term can be rewritten as:" + }, + { + "type": "equation", + "bbox": [ + 0.185, + 0.786, + 0.893, + 0.839 + ], + "angle": 0, + "content": "\\[\n\\sum_ {m = 1} ^ {M} \\sum_ {n = 1} ^ {M} \\underbrace {\\int_ {\\Omega_ {x}} \\widetilde {B} _ {n _ {x}} (x) \\delta u _ {n _ {x} m} ^ {[ x ]} \\widetilde {B} _ {n _ {x} ^ {\\prime}} (x) u _ {n _ {x} ^ {\\prime} n} ^ {[ x ]} d x} _ {\\text {s p a t i a l t e r m}} \\cdot \\underbrace {\\int_ {\\Omega_ {k}} \\widetilde {N} _ {n _ {k}} (k) u _ {n _ {k} m} ^ {[ k ]} k \\widetilde {N} _ {n _ {k} ^ {\\prime}} (k) u _ {n _ {k} ^ {\\prime} n} ^ {[ k ]} d k} _ {\\text {p a r a m e t r i c t e r m}} \\cdot \\underbrace {\\int_ {\\Omega_ {t}} \\widetilde {N} _ {n _ {t}} (t) u _ {n _ {t} m} ^ {[ t ]} \\widetilde {N} _ {n _ {t} ^ {\\prime}} (t) u _ {n _ {t} ^ {\\prime} n} ^ {[ t ]} d t} _ {\\text {t e m p o r a l t e r m}} \\tag {33}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.843, + 0.893, + 0.87 + ], + "angle": 0, + "content": "As can be readily seen from Eq. 
33, after doing 1D integration of each term, the parametric and temporal terms can be treated as coefficient matrices:" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.879, + 0.508, + 0.89 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "equation", + "bbox": [ + 0.36, + 0.153, + 0.603, + 0.202 + ], + "angle": 0, + "content": "\\[\nC _ {m n} ^ {[ k ]} = \\underbrace {\\int_ {\\Omega_ {k}} \\widetilde {N} _ {n _ {k}} (k) u _ {n _ {k} m} ^ {[ k ]} k \\widetilde {N} _ {n _ {k} ^ {\\prime}} (k) u _ {n _ {k} ^ {\\prime} n} ^ {[ k ]} d k} _ {\\text {p a r a m e t r i c t e r m}}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.384, + 0.204, + 0.893, + 0.252 + ], + "angle": 0, + "content": "\\[\nC _ {m n} ^ {[ t ]} = \\underbrace {\\int_ {\\Omega_ {t}} \\widetilde {N} _ {n _ {t}} (t) u _ {n _ {t} m} ^ {[ t ]} \\widetilde {N} _ {n _ {t} ^ {\\prime}} (t) u _ {n _ {t} ^ {\\prime} n} ^ {[ t ]} d t} _ {\\text {t e m p o r a l t e r m}} \\tag {34}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.103, + 0.262, + 0.893, + 0.289 + ], + "angle": 0, + "content": "as the only free indices are \\( m \\) and \\( n \\). Substituting the coefficient matrices and rearranging different terms in Eq. 33, we have:" + }, + { + "type": "equation", + "bbox": [ + 0.329, + 0.3, + 0.893, + 0.339 + ], + "angle": 0, + "content": "\\[\n\\sum_ {m = 1} ^ {M} \\delta u _ {n _ {x} m} ^ {[ x ]} \\sum_ {n = 1} ^ {M} \\left[ \\int_ {\\Omega_ {x}} \\widetilde {B} _ {n _ {x}} (x) \\widetilde {B} _ {n _ {x} ^ {\\prime}} (x) d x \\right] \\cdot C _ {m n} ^ {[ k ]} C _ {m n} ^ {[ t ]} \\cdot u _ {n _ {x} ^ {\\prime} n} ^ {[ x ]} \\tag {35}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.103, + 0.344, + 0.894, + 0.379 + ], + "angle": 0, + "content": "Like standard FEM, we can define \\(\\int_{x}\\widetilde{B}_{n_x}(x)\\widetilde{B}_{n_x'}(x)dx\\) as the 1D stiffness matrix \\(K_{n_x n_x'}^{[x]}\\) of \\(x\\) dimension in Eq. 35. 
We let \\(C_{mn}^{[x]} = C_{mn}^{[k]}C_{mn}^{[t]}\\) with no summation on \\((m,n)\\). Furthermore, let us define the following 4-th order tensor:" + }, + { + "type": "equation", + "bbox": [ + 0.431, + 0.39, + 0.893, + 0.41 + ], + "angle": 0, + "content": "\\[\nA _ {n _ {x} n _ {x} ^ {\\prime} m n} ^ {[ x ]} = K _ {n _ {x} n _ {x} ^ {\\prime}} ^ {[ x ] ^ {\\prime}} C _ {m n} ^ {[ x ]} \\tag {36}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.103, + 0.415, + 0.894, + 0.474 + ], + "angle": 0, + "content": "where \\( A_{n_s n_s'mn}^{[x]} \\) is a function of solution vectors \\( u_{n_k m}^{[k]} \\) and \\( u_{n_m m}^{[t]} \\) since the coefficient matrix \\( C_{mn}^{[x]} \\) depends on these solution vectors as shown in Eq. 34. This dependency reflects the interconnected nature of the variables across different dimensions in the S-P-T framework, highlighting how the spatial, parameter, and temporal components influence each other through the coefficients. As a result, Eq. 33 can be further simplified as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.44, + 0.482, + 0.893, + 0.503 + ], + "angle": 0, + "content": "\\[\n\\delta u _ {n _ {x} m} ^ {[ x ]} A _ {n _ {x} n _ {x} ^ {\\prime} m n} ^ {[ x ]} u _ {n _ {x} ^ {\\prime} n} ^ {[ x ]} \\tag {37}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.103, + 0.513, + 0.893, + 0.561 + ], + "angle": 0, + "content": "where the summation signs are neglected since \\(m\\) and \\(n\\) become dummy variables. The 4-th order tensor \\(A_{n_x n_x' mn}^{[x]}\\) can be reshaped as a 2nd order tensor \\(\\mathbb{A}_{IJ}^{[x]}\\): the indices \\(n_x\\) and \\(m\\) are combined into a single composite index \\(I\\), and the indices \\(n_x'\\) and \\(n\\) are combined into a single composite index \\(J\\)." 
+ }, + { + "type": "equation", + "bbox": [ + 0.448, + 0.572, + 0.892, + 0.592 + ], + "angle": 0, + "content": "\\[\nA _ {n _ {x} n _ {x} ^ {\\prime} m n} ^ {[ x ]} = \\mathbb {A} _ {I J} ^ {[ x ]} \\tag {38}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.104, + 0.596, + 0.343, + 0.609 + ], + "angle": 0, + "content": "Define the following vectorization:" + }, + { + "type": "equation", + "bbox": [ + 0.405, + 0.632, + 0.555, + 0.653 + ], + "angle": 0, + "content": "\\[\n\\delta \\mathbb {U} _ {I} ^ {[ x ]} = \\left[ \\operatorname {v e c} \\left(\\delta u _ {n _ {x} m} ^ {[ x ]}\\right) \\right] _ {I}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.426, + 0.655, + 0.892, + 0.675 + ], + "angle": 0, + "content": "\\[\n\\mathbb {U} _ {J} ^ {[ x ]} = \\left[ \\operatorname {v e c} \\left(u _ {n _ {x} ^ {\\prime} n} ^ {[ x ]}\\right) \\right] _ {J} \\tag {39}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.104, + 0.684, + 0.344, + 0.698 + ], + "angle": 0, + "content": "As a result, Eq. 37 is equivalent to:" + }, + { + "type": "equation", + "bbox": [ + 0.449, + 0.71, + 0.892, + 0.727 + ], + "angle": 0, + "content": "\\[\n\\delta \\mathbb {U} ^ {[ x ] T} \\mathbb {A} ^ {[ x ]} \\mathbb {U} ^ {[ x ]} \\tag {40}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.732, + 0.894, + 0.791 + ], + "angle": 0, + "content": "Following the same procedure, we can obtain matrix forms corresponding to the time derivative term \\(\\delta \\mathbb{U}^{[x]^T}\\mathbb{B}^{[x]}\\mathbb{U}^{[x]}\\), and the forcing term \\(\\delta \\mathbb{U}^{[x]^T}\\mathbb{Q}^{[x]}\\) for the spatial variational part of Eq. 30. Similar structures can also be obtained for the parametric and temporal variational parts of the test function in Eq. 29. Denoting \\(\\mathbb{K}^{[d]} = \\mathbb{A}^{[d]} + \\mathbb{B}^{[d]}\\), the matrix form of the generalized S-P-T Galerkin form in Eq. 
27 can be written as:" + }, + { + "type": "equation", + "bbox": [ + 0.199, + 0.798, + 0.892, + 0.835 + ], + "angle": 0, + "content": "\\[\n\\underbrace {\\delta \\mathbb {U} ^ {[ x ] ^ {T}} \\mathbb {K} ^ {[ x ]} \\mathbb {U} ^ {[ x ]} - \\delta \\mathbb {U} ^ {[ x ] ^ {T}} \\mathbb {Q} ^ {[ x ]}} _ {\\text {s p a t i a l v a r i a t i o n a l p a r t}} + \\underbrace {\\delta \\mathbb {U} ^ {[ k ] ^ {T}} \\mathbb {K} ^ {[ k ]} \\mathbb {U} ^ {[ k ]} - \\delta \\mathbb {U} ^ {[ k ] ^ {T}} \\mathbb {Q} ^ {[ k ]}} _ {\\text {p a r a m e t r i c v a r i a t i o n a l p a r t}} + \\underbrace {\\delta \\mathbb {U} ^ {[ t ] ^ {T}} \\mathbb {K} ^ {[ t ]} \\mathbb {U} ^ {[ t ]} - \\delta \\mathbb {U} ^ {[ t ] ^ {T}} \\mathbb {Q} ^ {[ t ]}} _ {\\text {t e m p o r a l v a r i a t i o n a l p a r t}} = 0 \\tag {41}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.843, + 0.893, + 0.871 + ], + "angle": 0, + "content": "Eq. 41 is equivalent to the following nonlinear system of equations. Note that the nonlinearity comes from the fact that \\(\\mathbb{K}^{[d]}\\) is solution dependent:" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.879, + 0.509, + 0.89 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "equation", + "bbox": [ + 0.132, + 0.154, + 0.894, + 0.201 + ], + "angle": 0, + "content": "\\[\n\\left[ \\delta \\mathbb {U} ^ {[ x ] ^ {T}}, \\delta \\mathbb {U} ^ {[ k ] ^ {T}}, \\delta \\mathbb {U} ^ {[ t ] ^ {T}} \\right] \\left\\{\\left[ \\begin{array}{c c c} \\mathbb {K} ^ {[ x ]} (\\mathbb {U} ^ {[ k ]}, \\mathbb {U} ^ {[ t ]}) & 0 & 0 \\\\ 0 & \\mathbb {K} ^ {[ k ]} (\\mathbb {U} ^ {[ x ]}, \\mathbb {U} ^ {[ t ]}) & 0 \\\\ 0 & 0 & \\mathbb {K} ^ {[ t ]} (\\mathbb {U} ^ {[ x ]}, \\mathbb {U} ^ {[ k ]}) \\end{array} \\right] \\left[ \\begin{array}{l} \\mathbb {U} ^ {[ x ]} \\\\ \\mathbb {U} ^ {[ k ]} \\\\ \\mathbb {U} ^ {[ t ]} \\end{array} \\right] - \\left[ \\begin{array}{l} \\mathbb {Q} ^ {[ x ]} (\\mathbb {U} ^ {[ k ]}, \\mathbb {U} ^ 
{[ t ]}) \\\\ \\mathbb {Q} ^ {[ k ]} (\\mathbb {U} ^ {[ x ]}, \\mathbb {U} ^ {[ t ]}) \\\\ \\mathbb {Q} ^ {[ t ]} (\\mathbb {U} ^ {[ x ]}, \\mathbb {U} ^ {[ k ]}) \\end{array} \\right] \\right\\} = 0 \\tag {42}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.211, + 0.894, + 0.299 + ], + "angle": 0, + "content": "where we can treat the solution vector \\(\\left[\\mathbb{U}^{[x]^T},\\mathbb{U}^{[k]^T},\\mathbb{U}^{[t]^T}\\right]\\) as generalized DoFs like standard FEM. There are many ways to solve Eq. 42. For example, standard linearization schemes such as Newton's method have been used [29]. However, this method may suffer from ill-conditioning since the mismatch of scales for different dimensions can be significant. In this paper, we use the concept of subspace iteration to efficiently approximate the solution by iterating in the subspace of the test function space until a convergence criteria is met [19]. A similar counterpart has been widely adopted as the gold standard in discrete tensor decomposition [23]." + }, + { + "type": "title", + "bbox": [ + 0.103, + 0.313, + 0.443, + 0.327 + ], + "angle": 0, + "content": "2.6. Solution scheme of TAPS: subspace iteration" + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.33, + 0.894, + 0.416 + ], + "angle": 0, + "content": "For subspace iteration in \\(d\\)-th dimension, only the solution matrix \\(\\mathbb{U}^{[d]}\\) is treated as unknown while all other functions are considered as known constants. Consequently, the variations of the univariate functions other than \\(d\\)-th dimension will vanish. From Eq. 42, it can be seen that this will lead to a linear system of equations for the unknowns in the \\(d\\)-th dimension. The updated solution matrix \\(\\mathbb{U}^{[d]}\\) from this process is then used in the next subspace iteration for dimension \\(d + 1\\) (when \\(d = D\\), we come back to the first dimension \\(d = 1\\)). The complete solution scheme for subspace iteration is shown in Algorithm 1." 
+ }, + { + "type": "code_caption", + "bbox": [ + 0.107, + 0.429, + 0.488, + 0.443 + ], + "angle": 0, + "content": "Algorithm 1 TAPS solution scheme (subspace iteration)" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.445, + 0.783, + 0.461 + ], + "angle": 0, + "content": "1: Initialize solution vector \\(\\mathbb{U}^{[x_1][0]}\\) ..., \\(\\mathbb{U}^{[x_D][0]}\\) with random values and compute \\(\\mathbb{K}^{[x_1][0]}\\), and \\(\\mathbb{Q}^{[x_1][0]}\\)" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.462, + 0.303, + 0.476 + ], + "angle": 0, + "content": "2: for iter = 0 to iter_max do" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.476, + 0.281, + 0.488 + ], + "angle": 0, + "content": "3: for \\(d = 1\\) to D do" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.489, + 0.476, + 0.502 + ], + "angle": 0, + "content": "4: Update iteration number \\(\\mathcal{K} = iter\\times D + d\\)" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.502, + 0.547, + 0.517 + ], + "angle": 0, + "content": "5: Solve TD linear system \\(\\mathbb{K}^{[x_d][\\mathcal{K} - 1]}\\mathbb{U}^{[x_d][\\mathcal{K}]} = \\mathbb{Q}^{[x_d][\\mathcal{K} - 1]}\\)" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.517, + 0.539, + 0.532 + ], + "angle": 0, + "content": "6: Update matrices \\(\\mathbb{K}^{[x_{d + 1}][\\mathcal{K}]}\\) and force vector \\(\\mathbb{Q}^{[x_{d + 1}][\\mathcal{K}]}\\)" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.533, + 0.215, + 0.545 + ], + "angle": 0, + "content": "7: end for" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.546, + 0.293, + 0.561 + ], + "angle": 0, + "content": "8: Check convergence" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.561, + 0.189, + 0.573 + ], + "angle": 0, + "content": "9: end for" + }, + { + "type": "list", + "bbox": [ + 0.114, + 0.445, + 0.547, + 0.573 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.592, + 0.894, + 0.663 + ], + "angle": 0, + "content": "To illustrate 
the details of the subspace iteration algorithm, we consider the \\(\\mathcal{K}\\)-th subspace iteration (which is on spatial variable \\(x\\)). Here, we assume that the parametric and temporal solutions have been updated from the previous \\((\\mathcal{K} - 1)\\)-th iteration, leaving the spatial solution as unknown to be solved in \\(\\mathcal{K}\\)-th iteration. Moreover, instead of the full variation form of the test function as in Eq. 42, we only consider the subspace \\(x\\) of the test function by setting the parametric and temporal variational parts as 0. As a result, we have:" + }, + { + "type": "equation", + "bbox": [ + 0.267, + 0.674, + 0.892, + 0.695 + ], + "angle": 0, + "content": "\\[\n\\mathbb {K} ^ {[ x ] [ \\mathcal {K} - 1 ]} \\left(\\mathbb {U} ^ {[ k ] [ \\mathcal {K} - 1 ]}, \\mathbb {U} ^ {[ t ] [ \\mathcal {K} - 1 ]}\\right) \\mathbb {U} ^ {[ x ] [ \\mathcal {K} ]} = \\mathbb {Q} ^ {[ x ] [ \\mathcal {K} - 1 ]} \\left(\\mathbb {U} ^ {[ k ] [ \\mathcal {K} - 1 ]}, \\mathbb {U} ^ {[ t ] [ \\mathcal {K} - 1 ]}\\right) \\tag {43}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.701, + 0.894, + 0.786 + ], + "angle": 0, + "content": "which is a linear system of equations with unknown \\(\\mathbb{U}^{[x][\\mathcal{K}]}\\). This is a general Sylvester equation which can be solved using many efficient solution schemes [30, 31]. In this paper, sparse direct solvers based on fast diagonalization/complex Schur decomposition methods are adopted [32]. The computational complexity of the sparse direct solver is \\(O(M^3 + M^2 n_d + C_c(n_d))\\) for the \\(d\\)-th dimension subspace iteration, where \\(M\\) is the total number of modes; \\(n_d\\) is the number of grid points for \\(d\\)-th dimension; \\(C_c(n_d)\\) refers to the computational cost of the banded sparse mass/stiffness matrix for \\(d\\)-th dimension with a shape of \\((n_d \\times n_d)\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.102, + 0.785, + 0.894, + 0.829 + ], + "angle": 0, + "content": "Once \\(\\mathbb{U}^{[x][\\mathcal{K}]}\\) is obtained, we then update matrix \\(\\mathbb{K}^{[k][\\mathcal{K}]}(\\mathbb{U}^{[x][\\mathcal{K}]},\\mathbb{U}^{[t][\\mathcal{K}]})\\) and forcing vector \\(\\mathbb{Q}^{[k][\\mathcal{K}]}(\\mathbb{U}^{[x][\\mathcal{K}]},\\mathbb{U}^{[t][\\mathcal{K}]})\\). In the next iteration (for dimension \\(k\\)), we treat \\(\\mathbb{U}^{[k][\\mathcal{K} + 1]}\\) as the only unknown. Subspace iteration will continue unless the relative change of all solution matrices (for example, \\(L_{2}\\) norm) is within the tolerance." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.879, + 0.508, + 0.89 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.104, + 0.132, + 0.308, + 0.146 + ], + "angle": 0, + "content": "2.7. Error estimates of TAPS" + }, + { + "type": "text", + "bbox": [ + 0.103, + 0.15, + 0.894, + 0.192 + ], + "angle": 0, + "content": "Since the TAPS solution is based on the C-HiDeNN-TD approximation and the generalized Galerkin formulation, we can have the following theoretical results on the error bounds, as demonstrated in our previous work on C-HiDeNN [6]:" + }, + { + "type": "equation", + "bbox": [ + 0.322, + 0.192, + 0.892, + 0.208 + ], + "angle": 0, + "content": "\\[\n\\left\\| u ^ {\\mathrm {C} - \\mathrm {H i D e N N}} - u ^ {\\mathrm {e x}} \\right\\| _ {E} \\leq \\left\\| u ^ {\\mathrm {T A P S}} - u ^ {\\mathrm {e x}} \\right\\| _ {E} \\leq \\left\\| u ^ {\\mathrm {F E M}} - u ^ {\\mathrm {e x}} \\right\\| _ {E} \\tag {44}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.103, + 0.215, + 0.894, + 0.314 + ], + "angle": 0, + "content": "where \\(\\| \\cdot \\|_E\\) denotes the energy norm, \\(u^{\\mathrm{ex}}\\) denotes the exact solution, \\(u^{\\mathrm{C - HiDeNN}}\\) denotes the solution obtained by the full C-HiDeNN method without tensor decomposition, 
\\(u^{\\mathrm{TAPS}}\\) denotes the TAPS solution with a sufficient number of modes, \\(u^{\\mathrm{FEM}}\\) denotes the FEM solution. The proof of the above results is based on the fact that the full C-HiDeNN approximation can provide a larger function space and therefore more accurate solutions than conventional FEM [6]. The subspace iteration can be considered as a local (directional) version of the Galerkin formulation and is expected to enable an optimized solution for the tensor decomposition that will converge to the Galerkin-based full C-HiDeNN method." + }, + { + "type": "title", + "bbox": [ + 0.104, + 0.336, + 0.183, + 0.349 + ], + "angle": 0, + "content": "3. Results" + }, + { + "type": "title", + "bbox": [ + 0.104, + 0.361, + 0.428, + 0.376 + ], + "angle": 0, + "content": "3.1. Convergence study for moving heat source" + }, + { + "type": "text", + "bbox": [ + 0.103, + 0.379, + 0.894, + 0.435 + ], + "angle": 0, + "content": "In this section, we first analyze the convergence of the TAPS solver for a space-time (S-T) transient heat transfer problem. A single NVIDIA RTX A6000 GPU is used for all the following analyses. In Eq. 18, we let \\(\\rho c_{p} = 1\\), \\(k = 1\\), and replace the heat source as shown in Eq. 45. In this example, we have the spatial variable \\(x_{s} = (x,y,z)\\) and the temporal variable \\(x_{t} = t\\)." 
+ }, + { + "type": "equation", + "bbox": [ + 0.267, + 0.447, + 0.891, + 0.523 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} f \\left(\\boldsymbol {x} _ {s}, x _ {t}\\right) = 2 (1 - 2 y ^ {2}) \\left(1 - e ^ {- 1 5 t}\\right) e ^ {- y ^ {2} - (1 0 0 t - x - 5) ^ {2}} \\\\ + 2 (1 - 2 (1 0 0 t - x - 5) ^ {2}) \\left(1 - e ^ {- 1 5 t}\\right) e ^ {- y ^ {2} - (1 0 0 t - x - 5) ^ {2}} + (1 \\tag {45} \\\\ - e ^ {- 1 5 t}) (2 0 0 x + 1 0 0 0 - 2 0 0 0 0 t) e ^ {- y ^ {2} - (1 0 0 t - x - 5) ^ {2}} \\\\ - 1 5 e ^ {- 1 5 t} e ^ {- y ^ {2} - (1 0 0 t - x - 5) ^ {2}} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.104, + 0.532, + 0.53, + 0.546 + ], + "angle": 0, + "content": "The analytical solution to the PDE is inherently non-separable." + }, + { + "type": "equation", + "bbox": [ + 0.374, + 0.557, + 0.892, + 0.574 + ], + "angle": 0, + "content": "\\[\nu ^ {\\mathrm {e x}} \\left(\\boldsymbol {x} _ {s}, x _ {t}\\right) = (1 - e ^ {- 1 5 t}) e ^ {- y ^ {2} - (x - 1 0 0 t - 5) ^ {2}} \\tag {46}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.104, + 0.586, + 0.377, + 0.599 + ], + "angle": 0, + "content": "The initial and boundary conditions are:" + }, + { + "type": "equation", + "bbox": [ + 0.407, + 0.613, + 0.491, + 0.625 + ], + "angle": 0, + "content": "\\[\nu \\left(x _ {s}, 0\\right) = 0,\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.407, + 0.624, + 0.892, + 0.647 + ], + "angle": 0, + "content": "\\[\n\\left. u \\left(\\boldsymbol {x} _ {s}, x _ {t}\\right) \\right| _ {\\partial \\Omega} = \\left. u ^ {\\mathrm {e x}} \\left(\\boldsymbol {x} _ {s}, x _ {t}\\right) \\right| _ {\\partial \\Omega}. 
\\tag {47}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.104, + 0.652, + 0.377, + 0.665 + ], + "angle": 0, + "content": "The relative \\(L_{2}\\) norm error is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.36, + 0.676, + 0.892, + 0.71 + ], + "angle": 0, + "content": "\\[\n\\epsilon_ {L _ {2}} = \\frac {\\| u ^ {T D} \\left(\\boldsymbol {x} _ {s} , x _ {t}\\right) - u ^ {\\mathrm {e x}} \\left(\\boldsymbol {x} _ {s} , x _ {t}\\right) \\| _ {L _ {2} \\left(\\Omega_ {\\boldsymbol {x} _ {s}} \\otimes \\Omega_ {x _ {t}}\\right)}}{\\| u ^ {\\mathrm {e x}} \\left(\\boldsymbol {x} _ {s} , x _ {t}\\right) \\| _ {L _ {2} \\left(\\Omega_ {\\boldsymbol {x} _ {s}} \\otimes \\Omega_ {x _ {t}}\\right)}} \\tag {48}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.103, + 0.719, + 0.894, + 0.861 + ], + "angle": 0, + "content": "First, we investigate the influence of the number of subspace iterations. As shown in Fig. 8(a), 3 iterations are enough to obtain an accurate result. Next, we investigate the convergence in terms of the number of modes. Here we compare the relative \\( L_{2} \\) norm error for both TAPS and proper generalized decomposition (PGD) methods [17, 18]. To this aim, we use the same discretization for the space-time domain with each dimension discretized by 100 grid points, the same reproducing polynomial order \\( p = 1 \\) and convolution patch size \\( s = 1 \\). As can be seen from Fig. 8(b), TAPS requires a much smaller number of modes than PGD. For TAPS, when the number of modes equals 25, the relative \\( L_{2} \\) norm error decreases to \\( 2.5 \\times 10^{-3} \\). The total solution time is 15.2 s. However, PGD requires 1,000 modes which takes 60.6 s solution time to reach the same level of accuracy. This is because the test function space in PGD is a subspace of TAPS [29]. Furthermore, the modal decomposition obtained from PGD is not optimal and thus requires a larger storage requirement due to the increased number of modes." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.88, + 0.509, + 0.89 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.184, + 0.134, + 0.506, + 0.282 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.375, + 0.286, + 0.395, + 0.298 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.514, + 0.133, + 0.811, + 0.285 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.679, + 0.287, + 0.699, + 0.298 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image_caption", + "bbox": [ + 0.197, + 0.315, + 0.799, + 0.327 + ], + "angle": 0, + "content": "Figure 8: Relative L2 norm error with respect to (a) the number of subspace iterations (b) the number of modes" + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.348, + 0.894, + 0.433 + ], + "angle": 0, + "content": "The spatial and temporal convergence are also studied. In Fig. 9(a), the number of temporal nodes is fixed as 500, and the spatial mesh is refined. It shows the relative \\( L_{2} \\) norm error with respect to the number of nodes along each spatial dimension. As can be readily seen from the figure, larger patch size \\( s \\) leads to smaller error given the same reproducing polynomial orders \\( p \\). Moreover, we can adjust \\( p \\) to control the spatial convergence rate. Similarly, Fig. 9(b) demonstrates the convergence rate in the temporal domain where we fix the spatial discretization as 500 along each spatial dimension. By adjusting \\( s \\) and \\( p \\), we can obtain different temporal convergence rates." + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.434, + 0.894, + 0.476 + ], + "angle": 0, + "content": "Finally, we refine the spatial and temporal mesh simultaneously and study the spatio-temporal convergence rate in Fig. 9(c). 
As can be observed from the figure, higher reproducing polynomial order \\( p \\) will lead to a higher-order convergence rate." + }, + { + "type": "image", + "bbox": [ + 0.111, + 0.49, + 0.382, + 0.646 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.259, + 0.647, + 0.277, + 0.657 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.387, + 0.49, + 0.621, + 0.647 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.511, + 0.647, + 0.529, + 0.657 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image", + "bbox": [ + 0.624, + 0.491, + 0.885, + 0.646 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.764, + 0.646, + 0.78, + 0.656 + ], + "angle": 0, + "content": "(c)" + }, + { + "type": "image_caption", + "bbox": [ + 0.103, + 0.673, + 0.893, + 0.697 + ], + "angle": 0, + "content": "Figure 9: Relative \\( L_{2} \\) norm error with respect to the number of grid points (a) spatial convergence (b) temporal convergence (c) spatio-temporal convergence" + }, + { + "type": "title", + "bbox": [ + 0.103, + 0.721, + 0.707, + 0.735 + ], + "angle": 0, + "content": "3.2. Convergence study of \\( S-P-T \\) problems up to equivalent zetta-scale (10\\(^{21}\\)) full models" + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.737, + 0.894, + 0.78 + ], + "angle": 0, + "content": "In this example, we study the convergence of the TAPS solver for the time-dependent parametric heat transfer problem in a S-P-T setting. In Eq. 18, we adopt the heat source as shown in Eq. 49. In this example, we have spatial variable \\( \\boldsymbol{x}_s = (x,y,z) \\), parametric variable \\( \\boldsymbol{x}_p = (k,P,\\rho ,c_p) \\) and temporal variable \\( x_{t} = t \\)." 
+ }, + { + "type": "equation", + "bbox": [ + 0.241, + 0.789, + 0.892, + 0.832 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} f \\left(\\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, x _ {t}\\right) = 1 5 \\rho^ {2} c _ {p} ^ {2} k p e ^ {- 1 5 k t} e ^ {- 2 5. 0 x ^ {2} - 2 5. 0 y ^ {2}} \\\\ + 5 0 \\rho c _ {p} k p \\left(1 - e ^ {- 1 5 k t}\\right) e ^ {- 2 5. 0 x ^ {2} - 2 5. 0 y ^ {2}} \\left[ \\left(1 - 5 0 x ^ {2}\\right) + \\left(1 - 5 0 y ^ {2}\\right) \\right] \\tag {49} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.103, + 0.833, + 0.53, + 0.848 + ], + "angle": 0, + "content": "The analytical solution to the PDE is inherently non-separable." + }, + { + "type": "equation", + "bbox": [ + 0.343, + 0.854, + 0.892, + 0.873 + ], + "angle": 0, + "content": "\\[\nu ^ {\\mathrm {e x}} \\left(\\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, x _ {t}\\right) = \\rho c _ {p} P \\left(1 - e ^ {- 1 5 k t}\\right) e ^ {- 2 5. 0 x ^ {2} - 2 5. 0 y ^ {2}} \\tag {50}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.879, + 0.508, + 0.89 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.104, + 0.133, + 0.378, + 0.147 + ], + "angle": 0, + "content": "The initial and boundary conditions are:" + }, + { + "type": "equation", + "bbox": [ + 0.378, + 0.158, + 0.49, + 0.173 + ], + "angle": 0, + "content": "\\[\nu \\left(\\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, 0\\right) = 0\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.38, + 0.177, + 0.617, + 0.196 + ], + "angle": 0, + "content": "\\[\nu \\left(\\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, x _ {t}\\right) | _ {\\partial \\Omega} = u ^ {\\mathrm {e x}} \\left(\\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, x _ {t}\\right) | _ {\\partial \\Omega}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.104, + 0.201, + 0.38, + 0.214 + ], + "angle": 0, + "content": "The relative \\(L_{2}\\) norm error is defined as:" + }, + { + "type": "equation", + 
"bbox": [ + 0.319, + 0.223, + 0.894, + 0.261 + ], + "angle": 0, + "content": "\\[\n\\epsilon_ {L _ {2}} = \\frac {\\left\\| u ^ {T D} \\left(\\boldsymbol {x} _ {s} , \\boldsymbol {x} _ {p} , x _ {t}\\right) - u ^ {\\mathrm {e x}} \\left(\\boldsymbol {x} _ {s} , \\boldsymbol {x} _ {p} , x _ {t}\\right) \\right\\| _ {L _ {2} \\left(\\Omega_ {x _ {s}} \\otimes \\Omega_ {x _ {p}} \\otimes \\Omega_ {x _ {t}}\\right)}}{\\left\\| u ^ {\\mathrm {e x}} \\left(\\boldsymbol {x} _ {s} , \\boldsymbol {x} _ {p} , x _ {t}\\right) \\right\\| _ {L _ {2} \\left(\\Omega_ {x _ {s}} \\otimes \\Omega_ {x _ {p}} \\otimes \\Omega_ {x _ {t}}\\right)}} \\tag {51}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.269, + 0.894, + 0.384 + ], + "angle": 0, + "content": "To study the convergence of TAPS for S-P-T problems, the number of grid points is refined simultaneously in each dimension and corresponding relative \\( L_{2} \\) norm errors are computed as shown in Fig. 10. When the number of grid points in each dimension is 450, the equivalent DoFs of a full model achieves \\( 450^{8} = 1.68 \\times 10^{21} \\). Consequently, it is equivalent to a zetta-scale \\( (10^{21}) \\) full problem. As can be seen from the figure, a larger patch size \\( s \\) leads to a smaller error and faster convergence. A higher reproducing polynomial order \\( p \\) also leads to a higher convergence rate. It can be noticed that the convergence rate for \\( p = 3 \\) case is smaller than expected \\( p + 1 = 4 \\). This is attributed to the fact that the S-P-T mesh is not fine enough. However, due to the rounding error in computing the relative \\( L_{2} \\) norm error, we can only accurately compute the error up to 450 grid points per dimension." 
+ }, + { + "type": "image", + "bbox": [ + 0.345, + 0.397, + 0.657, + 0.598 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.243, + 0.61, + 0.754, + 0.624 + ], + "angle": 0, + "content": "Figure 10: Relative \\( L_{2} \\) norm error with respect to the number of grid points in each dimension" + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.637, + 0.895, + 0.75 + ], + "angle": 0, + "content": "In summary, we have the flexibility to choose different \\( s \\) and \\( p \\) to control the accuracy of TAPS by directly solving the S-P-T PDE. This is different from other data-driven modeling approaches (for instance, neural networks-based data-driven methods) in two notable ways. First, unlike a black-box neural network interpolator where the accuracy of the model is not guaranteed, our method is built upon the AI-enhanced finite element method, and we can control the convergence rate by choosing suitable hyperparameters \\( s \\) and \\( p \\). Second, unlike most data-driven reduced-order models for physical problems, our method directly solves the governing PDE by plugging in the TD interpolation without seeing any training data. As a result, we can avoid the most expensive offline data generation stage as opposed to data-driven methods." + }, + { + "type": "title", + "bbox": [ + 0.104, + 0.764, + 0.548, + 0.779 + ], + "angle": 0, + "content": "3.3. Moving source with solution dependent material parameters" + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.782, + 0.895, + 0.825 + ], + "angle": 0, + "content": "In this section, we model moving heat sources using temperature-dependent material parameters. The solution scheme of this problem is provided in detail in Appendix A. Figure 11(a) illustrates a typical representation of temperature-dependent heat conductivity and capacity for Inconel 718 [33]." 
+ }, + { + "type": "text", + "bbox": [ + 0.103, + 0.825, + 0.894, + 0.854 + ], + "angle": 0, + "content": "Since the temperature dependency of \\( k(u) \\) and \\( \\rho c_{p}(u) \\) can be approximated using a linear relationship. As a result, we can directly rewrite \\( k(u(\\boldsymbol{x}_s,x_t)) \\) and \\( \\rho c_{p}(u(\\boldsymbol{x}_{s},x_{t})) \\) in the TD format." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.879, + 0.51, + 0.89 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.187, + 0.144, + 0.501, + 0.287 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.324, + 0.289, + 0.349, + 0.302 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.527, + 0.178, + 0.812, + 0.281 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.626, + 0.285, + 0.65, + 0.301 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image_caption", + "bbox": [ + 0.102, + 0.318, + 0.894, + 0.342 + ], + "angle": 0, + "content": "Figure 11: (a) Temperature dependent material properties for Inconel 718 [33] (b) Schematic of numerical simulation, where the solution along the center line is compared for FEM and TAPS." 
+ }, + { + "type": "equation", + "bbox": [ + 0.317, + 0.388, + 0.674, + 0.426 + ], + "angle": 0, + "content": "\\[\nk \\left(\\boldsymbol {x} _ {s}, x _ {t}\\right) \\approx \\sum_ {m = 1} ^ {M} m _ {k} u _ {x _ {1}} ^ {(m)} \\left(x _ {1}\\right) u _ {x _ {2}} ^ {(m)} \\left(x _ {2}\\right) u _ {x _ {3}} ^ {(m)} \\left(x _ {3}\\right) u _ {x _ {t}} ^ {(m)} \\left(x _ {t}\\right) + n _ {k}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.288, + 0.429, + 0.893, + 0.466 + ], + "angle": 0, + "content": "\\[\n\\rho c _ {p} \\left(\\boldsymbol {x} _ {s}, x _ {t}\\right) \\approx \\sum_ {m = 1} ^ {M} m _ {c _ {p}} u _ {x _ {1}} ^ {(m)} \\left(x _ {1}\\right) u _ {x _ {2}} ^ {(m)} \\left(x _ {2}\\right) u _ {x _ {3}} ^ {(m)} \\left(x _ {3}\\right) u _ {x _ {t}} ^ {(m)} \\left(x _ {t}\\right) + n _ {c _ {p}} \\tag {52}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.103, + 0.477, + 0.893, + 0.507 + ], + "angle": 0, + "content": "where \\(M\\) is the decomposition modes of the TAPS solution; \\(m_{k} = 1.52 \\times 10^{-5} \\mathrm{~W} / (\\mathrm{mmK}^{2})\\); \\(n_{k} = 5.29 \\times 10^{-3} \\mathrm{~W} / (\\mathrm{mmK})\\); \\(m_{c_{p}} = 6.11 \\times 10^{-7} \\mathrm{~mm}^{-3} \\mathrm{~K}^{-2}\\); \\(n_{c p} = 3.25 \\times 10^{-3} \\mathrm{~mm}^{-3} \\mathrm{~K}^{-1}\\)" + }, + { + "type": "text", + "bbox": [ + 0.103, + 0.507, + 0.894, + 0.576 + ], + "angle": 0, + "content": "The problem setup is shown in Fig. 11 (b). The spatial domain size is \\(10\\mathrm{mm} \\times 10\\mathrm{mm} \\times 1\\mathrm{mm}\\) where homogeneous Dirichlet boundary conditions are assumed for the left and right surfaces; homogeneous Neumann boundary conditions are applied to all other surfaces. As shown in Eq. 19, a moving Gaussian source term \\(f(\\boldsymbol{x}_s, t)\\) is applied as a volumetric source term with a radius \\(r = 0.5\\mathrm{mm}\\) and moving velocity \\(500\\mathrm{mm/s}\\). The diameter is discretized using 10 spatial elements." 
+ }, + { + "type": "text", + "bbox": [ + 0.103, + 0.577, + 0.894, + 0.635 + ], + "angle": 0, + "content": "Since there is no analytical solution available to this problem, we use implicit finite element analysis as the baseline for validation. JAX-FEM [34] is used to generate the nonlinear FEM solution. For ease of comparison, we use the same time increment as \\(1.60 \\times 10^{-4}\\) sec for both TAPS and FEM. The solution along the center line, as shown in Fig. 11 (b) is compared. As can be seen from Fig. 12, the result of the nonlinear TAPS solver agrees well with FEM." + }, + { + "type": "title", + "bbox": [ + 0.104, + 0.648, + 0.332, + 0.662 + ], + "angle": 0, + "content": "3.4. Simulation of LPBF process" + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.666, + 0.894, + 0.71 + ], + "angle": 0, + "content": "In this section, we use TAPS to efficiently model the laser powder bed fusion process (LPBF) in additive manufacturing. Here we only consider the free convection term in the Neumann boundary condition. The initial condition can be considered by splitting the total solution as a summation of the homogeneous part and the inhomogeneous part." + }, + { + "type": "equation", + "bbox": [ + 0.392, + 0.723, + 0.892, + 0.739 + ], + "angle": 0, + "content": "\\[\nu \\left(\\boldsymbol {x} _ {s}, x _ {t}\\right) = u _ {0} \\left(\\boldsymbol {x} _ {s}, x _ {t}\\right) + u _ {\\text {i n i t}} \\left(\\boldsymbol {x} _ {s}\\right) \\tag {53}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.103, + 0.745, + 0.893, + 0.773 + ], + "angle": 0, + "content": "As a result, \\( u_{0}(\\pmb{x}_{s}, x_{t}) \\) is subject to homogeneous initial conditions. In this section, we assume Ti-6Al-4V is used as the powder bed materials. The detailed material parameters can be found in Table 4." + }, + { + "type": "title", + "bbox": [ + 0.104, + 0.787, + 0.312, + 0.801 + ], + "angle": 0, + "content": "3.4.1. 
Single-track simulation" + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.802, + 0.894, + 0.86 + ], + "angle": 0, + "content": "In this example, we investigate the computational complexity numerically for single-track LPBF simulation with a single S-T slab, as shown in Fig. 13 (a). A single NVIDIA RTX A6000 GPU is used for all the following analyses. To ensure accuracy, the number of modes is adopted as 5 times larger than the number of time steps in the following examples. In the first case, within the S-T slab, the spatial mesh is refined uniformly along each spatial dimension" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.879, + 0.509, + 0.89 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.34, + 0.131, + 0.65, + 0.337 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.227, + 0.353, + 0.768, + 0.366 + ], + "angle": 0, + "content": "Figure 12: Comparison of Nonlinear TAPS solution versus finite element solution at different times." + }, + { + "type": "table_caption", + "bbox": [ + 0.383, + 0.389, + 0.615, + 0.398 + ], + "angle": 0, + "content": "Table 4: Parameters used in the simulation" + }, + { + "type": "table", + "bbox": [ + 0.106, + 0.398, + 0.894, + 0.487 + ], + "angle": 0, + "content": "
ParameterVariableValueUnits
Thermal conductivityk22.0W m-1K-1
Densityρ4.27g cm-3
Specific heat capacitycp745J kg-1K-1
Ambient temperatureT0298.15K
Heat convection coefficienthconv14.73W m-2K-1
" + }, + { + "type": "text", + "bbox": [ + 0.103, + 0.51, + 0.891, + 0.539 + ], + "angle": 0, + "content": "while fixing the number of temporal grid points. The computational time for each subspace iteration is plotted in Fig. 13 (b). It can be seen that TAPS has a linear growth of computational complexity when refining the spatial mesh." + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.539, + 0.894, + 0.651 + ], + "angle": 0, + "content": "Similarly, we only refine the temporal mesh while fixing the spatial mesh in the second case and plot the computational time for each subspace iteration as in Fig. 13 (c). It can be readily observed that refining the temporal mesh has a much higher computational complexity than refining the spatial mesh. This is because increasing temporal elements will also lead to an increased number of modes \\( M \\). As mentioned before, the computational cost for the sparse direct solver employed is \\( O(M^3 + M^2 n_d + C_c(n_d)) \\) for the \\( d \\)-th dimension subproblem, where \\( M \\) represents total number of modes; \\( n_d \\) refers to the total number of grid points in \\( d \\)-th dimension; \\( C_c(n_d) \\) refers to the computational cost of a banded sparse matrix with a shape of \\( (n_d \\times n_d) \\). Therefore, the increased number of modes leads to a cubic growth in computational time." + }, + { + "type": "table_caption", + "bbox": [ + 0.348, + 0.674, + 0.648, + 0.685 + ], + "angle": 0, + "content": "Table 5: Parameters used in the single-track simulation" + }, + { + "type": "table", + "bbox": [ + 0.106, + 0.685, + 0.893, + 0.83 + ], + "angle": 0, + "content": "
ParameterVariableValueUnits
Laser powerP200W
Laser spot size radiusr50μm
Laser scan speedV500mm s-1
Absorptivityη0.251
LengthL1.5mm
WidthW1.5mm
HeightH1.5mm
Laser penetration depthd50μm
Mesh sizeh5μm
" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.879, + 0.509, + 0.89 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.148, + 0.134, + 0.331, + 0.282 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.347, + 0.133, + 0.603, + 0.285 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.605, + 0.131, + 0.848, + 0.284 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.103, + 0.298, + 0.893, + 0.321 + ], + "angle": 0, + "content": "Figure 13: (a) Single-track simulation. (b) Computational time of subspace iteration in refining the spatial mesh: linear growth. (c) Computational time of subspace iteration in refining the temporal mesh in a single space-time slab: cubic growth due to the increased number of modes" + }, + { + "type": "title", + "bbox": [ + 0.104, + 0.345, + 0.308, + 0.358 + ], + "angle": 0, + "content": "3.4.2. Multi-track simulation" + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.36, + 0.894, + 0.474 + ], + "angle": 0, + "content": "A major challenge in simulating multiple tracks in LPBF is the substantial number of time steps needed. To circumvent the cubic growth associated with the increasing number of temporal grid points and modes for moving source problems, we can leverage multiple S-T slabs to break down the original problem with a large number of time steps into smaller slabs. Consequently, this method keeps the total number of modes required in each slab beneath a reasonable threshold, thereby optimizing computational efficiency. The detailed algorithm of simulating multiple space-time (S-T) slabs for LPBF process is shown in Appendix B. Using this method, we first simulate a multi-track LPBF problem and analyze how the total number of slabs influences computational cost. The detailed setup can be found in Table 6. 
Note that we only simulate the printing process of the final layer in this section." + }, + { + "type": "table_caption", + "bbox": [ + 0.351, + 0.497, + 0.646, + 0.507 + ], + "angle": 0, + "content": "Table 6: Parameters used in the multi-track simulation" + }, + { + "type": "table", + "bbox": [ + 0.106, + 0.506, + 0.893, + 0.666 + ], + "angle": 0, + "content": "
ParameterVariableValueUnits
Laser powerP200W
Laser spot size radiusr50μm
Laser scan speedV500mm s-1
Absorptivityη0.251
LengthL1.5mm
WidthW1.5mm
HeightH1.5mm
Laser penetration depthd50μm
Hatch space sizehs50μm
Mesh sizeh5μm
" + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.679, + 0.894, + 0.75 + ], + "angle": 0, + "content": "We use different numbers of temporal grids within each S-T slab and compare the computation cost, as shown in Fig. 14. As can be seen from the figure, when each space-time slab contains around 20 temporal grid points, the computational efficiency is optimal. Hence, choosing the optimal number of temporal elements inside each space-time slab is crucial for the overall performance of the TAPS solver for modeling LPBF process. We will adopt 20 temporal grid points per S-T slab as the default for the following multi-track LPBF simulations." + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.751, + 0.894, + 0.807 + ], + "angle": 0, + "content": "Next, we compare the performance of TAPS versus the classical explicit finite difference method. To this aim, we use a GPU-accelerated and optimized finite difference code, GAMMA, to model the LPBF process [28]. In this example, we increase the size of the domain while maintaining all other process parameters, as shown in Table 6. The corresponding computation time, GPU memory usage, and required data storage space are plotted in Fig. 15." + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.808, + 0.894, + 0.864 + ], + "angle": 0, + "content": "Fig. 15(a) highlights the significant speed advantage of TAPS over GAMMA, especially as the size of the simulation domain increases. GAMMA only can simulate powder bed size up to \\(4.5^{3}\\mathrm{mm}^{3}\\) since the GPU memory can only handle up to \\(7.31\\times 10^{8}\\) spatial DoFs. For the \\(4.5^{3}\\mathrm{mm}^{3}\\) case, TAPS is 85 times faster than GAMMA. 
On the other hand, TAPS is able to model \\(100^{3}\\mathrm{mm}^{3}\\) powder bed, with its speed benefits becoming even more evident for larger" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.879, + 0.509, + 0.89 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.227, + 0.131, + 0.481, + 0.305 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.329, + 0.313, + 0.353, + 0.326 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.499, + 0.143, + 0.775, + 0.306 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.639, + 0.313, + 0.662, + 0.326 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image_caption", + "bbox": [ + 0.147, + 0.34, + 0.848, + 0.353 + ], + "angle": 0, + "content": "Figure 14: (a) Multi-track simulation. (b) Influence of number of temporal grid points in each S-T slab on the computational cost." + }, + { + "type": "image", + "bbox": [ + 0.107, + 0.368, + 0.365, + 0.538 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.234, + 0.539, + 0.254, + 0.549 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.365, + 0.368, + 0.616, + 0.538 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.491, + 0.539, + 0.511, + 0.55 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image", + "bbox": [ + 0.619, + 0.368, + 0.89, + 0.537 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.761, + 0.537, + 0.779, + 0.547 + ], + "angle": 0, + "content": "(c)" + }, + { + "type": "image_caption", + "bbox": [ + 0.103, + 0.563, + 0.893, + 0.588 + ], + "angle": 0, + "content": "Figure 15: Performance comparison of TAPS and GAMMA for powder bed with different sizes in terms of (a) computational time (b) GPU memory requirement (c) Data storage requirement for each time increment" 
+ }, + { + "type": "text", + "bbox": [ + 0.102, + 0.611, + 0.894, + 0.709 + ], + "angle": 0, + "content": "domains. Fig. 15(b) compares the memory requirements, where GAMMA experiences fast growth due to the cubic scaling of total spatial DoFs. In contrast, TAPS benefits from TD, requiring significantly less memory. TAPS uses 13 times smaller GPU memory compared to GAMMA for the \\(4.5^{3}\\mathrm{mm}^{3}\\) case. Additionally, TAPS can efficiently manage GPU memory usage for larger powder bed simulations by adopting different numbers of temporal grids in each S-T slab. Finally, Fig. 15(c) compares data storage needs where GAMMA's storage requirements grow cubically, whereas TAPS maintains a linear growth pattern. For the \\(4.5^{3}\\mathrm{mm}^{3}\\) case, the data storage of GAMMA is 2,700 times larger than TAPS." + }, + { + "type": "title", + "bbox": [ + 0.104, + 0.724, + 0.51, + 0.738 + ], + "angle": 0, + "content": "3.4.3. Large-scale multi-layer multi-track LPBF simulation" + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.74, + 0.894, + 0.81 + ], + "angle": 0, + "content": "In this section, the proposed method is used to simulate a large-scale multi-layer multi-track LPBF process. Element birth is used to model newly added layers in the process. Details on element birth can be found in Appendix C. As shown in Fig. 16 (a), the run scenario is the production of a \\(10\\mathrm{mm}\\) cube within a \\(12\\mathrm{mm}\\) powder bed domain. The base plate height is \\(2\\mathrm{mm}\\). The tool path follows the pattern shown on the top surface. Material parameters are taken from Ti-6Al-4V [35]. The detailed parameters for the simulation setup are shown in Table 7." + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.811, + 0.894, + 0.853 + ], + "angle": 0, + "content": "To showcase the capabilities of our approach using TAPS, we employ a fine spatial mesh for the simulation. The spatial element size is \\(10 \\times 10 \\times 5\\mu m^3\\). 
In classical numerical algorithms, this corresponds to \\(3.46 \\times 10^{9}\\) spatial DoFs, which is unmanageable for typical workstations due to the prohibitive RAM requirements." + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.853, + 0.893, + 0.868 + ], + "angle": 0, + "content": "The simulation result is shown in Fig. 16 (b), where the temperature of the last layer is plotted. In total, it costs 60.7" + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.879, + 0.509, + 0.89 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.234, + 0.148, + 0.482, + 0.298 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.317, + 0.309, + 0.344, + 0.323 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.502, + 0.141, + 0.77, + 0.298 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.612, + 0.308, + 0.638, + 0.323 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image_caption", + "bbox": [ + 0.215, + 0.337, + 0.782, + 0.351 + ], + "angle": 0, + "content": "Figure 16: (a) Problem statement: LPBF simulation. (b) Temperature solution for printing the final layer" + }, + { + "type": "table_caption", + "bbox": [ + 0.333, + 0.372, + 0.663, + 0.383 + ], + "angle": 0, + "content": "Table 7: Parameters used in the large-scale LPBF simulation" + }, + { + "type": "table", + "bbox": [ + 0.106, + 0.383, + 0.894, + 0.501 + ], + "angle": 0, + "content": "
ParameterVariableValueUnits
Laser powerP200W
Laser spot size radiusr100μm
Laser scan speedV500mm s-1
Absorptivityη0.251
Laser penetration depthd50μm
Layer thicknesshl50μm
Hatch space sizehs200μm
" + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.524, + 0.894, + 0.596 + ], + "angle": 0, + "content": "hrs to run the simulation. The maximum GPU memory usage is 8.11 GB. The final solution vector size is 1.35 GB. As a comparison, it's estimated that GAMMA will solve the same problem with the same spatial resolution in 3,485 days, with at least 120 GB GPU memory usage and 1.26 TB storage space to store the solution [35]. Consequently, TAPS achieves around 1,370 X speedup, 14.8 X memory footprint savings, and 955 X storage gain compared to the finite difference method." + }, + { + "type": "title", + "bbox": [ + 0.104, + 0.617, + 0.206, + 0.63 + ], + "angle": 0, + "content": "4. Discussion" + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.642, + 0.895, + 0.74 + ], + "angle": 0, + "content": "In the previous sections, we have shown that TAPS tackles two drawbacks of data-driven surrogate modeling approaches which use offline data generated through direct numerical simulation (DNS). Firstly, the proposed TAPS is data-free, which means that it does not require any training data. This is of crucial importance for applications that require ultra high-resolution simulations because offline training data generation can be extremely costly. Our method circumvents expensive offline DNS data generation by directly solving the governing equation. Secondly, TAPS enables solving ultra large-scale problems with significant speedup, minimal memory requirement, and substantial storage gain as compared to standard DNS techniques." + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.741, + 0.895, + 0.826 + ], + "angle": 0, + "content": "The computational speed of the current method can be further improved with the state-of-the-art high-performance numerical solvers and parallel computing on multiple GPUs. Right now, the TAPS linear systems of equations are solved on CPUs which results in additional overhead. 
With more sparse direct solvers/iterative schemes becoming available on GPU, we expect a further speedup of the current program. Moreover, parallel computing using multiple GPUs can be achieved using Message Passing Interface (MPI) [36]. For ultra large-scale analysis where each dimension contains millions of nodes, an efficient iterative solver with a suitable preconditioner needs to be developed." + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.826, + 0.895, + 0.87 + ], + "angle": 0, + "content": "Variational multiscale methods can be used to further extend the capabilities of the current method to tackle zettascale space-time problems [37, 35]. Moreover, one major computational cost for the current method originates from the increased number of decomposition modes for a large number of time steps. This can be avoided by leveraging" + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.879, + 0.508, + 0.89 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.102, + 0.133, + 0.894, + 0.19 + ], + "angle": 0, + "content": "coordinate transformation techniques where the moving source can be transformed into a fixed one. As a result, we expect to greatly improve the computational performance of the current method. Irregular geometry can also be considered using immersed finite element techniques or the Solid Isotropic Material with Penalization (SIMP) method in topology optimization [20, 38, 39, 40, 41]." + }, + { + "type": "title", + "bbox": [ + 0.104, + 0.211, + 0.212, + 0.224 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.236, + 0.894, + 0.32 + ], + "angle": 0, + "content": "In this paper, we propose TAPS as a data-free predictive scientific AI model to simulate ultra large-scale physical problems. 
This method eliminates the traditional necessity for offline training data generation, thereby exhibiting substantial speedup, memory efficiency, and storage gain as opposed to data-driven methods, making previously unsolvable large-scale and high-dimensional problems manageable. The convergence of the TAPS solver is numerically investigated. As a demonstration of the capabilities of TAPS, we showcase the application of the TAPS solver for a multi-layer multi-track additive manufacturing problem that is intractable with classical numerical algorithms." + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.321, + 0.893, + 0.448 + ], + "angle": 0, + "content": "TAPS is well suited for a broad range of science or engineering problems where: 1) the finite element method and other conventional numerical methods are unsuitable due to excessively long simulation times or high RAM and storage demands needed to achieve high accuracy, 2) the model must accommodate design parameters as inputs, or 3) fast prediction is required once the model is obtained. The INN hierarchical neural network interpolants, particularly C-HiDeNN used by TAPS, demonstrate superior performance compared to other machine learning models. For the solving tasks, it has shown superior performance compared to physics-informed neural network (PINN) [7], CP-PINN [42], and Kolmogorov-Arnold Networks (KAN) [43] with orders of magnitude faster solution time, higher accuracy, and better scalability to ultra large-scale and high-dimensional PDEs [44]. INN interpolants can also be effectively used in data-driven training tasks and show better training accuracy compared to MLP, SIREN [45] and KAN [11, 44]." + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.449, + 0.894, + 0.575 + ], + "angle": 0, + "content": "As illustrated in Fig. 17, the significance of this work in the area of predictive scientific AI models aligns with the trend in other areas in AI, such as language and vision AI models. 
The evolution of language models has seen dramatic growth, beginning with foundational models like BERT [46], followed by the GPT series [47], which expanded transformer architecture to hundreds of billions of parameters, showcasing powerful generative capabilities. In vision models, AlexNet [48] marked a breakthrough, while advancements like DIT-XL [49] and SORA [50] integrated diffusion models to handle more complex and challenging visual tasks. This trajectory of increasing scale and sophistication from its network architecture (i.e., transformer of language models and diffusion of vision models) is mirrored in predictive scientific AI where TAPS represents a significant advancement in its network architecture, INN." + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.577, + 0.894, + 0.689 + ], + "angle": 0, + "content": "A major critical issue in the emerging large AI models is a more sophisticated model will generally lead to a larger amount of training data, more expensive training costs, and longer inference time. The advent of DeepSeek R1 breaks this rule since it has fewer parameters, much less training cost, faster inference speed, yet still comparable accuracy compared to other state-of-the-art models due to its novel architecture and training techniques such as distillation methods [51]. For predictive scientific AI, we face even more pronounced challenges due to strict accuracy demands and the necessity for high-resolution physics for large-scale problems. As a result, the future of predictive scientific AI is still largely untapped. TAPS provides a promising solution to these emerging challenges by delivering a highly accurate, exceptionally fast, and memory and storage efficient scientific AI model." 
+ }, + { + "type": "text", + "bbox": [ + 0.102, + 0.69, + 0.894, + 0.747 + ], + "angle": 0, + "content": "In conclusion, the proposed TAPS computational framework offers substantial enhancements in computational efficiency, memory consumption, and storage demands for science and engineering simulations. As a result, TAPS paves a new path to address future challenges in ultra large-scale simulations pertinent to complex predictive scientific AI models." + }, + { + "type": "title", + "bbox": [ + 0.104, + 0.768, + 0.719, + 0.783 + ], + "angle": 0, + "content": "Appendix A. Solving nonlinear S-P-T PDEs: solution dependent material properties" + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.793, + 0.894, + 0.866 + ], + "angle": 0, + "content": "The algorithm 1 works for linear PDEs where the PDE coefficients remain constant. However, in many engineering applications, the PDE coefficients can be solution-dependent. For instance, material properties such as heat conductivity and heat capacity can be a function of temperature in additive manufacturing. In these cases, the PDE becomes non-linear which requires an efficient solution scheme. In this section, we solely focus on the space-time problems formulation of a nonlinear PDE. As a result, the product of density and heat capacity \\(\\rho c_{p}(u)\\) and conductivity" + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.879, + 0.511, + 0.891 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.227, + 0.133, + 0.775, + 0.365 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.353, + 0.381, + 0.645, + 0.394 + ], + "angle": 0, + "content": "Figure 17: Evolution of AI models for different tasks" + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.417, + 0.895, + 0.446 + ], + "angle": 0, + "content": "\\(k(u)\\) are no longer temperature independent as in Eq. 18. 
Similar to the linear problem shown before, the generalized Galerkin weak form is used to solve this equation." + }, + { + "type": "equation", + "bbox": [ + 0.205, + 0.454, + 0.893, + 0.488 + ], + "angle": 0, + "content": "\\[\n\\int_ {\\Omega} \\delta u \\nabla_ {x _ {t}} \\left[ \\rho c _ {p} (u) u \\right] d \\Omega - \\int_ {\\Omega} \\nabla_ {x _ {s}} \\delta u \\cdot k (u) \\nabla_ {x _ {s}} u d \\Omega + \\int_ {\\partial \\Omega_ {x _ {s}} \\otimes \\Omega_ {t}} \\delta u \\boldsymbol {q} \\cdot \\boldsymbol {n} d s d \\Omega_ {x _ {t}} = \\int_ {\\Omega} \\delta u b d \\Omega \\tag {A.1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.491, + 0.895, + 0.534 + ], + "angle": 0, + "content": "where \\( \\mathbf{q} \\) is the heat flux on the Neumann boundary. Since Eq. A.1 is a space-time integral, classical time-stepping based methods can't be directly used to update material parameters. Here we propose a global-local approach similar to the Large Time Increment (LATIN) method to effectively solve the above equations [52]." + }, + { + "type": "image", + "bbox": [ + 0.381, + 0.547, + 0.615, + 0.701 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.328, + 0.714, + 0.671, + 0.727 + ], + "angle": 0, + "content": "Figure A.18: Global-local approach for nonlinear TAPS solver" + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.741, + 0.895, + 0.828 + ], + "angle": 0, + "content": "As shown in Fig. A.18, we split the nonlinear problem into 2 stages, a linear global stage and a nonlinear local update stage. In the global stage, we assume the spatio-temporal \\( k(\\pmb{x}_s,x_t) \\) and \\( \\rho c_{p}(\\pmb{x}_{s},x_{t}) \\) are known. As a result, we treat the global problem as a linear problem and obtain \\( u(x_{s},x_{t}) \\) using the previously proposed method for linear problems. 
After \\( u(\\pmb{x}_s,x_t) \\) is updated in the global stage, we update \\( k(u) \\) and \\( \\rho c_{p}(u) \\) locally at each Gauss integration point according to material models \\( k(u) \\) and \\( \\rho c_{p}(u) \\). We repeat the global-local iteration until the variation of \\( k(u) \\) and \\( \\rho c_{p}(u) \\) between consecutive iterations meets the convergence criteria. The algorithm is summarized in Algorithm 2:" + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.879, + 0.509, + 0.89 + ], + "angle": 0, + "content": "23" + } + ], + [ + { + "type": "code_caption", + "bbox": [ + 0.106, + 0.132, + 0.711, + 0.146 + ], + "angle": 0, + "content": "Algorithm 2 Nonlinear TAPS solution scheme: PDE with solution dependent coefficients" + }, + { + "type": "algorithm", + "bbox": [ + 0.113, + 0.148, + 0.894, + 0.321 + ], + "angle": 0, + "content": "1: Initialize solution matrices with random values and update \\(\\rho_{c_p}(\\pmb{x}_s, x_t)\\) and \\(k(\\pmb{x}_s, x_t)\\). \n2: for iter\\(_\\gamma\\) = 1 to iter\\(_\\gamma_{max}\\) do \n3: for iter = 1 to iter\\(_max\\) do \n4: Update \\(\\rho_{c_p}(\\pmb{x}_s, x_t)\\) and \\(k(\\pmb{x}_s, x_t)\\) \n5: Use Algorithm 1 to solve solution \\(u(\\pmb{x}_s, x_t)\\) \n6: for \\(i = 1\\) to integration points do \n7: \\(\\rho_{c_p}(\\pmb{x}_s, x_t) = \\rho_{c_p}[u(\\pmb{x}_s, x_t)]\\) \n8: \\(k(\\pmb{x}_s, x_t) = k[u(\\pmb{x}_s, x_t)]\\) \n9: end for \n10: Check convergence \n11: end for \n12: end for" + }, + { + "type": "title", + "bbox": [ + 0.104, + 0.344, + 0.345, + 0.358 + ], + "angle": 0, + "content": "Appendix B. Mode compression" + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.368, + 0.894, + 0.412 + ], + "angle": 0, + "content": "One significant challenge in multi-track simulation in LPBF is the huge number of time steps required. It is impossible to resolve all the time steps with only a single space-time (S-T) slab. 
Hence, we split the whole layer scan into multiple S-T slabs and relate each S-T slab using the following equation." + }, + { + "type": "equation", + "bbox": [ + 0.341, + 0.418, + 0.893, + 0.436 + ], + "angle": 0, + "content": "\\[\n{ } ^ { [ \\mathcal { T } + 1 ] } u ( \\boldsymbol { x } _ { s } , x _ { t } ) = { } ^ { [ \\mathcal { T } ] } u ( \\boldsymbol { x } _ { s } , x _ { t } ^ { m a x } ) + { } ^ { [ \\mathcal { T } + 1 ] } u _ { 0 } ( \\boldsymbol { x } _ { s } , x _ { t } ) \\tag {B.1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.445, + 0.894, + 0.53 + ], + "angle": 0, + "content": "where \\([T + 1]u(\\pmb{x}_s, x_t)\\) refers to the solution at \\((\\mathcal{T} + 1)\\)-th space-time slab; \\([T + 1]u_0(\\pmb{x}_s, x_t)\\) refers to the solution of the homogeneous initial value problem of \\((\\mathcal{T} + 1)\\)-th space-time slab; \\([T]u(\\pmb{x}_s, x_t^{max})\\) is the solution of \\(\\mathcal{T}\\)-th space-time slab at the last time increment. As can be seen from Eq. B.1, we impose the last time increment solution of previous space-time slab as the initial condition for the next space-time slab. This is efficiently implemented by adding the TD form of the last increment as new modes in the current space-slab solution. However, for large-scale computations requiring thousands of slabs, directly concatenating modes can result in substantial storage demands." + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.53, + 0.894, + 0.603 + ], + "angle": 0, + "content": "In mode compression, we aim to compress the number of modes for \\(\\left[{}^{\\mathcal{T}}\\right]u(\\boldsymbol{x}_s,x_t^{max})\\) because of its spatial dependence and naturally low-dimensional structure. Consequently, it can be effectively decomposed using only a few modes. 
Denote the TD form of the last time step solution of the previous space-time slab as \\(\\left[{}^{\\mathcal{T}}\\right]u(\\boldsymbol{x}_s,x_t^{max})^{TD}\\), we aim to find a compact form that can be represented with much fewer number of modes \\(\\left[{}^{\\mathcal{T}}\\right]u(\\boldsymbol{x}_s,x_t^{max})_F^{TD}\\). For notation simplicity, we omit \\(x_{t}^{max}\\) in the following equations. Consequently, the mode compression problem can be written as:" + }, + { + "type": "equation", + "bbox": [ + 0.401, + 0.609, + 0.892, + 0.628 + ], + "angle": 0, + "content": "\\[\n{ } ^ { [ \\mathcal { T } ] } u ( \\boldsymbol { x } _ { s } ) _ { F } ^ { T D } - { } ^ { [ \\mathcal { T } ] } u ( \\boldsymbol { x } _ { s } ) ^ { T D } = 0 \\tag {B.2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.104, + 0.635, + 0.592, + 0.652 + ], + "angle": 0, + "content": "The weighted sum residual form is used to approximate \\([T]u(\\pmb{x}_s,x_{t - 1})_F^{TD}\\) .." + }, + { + "type": "equation", + "bbox": [ + 0.322, + 0.66, + 0.893, + 0.692 + ], + "angle": 0, + "content": "\\[\n\\int_ {\\Omega_ {x}} \\delta^ {[ \\mathcal {T} ]} u \\left(\\boldsymbol {x} _ {s}\\right) _ {F} ^ {T D} \\cdot \\left[ ^ {[ \\mathcal {T} ]} u \\left(\\boldsymbol {x} _ {s}\\right) _ {F} ^ {T D} - ^ {[ \\mathcal {T} ]} u \\left(\\boldsymbol {x} _ {s}\\right) ^ {T D} \\right] d \\boldsymbol {x} _ {s} = 0 \\tag {B.3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.104, + 0.699, + 0.457, + 0.714 + ], + "angle": 0, + "content": "Eq. B.3 can be efficiently solved using Algorithm 1." + }, + { + "type": "title", + "bbox": [ + 0.104, + 0.733, + 0.312, + 0.748 + ], + "angle": 0, + "content": "Appendix C. Element birth" + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.757, + 0.894, + 0.842 + ], + "angle": 0, + "content": "In the LPBF process, once the printing is finished for the current layer, a new layer of powder is deposited on top of the existing layer. 
This necessitates modeling the new layer with additional elements. Various studies have investigated different approaches for element birth techniques. While some researchers opt for activating small sections of geometry incrementally, others apply the technique by spreading the deposition across an entire layer or multiple layers simultaneously. The most widely adopted approach is to activate an entire layer and then scan the heat source over it [53]." + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.843, + 0.894, + 0.872 + ], + "angle": 0, + "content": "In TAPS, we propose a new scheme to generate new layers of elements. In this scheme, new elements are added only in the \\( x_{3} \\) direction, since the plan dimension doesn't change in the printing process. Therefore, as opposed to" + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.879, + 0.509, + 0.89 + ], + "angle": 0, + "content": "24" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.102, + 0.132, + 0.895, + 0.177 + ], + "angle": 0, + "content": "full-scale classical numerical methods, TAPS enables marginal overhead in generating new layers of elements with extra grid points added only in the \\( x_{3} \\) dimension. The solution scheme for multi-layer multi-track LPBF simulation using TAPS can be summarized in Algorithm 3." 
+ }, + { + "type": "code_caption", + "bbox": [ + 0.106, + 0.189, + 0.56, + 0.204 + ], + "angle": 0, + "content": "Algorithm 3 Multi-layer multi-track LPBF simulation using TAPS" + }, + { + "type": "algorithm", + "bbox": [ + 0.114, + 0.206, + 0.657, + 0.433 + ], + "angle": 0, + "content": "1: for \\( n_{layer} = 1 \\) to \\( n_{layerTotal} \\) do \n2: Initialize solution matrices with random values for the new layer \n3: Compute the updated stiffness matrix and force vector for the \\( x_{3} \\) direction \n4: for \\( n_{track} = 1 \\) to \\( n_{tracktotal} \\) do \n5: for \\( iter = 1 \\) to \\( iter_{max} \\) do \n6: for \\( d = 1 \\) to dimension do \n7: Compute solution vectors according to Algorithm 1 or 2 \n8: end for \n9: Check convergence \n10: end for \n11: Compress modes \n12: Concatenate compressed modes to previous tracks as new modes \n13: end for \n14: Compress modes \n15: Concatenate compressed modes to previous layers as new modes \n16: end for" + }, + { + "type": "title", + "bbox": [ + 0.105, + 0.47, + 0.188, + 0.484 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.494, + 0.893, + 0.517 + ], + "angle": 0, + "content": "[1] Zongyi Li, Nikola Kovachki, Kamyar Azizzadenesheli, Burigede Liu, Kaushik Bhattacharya, Andrew Stuart, and Anima Anandkumar. Fourier neural operator for parametric partial differential equations. arXiv preprint arXiv:2010.08895, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.518, + 0.894, + 0.539 + ], + "angle": 0, + "content": "[2] Owen Huang, Sourav Saha, Jiachen Guo, and Wing Kam Liu. An introduction to kernel and operator learning methods for homogenization by self-consistent clustering analysis. Computational Mechanics, 72(1):195-219, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.54, + 0.872, + 0.551 + ], + "angle": 0, + "content": "[3] Can AI Solve Science? https://writings.sthenwolfram.com/2024/03/can-ai-solve-science/. 
[Accessed 03-04-2025]." + }, + { + "type": "ref_text", + "bbox": [ + 0.112, + 0.552, + 0.894, + 0.573 + ], + "angle": 0, + "content": "[4] A new golden age of discovery — deepmind.google. https://deepmind.google/public-policy/ai-for-science/. [Accessed 03-04-2025]." + }, + { + "type": "ref_text", + "bbox": [ + 0.112, + 0.574, + 0.895, + 0.596 + ], + "angle": 0, + "content": "[5] Wing Kam Liu, Shaofan Li, and Harold S Park. Eighty years of the finite element method: Birth, evolution, and future. Archives of Computational Methods in Engineering, 29(6):4431-4453, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.112, + 0.597, + 0.894, + 0.629 + ], + "angle": 0, + "content": "[6] Ye Lu, Hengyang Li, Lei Zhang, Chanwook Park, Satyajit Mojumder, Stefan Knapik, Zhongsheng Sang, Shaoqiang Tang, Daniel W Apley, Gregory J Wagner, et al. Convolution hierarchical deep-learning neural networks (c-hidenn): finite elements, isogeometric analysis, tensor decomposition, and beyond. Computational Mechanics, 72(2):333-362, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.112, + 0.63, + 0.894, + 0.652 + ], + "angle": 0, + "content": "[7] Maziar Raissi, Paris Perdikaris, and George E Karniadakis. Physics-informed neural networks: A deep learning framework for solving forward and inverse problems involving nonlinear partial differential equations. Journal of Computational physics, 378:686-707, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.112, + 0.653, + 0.894, + 0.674 + ], + "angle": 0, + "content": "[8] Enrui Zhang, Ming Dao, George Em Karniadakis, and Subra Suresh. Analyses of internal structures and defects in materials using physics-informed neural networks. Science advances, 8(7):eabk0644, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.675, + 0.894, + 0.697 + ], + "angle": 0, + "content": "[9] Nick McGreavy and Ammar Hakim. Weak baselines and reporting biases lead to overoptimism in machine learning for fluid-related partial differential equations. 
Nature Machine Intelligence, 6(10):1256-1269, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.106, + 0.698, + 0.893, + 0.719 + ], + "angle": 0, + "content": "[10] Junwoo Cho, Seungtae Nam, Hyunmo Yang, Seok-Bae Yun, Youngjoon Hong, and Eunbyung Park. Separable physics-informed neural networks. Advances in Neural Information Processing Systems, 36, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.106, + 0.72, + 0.894, + 0.752 + ], + "angle": 0, + "content": "[11] Chanwook Park, Sourav Saha, Jiachen Guo, Hantao Zhang, Xiaoyu Xie, Miguel A Bessa, Dong Qian, Wei Chen, Gregory J Wagner, Jian Cao, et al. Engineering software 2.0 by interpolating neural networks: unifying training, solving, and calibration. arXiv preprint arXiv:2404.10296, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.106, + 0.753, + 0.893, + 0.775 + ], + "angle": 0, + "content": "[12] Lei Zhang, Lin Cheng, Hengyang Li, Jiaying Gao, Cheng Yu, Reno Domel, Yang Yang, Shaoqiang Tang, and Wing Kam Liu. Hierarchical deep-learning neural networks: finite elements and beyond. Computational Mechanics, 67:207-230, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.106, + 0.776, + 0.893, + 0.798 + ], + "angle": 0, + "content": "[13] Lei Zhang, Ye Lu, Shaoqiang Tang, and Wing Kam Liu. Hidenn-td: reduced-order hierarchical deep learning neural networks. Computer Methods in Applied Mechanics and Engineering, 389:114414, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.106, + 0.799, + 0.894, + 0.831 + ], + "angle": 0, + "content": "[14] Chanwook Park, Ye Lu, Sourav Saha, Tianju Xue, Jiachen Guo, Satyajit Mojumder, Daniel W Apley, Gregory J Wagner, and Wing Kam Liu. Convolution hierarchical deep-learning neural network (c-hidenn) with graphics processing unit (gpu) acceleration. Computational Mechanics, 72(2):383-409, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.106, + 0.832, + 0.893, + 0.866 + ], + "angle": 0, + "content": "[15] Sourav Saha, Zhengtao Gan, Lin Cheng, Jiaying Gao, Orion L Kafka, Xiaoyu Xie, Hengyang Li, Mahsa Tajdari, H Alicia Kim, and Wing Kam Liu. Hierarchical deep learning neural network (hidenn): an artificial intelligence (ai) framework for computational science and engineering. Computer Methods in Applied Mechanics and Engineering, 373:113452, 2021." + }, + { + "type": "list", + "bbox": [ + 0.106, + 0.494, + 0.895, + 0.866 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.879, + 0.509, + 0.89 + ], + "angle": 0, + "content": "25" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.106, + 0.134, + 0.893, + 0.158 + ], + "angle": 0, + "content": "[16] Yingjian Liu, Chanwook Park, Ye Lu, Satyajit Mojumder, Wing Kam Liu, and Dong Qian. Hidenn-fem: a seamless machine learning approach to nonlinear finite element analysis. Computational Mechanics, 72(1):173-194, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.159, + 0.892, + 0.18 + ], + "angle": 0, + "content": "[17] Francisco Chinesta, Amine Ammar, Adrien Leygue, and Roland Keunings. An overview of the proper generalized decomposition with applications in computational rheology. Journal of Non-Newtonian Fluid Mechanics, 166(11):578-592, 2011." + }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.181, + 0.892, + 0.202 + ], + "angle": 0, + "content": "[18] Francisco Chinesta, Roland Keunings, and Adrien Leygue. The proper generalized decomposition for advanced numerical simulations: a primer. Springer Science & Business Media, 2013." + }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.203, + 0.892, + 0.224 + ], + "angle": 0, + "content": "[19] Anthony Nouy. A priori model reduction through proper generalized decomposition for solving time-dependent partial differential equations. 
Computer Methods in Applied Mechanics and Engineering, 199(23-24):1603-1626, 2010." + }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.225, + 0.892, + 0.259 + ], + "angle": 0, + "content": "[20] Hengyang Li, Stefan Knapik, Yangfan Li, Chanwook Park, Jiachen Guo, Satyajit Mojumder, Ye Lu, Wei Chen, Daniel W Apley, and Wing Kam Liu. Convolution hierarchical deep-learning neural network tensor decomposition (c-hidenn-td) for high-resolution topology optimization. Computational Mechanics, 72(2):363-382, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.26, + 0.892, + 0.281 + ], + "angle": 0, + "content": "[21] Ehsan Kharazmi, Zhongqiang Zhang, and George Em Karniadakis. hp-vpinns: Variational physics-informed neural networks with domain decomposition. Computer Methods in Applied Mechanics and Engineering, 374:113547, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.282, + 0.892, + 0.292 + ], + "angle": 0, + "content": "[22] Thomas JR Hughes. The finite element method: linear static and dynamic finite element analysis. Courier Corporation, 2003." + }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.293, + 0.753, + 0.303 + ], + "angle": 0, + "content": "[23] Tamara G Kolda and Brett W Bader. Tensor decompositions and applications. SIAM review, 51(3):455-500, 2009." + }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.304, + 0.765, + 0.314 + ], + "angle": 0, + "content": "[24] Junuthula Narasimha Reddy. An introduction to the finite element method, volume 3. McGraw-Hill New York, 2005." + }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.315, + 0.892, + 0.337 + ], + "angle": 0, + "content": "[25] Thomas JR Hughes and Gregory M Hulbert. Space-time finite element methods for elastodynamics: formulations and error estimates. Computer methods in applied mechanics and engineering, 66(3):339-363, 1988." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.338, + 0.892, + 0.36 + ], + "angle": 0, + "content": "[26] Wing Kam Liu, Ted Belytschko, and A. Mani. Probabilistic finite elements for nonlinear structural dynamics. Computer Methods in Applied Mechanics and Engineering, 56(1):61-81, 1986." + }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.361, + 0.892, + 0.381 + ], + "angle": 0, + "content": "[27] Wing Kam Liu, Ted Belytschko, and A. Mani. Random field finite elements. International Journal for Numerical Methods in Engineering, 23(3):1831-1845, 1986." + }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.382, + 0.892, + 0.404 + ], + "angle": 0, + "content": "[28] Shuheng Liao, Ashkan Golgoon, Mojtaba Mozaffar, and Jian Cao. Efficientgpu-accelerated thermomechanical solver for residual stress prediction in additive manufacturing. Computational Mechanics, 71(5):879-893, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.405, + 0.892, + 0.437 + ], + "angle": 0, + "content": "[29] Amine Ammar, Bechir Mokdad, Francisco Chinesta, and Roland Keunings. A new family of solvers for some classes of multidimensional partial differential equations encountered in kinetic theory modeling of complex fluids. Journal of non-Newtonian fluid Mechanics, 139(3): 153-176, 2006." + }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.438, + 0.892, + 0.46 + ], + "angle": 0, + "content": "[30] Abderrahman Bouhamidi and Khalide Jbilou. A note on the numerical approximate solutions for generalized sylvester matrix equations with applications. Applied Mathematics and Computation, 206(2):687-694, 2008." + }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.461, + 0.892, + 0.483 + ], + "angle": 0, + "content": "[31] Ya-Jun Xie and Chang-Feng Ma. The scaling conjugate gradient iterative method for two types of linear matrix equations. Computers & Mathematics with Applications, 70(5):1098-1113, 2015." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.484, + 0.892, + 0.506 + ], + "angle": 0, + "content": "[32] Ulrich Langer and Marco Zank. Efficient direct space-time finite element solvers for parabolic initial-boundary value problems in anisotropic sobolev spaces. SIAM Journal on Scientific Computing, 43(4):A2714-A2736, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.507, + 0.892, + 0.528 + ], + "angle": 0, + "content": "[33] A Sh Agazhanov, DA Samoshkin, and Yu M Kozlovskii. Thermophysical properties of inconel 718 alloy. In Journal of Physics: Conference Series, volume 1382, page 012175. IOP Publishing, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.529, + 0.892, + 0.561 + ], + "angle": 0, + "content": "[34] Tianju Xue, Shuheng Liao, Zhengtao Gan, Chanwook Park, Xiaoyu Xie, Wing Kam Liu, and Jian Cao. Jax-fem: A differentiablegpu-accelerated 3d finite element solver for automatic inverse design and mechanistic data science. Computer Physics Communications, 291: 108802, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.563, + 0.892, + 0.584 + ], + "angle": 0, + "content": "[35] Joseph P Leonor and Gregory J Wagner. Go-melt: GPU-optimized multilevel execution of lpbf thermal simulations. Computer Methods in Applied Mechanics and Engineering, 426:116977, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.585, + 0.892, + 0.617 + ], + "angle": 0, + "content": "[36] Dana Jacobsen, Julien Thibault, and Inanc Senocak. An mpi-cuda implementation for massively parallel incompressible flow computations on multi-gpu clusters. In 48th AIAA Aerospace Sciences Meeting Including the New Horizons Forum and Aerospace Exposition, page 522, 2010." + }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.618, + 0.892, + 0.64 + ], + "angle": 0, + "content": "[37] Thomas JR Hughes, Gonzalo R Feijóo, Luca Mazzei, and Jean-Baptiste Quincy. The variational multiscale method—a paradigm for computational mechanics. 
Computer methods in applied mechanics and engineering, 166(1-2):3-24, 1998." + }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.641, + 0.892, + 0.663 + ], + "angle": 0, + "content": "[38] Xingshi Wang and Lucy T Zhang. Modified immersed finite element method for fully-coupled fluid-structure interactions. Computer methods in applied mechanics and engineering, 267:150–169, 2013." + }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.664, + 0.892, + 0.697 + ], + "angle": 0, + "content": "[39] Wing Kam Liu, Yaling Liu, David Farrell, Lucy Zhang, X Sheldon Wang, Yoshio Fukui, Neelesh Patankar, Yongjie Zhang, Chandrajit Bajaj, Junghoon Lee, et al. Immersed finite element method and its applications to biological systems. Computer methods in applied mechanics and engineering, 195(13-16):1722-1749, 2006." + }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.698, + 0.892, + 0.718 + ], + "angle": 0, + "content": "[40] Wing Kam Liu, Do Wan Kim, and Shaoqiang Tang. Mathematical foundations of the immersed finite element method. Computational Mechanics, 39:211-222, 2007." + }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.72, + 0.892, + 0.742 + ], + "angle": 0, + "content": "[41] Adrian M Kopacz, Woon-Hong Yeo, Jae-Hyun Chung, and Wing Kam Liu. Nanoscale sensor analysis using the immersed molecular electrokinetic finite element method. Nanoscale, 4(16):5189-5194, 2012." + }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.743, + 0.892, + 0.765 + ], + "angle": 0, + "content": "[42] Sai Karthikeya Vemuri, Tim Büchner, Julia Niebling, and Joachim Denzler. Functional tensor decompositions for physics-informed neural networks. In International Conference on Pattern Recognition, pages 32-46. Springer, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.766, + 0.892, + 0.787 + ], + "angle": 0, + "content": "[43] Ziming Liu, Yixuan Wang, Sachin Vaidya, Fabian Ruehle, James Halverson, Marin Soljačić, Thomas Y Hou, and Max Tegmark. 
Kan: Kolmogorov-arnold networks. arXiv preprint arXiv:2404.19756, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.788, + 0.892, + 0.82 + ], + "angle": 0, + "content": "[44] Jiachen Guo, Xiaoyu Xie, Chanwook Park, Hantao Zhang, Matthew Politis, Gino Domel, T.J.R Hughes, and Wing Kam Liu. Interpolation neural network-tensor decomposition (inn-td): a scalable and interpretable approach for large-scale physics-based problems. arXiv preprint arXiv:2503.02041, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.821, + 0.892, + 0.843 + ], + "angle": 0, + "content": "[45] Vincent Sitzmann, Julien Martel, Alexander Bergman, David Lindell, and Gordon Wetzstein. Implicit neural representations with periodic activation functions. Advances in neural information processing systems, 33:7462-7473, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.844, + 0.892, + 0.866 + ], + "angle": 0, + "content": "[46] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 conference of the North American chapter of the association for computational linguistics: human" + }, + { + "type": "list", + "bbox": [ + 0.106, + 0.134, + 0.893, + 0.866 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.88, + 0.509, + 0.89 + ], + "angle": 0, + "content": "26" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.135, + 0.134, + 0.584, + 0.146 + ], + "angle": 0, + "content": "language technologies, volume 1 (long and short papers), pages 4171-4186, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.147, + 0.893, + 0.158 + ], + "angle": 0, + "content": "[47] Alec Radford, Karthik Narasimhan, Tim Salimans, Ilya Sutskever, et al. Improving language understanding by generative pre-training. 2018." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.159, + 0.892, + 0.179 + ], + "angle": 0, + "content": "[48] Alex Krizhevsky, Ilya Sutskever, and Geoffrey E Hinton. Imagenet classification with deep convolutional neural networks. Advances in neural information processing systems, 25, 2012." + }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.181, + 0.892, + 0.202 + ], + "angle": 0, + "content": "[49] William Peebles and Saining Xie. Scalable diffusion models with transformers. In Proceedings of the IEEE/CVF international conference on computer vision, pages 4195-4205, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.203, + 0.892, + 0.224 + ], + "angle": 0, + "content": "[50] Yixin Liu, Kai Zhang, Yuan Li, Zhiling Yan, Chujie Gao, Ruoxi Chen, Zhengqing Yuan, Yue Huang, Hanchi Sun, Jianfeng Gao, et al. Sora: A review on background, technology, limitations, and opportunities of large vision models. arXiv preprint arXiv:2402.17177, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.225, + 0.892, + 0.247 + ], + "angle": 0, + "content": "[51] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.248, + 0.831, + 0.259 + ], + "angle": 0, + "content": "[52] Pierre Ladevèze. On reduced models in nonlinear solid mechanics. European Journal of Mechanics-A/Solids, 60:227-237, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.107, + 0.26, + 0.892, + 0.281 + ], + "angle": 0, + "content": "[53] Richard J Williams, Catrin M Davies, and Paul A Hooper. A pragmatic part scale model for residual stress and distortion prediction in powder bed fusion. Additive Manufacturing, 22:416-425, 2018." 
+ }, + { + "type": "list", + "bbox": [ + 0.107, + 0.134, + 0.893, + 0.281 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.879, + 0.508, + 0.89 + ], + "angle": 0, + "content": "27" + } + ] +] \ No newline at end of file diff --git a/data/2025/2503_13xxx/2503.13933/abcf6c14-6474-4c8a-adec-45f736d3be15_origin.pdf b/data/2025/2503_13xxx/2503.13933/abcf6c14-6474-4c8a-adec-45f736d3be15_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..3409cf5121a13e39f6f441badbec6a2b7013be57 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/abcf6c14-6474-4c8a-adec-45f736d3be15_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d1193ec2d12500fde0774dac9f694f7c614fc51eb236ac106dcd9f599f4862f6 +size 3387394 diff --git a/data/2025/2503_13xxx/2503.13933/full.md b/data/2025/2503_13xxx/2503.13933/full.md new file mode 100644 index 0000000000000000000000000000000000000000..7e44c67847a32d4eb50c324112197ea999578b83 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/full.md @@ -0,0 +1,860 @@ +# Tensor-decomposition-based A Priori Surrogate (TAPS) modeling for ultra large-scale simulations + +Jiachen Guo $^{a}$ , Gino Domel $^{b}$ , Chanwook Park $^{b}$ , Hantao Zhang $^{a}$ , Ozgur Can Gumus $^{b}$ , Ye Lu $^{c}$ , Gregory J. Wagner $^{b}$ , Dong Qian $^{d,e}$ , Jian Cao $^{b}$ , Thomas J.R. Hughes $^{f}$ , Wing Kam Liu $^{b,e}$ + +aTheoretical and Applied Mechanics Program, Northwestern University, 2145 Sheridan Road, Evanston, 60201, IL, USA + +$^{b}$ Department of Mechanical Engineering, Northwestern University, 2145 Sheridan Road, Evanston, IL, USA + +$^{c}$ Department of Mechanical Engineering, University of Maryland, Baltimore County, 1000 Hilltop Circle, Baltimore, 21250, MD, USA + +$^{d}$ Department of Mechanical Engineering, University of Texas, Dallas, 800 W. 
Campbell Road, Richardson, 75080, TX, USA + +${}^{e}$ Co-Founders of HIDENN-AI,LLC,1801 Maple Ave,Evanston,60201,IL,USA + +fInstitute for Computational Engineering and Sciences, The University of Texas at Austin, 201 East 24th Street, Stop + +C0200, Austin, 78712, TX, USA + +# Abstract + +A data-free, predictive scientific AI model, Tensor-decomposition-based A Priori Surrogate (TAPS), is proposed for tackling ultra large-scale engineering simulations with significant speedup, memory savings, and storage gain. TAPS can effectively obtain surrogate models for high-dimensional parametric problems with equivalent zetta-scale $(10^{21})$ degrees of freedom (DoFs). TAPS achieves this by directly obtaining reduced-order models through solving governing equations with multiple independent variables such as spatial coordinates, parameters, and time. The paper first introduces an AI-enhanced finite element-type interpolation function called convolution hierarchical deep-learning neural network (C-HiDeNN) with tensor decomposition (TD). Subsequently, the generalized space-parameter-time Galerkin weak form and the corresponding matrix form are derived. Through the choice of TAPS hyperparameters, an arbitrary convergence rate can be achieved. To show the capabilities of this framework, TAPS is then used to simulate a large-scale additive manufacturing process as an example and achieves around 1,370x speedup, 14.8x memory savings, and 955x storage gain compared to the finite difference method with 3.46 billion spatial degrees of freedom (DoFs). As a result, the TAPS framework opens a new avenue for many challenging ultra large-scale engineering problems, such as additive manufacturing and integrated circuit design, among others. + +# Keywords: + +Predictive scientific AI, hierarchical neural network finite element interpolation, generalized Galerkin formulation for parametric PDEs, large-scale simulation, additive manufacturing + +# 1. 
Introduction + +Precision is a fundamental aspect of scientific and engineering applications, especially in advanced industries such as semiconductor manufacturing. The capability to perform accurate computational simulations for these applications is essential for advancing these fields. Precise simulations enable the optimization of design and manufacturing processes by utilizing virtual prototypes and process simulations. This reduces the need for expensive physical prototypes and tests and provides virtual prototypes in circumstances where physical ones are impractical. Traditional computational methods for engineering simulations, however, suffer from prohibitive computational costs when attempting to accurately predict responses across multiple length and time scales (typically done by increasing mesh resolution), making achieving high precision for large-scale problems challenging. In fact, the random-access memory (RAM) requirement can be far beyond the capability of typical workstations and may require massive parallelization on supercomputers. In other industries, such as additive manufacturing (a term encompassing all forms of 3D printing), the vast design space further exacerbates these limitations, as numerous expensive simulations are required to thoroughly explore the effects of different design parameters. + +Table 1: Nomenclature + +
VariablesDescription
uh(x)Interpolated scalar field defined inside of an element
AeNodes within element e
AsNodes within patch domain of element e
WiS,a,p,j(x)Convolution patch function at node j for i-th nodal patch with hyperparameters s, a, and p
MTotal number of modes in tensor decomposition (TD)
mIndex for mode
DTotal number of dimensions
dIndex for dimension
xIndependent variable which includes spatial variable xs, parametric variable xp and temporal variable xt
Nd(xd;ad,sd,pd)Global C-HiDeNN shape function for dimension d with dilation parameter ad, patch size sd and reproducing polynomial order pd
bSource function in laser powder bed fusion process
uTDApproximation of the solution field expressed via TD
TTime slab index for space-parameter-time problem
kThermal conductivity
ρMaterial density
cpHeat capacity
ηMaterial absorptivity
PLaser power
rStandard deviation that characterizes the width of the heat source
qHeat flux
qconvHeat flux from convection
qradHeat flux from radiation
qevapHeat flux from evaporative cooling
hconvConvection coefficient
σSBStefan-Boltzman constant
mevapMass evaporation flux
LevapHeat of evaporation
BndShape function derivative
UdSolution matrix (Rnd×M) for dimension d that contains all the modes
+ +To fulfill the ever-growing challenges in predictive scientific models, data-driven surrogates, especially artificial intelligence (AI)-based models, present an alternative to conventional numerical models by significantly reducing the forward prediction time. These models can be treated as a reasonably accurate, reduced representation of real physics. Once trained properly, they can be used for fast prediction on unseen parameters [1, 2]. However, it is still uncertain whether a data-driven surrogate model can be trained to achieve the level of accuracy required in engineering design. Recently, it has been pointed out by Wolfram Research that standard AI models cannot easily fulfill the high accuracy requirement of predictive scientific tasks [3]. Furthermore, as suggested by Google Deepmind, the real potential of AI models lies in enhancing, rather than thoroughly replacing, well-established classical numerical algorithms [4]. In addition, the current standard data-driven approaches follow an offline-online scheme, where the offline stage involves a huge amount of training data, which can again be prohibitive. For problems with known physics, this data can be obtained by running multiple expensive simulations relying on standard numerical algorithms. In scenarios involving high-dimensional design spaces governed by parameterized partial differential equations (PDEs), such as in additive manufacturing (AM), conducting repetitive simulations with varying parameters in this offline stage becomes exceedingly expensive both in terms of computation time and data storage. 
+ +To avoid the prohibitive offline stage, one can try to obtain a surrogate model directly from governing equations + +![](images/ba32edcab434e8ad5e0767139009d16908e9de00ed4f1dfddd99493d69460436.jpg) +Figure 1: The parameterized PDE is a PDE that includes parameters $\mathbf{x}_p$ that can vary and influence the solution $\mathbf{u}(\mathbf{x}_s, \mathbf{x}_p, x_t)$ , where $\mathbf{x}_s$ and $x_t$ are the spatial and time variables, respectively. The a priori approach directly finds a surrogate model from the governing parameterized PDE, whereas the data-driven approach has to solve the parameter-fixed PDE on sampled parameters to generate simulation data, followed by training tasks. FEM: Finite Element Method [5], C-HiDeNN: Convolution Hierarchical Deep-learning Neural Network [6], TAPS: Tensor-decomposition-based A Priori Surrogate, PINN: Physics Informed Neural Network [7]. + +without generating any data. As shown in Fig. 1 denoted by the words "A Priori", this approach aims to find the surrogate model before actually "seeing" any data. For example, multilayer perceptron (MLP) architectures have been vastly used in physics-informed neural networks (PINNs) and their variations to approximate solutions to PDEs without requiring data [7, 8]. However, the results of these efforts are underwhelming, as it has been shown that PINN results have often been compared to weak baselines [9], and it is unclear if they guarantee convergence. Moreover, this method is still susceptible to high computational costs for both large-scale and high-dimensional problems [10]. + +![](images/436e24e7870b3692253c8bb03616ef37d9e6600fc07fc58cdc25ddba38c6aade.jpg) +Figure 2: Development history of INN [11]. Figures are borrowed from references: HiDeNN [12], HiDeNN-TD [13], C-HiDeNN [14], C-HiDeNN-TD [6]. + +Instead of developing solvers solely based on machine learning concepts, a new class of Hierarchical Deep-learning Neural Networks (HiDeNN) has been developed recently. 
This network architecture incorporates principles from the finite element method (FEM) to construct its architecture [15, 12]. Originally designed to advance FEM as opposed to solving parameterized PDEs, this approach significantly enhances computational accuracy and efficiency for both linear and nonlinear problems compared to standard FEM [16]. HiDeNN was then enhanced by adding an additional hidden layer in the form of a nonlinear convolutional filter, formulating a new neural network architecture named Convolutional HiDeNN (C-HiDeNN) [6, 14]. C-HiDeNN mimics the structure of the generalized finite element method but leverages machine learning to optimize its hyperparameters to further improve accuracy and efficiency. Arbitrary orders of convergence have been observed for C-HiDeNN despite utilizing a linear finite element mesh [14]. Although these methods offer greater accuracy with fewer DoFs, like FEM, they still encounter computational challenges such as balancing memory usage against mesh resolution, which limits their efficiency in modeling ultra large-scale and high-dimensional problems. Therefore, it becomes necessary to employ model order reduction techniques to address these limitations. + +Model order reduction techniques have been widely used to tackle the ever-growing challenges from high-dimensional and large-scale problems. For example, proper generalized decomposition (PGD) [17, 18, 19] has been proposed to efficiently solve high-dimensional PDEs. Recently, tensor decomposition (TD) has been successfully leveraged within the HiDeNN framework. For example, Zhang showed that HiDeNN combined with TD (HiDeNN-TD) significantly improved the speed of HiDeNN while maintaining higher accuracy [13]. Li proposed C-HiDeNN combined with TD (C-HiDeNN-TD) for extremely large-scale nested topology optimization problems [20]. 
Recently, Park generalized the HiDeNN-family networks under the umbrella of Interpolating Neural Networks (INNs) and demonstrated that the network can be used for both data-driven learning and data-free (i.e., a priori) solving [11]. The development history of HiDeNN family networks and INN is summarized in Fig. 2. While INN clearly explains how to construct the network architecture, an efficient optimization scheme for solving ultra large-scale and high-dimensional problems remains underdeveloped. In this paper, ultra large-scale problems refer to problems on the zetta-scale $(10^{21})$ in terms of DoFs. + +The demand for high-precision engineering simulations and efficient solution schemes highlights the need for innovative modeling approaches that swiftly solve large-scale problems while optimizing the design space. This research aims to fulfill this need by developing tensor-decomposition-based A Priori Surrogate (TAPS), a data-free predictive AI model, which aims to enhance high-resolution capabilities while simultaneously optimizing computational efficiency with a minimal memory footprint, low data storage needs, and fast prediction. The proposed comprehensive framework sets a foundation for scalable, adaptable, and future-proof solutions to counter the ever-growing complexity in simulation-driven advanced industries. TAPS is particularly well-suited for engineering challenges where: 1) the finite element method and other conventional methods are unsuitable due to excessively long simulation times or high RAM and storage demands needed to achieve high accuracy, 2) the model must accommodate design parameters as inputs, or 3) fast prediction is required once the model is obtained. + +This paper is structured as follows. We first introduce the formulation of TAPS in section 2. 
In section 3, we examine the numerical convergence of TAPS for both space-time (S-T) and space-parameter-time (S-P-T) problems (i.e., problems that are dependent on spatial, parametric, and temporal inputs). In section 4, TAPS is applied to large-scale additive manufacturing problems that are considered intractable with standard numerical algorithms. This application effectively demonstrates TAPS's capability to address all of the three identified challenges. + +# 2. Theory + +# 2.1. Review of C-HiDeNN interpolation theory + +Leveraging the universal approximation theorem, multilayer perceptrons (MLPs) have been successfully applied as global basis functions in deep learning-based solvers [7]. However, as shown in Table 2, MLPs have a few potential caveats when approximating PDE solutions. To overcome these limitations, we leverage the Convolutional HiDeNN (C-HiDeNN) interpolation function, which leverages the merits of both locally supported finite element shape functions and the flexibility of machine learning. Note that C-HiDeNN also belongs to the INN category as shown in Fig. 2. C-HiDeNN maintains all the essential finite element approximation properties such as Kronecker delta and partition of unity [14]. + +Table 2: Comparison of MLP and C-HiDeNN as approximation functions of PDE solutions. + +
MLPC-HiDeNN
Boundary/initial conditionPenalty term in the loss function [7]Automatic satisfaction [6]
Convergence and stabilityStochastic in nature and not guaranteedShown for different PDEs [6]
Numerical integrationQuasi-Monte Carlo integration [21]Gaussian integration [22]
InterpretabilityBlack-box modelInterpretable [11]
+ +We first review the C-HiDeNN formulation as illustrated in Fig. 3 (a) [14]. A scalar field $u(\pmb{x})$ defined in each element within a domain $\Omega_{\pmb{x}}$ can be approximated using C-HiDeNN interpolation as: + +$$ +u _ {e} ^ {h} (\boldsymbol {x}) = \sum_ {i \in A ^ {e}} N _ {i} (\boldsymbol {x}) \sum_ {j \in A _ {s} ^ {i}} \mathcal {W} _ {s, a, p, j} ^ {i} (\boldsymbol {x}) u _ {j} = \sum_ {k \in A _ {s} ^ {e}} \widetilde {N} _ {k} (\boldsymbol {x}; s, a, p) u _ {k} \tag {1} +$$ + +![](images/26d6d373a42682554486e271777c219d82e17016041bbe53dc8f96b1cfed8012.jpg) +- $A^e$ : nodes at element $e$ +- s: patch size, integer +a: dilation parameter + +![](images/788d964946bd243c8ce37085ed0bd577906f91cee22b4fe5ceb8bc3ee147c7e1.jpg) +- $A_{\mathrm{s}}^{i}$ : patch domain at node $i$ with patch size $s$ +- $W_{a,p,j}^{i}$ : convolution interpolant for node $j$ +- $R_{i}(x)$ : radial basis function centered at node $i$ +1 $G$ : moment matrix +1 $\cdot x^{A_s^i}$ : nodal coordinates of nodes in $A_{s}^{i}$ + +Figure 3: (a) Covolution patch in 1D C-HiDeNN shape function (b) Construction of convolution patch function (c) C-HiDeNN shape function as MLP with 3 hidden layers +![](images/2bafb2a9d73b20b8974d709a877de33ff2d86e93482c01b687869081af82ca9e.jpg) +- $A_{s}^{e} = \bigcup_{i\in A^{e}}A_{s}^{i}$ : patch nodes of element $e$ with patch size $s$ +- $p$ : reproducing polynomial order,integer + +where $u_{j}$ is the nodal value and $u_{j} = u(\pmb{x}_{j})$ ; $N_{i}$ is the linear finite element shape function at node $j$ centered in $i$ -th nodal patch; $\mathcal{W}_{s,a,p,j}^{i}$ is the convolution patch function at node $i$ that can be represented with a partially connected MLP as illustrated in Fig. 3 (b). 
The convolution patch functions are controlled by three hyperparameters: patch size $s$ that controls nodal connectivity, dilation parameter $a$ that normalizes distances between patch nodes, and reproducing order $p$ that defines types/orders of activation functions to be reproduced by the patch functions. Due to the inherent local support nature of both $N_{i}$ and $\mathcal{W}_{s,a,p,j}^{i}$ , the C-HiDeNN shape function $\widetilde{N}_k(\pmb{x};s,a,p)$ is also locally supported. + +Similar to standard finite element, the approximation for the solution field can be written as: + +$$ +u ^ {h} (\boldsymbol {x}) = \sum_ {k} ^ {n n o d e} \widetilde {N} _ {k} (\boldsymbol {x}; s _ {k}, a _ {k}, p _ {k}) u _ {k} \tag {2} +$$ + +where $nnode$ is the total number of nodes and $k$ is the nodal index. It should be noted that the hyperparameters $s, a, p$ can vary across nodes since C-HiDeNN can optimize these hyperparameters like machine learning parameters, rendering an adaptable functional space without altering the number of global nodes or hidden layers. This clearly distinguishes C-HiDeNN from MLP, where the activation functions and network architectures are mostly fixed. + +The C-HiDeNN shape function $\widetilde{N}_k(\pmb{x})$ satisfies Kronecker-delta property at nodal positions [6] (hyperparameters $s, a, p$ are dropped for brevity): + +$$ +\widetilde {N} _ {k} \left(\boldsymbol {x} _ {l}\right) = \delta_ {k l} \tag {3} +$$ + +where the Kronecker delta is defined as: + +$$ +\delta_ {k l} = \left\{ \begin{array}{l l} 0 & \text {i f} k \neq l, \\ 1 & \text {i f} k = l. \end{array} \right. 
\tag {4} +$$ + +Thus, at the Dirichlet boundary node $\mathbf{x}_b$ where $u(\mathbf{x}_b) = u_b$ , C-HiDeNN automatically satisfies the Dirichlet boundary condition: + +$$ +u ^ {h} \left(\boldsymbol {x} _ {b}\right) = \sum_ {k} ^ {n n o d e} \widetilde {N} _ {k} \left(\boldsymbol {x} _ {b}\right) u _ {k} = u _ {b} \tag {5} +$$ + +Going forward, we will employ the C-HiDeNN shape function $\widetilde{N}_k(\pmb{x})$ as the locally supported basis function for the interpolation. + +# 2.2. Discrete Tensor decomposition + +Tensor decomposition is a mathematical technique used to break down a high-dimensional tensor, such as a 3D finite element solution, into a set of simpler components, making it easier to analyze, store, and process [23]. It generalizes matrix decomposition methods like singular value decomposition (SVD) to higher-order tensors. + +Consider a cubic spatial domain $\Omega_{x}$ discretized with a regular Cartesian grid where each grid point (or node) stores a scalar value (see Fig. 4). The discrete nodal values can be represented as a 3rd order tensor $u_{JK}$ where $I = 1,..,n_1;J = 1,\dots,n_2;K = 1,\dots,n_3$ . The number of DoFs for this structured mesh is $n_1\times n_2\times n_3$ . When high resolution is required for the analysis, as the case in AM simulations, the number of DoFs can be extremely large. To effectively reduce the DoFs, different discrete tensor decomposition methods can be used to project the original 3rd order tensor into lower order tensors. 
In this paper, we focus on CANDECOMP/PARAFAC (CP) decomposition, where the higher-order tensors are approximated using a finite sum of products of 1D vectors [23]: + +$$ +u _ {I J K} \approx u _ {I J K} ^ {T D} = \sum_ {m = 1} ^ {M} u _ {I m} ^ {[ 1 ]} u _ {J m} ^ {[ 2 ]} u _ {K m} ^ {[ 3 ]} \tag {6} +$$ + +where $M$ is defined as the total number of modes in CP decomposition; $u_{lm}^{[1]}$ refers to the projected 1D vector in the first dimension and $m$ -th mode; the superscript $[d]$ represents the dimension index and $d = 1,2,3$ ; the 1st subscript $I$ is the nodal index, and the 2nd subscript $m$ refers to the modal index. + +![](images/65f7d6cba907050260e3f59100990b220e18b3e661510dbe74314d86ce119fce.jpg) +(a) +Figure 4: (a) 3D Cartesian mesh. (b) Nodal values can be treated as a 3rd order tensor. + +![](images/3b85eb658518d72411e3ec79246a5857219aa58d19ca589a83cb951c71802013.jpg) +(b) + +As can be seen from Eq. 6, with CP decomposition, the total number of DoFs can be reduced from $n_1 \times n_2 \times n_3$ to $M \times (n_1 + n_2 + n_3)$ . Assuming $M$ does not increase when the mesh is refined along each dimension, then the solution matrix $u_{IJK}$ will have cubic growth, whereas CP decomposition $\sum_{m=1}^{M} u_{Im}^{[1]} u_{Jm}^{[2]} u_{Km}^{[3]}$ only exhibits linear growth, as shown in Fig. 5 (a). This reduction is paramount to making large-scale simulation achievable. + +As an extension of the previous case, we consider $D$ dimensional general time-dependent parametric problems where the independent variables $(x_{1},x_{2},\ldots ,x_{D})$ can be classified into 3 different categories, namely, spatial variables $\pmb{x}_s$ , parametric variables $\pmb{x}_p$ , and temporal variable $x_{t}$ . Spatial variables $\pmb{x}_s$ describe the spatial coordinates of the problem. Parametric variables $\pmb{x}_p$ can represent any PDE coefficients, initial/boundary conditions, or geometry descriptors as extra-coordinates. 
The temporal variable $x_{t}$ represents time. Assuming the spatial domain $\Omega_{\pmb{x}_s}$ is cubic, the parametric domain $\Omega_{\pmb{x}_p}$ is hypercubic and Cartesian grids are used for discretization, then the nodal solution to these problems can be written as a discrete $D$ -th order tensor $u_{I_1I_2,\dots,I_D}$ . Similarly, CP decomposition can be used to effectively decompose higher-order tensors into a finite sum of tensor products of 1D vectors. + +$$ +u _ {I _ {1} I _ {2}, \dots , I _ {D}} \approx u _ {I _ {1} I _ {2}, \dots , I _ {D}} ^ {T D} = \sum_ {m = 1} ^ {M} u _ {I _ {1} m} ^ {[ 1 ]} u _ {I _ {2} m} ^ {[ 2 ]} \dots u _ {I _ {D} m} ^ {[ D ]} \tag {7} +$$ + +![](images/27d2162fec1e0e4eba57ca6fdf9b4a421edbc273be308e22965966bcd12bf083.jpg) +(a) +Figure 5: Comparison of number of DoFs, (a) in terms of mesh size $n$ , (b) in terms of problem dimension $D$ + +![](images/2356fbd6103d0f1224ab953d79b4dd1accc5dfa6c206f9f994a88a02796022c1.jpg) +(b) + +If every dimension is discretized into $n$ grid points, then a $D$ -th order tensor will have DoFs of $n^D$ , whereas CP decomposition only requires $M \times D \times n$ DoFs. Consequently, CP decomposition can dramatically reduce the total DoFs of general high-dimensional parametric problems, as shown in Fig. 5 (b). + +# 2.3. TD interpolation in TAPS + +Assume that the $D$ -th order tensor $u_{I_1I_2,\dots,I_D}$ represents a $D$ -input one-output continuous function $u(\pmb{x})$ measured at a Cartesian grid discretized with $I_1, I_2, \dots, I_D$ grid points in each input dimension. The discrete tensor decomposition $u_{I_1I_2,\dots,I_D}^{TD}$ can only approximate the function $u(\pmb{x})$ at these grid points. In this case, how can we measure the value of the function on an arbitrary input $\pmb{x}$ with tensor decomposition? A natural answer is using C-HiDeNN interpolation functions. 
+ +Similar to standard finite element shape functions, for a 3D spatial problem discretized with a Cartesian grid, a 3D C-HiDeNN interpolation function can be rewritten as a tensor product of one-dimensional C-HiDeNN interpolation functions (hyperparameters $s$ , $a$ and $p$ will be dropped from now on for brevity): + +$$ +\widetilde {N} _ {k} \left(x _ {1}, x _ {2}, x _ {3}\right) = \widetilde {N} _ {I} ^ {[ 1 ]} \left(x _ {1}\right) \widetilde {N} _ {J} ^ {[ 2 ]} \left(x _ {2}\right) \widetilde {N} _ {K} ^ {[ 3 ]} \left(x _ {3}\right) \tag {8} +$$ + +where the superscript refers to the dimension of the 1D C-HiDeNN shape function. Therefore, we can rewrite Eq. 2 as: + +$$ +u ^ {h} \left(\boldsymbol {x} _ {s}\right) = \sum_ {I} \sum_ {J} \sum_ {K} \widetilde {N} _ {I} ^ {[ 1 ]} \left(x _ {1}\right) \widetilde {N} _ {J} ^ {[ 2 ]} \left(x _ {2}\right) \widetilde {N} _ {K} ^ {[ 3 ]} \left(x _ {3}\right) u _ {I J K} \tag {9} +$$ + +where $\boldsymbol{x}_s = [x_1, x_2, x_3]$ is the spatial variable. Plugging the CP decomposition form of the tensor $u_{IJK}^{TD}$ into Eq. 6 into Eq. 9 and rearranging the terms, we have: + +$$ +u ^ {T D} \left(\boldsymbol {x} _ {s}\right) = \sum_ {m = 1} ^ {M} \left[ \sum_ {I} \widetilde {N} _ {I} ^ {[ 1 ]} \left(x _ {1}\right) u _ {I m} ^ {[ 1 ]} \right] \left[ \sum_ {J} \widetilde {N} _ {J} ^ {[ 2 ]} \left(x _ {2}\right) u _ {J m} ^ {[ 2 ]} \right] \left[ \sum_ {K} \widetilde {N} _ {K} ^ {[ 3 ]} \left(x _ {3}\right) u _ {K m} ^ {[ 3 ]} \right] \tag {10} +$$ + +Eq. 10 represents the TD interpolation (with C-HiDeNN) for a 3D spatial problem. Extending this framework to a general $D$ -dimensional space-parameter-time (S-P-T) problem with independent variables defined in Eq. 
11: + +$$ +\boldsymbol {x} = \left( \underbrace {x _ {1}, \dots , x _ {S}} _ {\text {s p a t i a l v a r i a b l e s}} , \underbrace {x _ {S + 1}, \dots , x _ {P}} _ {\text {p a r a m e t r i c v a r i a b l e s}} , x _ {t}\right) \tag {11} +$$ + +Then the TD interpolation to the S-P-T solution field can be written as follows: + +$$ +u ^ {T D} \left(\boldsymbol {x} _ {s}, \boldsymbol {x} _ {p}, x _ {t}\right) = \sum_ {m = 1} ^ {M} \underbrace {\left[ \sum_ {I _ {1}} \widetilde {N} _ {I _ {1}} ^ {[ 1 ]} \left(x _ {1}\right) u _ {I _ {1} m} ^ {[ 1 ]} \right] \cdots \left[ \sum_ {I _ {S}} \widetilde {N} _ {I _ {S}} ^ {[ S ]} \left(x _ {S}\right) u _ {I _ {S} m} ^ {[ S ]} \right]} _ {\text {s p a t i a l}} \underbrace {\left[ \sum_ {I _ {S + 1}} \widetilde {N} _ {I _ {S + 1}} ^ {[ S + 1 ]} \left(x _ {S + 1}\right) u _ {I _ {S + 1} m} ^ {[ S + 1 ]} \right] \cdots \left[ \sum_ {I _ {P}} \widetilde {N} _ {I _ {P}} ^ {[ P ]} \left(x _ {P}\right) u _ {I _ {P} m} ^ {[ P ]} \right]} _ {\text {p a r a m e t r i c}} \underbrace {\left[ \sum_ {I _ {D}} \widetilde {N} _ {I _ {D}} ^ {[ D ]} \left(x _ {t}\right) u _ {I _ {D} m} ^ {[ D ]} \right]} _ {\text {t e m p o r a l}} \tag {12} +$$ + +This can be further simplified using the product notation: + +$$ +u ^ {T D} \left(\boldsymbol {x} _ {s}, \boldsymbol {x} _ {p}, x _ {t}\right) = \sum_ {m = 1} ^ {M} \prod_ {d = 1} ^ {D} \sum_ {I _ {d}} \widetilde {N} _ {I _ {d}} ^ {[ d ]} \left(x _ {d}\right) u _ {I _ {d} m} ^ {[ d ]} \tag {13} +$$ + +where $\widetilde{N}_{I_d}^{[d]}(x_d)$ refers to the 1D C-HiDeNN shape function in the $d$ -th dimension; $u_{I_d m}^{[d]}$ is the nodal solution for dimension $d$ and mode $m$ . + +# 2.4. The General S-P-T Galerkin form of TAPS + +Similar to FEM, TAPS adopts the weighted-sum formulation to solve PDEs. 
Consider a general S-P-T PDE: + +$$ +\mathcal {L} (u (\boldsymbol {x})) = f (\boldsymbol {x}), \tag {14} +$$ + +where $\mathcal{L}$ is the differential operator; the independent variable vector $\pmb{x} = (x_{s}, x_{p}, x_{t})$ ; $f(\pmb{x})$ is the forcing function. Table 3 lists different examples of operator $\mathcal{L}$ and corresponding dependent and independent variables. + +Table 3: Examples for differential operators, dependent and independent variables + +
PDEDifferential operator LDependent variablexsxpxt
∂2u/∂x12 + ∂2u/∂x22 + ... + ∂2u/∂x2D = f(x)∂2/∂x12 + ∂2/∂x22 + ... + ∂2/∂x2Du(x1, x2, ...,xD)--
μui, jj + (μ + λ)uj,ij + Fi = e(x12+x22+x32)μ(·)i,jj + (μ + λ)(·)j,ijui, i = 1, 2, 3(x1, x2, x3)(λ,μ)-
ρcp,du/dt + k(∂2u/∂x12 + ∂2u/∂x22 + ∂2u/∂x32) = Pe(x12+x22+x32)ρcp,du/dt + k(∂2/∂x12 + ∂2/∂x22 + ∂2/∂x32)u(x1, x2, x3)(ρ, cp, k, P)t
+ +The weighted-sum residual form of the PDE with TD interpolation can be written as: + +$$ +\int_ {\Omega} \delta u ^ {T D} (\boldsymbol {x}) \left[ \mathcal {L} \left(u ^ {T D} (\boldsymbol {x})\right) - f (\boldsymbol {x}) \right] d \Omega = 0 \tag {15} +$$ + +where $u^{TD}$ is the approximation of the solution (i.e., trial function), $\delta u^{TD}$ is the test function, and $d\Omega = d\Omega_{x_s}d\Omega_{x_p}d\Omega_{x_t}$ . + +Depending on how $\delta u^{TD}$ is adopted, different mathematical formulations can be obtained. If the test function resides in the same function space as the trial function, it becomes the Galerkin formulation. When the test function space differs from the trial function space, it becomes the Petrov-Galerkin formulation [22]. If the Dirac delta function is used for the test function, then Eq. 15 corresponds to the collocation method [24]. In this paper, we employ the Galerkin formulation. However, the proposed framework is versatile and can be extended to accommodate other formulations as well. + +In Eq. 12, the entire S-P-T domain is approximated using TD interpolation. However, this approach may result in a large system of equations due to the rapid increase in the number of TD modes for certain cases. For example, if the forcing function represents a moving source function in Eq. 14), this complexity may arise. To maintain computational efficiency, we can partition the temporal domain into a series of time slabs. As illustrated in Fig. 6(a), the S-P-T continuum is divided into S-P-T slabs $\mathcal{T}_1,\mathcal{T}_2,\dots ,\mathcal{T}_T$ . The solution within each time slab is then approximated individually using the TD interpolation. + +Between consecutive S-P-T slabs, either a continuous or discontinuous formulation can be employed. As shown in Fig. 
6(b) for the continuous Galerkin scheme, the continuity of the solution in time is enforced by imposing the solution at the end of slab $\mathcal{T}_{i-1}$ as the initial condition of $\mathcal{T}_i$ : + +$$ +{ } ^ { [ \mathcal { T } + 1 ] } u ( \boldsymbol { x } _ { s } , \boldsymbol { x } _ { p } , 0 ) = { } ^ { [ \mathcal { T } ] } u ( \boldsymbol { x } _ { s } , \boldsymbol { x } _ { p } , x _ { t } ^ { m a x } ) \tag {16} +$$ + +Discontinuous Galerkin method can be used when a discontinuity is allowed between S-P-T slabs, as illustrated in Fig. 6(c). Discontinuity in time can be modeled using the jump operator $\llbracket \dots \rrbracket$ [25]. + +$$ +\llbracket u \left(\boldsymbol {x} _ {s}, \boldsymbol {x} _ {p}, t\right) \rrbracket = \lim _ {\epsilon \rightarrow 0 ^ {+}} \left(^ {\mathcal {T} + 1} u \left(\boldsymbol {x} _ {s}, \boldsymbol {x} _ {p}, \epsilon\right) - ^ {\mathcal {T}} u \left(\boldsymbol {x} _ {s}, \boldsymbol {x} _ {p}, x _ {t} ^ {\max } - \epsilon\right)\right) \tag {17} +$$ + +![](images/6c2d52bb0f68ce1b2b23d130b8706969e5dd55fc54afb40e93be9e7a63dfc8a8.jpg) +(a) + +![](images/2211d1927532087afa8c9ab6bf9f0e46ebf0f9fcc68e95f91179e9e07f54486f.jpg) +(b) +Figure 6: (a) Multiple S-P-T slabs along the temporal dimension. (b) Continuous Galerkin: the solution is continuous across different S-P-T slabs. (c) Discontinuous Galerkin: jumps are allowed across the slab boundaries + +![](images/f9df96ab1e94c3f197c125cc26bdae791e3769bfcd4aa7fddc2a762fde3a337b.jpg) +(c) + +Keeping in mind that this approach can be applied generally to a range of engineering problems, we will demonstrate an example of the Galerkin formulation using a single space-parameter-time partition (S-P-T) slab in the remainder of this section. 
For illustrative purposes, the transient heat transfer equation will be utilized: + +$$ +\rho c _ {p} \nabla_ {x _ {t}} u - \nabla_ {\boldsymbol {x} _ {s}} \cdot k \nabla_ {\boldsymbol {x} _ {s}} u = f (\boldsymbol {x} _ {s}, \boldsymbol {x} _ {p}, x _ {t}) \tag {18} +$$ + +as we focus on the example of modeling the laser powder bed fusion (LPBF) process in additive manufacturing (AM). In an LPBF simulation, we adopt the following time-dependent moving heat source function: + +$$ +f \left(\boldsymbol {x} _ {s}, \boldsymbol {x} _ {p}, x _ {t}\right) = \frac {2 \eta P}{\pi r ^ {2} d _ {\nu}} \exp \left(- \frac {2 \left((x - x _ {0} (t)) ^ {2} + (y - y _ {0} (t)) ^ {2}\right)}{r ^ {2}}\right) \cdot \mathbf {1} _ {\left(x _ {3} \geq d _ {\nu}\right)} \tag {19} +$$ + +Summarizing the independent variables in Eq. 18, there are spatial variables $\mathbf{x}_s = (x_1, x_2, x_3)$ ; parametric variables $\mathbf{x}_p = (k, \rho, c_p, \eta, P, r, d_v)$ ; and a temporal variable $x_t = t$ . Among the parametric variables, $k$ is conductivity; $\rho$ is the material density; $c_p$ is heat capacity; $\eta$ is the material absorptivity; $P$ represents laser power; $r$ is the standard deviation that characterizes the width of the heat source; $d_v$ is the penetration depth of the heat source. In Eq. 19, $[x_0(t), y_0(t)]$ represents the center of the moving heat source; $\mathbf{1}_{(x_3 \geq d_v)}$ is the indicator function where $\mathbf{1}_{(x_3 \geq d_v)} = 1$ if $x_3 \geq d_v$ or $\mathbf{1}_{(x_3 \geq d_v)} = 0$ if $x_3 < d_v$ . Note that the discretization of the material parameters, in particular, in a random field setting, has been previously proposed by Liu et al. [26, 27]. + +As shown in the schematic below, we classify the boundary surfaces into 2 categories: the Dirichlet boundary surface $\Gamma_{D}$ and the Neumann boundary surface $\Gamma_{N}$ . + +A uniform ambient temperature is used as the initial condition. 
The bottom of the powder bed is subject to the Dirichlet boundary condition and the Neumann boundary conditions are prescribed on the other surfaces. The initial and boundary conditions are: + +$$ +u (\boldsymbol {x} _ {s}, \boldsymbol {x} _ {p}, 0) | _ {\Omega} = u _ {0}, +$$ + +$$ +\left. u \left(\boldsymbol {x} _ {s}, \boldsymbol {x} _ {p}, x _ {t}\right) \right| _ {\Gamma_ {D}} = u _ {0}, \tag {20} +$$ + +$$ +\boldsymbol {n} \cdot \boldsymbol {q} | _ {\Gamma_ {N}} = q _ {\text {c o n v}} + q _ {\text {r a d}} + q _ {\text {e v a p}} +$$ + +where $u_{0}$ is the ambient temperature, $q_{conv}$ accounts for free convection, $q_{rad}$ accounts for radiation, and $q_{evap}$ imposes evaporative cooling when any material surface reaches the evaporation temperature [28]. Each flux is defined as: + +$$ +q _ {c o n v} = h _ {c o n v} [ u (x, t) - u _ {0} ], +$$ + +$$ +q _ {r a d} = - \sigma_ {S B} \epsilon \left(u ^ {4} \left(\boldsymbol {x} _ {s}, x _ {t}\right) - u _ {0} ^ {4}\right), \tag {21} +$$ + +$$ +q _ {e v a p} = - m _ {e v a p} L _ {e v a p}. +$$ + +![](images/a36972000cd774cc5ab556e0900c90f4c6eea0a3a42c11d6c7322779ba30c687.jpg) +Figure 7: Transient heat transfer with initial condition and boundary conditions. + +where $\sigma_{SB}$ is the Stefan-Boltzmann constant; $\epsilon$ is the material emissivity; $u_0$ is the ambient temperature; $h_{conv}$ is the convection coefficient of the surrounding gas, $m_{evap}$ is the mass evaporation flux and $L_{evap}$ is the heat of evaporation. In the following numerical examples, we only consider the free convection term in the Neumann boundary condition. The solution to Eq. 
18 is approximated using TD interpolation function: + +$$ +u ^ {T D} \left(\boldsymbol {x} _ {s}, \boldsymbol {x} _ {p}, x _ {t}\right) = \sum_ {m = 1} ^ {M} u _ {\boldsymbol {x} _ {s}} ^ {(m)} \left(\boldsymbol {x} _ {s}\right) u _ {\boldsymbol {x} _ {p}} ^ {(m)} \left(\boldsymbol {x} _ {p}\right) u _ {x _ {t}} ^ {(m)} \left(x _ {t}\right) \tag {22} +$$ + +Here a general notation is employed to represent different types of components in Eq. 22. For example, the spatial component $u_{\boldsymbol{x}_s}^{(m)}(\boldsymbol{x}_s)$ is equivalent to $u_{x_1}^{(m)}(x_1)u_{x_2}^{(m)}(x_2)u_{x_3}^{(m)}(x_3)$ . The corresponding test function can be obtained using the variational principle: + +$$ +\delta u ^ {T D} \left(\boldsymbol {x} _ {s}, \boldsymbol {x} _ {p}, x _ {t}\right) = \sum_ {m = 1} ^ {M} \left[ \delta u _ {\boldsymbol {x} _ {s}} ^ {(m)} \left(\boldsymbol {x} _ {s}\right) u _ {\boldsymbol {x} _ {p}} ^ {(m)} \left(\boldsymbol {x} _ {p}\right) u _ {x _ {t}} ^ {(m)} \left(x _ {t}\right) + u _ {\boldsymbol {x} _ {s}} ^ {(m)} \left(\boldsymbol {x} _ {s}\right) \delta u _ {\boldsymbol {x} _ {p}} ^ {(m)} \left(\boldsymbol {x} _ {p}\right) u _ {x _ {t}} ^ {(m)} \left(x _ {t}\right) + u _ {\boldsymbol {x} _ {s}} ^ {(m)} \left(\boldsymbol {x} _ {s}\right) u _ {\boldsymbol {x} _ {p}} ^ {(m)} \left(\boldsymbol {x} _ {p}\right) \delta u _ {x _ {t}} ^ {(m)} \left(x _ {t}\right) \right] \tag {23} +$$ + +Plugging the trial and test functions, the S-P-T Galerkin form of Eq. 18 can be obtained by following Eq. 
15: + +$$ +\int_ {\Omega} \delta u ^ {T D} \left[ \rho c _ {p} \nabla_ {x _ {t}} u ^ {T D} - \nabla_ {x _ {s}} \cdot k \nabla_ {x _ {s}} u ^ {T D} - f \right] d \Omega = 0 \tag {24} +$$ + +Using integration by parts on the diffusion term, we get the corresponding general S-P-T Galerkin weak form in TAPS formulation: + +$$ +\int_ {\Omega} \delta u ^ {T D} \rho c _ {p} \nabla_ {x _ {t}} u ^ {T D} d \Omega + \int_ {\Omega} \nabla_ {\boldsymbol {x} _ {s}} \delta u ^ {T D} \cdot k \nabla_ {\boldsymbol {x} _ {s}} u ^ {T D} d \Omega - \int_ {\partial \boldsymbol {x} _ {s}, \boldsymbol {x} _ {p}, t} \delta u ^ {T D} \boldsymbol {n} \cdot \boldsymbol {q} | _ {\Gamma_ {N}} d s d \Omega_ {\boldsymbol {x} _ {p}} d \Omega_ {t} - \int_ {\Omega} \delta u ^ {T D} f (\boldsymbol {x} _ {s}, \boldsymbol {x} _ {p}, x _ {t}) d \Omega = 0 \tag {25} +$$ + +where $q$ is the heat flux on the Neumann boundary. + +# 2.5. Discretized matrix form + +The S-P-T Galerkin weak form shown in Eq. 25 is nonlinear in nature due to the tensor product structure in TD interpolation, necessitating efficient solution schemes. To illustrate the detailed solution approach for the general S-P-T weak form, we simplify the governing equation Eq. 18 by considering a one-dimensional spatial problem where $x_{s} = x$ . We assume that the product of density and specific heat capacity $\rho c_{p}$ is equal to 1. Additionally, the forcing term is solely dependent on $x$ . Therefore, the simplified governing equation for this example is given by: + +$$ +\frac {\partial u}{\partial t} - \frac {\partial}{\partial x} \left(k \frac {\partial u}{\partial x}\right) = f (x) \tag {26} +$$ + +subject to homogeneous boundary conditions and initial conditions. This equation has 3 independent variables ( $D = 3$ ), i.e., spatial variable $x_{s} = x_{1} = x$ , parametric variable $x_{p} = x_{2} = k$ and temporal variable $x_{t} = x_{3} = t$ . The S-P-T Galerkin weak form of this problem can be written as follows according to Eq. 
25 (the superscripts "TD" for both trial and test functions are omitted for brevity). + +$$ +\int_ {\Omega} \delta u \nabla_ {t} u d \Omega + \int_ {\Omega} \nabla_ {x} \delta u \cdot k \nabla_ {x} u d \Omega - \int_ {\Omega} \delta u f d \Omega = 0 \tag {27} +$$ + +The corresponding trial and test functions can be obtained using Eqs. 22-23: + +$$ +u (x, k, t) = \sum_ {m = 1} ^ {M} u _ {x} ^ {(m)} (x) u _ {k} ^ {(m)} (k) u _ {t} ^ {(m)} (t) \tag {28} +$$ + +$$ +\delta u (x, k, t) = \underbrace {\sum_ {m = 1} ^ {M} \delta u _ {x} ^ {(m)} (x) u _ {k} ^ {(m)} (k) u _ {t} ^ {(m)} (t)} _ {\text {s p a t i a l v a r i a t i o n}} + \underbrace {\sum_ {m = 1} ^ {M} u _ {x} ^ {(m)} (x) \delta u _ {k} ^ {(m)} (k) u _ {t} ^ {(m)} (t)} _ {\text {p a r a m e t r i c v a r i a t i o n}} + \underbrace {\sum_ {m = 1} ^ {M} u _ {x} ^ {(m)} (x) u _ {k} ^ {(m)} (k) \delta u _ {t} ^ {(m)} (t)} _ {\text {t e m p o r a l v a r i a t i o n}} \tag {29} +$$ + +As shown in Eq. 29, the test function is further split into $D$ variational terms for a general $D$ dimensional problem (in the current example, $D = 3$ ). As an example, we first plug Eq. 28 and the spatial variation term of Eq. 29 into the Galerkin weak form in Eq. 
27 to obtain the S-P-T weak form terms corresponding to spatial variation: + +$$ +\underbrace {\int_ {\Omega} \sum_ {m = 1} ^ {M} \sum_ {n = 1} ^ {M} \left[ \nabla \delta u _ {x} ^ {(m)} (x) \nabla u _ {x} ^ {(n)} (x) d x \right] \cdot \left[ u _ {k} ^ {(m)} (k) k u _ {k} ^ {(n)} (k) d k \right] \cdot \left[ u _ {t} ^ {(m)} (t) u _ {t} ^ {(n)} (t) d t \right]} _ {\text {d i f f u s i o n t e r m}} + +$$ + +$$ +\underbrace {\int_ {\Omega} \sum_ {m = 1} ^ {M} \sum_ {n = 1} ^ {M} \left[ \delta u _ {x} ^ {(m)} (x) u _ {x} ^ {(n)} (x) d x \right] \cdot \left[ u _ {k} ^ {(m)} (k) u _ {k} ^ {(n)} (k) d k \right] \cdot \left[ u _ {t} ^ {(m)} (t) \nabla_ {t} u _ {t} ^ {(n)} (t) d t \right]} _ {\text {t i m e d e r i v a t i v e t e r m}} - \tag {30} +$$ + +$$ +\underbrace {\int_ {\Omega} \sum_ {m = 1} ^ {M} \left[ \delta u _ {x} ^ {(m)} (x) f (x) d x \right] \cdot \left[ u _ {k} ^ {(m)} (k) d k \right] \cdot \left[ u _ {t} ^ {(m)} (t) d t \right]} _ {\text {f o r c i n g t e r m}} +$$ + +We use 1D C-HiDeNN shape functions to approximate each univariate function: + +$$ +u _ {d} ^ {(n)} (x _ {d}) = \widetilde {N} _ {n _ {d} ^ {\prime}} ^ {[ d ]} (x _ {d}) u _ {n _ {d} ^ {\prime} n} ^ {[ d ]} \quad (\text {n o s u m o n} d) +$$ + +$$ +\delta u _ {d} ^ {(m)} \left(x _ {d}\right) = \widetilde {N} _ {n _ {d}} ^ {[ d ]} \left(x _ {d}\right) \delta u _ {n _ {d} m} ^ {[ d ]} \quad (\text {n o s u m o n} d) \tag {31} +$$ + +where Einstein summation is used. The free index $d$ refers to dimension and $d = x,k$ or $t$ . The gradient of the interpolated variable can be computed using the shape function derivative $\widetilde{B}_{n_d}^{[d]}(x_d) = \frac{d\widetilde{N}_{n_d}^{[d]}(x_d)}{dx_d}$ . 
+ +$$ +\nabla_ {x _ {d}} u _ {d} ^ {(n)} (x _ {d}) = \widetilde {B} _ {n _ {d} ^ {\prime}} ^ {[ d ]} (x _ {d}) u _ {n _ {d} ^ {\prime} n} ^ {[ d ]} \quad (\text {n o s u m o n} d) +$$ + +$$ +\nabla_ {x _ {d}} \delta u _ {d} ^ {(m)} (x _ {d}) = \widetilde {B} _ {n _ {d}} ^ {[ d ]} (x _ {d}) \delta u _ {n _ {d} m} ^ {[ d ]} \quad (\text {n o s u m o n} d) \tag {32} +$$ + +Plugging Eq. 31 - 32 into Eq. 30, the diffusion term can be rewritten as: + +$$ +\sum_ {m = 1} ^ {M} \sum_ {n = 1} ^ {M} \underbrace {\int_ {\Omega_ {x}} \widetilde {B} _ {n _ {x}} (x) \delta u _ {n _ {x} m} ^ {[ x ]} \widetilde {B} _ {n _ {x} ^ {\prime}} (x) u _ {n _ {x} ^ {\prime} n} ^ {[ x ]} d x} _ {\text {s p a t i a l t e r m}} \cdot \underbrace {\int_ {\Omega_ {k}} \widetilde {N} _ {n _ {k}} (k) u _ {n _ {k} m} ^ {[ k ]} k \widetilde {N} _ {n _ {k} ^ {\prime}} (k) u _ {n _ {k} ^ {\prime} n} ^ {[ k ]} d k} _ {\text {p a r a m e t r i c t e r m}} \cdot \underbrace {\int_ {\Omega_ {t}} \widetilde {N} _ {n _ {t}} (t) u _ {n _ {t} m} ^ {[ t ]} \widetilde {N} _ {n _ {t} ^ {\prime}} (t) u _ {n _ {t} ^ {\prime} n} ^ {[ t ]} d t} _ {\text {t e m p o r a l t e r m}} \tag {33} +$$ + +As can be readily seen from Eq. 33, after doing 1D integration of each term, the parametric and temporal terms can be treated as coefficient matrices: + +$$ +C _ {m n} ^ {[ k ]} = \underbrace {\int_ {\Omega_ {k}} \widetilde {N} _ {n _ {k}} (k) u _ {n _ {k} m} ^ {[ k ]} k \widetilde {N} _ {n _ {k} ^ {\prime}} (k) u _ {n _ {k} ^ {\prime} n} ^ {[ k ]} d k} _ {\text {p a r a m e t r i c t e r m}} +$$ + +$$ +C _ {m n} ^ {[ t ]} = \underbrace {\int_ {\Omega_ {t}} \widetilde {N} _ {n _ {t}} (t) u _ {n _ {t} m} ^ {[ t ]} \widetilde {N} _ {n _ {t} ^ {\prime}} (t) u _ {n _ {t} ^ {\prime} n} ^ {[ t ]} d t} _ {\text {t e m p o r a l t e r m}} \tag {34} +$$ + +as the only free indices are $m$ and $n$ . Substituting the coefficient matrices and rearranging different terms in Eq. 
33, we have: + +$$ +\sum_ {m = 1} ^ {M} \delta u _ {n _ {x} m} ^ {[ x ]} \sum_ {n = 1} ^ {M} \left[ \int_ {\Omega_ {x}} \widetilde {B} _ {n _ {x}} (x) \widetilde {B} _ {n _ {x} ^ {\prime}} (x) d x \right] \cdot C _ {m n} ^ {[ k ]} C _ {m n} ^ {[ t ]} \cdot u _ {n _ {x} ^ {\prime} n} ^ {[ x ]} \tag {35} +$$ + +Like standard FEM, we can define $\int_{x}\widetilde{B}_{n_x}(x)\widetilde{B}_{n_x'}(x)dx$ as the 1D stiffness matrix $K_{n_x n_x'}^{[x]}$ of $x$ dimension in Eq. 35. We let $C_{mn}^{[x]} = C_{mn}^{[k]}C_{mn}^{[t]}$ with no summation on $(m,n)$ . Furthermore, let us define the following 4-th order tensor: + +$$ +A _ {n _ {x} n _ {x} ^ {\prime} m n} ^ {[ x ]} = K _ {n _ {x} n _ {x} ^ {\prime}} ^ {[ x ]} C _ {m n} ^ {[ x ]} \tag {36} +$$ + +where $A_{n_x n_x'mn}^{[x]}$ is a function of solution vectors $u_{n_k m}^{[k]}$ and $u_{n_t m}^{[t]}$ since the coefficient matrix $C_{mn}^{[x]}$ depends on these solution vectors as shown in Eq. 34. This dependency reflects the interconnected nature of the variables across different dimensions in the S-P-T framework, highlighting how the spatial, parameter, and temporal components influence each other through the coefficients. As a result, Eq. 33 can be further simplified as follows: + +$$ +\delta u _ {n _ {x} m} ^ {[ x ]} A _ {n _ {x} n _ {x} ^ {\prime} m n} ^ {[ x ]} u _ {n _ {x} ^ {\prime} n} ^ {[ x ]} \tag {37} +$$ + +where the summation signs are neglected since $m$ and $n$ become dummy variables. The 4-th order tensor $A_{n_x n_x' mn}^{[x]}$ can be reshaped as a 2nd order tensor $\mathbb{A}_{IJ}^{[x]}$ : the indices $n_x$ and $m$ are combined into a single composite index $I$ , and the indices $n_x'$ and $n$ are combined into a single composite index $J$ . 
+ +$$ +A _ {n _ {x} n _ {x} ^ {\prime} m n} ^ {[ x ]} = \mathbb {A} _ {I J} ^ {[ x ]} \tag {38} +$$ + +Define the following vectorization: + +$$ +\delta \mathbb {U} _ {I} ^ {[ x ]} = \left[ \operatorname {v e c} \left(\delta u _ {n _ {x} m} ^ {[ x ]}\right) \right] _ {I} +$$ + +$$ +\mathbb {U} _ {J} ^ {[ x ]} = \left[ \operatorname {v e c} \left(u _ {n _ {x} ^ {\prime} n} ^ {[ x ]}\right) \right] _ {J} \tag {39} +$$ + +As a result, Eq. 37 is equivalent to: + +$$ +\delta \mathbb {U} ^ {[ x ] T} \mathbb {A} ^ {[ x ]} \mathbb {U} ^ {[ x ]} \tag {40} +$$ + +Following the same procedure, we can obtain matrix forms corresponding to the time derivative term $\delta \mathbb{U}^{[x]^T}\mathbb{B}^{[x]}\mathbb{U}^{[x]}$ , and the forcing term $\delta \mathbb{U}^{[x]^T}\mathbb{Q}^{[x]}$ for the spatial variational part of Eq. 30. Similar structures can also be obtained for the parametric and temporal variational parts of the test function in Eq. 29. Denoting $\mathbb{K}^{[d]} = \mathbb{A}^{[d]} + \mathbb{B}^{[d]}$ , the matrix form of the generalized S-P-T Galerkin form in Eq. 27 can be written as: + +$$ +\underbrace {\delta \mathbb {U} ^ {[ x ] ^ {T}} \mathbb {K} ^ {[ x ]} \mathbb {U} ^ {[ x ]} - \delta \mathbb {U} ^ {[ x ] ^ {T}} \mathbb {Q} ^ {[ x ]}} _ {\text {s p a t i a l v a r i a t i o n a l p a r t}} + \underbrace {\delta \mathbb {U} ^ {[ k ] ^ {T}} \mathbb {K} ^ {[ k ]} \mathbb {U} ^ {[ k ]} - \delta \mathbb {U} ^ {[ k ] ^ {T}} \mathbb {Q} ^ {[ k ]}} _ {\text {p a r a m e t r i c v a r i a t i o n a l p a r t}} + \underbrace {\delta \mathbb {U} ^ {[ t ] ^ {T}} \mathbb {K} ^ {[ t ]} \mathbb {U} ^ {[ t ]} - \delta \mathbb {U} ^ {[ t ] ^ {T}} \mathbb {Q} ^ {[ t ]}} _ {\text {t e m p o r a l v a r i a t i o n a l p a r t}} = 0 \tag {41} +$$ + +Eq. 41 is equivalent to the following nonlinear system of equations. 
Note that the nonlinearity comes from the fact that $\mathbb{K}^{[d]}$ is solution dependent: + +$$ +\left[ \delta \mathbb {U} ^ {[ x ] ^ {T}}, \delta \mathbb {U} ^ {[ k ] ^ {T}}, \delta \mathbb {U} ^ {[ t ] ^ {T}} \right] \left\{\left[ \begin{array}{c c c} \mathbb {K} ^ {[ x ]} (\mathbb {U} ^ {[ k ]}, \mathbb {U} ^ {[ t ]}) & 0 & 0 \\ 0 & \mathbb {K} ^ {[ k ]} (\mathbb {U} ^ {[ x ]}, \mathbb {U} ^ {[ t ]}) & 0 \\ 0 & 0 & \mathbb {K} ^ {[ t ]} (\mathbb {U} ^ {[ x ]}, \mathbb {U} ^ {[ k ]}) \end{array} \right] \left[ \begin{array}{l} \mathbb {U} ^ {[ x ]} \\ \mathbb {U} ^ {[ k ]} \\ \mathbb {U} ^ {[ t ]} \end{array} \right] - \left[ \begin{array}{l} \mathbb {Q} ^ {[ x ]} (\mathbb {U} ^ {[ k ]}, \mathbb {U} ^ {[ t ]}) \\ \mathbb {Q} ^ {[ k ]} (\mathbb {U} ^ {[ x ]}, \mathbb {U} ^ {[ t ]}) \\ \mathbb {Q} ^ {[ t ]} (\mathbb {U} ^ {[ x ]}, \mathbb {U} ^ {[ k ]}) \end{array} \right] \right\} = 0 \tag {42} +$$ + +where we can treat the solution vector $\left[\mathbb{U}^{[x]^T},\mathbb{U}^{[k]^T},\mathbb{U}^{[t]^T}\right]$ as generalized DoFs like standard FEM. There are many ways to solve Eq. 42. For example, standard linearization schemes such as Newton's method have been used [29]. However, this method may suffer from ill-conditioning since the mismatch of scales for different dimensions can be significant. In this paper, we use the concept of subspace iteration to efficiently approximate the solution by iterating in the subspace of the test function space until a convergence criteria is met [19]. A similar counterpart has been widely adopted as the gold standard in discrete tensor decomposition [23]. + +# 2.6. Solution scheme of TAPS: subspace iteration + +For subspace iteration in $d$ -th dimension, only the solution matrix $\mathbb{U}^{[d]}$ is treated as unknown while all other functions are considered as known constants. Consequently, the variations of the univariate functions other than $d$ -th dimension will vanish. From Eq. 
42, it can be seen that this will lead to a linear system of equations for the unknowns in the $d$ -th dimension. The updated solution matrix $\mathbb{U}^{[d]}$ from this process is then used in the next subspace iteration for dimension $d + 1$ (when $d = D$ , we come back to the first dimension $d = 1$ ). The complete solution scheme for subspace iteration is shown in Algorithm 1. + +Algorithm 1 TAPS solution scheme (subspace iteration) + +1: Initialize solution vector $\mathbb{U}^{[x_1][0]}$ ..., $\mathbb{U}^{[x_D][0]}$ with random values and compute $\mathbb{K}^{[x_1][0]}$ , and $\mathbb{Q}^{[x_1][0]}$ + +2: for iter = 0 to iter_max do +3: for $d = 1$ to D do +4: Update iteration number $\mathcal{K} = iter\times D + d$ +5: Solve TD linear system $\mathbb{K}^{[x_d][\mathcal{K} - 1]}\mathbb{U}^{[x_d][\mathcal{K}]} = \mathbb{Q}^{[x_d][\mathcal{K} - 1]}$ +6: Update matrices $\mathbb{K}^{[x_{d + 1}][\mathcal{K}]}$ and force vector $\mathbb{Q}^{[x_{d + 1}][\mathcal{K}]}$ +7: end for +8: Check convergence +9: end for + +To illustrate the details of the subspace iteration algorithm, we consider the $\mathcal{K}$ -th subspace iteration (which is on spatial variable $x$ ). Here, we assume that the parametric and temporal solutions have been updated from the previous $(\mathcal{K} - 1)$ -th iteration, leaving the spatial solution as unknown to be solved in $\mathcal{K}$ -th iteration. Moreover, instead of the full variation form of the test function as in Eq. 42, we only consider the subspace $x$ of the test function by setting the parametric and temporal variational parts as 0. 
As a result, we have: + +$$ +\mathbb {K} ^ {[ x ] [ \mathcal {K} - 1 ]} \left(\mathbb {U} ^ {[ k ] [ \mathcal {K} - 1 ]}, \mathbb {U} ^ {[ t ] [ \mathcal {K} - 1 ]}\right) \mathbb {U} ^ {[ x ] [ \mathcal {K} ]} = \mathbb {Q} ^ {[ x ] [ \mathcal {K} - 1 ]} \left(\mathbb {U} ^ {[ k ] [ \mathcal {K} - 1 ]}, \mathbb {U} ^ {[ t ] [ \mathcal {K} - 1 ]}\right) \tag {43} +$$ + +which is a linear system of equations with unknown $\mathbb{U}^{[x][\mathcal{K}]}$ . This is a general Sylvester equation which can be solved using many efficient solution schemes [30, 31]. In this paper, sparse direct solvers based on fast diagonalization/complex Schur decomposition methods are adopted [32]. The computational complexity of the sparse direct solver is $O(M^3 + M^2 n_d + C_c(n_d))$ for the $d$ -th dimension subspace iteration, where $M$ is the total number of modes; $n_d$ is the number of grid points for $d$ -th dimension; $C_c(n_d)$ refers to the computational cost of the banded sparse mass/stiffness matrix for $d$ -th dimension with a shape of $(n_d \times n_d)$ . + +Once $\mathbb{U}^{[x][\mathcal{K}]}$ is obtained, we then update matrix $\mathbb{K}^{[k][\mathcal{K}]}(\mathbb{U}^{[x][\mathcal{K}]},\mathbb{U}^{[t][\mathcal{K}]})$ and forcing vector $\mathbb{Q}^{[k][\mathcal{K}]}(\mathbb{U}^{[x][\mathcal{K}]},\mathbb{U}^{[t][\mathcal{K}]})$ . In the next iteration (for dimension $k$ ), we treat $\mathbb{U}^{[k][\mathcal{K} + 1]}$ as the only unknown. Subspace iteration will continue unless the relative change of all solution matrices (for example, $L_{2}$ norm) is within the tolerance. + +# 2.7. 
Error estimates of TAPS + +Since the TAPS solution is based on the C-HiDeNN-TD approximation and the generalized Galerkin formulation, we can have the following theoretical results on the error bounds, as demonstrated in our previous work on C-HiDeNN [6]: + +$$ +\left\| u ^ {\mathrm {C} - \mathrm {H i D e N N}} - u ^ {\mathrm {e x}} \right\| _ {E} \leq \left\| u ^ {\mathrm {T A P S}} - u ^ {\mathrm {e x}} \right\| _ {E} \leq \left\| u ^ {\mathrm {F E M}} - u ^ {\mathrm {e x}} \right\| _ {E} \tag {44} +$$ + +where $\| \cdot \|_E$ denotes the energy norm, $u^{\mathrm{ex}}$ denotes the exact solution, $u^{\mathrm{C - HiDeNN}}$ denotes the solution obtained by the full C-HiDeNN method without tensor decomposition, $u^{\mathrm{TAPS}}$ denotes the TAPS solution with a sufficient number of modes, $u^{\mathrm{FEM}}$ denotes the FEM solution. The proof of the above results is based on the fact that the full C-HiDeNN approximation can provide a larger function space and therefore more accurate solutions than conventional FEM [6]. The subspace iteration can be considered as a local (directional) version of the Galerkin formulation and is expected to enable an optimized solution for the tensor decomposition that will converge to the Galerkin-based full C-HiDeNN method. + +# 3. Results + +# 3.1. Convergence study for moving heat source + +In this section, we first analyze the convergence of the TAPS solver for a space-time (S-T) transient heat transfer problem. A single NVIDIA RTX A6000 GPU is used for all the following analyses. In Eq. 18, we let $\rho c_{p} = 1$ , $k = 1$ , and replace the heat source as shown in Eq. 45. In this example, we have the spatial variable $x_{s} = (x,y,z)$ and the temporal variable $x_{t} = t$ . 
+ +$$ +\begin{array}{l} f \left(\boldsymbol {x} _ {s}, x _ {t}\right) = 2 (1 - 2 y ^ {2}) \left(1 - e ^ {- 1 5 t}\right) e ^ {- y ^ {2} - (1 0 0 t - x - 5) ^ {2}} \\ + 2 (1 - 2 (1 0 0 t - x - 5) ^ {2}) \left(1 - e ^ {- 1 5 t}\right) e ^ {- y ^ {2} - (1 0 0 t - x - 5) ^ {2}} + (1 \tag {45} \\ - e ^ {- 1 5 t}) (2 0 0 x + 1 0 0 0 - 2 0 0 0 0 t) e ^ {- y ^ {2} - (1 0 0 t - x - 5) ^ {2}} \\ - 1 5 e ^ {- 1 5 t} e ^ {- y ^ {2} - (1 0 0 t - x - 5) ^ {2}} \\ \end{array} +$$ + +The analytical solution to the PDE is inherently non-separable. + +$$ +u ^ {\mathrm {e x}} \left(\boldsymbol {x} _ {s}, x _ {t}\right) = (1 - e ^ {- 1 5 t}) e ^ {- y ^ {2} - (x - 1 0 0 t - 5) ^ {2}} \tag {46} +$$ + +The initial and boundary conditions are: + +$$ +u \left(x _ {s}, 0\right) = 0, +$$ + +$$ +\left. u \left(\boldsymbol {x} _ {s}, x _ {t}\right) \right| _ {\partial \Omega} = \left. u ^ {\mathrm {e x}} \left(\boldsymbol {x} _ {s}, x _ {t}\right) \right| _ {\partial \Omega}. \tag {47} +$$ + +The relative $L_{2}$ norm error is defined as: + +$$ +\epsilon_ {L _ {2}} = \frac {\| u ^ {T D} \left(\boldsymbol {x} _ {s} , x _ {t}\right) - u ^ {\mathrm {e x}} \left(\boldsymbol {x} _ {s} , x _ {t}\right) \| _ {L _ {2} \left(\Omega_ {\boldsymbol {x} _ {s}} \otimes \Omega_ {x _ {t}}\right)}}{\| u ^ {\mathrm {e x}} \left(\boldsymbol {x} _ {s} , x _ {t}\right) \| _ {L _ {2} \left(\Omega_ {\boldsymbol {x} _ {s}} \otimes \Omega_ {x _ {t}}\right)}} \tag {48} +$$ + +First, we investigate the influence of the number of subspace iterations. As shown in Fig. 8(a), 3 iterations are enough to obtain an accurate result. Next, we investigate the convergence in terms of the number of modes. Here we compare the relative $L_{2}$ norm error for both TAPS and proper generalized decomposition (PGD) methods [17, 18]. To this aim, we use the same discretization for the space-time domain with each dimension discretized by 100 grid points, the same reproducing polynomial order $p = 1$ and convolution patch size $s = 1$ . 
As can be seen from Fig. 8(b), TAPS requires a much smaller number of modes than PGD. For TAPS, when the number of modes equals 25, the relative $L_{2}$ norm error decreases to $2.5 \times 10^{-3}$ . The total solution time is 15.2 s. However, PGD requires 1,000 modes which takes 60.6 s solution time to reach the same level of accuracy. This is because the test function space in PGD is a subspace of TAPS [29]. Furthermore, the modal decomposition obtained from PGD is not optimal and thus requires a larger storage requirement due to the increased number of modes. + +![](images/71b70cc86e81f29f17717ebc5e990a233cea4fd792c202ed7d02aa919c3be394.jpg) +(a) + +![](images/fc1a326bac6804be45fa1c9b3cf27c427a62e03aa83dfcf816fe3cdb17e70be1.jpg) +(b) +Figure 8: Relative L2 norm error with respect to (a) the number of subspace iterations (b) the number of modes + +The spatial and temporal convergence are also studied. In Fig. 9(a), the number of temporal nodes is fixed as 500, and the spatial mesh is refined. It shows the relative $L_{2}$ norm error with respect to the number of nodes along each spatial dimension. As can be readily seen from the figure, larger patch size $s$ leads to smaller error given the same reproducing polynomial orders $p$ . Moreover, we can adjust $p$ to control the spatial convergence rate. Similarly, Fig. 9(b) demonstrates the convergence rate in the temporal domain where we fix the spatial discretization as 500 along each spatial dimension. By adjusting $s$ and $p$ , we can obtain different temporal convergence rates. + +Finally, we refine the spatial and temporal mesh simultaneously and study the spatio-temporal convergence rate in Fig. 9(c). As can be observed from the figure, higher reproducing polynomial order $p$ will lead to a higher-order convergence rate. 
+ +![](images/2345a49f326f69ec67725fc081645d1b37d8869821b2514f92f47f0a0b9cc7e8.jpg) +(a) + +![](images/aa5c83a28c6329ef2d94f536ced88460656363be508afd476967c069343f0768.jpg) +(b) + +![](images/d1d0eb95867b86e555e3a95df3d5b5a14cd34d608385d9234dcdbecea9446986.jpg) +(c) +Figure 9: Relative $L_{2}$ norm error with respect to the number of grid points (a) spatial convergence (b) temporal convergence (c) spatio-temporal convergence + +# 3.2. Convergence study of $S-P-T$ problems up to equivalent zetta-scale (10 $^{21}$ ) full models + +In this example, we study the convergence of the TAPS solver for the time-dependent parametric heat transfer problem in a S-P-T setting. In Eq. 18, we adopt the heat source as shown in Eq. 49. In this example, we have spatial variable $\boldsymbol{x}_s = (x,y,z)$ , parametric variable $\boldsymbol{x}_p = (k,P,\rho ,c_p)$ and temporal variable $x_{t} = t$ . + +$$ +\begin{array}{l} f \left(\boldsymbol {x} _ {s}, \boldsymbol {x} _ {p}, x _ {t}\right) = 1 5 \rho^ {2} c _ {p} ^ {2} k p e ^ {- 1 5 k t} e ^ {- 2 5. 0 x ^ {2} - 2 5. 0 y ^ {2}} \\ + 5 0 \rho c _ {p} k p \left(1 - e ^ {- 1 5 k t}\right) e ^ {- 2 5. 0 x ^ {2} - 2 5. 0 y ^ {2}} \left[ \left(1 - 5 0 x ^ {2}\right) + \left(1 - 5 0 y ^ {2}\right) \right] \tag {49} \\ \end{array} +$$ + +The analytical solution to the PDE is inherently non-separable. + +$$ +u ^ {\mathrm {e x}} \left(\boldsymbol {x} _ {s}, \boldsymbol {x} _ {p}, x _ {t}\right) = \rho c _ {p} P \left(1 - e ^ {- 1 5 k t}\right) e ^ {- 2 5. 0 x ^ {2} - 2 5. 
0 y ^ {2}} \tag {50} +$$ + +The initial and boundary conditions are: + +$$ +u \left(\boldsymbol {x} _ {s}, \boldsymbol {x} _ {p}, 0\right) = 0 +$$ + +$$ +u \left(\boldsymbol {x} _ {s}, \boldsymbol {x} _ {p}, x _ {t}\right) | _ {\partial \Omega} = u ^ {\mathrm {e x}} \left(\boldsymbol {x} _ {s}, \boldsymbol {x} _ {p}, x _ {t}\right) | _ {\partial \Omega} +$$ + +The relative $L_{2}$ norm error is defined as: + +$$ +\epsilon_ {L _ {2}} = \frac {\left\| u ^ {T D} \left(\boldsymbol {x} _ {s} , \boldsymbol {x} _ {p} , x _ {t}\right) - u ^ {\mathrm {e x}} \left(\boldsymbol {x} _ {s} , \boldsymbol {x} _ {p} , x _ {t}\right) \right\| _ {L _ {2} \left(\Omega_ {x _ {s}} \otimes \Omega_ {x _ {p}} \otimes \Omega_ {x _ {t}}\right)}}{\left\| u ^ {\mathrm {e x}} \left(\boldsymbol {x} _ {s} , \boldsymbol {x} _ {p} , x _ {t}\right) \right\| _ {L _ {2} \left(\Omega_ {x _ {s}} \otimes \Omega_ {x _ {p}} \otimes \Omega_ {x _ {t}}\right)}} \tag {51} +$$ + +To study the convergence of TAPS for S-P-T problems, the number of grid points is refined simultaneously in each dimension and corresponding relative $L_{2}$ norm errors are computed as shown in Fig. 10. When the number of grid points in each dimension is 450, the equivalent DoFs of a full model achieves $450^{8} = 1.68 \times 10^{21}$ . Consequently, it is equivalent to a zetta-scale $(10^{21})$ full problem. As can be seen from the figure, a larger patch size $s$ leads to a smaller error and faster convergence. A higher reproducing polynomial order $p$ also leads to a higher convergence rate. It can be noticed that the convergence rate for $p = 3$ case is smaller than expected $p + 1 = 4$ . This is attributed to the fact that the S-P-T mesh is not fine enough. However, due to the rounding error in computing the relative $L_{2}$ norm error, we can only accurately compute the error up to 450 grid points per dimension. 
+ +![](images/7fb0c377642dbc2df6a083a06da8df232e8bdb8c9587d121c766dfaf94864ad3.jpg) +Figure 10: Relative $L_{2}$ norm error with respect to the number of grid points in each dimension + +In summary, we have the flexibility to choose different $s$ and $p$ to control the accuracy of TAPS by directly solving the S-P-T PDE. This is different from other data-driven modeling approaches (for instance, neural networks-based data-driven methods) in two notable ways. First, unlike a black-box neural network interpolator where the accuracy of the model is not guaranteed, our method is built upon the AI-enhanced finite element method, and we can control the convergence rate by choosing suitable hyperparameters $s$ and $p$ . Second, unlike most data-driven reduced-order models for physical problems, our method directly solves the governing PDE by plugging in the TD interpolation without seeing any training data. As a result, we can avoid the most expensive offline data generation stage as opposed to data-driven methods. + +# 3.3. Moving source with solution dependent material parameters + +In this section, we model moving heat sources using temperature-dependent material parameters. The solution scheme of this problem is provided in detail in Appendix A. Figure 11(a) illustrates a typical representation of temperature-dependent heat conductivity and capacity for Inconel 718 [33]. + +Since the temperature dependency of $k(u)$ and $\rho c_{p}(u)$ can be approximated using a linear relationship. As a result, we can directly rewrite $k(u(\boldsymbol{x}_s,x_t))$ and $\rho c_{p}(u(\boldsymbol{x}_{s},x_{t}))$ in the TD format. 
+ +![](images/5e704b162ebdd448550d9186205e95bdb2ff6e27cf2d3fe5c63a122256540197.jpg) +(a) + +![](images/716c83297ce005647d99e5f23cc46e6e3024853b336292259b2f47b9f8a009d8.jpg) +(b) +Figure 11: (a) Temperature dependent material properties for Inconel 718 [33] (b) Schematic of numerical simulation, where the solution along the center line is compared for FEM and TAPS. + +$$ +k \left(\boldsymbol {x} _ {s}, x _ {t}\right) \approx \sum_ {m = 1} ^ {M} m _ {k} u _ {x _ {1}} ^ {(m)} \left(x _ {1}\right) u _ {x _ {2}} ^ {(m)} \left(x _ {2}\right) u _ {x _ {3}} ^ {(m)} \left(x _ {3}\right) u _ {x _ {t}} ^ {(m)} \left(x _ {t}\right) + n _ {k} +$$ + +$$ +\rho c _ {p} \left(\boldsymbol {x} _ {s}, x _ {t}\right) \approx \sum_ {m = 1} ^ {M} m _ {c _ {p}} u _ {x _ {1}} ^ {(m)} \left(x _ {1}\right) u _ {x _ {2}} ^ {(m)} \left(x _ {2}\right) u _ {x _ {3}} ^ {(m)} \left(x _ {3}\right) u _ {x _ {t}} ^ {(m)} \left(x _ {t}\right) + n _ {c _ {p}} \tag {52} +$$ + +where $M$ is the decomposition modes of the TAPS solution; $m_{k} = 1.52 \times 10^{-5} \mathrm{~W} / (\mathrm{mmK}^{2})$ ; $n_{k} = 5.29 \times 10^{-3} \mathrm{~W} / (\mathrm{mmK})$ ; $m_{c_{p}} = 6.11 \times 10^{-7} \mathrm{~mm}^{-3} \mathrm{~K}^{-2}$ ; $n_{c p} = 3.25 \times 10^{-3} \mathrm{~mm}^{-3} \mathrm{~K}^{-1}$ + +The problem setup is shown in Fig. 11 (b). The spatial domain size is $10\mathrm{mm} \times 10\mathrm{mm} \times 1\mathrm{mm}$ where homogeneous Dirichlet boundary conditions are assumed for the left and right surfaces; homogeneous Neumann boundary conditions are applied to all other surfaces. As shown in Eq. 19, a moving Gaussian source term $f(\boldsymbol{x}_s, t)$ is applied as a volumetric source term with a radius $r = 0.5\mathrm{mm}$ and moving velocity $500\mathrm{mm/s}$ . The diameter is discretized using 10 spatial elements. + +Since there is no analytical solution available to this problem, we use implicit finite element analysis as the baseline for validation. 
JAX-FEM [34] is used to generate the nonlinear FEM solution. For ease of comparison, we use the same time increment as $1.60 \times 10^{-4}$ sec for both TAPS and FEM. The solution along the center line, as shown in Fig. 11 (b) is compared. As can be seen from Fig. 12, the result of the nonlinear TAPS solver agrees well with FEM. + +# 3.4. Simulation of LPBF process + +In this section, we use TAPS to efficiently model the laser powder bed fusion process (LPBF) in additive manufacturing. Here we only consider the free convection term in the Neumann boundary condition. The initial condition can be considered by splitting the total solution as a summation of the homogeneous part and the inhomogeneous part. + +$$ +u \left(\boldsymbol {x} _ {s}, x _ {t}\right) = u _ {0} \left(\boldsymbol {x} _ {s}, x _ {t}\right) + u _ {\text {i n i t}} \left(\boldsymbol {x} _ {s}\right) \tag {53} +$$ + +As a result, $u_{0}(\pmb{x}_{s}, x_{t})$ is subject to homogeneous initial conditions. In this section, we assume Ti-6Al-4V is used as the powder bed materials. The detailed material parameters can be found in Table 4. + +# 3.4.1. Single-track simulation + +In this example, we investigate the computational complexity numerically for single-track LPBF simulation with a single S-T slab, as shown in Fig. 13 (a). A single NVIDIA RTX A6000 GPU is used for all the following analyses. To ensure accuracy, the number of modes is adopted as 5 times larger than the number of time steps in the following examples. In the first case, within the S-T slab, the spatial mesh is refined uniformly along each spatial dimension + +![](images/3d5930d344ad41e50bbdf9aa4fa400ae67373da8806b2d086f22076597db5e79.jpg) +Figure 12: Comparison of Nonlinear TAPS solution versus finite element solution at different times. + +Table 4: Parameters used in the simulation + +
| Parameter | Variable | Value | Units |
| --- | --- | --- | --- |
| Thermal conductivity | $k$ | 22.0 | W m⁻¹ K⁻¹ |
| Density | $\rho$ | 4.27 | g cm⁻³ |
| Specific heat capacity | $c_p$ | 745 | J kg⁻¹ K⁻¹ |
| Ambient temperature | $T_0$ | 298.15 | K |
| Heat convection coefficient | $h_{\mathrm{conv}}$ | 14.73 | W m⁻² K⁻¹ |
+ +while fixing the number of temporal grid points. The computational time for each subspace iteration is plotted in Fig. 13 (b). It can be seen that TAPS has a linear growth of computational complexity when refining the spatial mesh. + +Similarly, we only refine the temporal mesh while fixing the spatial mesh in the second case and plot the computational time for each subspace iteration as in Fig. 13 (c). It can be readily observed that refining the temporal mesh has a much higher computational complexity than refining the spatial mesh. This is because increasing temporal elements will also lead to an increased number of modes $M$ . As mentioned before, the computational cost for the sparse direct solver employed is $O(M^3 + M^2 n_d + C_c(n_d))$ for the $d$ -th dimension subproblem, where $M$ represents total number of modes; $n_d$ refers to the total number of grid points in $d$ -th dimension; $C_c(n_d)$ refers to the computational cost of a banded sparse matrix with a shape of $(n_d \times n_d)$ . Therefore, the increased number of modes leads to a cubic growth in computational time. + +Table 5: Parameters used in the single-track simulation + +
| Parameter | Variable | Value | Units |
| --- | --- | --- | --- |
| Laser power | $P$ | 200 | W |
| Laser spot size radius | $r$ | 50 | μm |
| Laser scan speed | $V$ | 500 | mm s⁻¹ |
| Absorptivity | $\eta$ | 0.25 | 1 |
| Length | $L$ | 1.5 | mm |
| Width | $W$ | 1.5 | mm |
| Height | $H$ | 1.5 | mm |
| Laser penetration depth | $d$ | 50 | μm |
| Mesh size | $h$ | 5 | μm |
+ +![](images/06d93feedd76d1074ae325710d3e3e673d487dd19e7ca7144df3fe686afb12f4.jpg) +Figure 13: (a) Single-track simulation. (b) Computational time of subspace iteration in refining the spatial mesh: linear growth. (c) Computational time of subspace iteration in refining the temporal mesh in a single space-time slab: cubic growth due to the increased number of modes + +![](images/4f647f9527a5dd106e3c64c9d9a5f2ac9e311427cc75722f1e8cd157bd35a51d.jpg) + +![](images/fa2ca97b6f3903dd29bd21860b65642768d7856f00c975369e0e7129f9aa8323.jpg) + +# 3.4.2. Multi-track simulation + +A major challenge in simulating multiple tracks in LPBF is the substantial number of time steps needed. To circumvent the cubic growth associated with the increasing number of temporal grid points and modes for moving source problems, we can leverage multiple S-T slabs to break down the original problem with a large number of time steps into smaller slabs. Consequently, this method keeps the total number of modes required in each slab beneath a reasonable threshold, thereby optimizing computational efficiency. The detailed algorithm of simulating multiple space-time (S-T) slabs for LPBF process is shown in Appendix B. Using this method, we first simulate a multi-track LPBF problem and analyze how the total number of slabs influences computational cost. The detailed setup can be found in Table 6. Note that we only simulate the printing process of the final layer in this section. + +Table 6: Parameters used in the multi-track simulation + +
| Parameter | Variable | Value | Units |
| --- | --- | --- | --- |
| Laser power | $P$ | 200 | W |
| Laser spot size radius | $r$ | 50 | μm |
| Laser scan speed | $V$ | 500 | mm s⁻¹ |
| Absorptivity | $\eta$ | 0.25 | 1 |
| Length | $L$ | 1.5 | mm |
| Width | $W$ | 1.5 | mm |
| Height | $H$ | 1.5 | mm |
| Laser penetration depth | $d$ | 50 | μm |
| Hatch space size | $h_s$ | 50 | μm |
| Mesh size | $h$ | 5 | μm |
+ +We use different numbers of temporal grids within each S-T slab and compare the computation cost, as shown in Fig. 14. As can be seen from the figure, when each space-time slab contains around 20 temporal grid points, the computational efficiency is optimal. Hence, choosing the optimal number of temporal elements inside each space-time slab is crucial for the overall performance of the TAPS solver for modeling LPBF process. We will adopt 20 temporal grid points per S-T slab as the default for the following multi-track LPBF simulations. + +Next, we compare the performance of TAPS versus the classical explicit finite difference method. To this aim, we use a GPU-accelerated and optimized finite difference code, GAMMA, to model the LPBF process [28]. In this example, we increase the size of the domain while maintaining all other process parameters, as shown in Table 6. The corresponding computation time, GPU memory usage, and required data storage space are plotted in Fig. 15. + +Fig. 15(a) highlights the significant speed advantage of TAPS over GAMMA, especially as the size of the simulation domain increases. GAMMA only can simulate powder bed size up to $4.5^{3}\mathrm{mm}^{3}$ since the GPU memory can only handle up to $7.31\times 10^{8}$ spatial DoFs. For the $4.5^{3}\mathrm{mm}^{3}$ case, TAPS is 85 times faster than GAMMA. On the other hand, TAPS is able to model $100^{3}\mathrm{mm}^{3}$ powder bed, with its speed benefits becoming even more evident for larger + +![](images/cc72b3743dea1acce6b7f995d1ed32b295f09a088f75ac84fbe7696ee90234f1.jpg) +(a) + +![](images/13672870ba3d3e65ac32f7b556569b2691beeea4c17ceda38811f12e48a4d9bc.jpg) +(b) + +![](images/1e0837c8a03c5dd7bb6ad48fde26cc3fea68510e35b307ac270e07be256a353a.jpg) +Figure 14: (a) Multi-track simulation. (b) Influence of number of temporal grid points in each S-T slab on the computational cost. 
+(a) +Figure 15: Performance comparison of TAPS and GAMMA for powder bed with different sizes in terms of (a) computational time (b) GPU memory requirement (c) Data storage requirement for each time increment + +![](images/f9ccd59eb06fa06952a41ddbeabb1229514b05ff8f3cebdcc5d4513deee979da.jpg) +(b) + +![](images/b1257e053086d2dd69596cd534767d87c78a5bcb112c31e469f474d08790a53b.jpg) +(c) + +domains. Fig. 15(b) compares the memory requirements, where GAMMA experiences fast growth due to the cubic scaling of total spatial DoFs. In contrast, TAPS benefits from TD, requiring significantly less memory. TAPS uses 13 times smaller GPU memory compared to GAMMA for the $4.5^{3}\mathrm{mm}^{3}$ case. Additionally, TAPS can efficiently manage GPU memory usage for larger powder bed simulations by adopting different numbers of temporal grids in each S-T slab. Finally, Fig. 15(c) compares data storage needs where GAMMA's storage requirements grow cubically, whereas TAPS maintains a linear growth pattern. For the $4.5^{3}\mathrm{mm}^{3}$ case, the data storage of GAMMA is 2,700 times larger than TAPS. + +# 3.4.3. Large-scale multi-layer multi-track LPBF simulation + +In this section, the proposed method is used to simulate a large-scale multi-layer multi-track LPBF process. Element birth is used to model newly added layers in the process. Details on element birth can be found in Appendix C. As shown in Fig. 16 (a), the run scenario is the production of a $10\mathrm{mm}$ cube within a $12\mathrm{mm}$ powder bed domain. The base plate height is $2\mathrm{mm}$ . The tool path follows the pattern shown on the top surface. Material parameters are taken from Ti-6Al-4V [35]. The detailed parameters for the simulation setup are shown in Table 7. + +To showcase the capabilities of our approach using TAPS, we employ a fine spatial mesh for the simulation. The spatial element size is $10 \times 10 \times 5\mu m^3$ . 
In classical numerical algorithms, this corresponds to $3.46 \times 10^{9}$ spatial DoFs, which is unmanageable for typical workstations due to the prohibitive RAM requirements. + +The simulation result is shown in Fig. 16 (b), where the temperature of the last layer is plotted. In total, it costs 60.7 + +![](images/0f42907a3d6c93d278eb81dc581820a0260002f7e28abedb7a27b81e2d7be42a.jpg) +(a) + +![](images/6dccaac9c3bd5c80948b57cdbca225315a06fe5add804187394c7973ca6c6a07.jpg) +(b) +Figure 16: (a) Problem statement: LPBF simulation. (b) Temperature solution for printing the final layer + +Table 7: Parameters used in the large-scale LPBF simulation + +
| Parameter | Variable | Value | Units |
| --- | --- | --- | --- |
| Laser power | $P$ | 200 | W |
| Laser spot size radius | $r$ | 100 | μm |
| Laser scan speed | $V$ | 500 | mm s⁻¹ |
| Absorptivity | $\eta$ | 0.25 | 1 |
| Laser penetration depth | $d$ | 50 | μm |
| Layer thickness | $h_l$ | 50 | μm |
| Hatch space size | $h_s$ | 200 | μm |
+ +hrs to run the simulation. The maximum GPU memory usage is 8.11 GB. The final solution vector size is 1.35 GB. As a comparison, it's estimated that GAMMA will solve the same problem with the same spatial resolution in 3,485 days, with at least 120 GB GPU memory usage and 1.26 TB storage space to store the solution [35]. Consequently, TAPS achieves around 1,370 X speedup, 14.8 X memory footprint savings, and 955 X storage gain compared to the finite difference method. + +# 4. Discussion + +In the previous sections, we have shown that TAPS tackles two drawbacks of data-driven surrogate modeling approaches which use offline data generated through direct numerical simulation (DNS). Firstly, the proposed TAPS is data-free, which means that it does not require any training data. This is of crucial importance for applications that require ultra high-resolution simulations because offline training data generation can be extremely costly. Our method circumvents expensive offline DNS data generation by directly solving the governing equation. Secondly, TAPS enables solving ultra large-scale problems with significant speedup, minimal memory requirement, and substantial storage gain as compared to standard DNS techniques. + +The computational speed of the current method can be further improved with the state-of-the-art high-performance numerical solvers and parallel computing on multiple GPUs. Right now, the TAPS linear systems of equations are solved on CPUs which results in additional overhead. With more sparse direct solvers/iterative schemes becoming available on GPU, we expect a further speedup of the current program. Moreover, parallel computing using multiple GPUs can be achieved using Message Passing Interface (MPI) [36]. For ultra large-scale analysis where each dimension contains millions of nodes, an efficient iterative solver with a suitable preconditioner needs to be developed. 
+ +Variational multiscale methods can be used to further extend the capabilities of the current method to tackle zettascale space-time problems [37, 35]. Moreover, one major computational cost for the current method originates from the increased number of decomposition modes for a large number of time steps. This can be avoided by leveraging + +coordinate transformation techniques where the moving source can be transformed into a fixed one. As a result, we expect to greatly improve the computational performance of the current method. Irregular geometry can also be considered using immersed finite element techniques or the Solid Isotropic Material with Penalization (SIMP) method in topology optimization [20, 38, 39, 40, 41]. + +# 5. Conclusion + +In this paper, we propose TAPS as a data-free predictive scientific AI model to simulate ultra large-scale physical problems. This method eliminates the traditional necessity for offline training data generation, thereby exhibiting substantial speedup, memory efficiency, and storage gain as opposed to data-driven methods, making previously unsolvable large-scale and high-dimensional problems manageable. The convergence of the TAPS solver is numerically investigated. As a demonstration of the capabilities of TAPS, we showcase the application of the TAPS solver for a multi-layer multi-track additive manufacturing problem that is intractable with classical numerical algorithms. + +TAPS is well suited for a broad range of science or engineering problems where: 1) the finite element method and other conventional numerical methods are unsuitable due to excessively long simulation times or high RAM and storage demands needed to achieve high accuracy, 2) the model must accommodate design parameters as inputs, or 3) fast prediction is required once the model is obtained. The INN hierarchical neural network interpolants, particularly C-HiDeNN used by TAPS, demonstrate superior performance compared to other machine learning models. 
For the solving tasks, it has shown superior performance compared to physics-informed neural network (PINN) [7], CP-PINN [42], and Kolmogorov-Arnold Networks (KAN) [43] with orders of magnitude faster solution time, higher accuracy, and better scalability to ultra large-scale and high-dimensional PDEs [44]. INN interpolants can also be effectively used in data-driven training tasks and show better training accuracy compared to MLP, SIREN [45] and KAN [11, 44]. + +As illustrated in Fig. 17, the significance of this work in the area of predictive scientific AI models aligns with the trend in other areas in AI, such as language and vision AI models. The evolution of language models has seen dramatic growth, beginning with foundational models like BERT [46], followed by the GPT series [47], which expanded transformer architecture to hundreds of billions of parameters, showcasing powerful generative capabilities. In vision models, AlexNet [48] marked a breakthrough, while advancements like DIT-XL [49] and SORA [50] integrated diffusion models to handle more complex and challenging visual tasks. This trajectory of increasing scale and sophistication from its network architecture (i.e., transformer of language models and diffusion of vision models) is mirrored in predictive scientific AI where TAPS represents a significant advancement in its network architecture, INN. + +A major critical issue in the emerging large AI models is a more sophisticated model will generally lead to a larger amount of training data, more expensive training costs, and longer inference time. The advent of DeepSeek R1 breaks this rule since it has fewer parameters, much less training cost, faster inference speed, yet still comparable accuracy compared to other state-of-the-art models due to its novel architecture and training techniques such as distillation methods [51]. 
For predictive scientific AI, we face even more pronounced challenges due to strict accuracy demands and the necessity for high-resolution physics for large-scale problems. As a result, the future of predictive scientific AI is still largely untapped. TAPS provides a promising solution to these emerging challenges by delivering a highly accurate, exceptionally fast, and memory and storage efficient scientific AI model. + +In conclusion, the proposed TAPS computational framework offers substantial enhancements in computational efficiency, memory consumption, and storage demands for science and engineering simulations. As a result, TAPS paves a new path to address future challenges in ultra large-scale simulations pertinent to complex predictive scientific AI models. + +# Appendix A. Solving nonlinear S-P-T PDEs: solution dependent material properties + +The algorithm 1 works for linear PDEs where the PDE coefficients remain constant. However, in many engineering applications, the PDE coefficients can be solution-dependent. For instance, material properties such as heat conductivity and heat capacity can be a function of temperature in additive manufacturing. In these cases, the PDE becomes non-linear which requires an efficient solution scheme. In this section, we solely focus on the space-time problems formulation of a nonlinear PDE. As a result, the product of density and heat capacity $\rho c_{p}(u)$ and conductivity + +![](images/5ac20a39f47685ebb9378a0c4e12e417a603d78fe820a3ac37f08ecdb86a0b5e.jpg) +Figure 17: Evolution of AI models for different tasks + +$k(u)$ are no longer temperature independent as in Eq. 18. Similar to the linear problem shown before, the generalized Galerkin weak form is used to solve this equation. 
+ +$$ +\int_ {\Omega} \delta u \nabla_ {x _ {t}} \left[ \rho c _ {p} (u) u \right] d \Omega - \int_ {\Omega} \nabla_ {x _ {s}} \delta u \cdot k (u) \nabla_ {x _ {s}} u d \Omega + \int_ {\partial \Omega_ {x _ {s}} \otimes \Omega_ {t}} \delta u \boldsymbol {q} \cdot \boldsymbol {n} d s d \Omega_ {x _ {t}} = \int_ {\Omega} \delta u b d \Omega \tag {A.1} +$$ + +where $\mathbf{q}$ is the heat flux on the Neumann boundary. Since Eq. A.1 is a space-time integral, classical time-stepping based methods can't be directly used to update material parameters. Here we propose a global-local approach similar to the Large Time Increment (LATIN) method to effectively solve the above equations [52]. + +![](images/200be0b0946761ab17f9c2c59f91eed1c6bccd651b97031cd574a612f7e34b9a.jpg) +Figure A.18: Global-local approach for nonlinear TAPS solver + +As shown in Fig. A.18, we split the nonlinear problem into 2 stages, a linear global stage and a nonlinear local update stage. In the global stage, we assume the spatio-temporal $k(\pmb{x}_s,x_t)$ and $\rho c_{p}(\pmb{x}_{s},x_{t})$ are known. As a result, we treat the global problem as a linear problem and obtain $u(x_{s},x_{t})$ using the previously proposed method for linear problems. After $u(\pmb{x}_s,x_t)$ is updated in the global stage, we update $k(u)$ and $\rho c_{p}(u)$ locally at each Gauss integration point according to material models $k(u)$ and $\rho c_{p}(u)$ . We repeat the global-local iteration until the variation of $k(u)$ and $\rho c_{p}(u)$ between consecutive iterations meets the convergence criteria. The algorithm is summarized in Algorithm 2: + +Algorithm 2 Nonlinear TAPS solution scheme: PDE with solution dependent coefficients +1: Initialize solution matrices with random values and update $\rho_{c_p}(\pmb{x}_s, x_t)$ and $k(\pmb{x}_s, x_t)$ . 
+2: for $iter_{\gamma} = 1$ to $iter_{\gamma,max}$ do
+3: for $iter = 1$ to $iter_{max}$ do
+4: Update $\rho c_{p}(\pmb{x}_s, x_t)$ and $k(\pmb{x}_s, x_t)$
+5: Use Algorithm 1 to solve solution $u(\pmb{x}_s, x_t)$
+6: for $i = 1$ to integration points do
+7: $\rho c_{p}(\pmb{x}_s, x_t) = \rho c_{p}[u(\pmb{x}_s, x_t)]$
+8: $k(\pmb{x}_s, x_t) = k[u(\pmb{x}_s, x_t)]$
+9: end for
+10: Check convergence
+11: end for
+12: end for

# Appendix B. Mode compression

One significant challenge in multi-track simulation in LPBF is the huge number of time steps required. It is impossible to resolve all the time steps with only a single space-time (S-T) slab. Hence, we split the whole layer scan into multiple S-T slabs and relate each S-T slab using the following equation.

$$
{ } ^ { [ \mathcal { T } + 1 ] } u ( \boldsymbol { x } _ { s } , x _ { t } ) = { } ^ { [ \mathcal { T } ] } u ( \boldsymbol { x } _ { s } , x _ { t } ^ { m a x } ) + { } ^ { [ \mathcal { T } + 1 ] } u _ { 0 } ( \boldsymbol { x } _ { s } , x _ { t } ) \tag {B.1}
$$

where ${}^{[\mathcal{T}+1]}u(\pmb{x}_s, x_t)$ refers to the solution at the $(\mathcal{T} + 1)$-th space-time slab; ${}^{[\mathcal{T}+1]}u_0(\pmb{x}_s, x_t)$ refers to the solution of the homogeneous initial value problem of the $(\mathcal{T} + 1)$-th space-time slab; ${}^{[\mathcal{T}]}u(\pmb{x}_s, x_t^{max})$ is the solution of the $\mathcal{T}$-th space-time slab at the last time increment. As can be seen from Eq. B.1, we impose the last time increment solution of the previous space-time slab as the initial condition for the next space-time slab. This is efficiently implemented by adding the TD form of the last increment as new modes in the current space-time slab solution. However, for large-scale computations requiring thousands of slabs, directly concatenating modes can result in substantial storage demands. 
+

In mode compression, we aim to compress the number of modes for ${}^{[\mathcal{T}]}u(\boldsymbol{x}_s,x_t^{max})$ because of its spatial dependence and naturally low-dimensional structure. Consequently, it can be effectively decomposed using only a few modes. Denoting the TD form of the last time step solution of the previous space-time slab as ${}^{[\mathcal{T}]}u(\boldsymbol{x}_s,x_t^{max})^{TD}$, we aim to find a compact form that can be represented with a much smaller number of modes ${}^{[\mathcal{T}]}u(\boldsymbol{x}_s,x_t^{max})_F^{TD}$. For notation simplicity, we omit $x_{t}^{max}$ in the following equations. Consequently, the mode compression problem can be written as:

$$
{ } ^ { [ \mathcal { T } ] } u ( \boldsymbol { x } _ { s } ) _ { F } ^ { T D } - { } ^ { [ \mathcal { T } ] } u ( \boldsymbol { x } _ { s } ) ^ { T D } = 0 \tag {B.2}
$$

The weighted sum residual form is used to approximate ${}^{[\mathcal{T}]}u(\boldsymbol{x}_s)_F^{TD}$:

$$
\int_ {\Omega_ {x}} \delta^ {[ \mathcal {T} ]} u \left(\boldsymbol {x} _ {s}\right) _ {F} ^ {T D} \cdot \left[ ^ {[ \mathcal {T} ]} u \left(\boldsymbol {x} _ {s}\right) _ {F} ^ {T D} - ^ {[ \mathcal {T} ]} u \left(\boldsymbol {x} _ {s}\right) ^ {T D} \right] d \boldsymbol {x} _ {s} = 0 \tag {B.3}
$$

Eq. B.3 can be efficiently solved using Algorithm 1.

# Appendix C. Element birth

In the LPBF process, once the printing is finished for the current layer, a new layer of powder is deposited on top of the existing layer. This necessitates modeling the new layer with additional elements. Various studies have investigated different approaches for element birth techniques. While some researchers opt for activating small sections of geometry incrementally, others apply the technique by spreading the deposition across an entire layer or multiple layers simultaneously. The most widely adopted approach is to activate an entire layer and then scan the heat source over it [53]. 
+

In TAPS, we propose a new scheme to generate new layers of elements. In this scheme, new elements are added only in the $x_{3}$ direction, since the in-plane dimensions don't change in the printing process. Therefore, as opposed to

full-scale classical numerical methods, TAPS enables marginal overhead in generating new layers of elements with extra grid points added only in the $x_{3}$ dimension. The solution scheme for multi-layer multi-track LPBF simulation using TAPS can be summarized in Algorithm 3.

Algorithm 3 Multi-layer multi-track LPBF simulation using TAPS
+1: for $n_{layer} = 1$ to $n_{layerTotal}$ do
+2: Initialize solution matrices with random values for the new layer
+3: Compute the updated stiffness matrix and force vector for the $x_{3}$ direction
+4: for $n_{track} = 1$ to $n_{trackTotal}$ do
+5: for $iter = 1$ to $iter_{max}$ do
+6: for $d = 1$ to dimension do
+7: Compute solution vectors according to Algorithm 1 or 2
+8: end for
+9: Check convergence
+10: end for
+11: Compress modes
+12: Concatenate compressed modes to previous tracks as new modes
+13: end for
+14: Compress modes
+15: Concatenate compressed modes to previous layers as new modes
+16: end for

# References

[1] Zongyi Li, Nikola Kovachki, Kamyar Azizzadenesheli, Burigede Liu, Kaushik Bhattacharya, Andrew Stuart, and Anima Anandkumar. Fourier neural operator for parametric partial differential equations. arXiv preprint arXiv:2010.08895, 2020.
+[2] Owen Huang, Sourav Saha, Jiachen Guo, and Wing Kam Liu. An introduction to kernel and operator learning methods for homogenization by self-consistent clustering analysis. Computational Mechanics, 72(1):195-219, 2023.
+[3] Can AI Solve Science? https://writings.stephenwolfram.com/2024/03/can-ai-solve-science/. [Accessed 03-04-2025].
+[4] A new golden age of discovery — deepmind.google. https://deepmind.google/public-policy/ai-for-science/. [Accessed 03-04-2025].
+[5] Wing Kam Liu, Shaofan Li, and Harold S Park. 
Eighty years of the finite element method: Birth, evolution, and future. Archives of Computational Methods in Engineering, 29(6):4431-4453, 2022. +[6] Ye Lu, Hengyang Li, Lei Zhang, Chanwook Park, Satyajit Mojumder, Stefan Knapik, Zhongsheng Sang, Shaoqiang Tang, Daniel W Apley, Gregory J Wagner, et al. Convolution hierarchical deep-learning neural networks (c-hidenn): finite elements, isogeometric analysis, tensor decomposition, and beyond. Computational Mechanics, 72(2):333-362, 2023. +[7] Maziar Raissi, Paris Perdikaris, and George E Karniadakis. Physics-informed neural networks: A deep learning framework for solving forward and inverse problems involving nonlinear partial differential equations. Journal of Computational physics, 378:686-707, 2019. +[8] Enrui Zhang, Ming Dao, George Em Karniadakis, and Subra Suresh. Analyses of internal structures and defects in materials using physics-informed neural networks. Science advances, 8(7):eabk0644, 2022. +[9] Nick McGreavy and Ammar Hakim. Weak baselines and reporting biases lead to overoptimism in machine learning for fluid-related partial differential equations. Nature Machine Intelligence, 6(10):1256-1269, 2024. +[10] Junwoo Cho, Seungtae Nam, Hyunmo Yang, Seok-Bae Yun, Youngjoon Hong, and Eunbyung Park. Separable physics-informed neural networks. Advances in Neural Information Processing Systems, 36, 2024. +[11] Chanwook Park, Sourav Saha, Jiachen Guo, Hantao Zhang, Xiaoyu Xie, Miguel A Bessa, Dong Qian, Wei Chen, Gregory J Wagner, Jian Cao, et al. Engineering software 2.0 by interpolating neural networks: unifying training, solving, and calibration. arXiv preprint arXiv:2404.10296, 2024. +[12] Lei Zhang, Lin Cheng, Hengyang Li, Jiaying Gao, Cheng Yu, Reno Domel, Yang Yang, Shaoqiang Tang, and Wing Kam Liu. Hierarchical deep-learning neural networks: finite elements and beyond. Computational Mechanics, 67:207-230, 2021. +[13] Lei Zhang, Ye Lu, Shaoqiang Tang, and Wing Kam Liu. 
Hidenn-td: reduced-order hierarchical deep learning neural networks. Computer Methods in Applied Mechanics and Engineering, 389:114414, 2022. +[14] Chanwook Park, Ye Lu, Sourav Saha, Tianju Xue, Jiachen Guo, Satyajit Mojumder, Daniel W Apley, Gregory J Wagner, and Wing Kam Liu. Convolution hierarchical deep-learning neural network (c-hidenn) with graphics processing unit (gpu) acceleration. Computational Mechanics, 72(2):383-409, 2023. +[15] Sourav Saha, Zhengtao Gan, Lin Cheng, Jiaying Gao, Orion L Kafka, Xiaoyu Xie, Hengyang Li, Mahsa Tajdari, H Alicia Kim, and Wing Kam Liu. Hierarchical deep learning neural network (hidenn): an artificial intelligence (ai) framework for computational science and engineering. Computer Methods in Applied Mechanics and Engineering, 373:113452, 2021. + +[16] Yingjian Liu, Chanwook Park, Ye Lu, Satyajit Mojumder, Wing Kam Liu, and Dong Qian. Hidenn-fem: a seamless machine learning approach to nonlinear finite element analysis. Computational Mechanics, 72(1):173-194, 2023. +[17] Francisco Chinesta, Amine Ammar, Adrien Leygue, and Roland Keunings. An overview of the proper generalized decomposition with applications in computational rheology. Journal of Non-Newtonian Fluid Mechanics, 166(11):578-592, 2011. +[18] Francisco Chinesta, Roland Keunings, and Adrien Leygue. The proper generalized decomposition for advanced numerical simulations: a primer. Springer Science & Business Media, 2013. +[19] Anthony Nouy. A priori model reduction through proper generalized decomposition for solving time-dependent partial differential equations. Computer Methods in Applied Mechanics and Engineering, 199(23-24):1603-1626, 2010. +[20] Hengyang Li, Stefan Knapik, Yangfan Li, Chanwook Park, Jiachen Guo, Satyajit Mojumder, Ye Lu, Wei Chen, Daniel W Apley, and Wing Kam Liu. Convolution hierarchical deep-learning neural network tensor decomposition (c-hidenn-td) for high-resolution topology optimization. Computational Mechanics, 72(2):363-382, 2023. 
+[21] Ehsan Kharazmi, Zhongqiang Zhang, and George Em Karniadakis. hp-vpinns: Variational physics-informed neural networks with domain decomposition. Computer Methods in Applied Mechanics and Engineering, 374:113547, 2021. +[22] Thomas JR Hughes. The finite element method: linear static and dynamic finite element analysis. Courier Corporation, 2003. +[23] Tamara G Kolda and Brett W Bader. Tensor decompositions and applications. SIAM review, 51(3):455-500, 2009. +[24] Junuthula Narasimha Reddy. An introduction to the finite element method, volume 3. McGraw-Hill New York, 2005. +[25] Thomas JR Hughes and Gregory M Hulbert. Space-time finite element methods for elastodynamics: formulations and error estimates. Computer methods in applied mechanics and engineering, 66(3):339-363, 1988. +[26] Wing Kam Liu, Ted Belytschko, and A. Mani. Probabilistic finite elements for nonlinear structural dynamics. Computer Methods in Applied Mechanics and Engineering, 56(1):61-81, 1986. +[27] Wing Kam Liu, Ted Belytschko, and A. Mani. Random field finite elements. International Journal for Numerical Methods in Engineering, 23(3):1831-1845, 1986. +[28] Shuheng Liao, Ashkan Golgoon, Mojtaba Mozaffar, and Jian Cao. Efficientgpu-accelerated thermomechanical solver for residual stress prediction in additive manufacturing. Computational Mechanics, 71(5):879-893, 2023. +[29] Amine Ammar, Bechir Mokdad, Francisco Chinesta, and Roland Keunings. A new family of solvers for some classes of multidimensional partial differential equations encountered in kinetic theory modeling of complex fluids. Journal of non-Newtonian fluid Mechanics, 139(3): 153-176, 2006. +[30] Abderrahman Bouhamidi and Khalide Jbilou. A note on the numerical approximate solutions for generalized sylvester matrix equations with applications. Applied Mathematics and Computation, 206(2):687-694, 2008. +[31] Ya-Jun Xie and Chang-Feng Ma. The scaling conjugate gradient iterative method for two types of linear matrix equations. 
Computers & Mathematics with Applications, 70(5):1098-1113, 2015. +[32] Ulrich Langer and Marco Zank. Efficient direct space-time finite element solvers for parabolic initial-boundary value problems in anisotropic sobolev spaces. SIAM Journal on Scientific Computing, 43(4):A2714-A2736, 2021. +[33] A Sh Agazhanov, DA Samoshkin, and Yu M Kozlovskii. Thermophysical properties of inconel 718 alloy. In Journal of Physics: Conference Series, volume 1382, page 012175. IOP Publishing, 2019. +[34] Tianju Xue, Shuheng Liao, Zhengtao Gan, Chanwook Park, Xiaoyu Xie, Wing Kam Liu, and Jian Cao. Jax-fem: A differentiablegpu-accelerated 3d finite element solver for automatic inverse design and mechanistic data science. Computer Physics Communications, 291: 108802, 2023. +[35] Joseph P Leonor and Gregory J Wagner. Go-melt: GPU-optimized multilevel execution of lpbf thermal simulations. Computer Methods in Applied Mechanics and Engineering, 426:116977, 2024. +[36] Dana Jacobsen, Julien Thibault, and Inanc Senocak. An mpi-cuda implementation for massively parallel incompressible flow computations on multi-gpu clusters. In 48th AIAA Aerospace Sciences Meeting Including the New Horizons Forum and Aerospace Exposition, page 522, 2010. +[37] Thomas JR Hughes, Gonzalo R Feijóo, Luca Mazzei, and Jean-Baptiste Quincy. The variational multiscale method—a paradigm for computational mechanics. Computer methods in applied mechanics and engineering, 166(1-2):3-24, 1998. +[38] Xingshi Wang and Lucy T Zhang. Modified immersed finite element method for fully-coupled fluid-structure interactions. Computer methods in applied mechanics and engineering, 267:150–169, 2013. +[39] Wing Kam Liu, Yaling Liu, David Farrell, Lucy Zhang, X Sheldon Wang, Yoshio Fukui, Neelesh Patankar, Yongjie Zhang, Chandrajit Bajaj, Junghoon Lee, et al. Immersed finite element method and its applications to biological systems. Computer methods in applied mechanics and engineering, 195(13-16):1722-1749, 2006. 
+[40] Wing Kam Liu, Do Wan Kim, and Shaoqiang Tang. Mathematical foundations of the immersed finite element method. Computational Mechanics, 39:211-222, 2007. +[41] Adrian M Kopacz, Woon-Hong Yeo, Jae-Hyun Chung, and Wing Kam Liu. Nanoscale sensor analysis using the immersed molecular electrokinetic finite element method. Nanoscale, 4(16):5189-5194, 2012. +[42] Sai Karthikeya Vemuri, Tim Büchner, Julia Niebling, and Joachim Denzler. Functional tensor decompositions for physics-informed neural networks. In International Conference on Pattern Recognition, pages 32-46. Springer, 2025. +[43] Ziming Liu, Yixuan Wang, Sachin Vaidya, Fabian Ruehle, James Halverson, Marin Soljačić, Thomas Y Hou, and Max Tegmark. Kan: Kolmogorov-arnold networks. arXiv preprint arXiv:2404.19756, 2024. +[44] Jiachen Guo, Xiaoyu Xie, Chanwook Park, Hantao Zhang, Matthew Politis, Gino Domel, T.J.R Hughes, and Wing Kam Liu. Interpolation neural network-tensor decomposition (inn-td): a scalable and interpretable approach for large-scale physics-based problems. arXiv preprint arXiv:2503.02041, 2025. +[45] Vincent Sitzmann, Julien Martel, Alexander Bergman, David Lindell, and Gordon Wetzstein. Implicit neural representations with periodic activation functions. Advances in neural information processing systems, 33:7462-7473, 2020. +[46] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 conference of the North American chapter of the association for computational linguistics: human + +language technologies, volume 1 (long and short papers), pages 4171-4186, 2019. +[47] Alec Radford, Karthik Narasimhan, Tim Salimans, Ilya Sutskever, et al. Improving language understanding by generative pre-training. 2018. +[48] Alex Krizhevsky, Ilya Sutskever, and Geoffrey E Hinton. Imagenet classification with deep convolutional neural networks. 
Advances in neural information processing systems, 25, 2012. +[49] William Peebles and Saining Xie. Scalable diffusion models with transformers. In Proceedings of the IEEE/CVF international conference on computer vision, pages 4195-4205, 2023. +[50] Yixin Liu, Kai Zhang, Yuan Li, Zhiling Yan, Chujie Gao, Ruoxi Chen, Zhengqing Yuan, Yue Huang, Hanchi Sun, Jianfeng Gao, et al. Sora: A review on background, technology, limitations, and opportunities of large vision models. arXiv preprint arXiv:2402.17177, 2024. +[51] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025. +[52] Pierre Ladevèze. On reduced models in nonlinear solid mechanics. European Journal of Mechanics-A/Solids, 60:227-237, 2016. +[53] Richard J Williams, Catrin M Davies, and Paul A Hooper. A pragmatic part scale model for residual stress and distortion prediction in powder bed fusion. Additive Manufacturing, 22:416-425, 2018. 
\ No newline at end of file diff --git a/data/2025/2503_13xxx/2503.13933/images/0075aeaf6cca885996621046020cbd1c77f74ed6eae9c69767afa526cd023f66.jpg b/data/2025/2503_13xxx/2503.13933/images/0075aeaf6cca885996621046020cbd1c77f74ed6eae9c69767afa526cd023f66.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9adf42053f13acecc1fe4b19d57644c05892ae6f --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/0075aeaf6cca885996621046020cbd1c77f74ed6eae9c69767afa526cd023f66.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1ddf6d6d05b787e2b5352f8a8b41fa6cdb4f00ff4b04cb0163923a47ba4d99d1 +size 15581 diff --git a/data/2025/2503_13xxx/2503.13933/images/043303db4e5c82962bde585e2e5978667fa6324d753cd893b2e7c4471a16091e.jpg b/data/2025/2503_13xxx/2503.13933/images/043303db4e5c82962bde585e2e5978667fa6324d753cd893b2e7c4471a16091e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..70c9f2d35bf6f1f5f7255ebb0b700e1dbc5ce886 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/043303db4e5c82962bde585e2e5978667fa6324d753cd893b2e7c4471a16091e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:64863e80cb05133b70ba09a7bda4154c13c11ccca1cc65b219ecc308f191658a +size 44053 diff --git a/data/2025/2503_13xxx/2503.13933/images/06d93feedd76d1074ae325710d3e3e673d487dd19e7ca7144df3fe686afb12f4.jpg b/data/2025/2503_13xxx/2503.13933/images/06d93feedd76d1074ae325710d3e3e673d487dd19e7ca7144df3fe686afb12f4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2708e5016933bf2bfd1a4ee081e49e2d250517f1 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/06d93feedd76d1074ae325710d3e3e673d487dd19e7ca7144df3fe686afb12f4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:386ee234f8542e47b2a89d4ff8a7c693f0fdeab8ce3fadc8a97dec0fa0beaed5 +size 9677 diff --git 
a/data/2025/2503_13xxx/2503.13933/images/0c0195a00540cb3bf109305d5fb451bbe379f9f22a7f1b85b10592b6f1b26c16.jpg b/data/2025/2503_13xxx/2503.13933/images/0c0195a00540cb3bf109305d5fb451bbe379f9f22a7f1b85b10592b6f1b26c16.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8ad400a9d5d78c799a4d17970ac72e67dccbded2 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/0c0195a00540cb3bf109305d5fb451bbe379f9f22a7f1b85b10592b6f1b26c16.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dd049f638f0cac3550e68cd2aa01714fd23a2c76c17a36a937de8b1582844d67 +size 8299 diff --git a/data/2025/2503_13xxx/2503.13933/images/0e82a5c6d9e2756db10568be6ffab3861b55523fb118976e8de8b681c9bca3cf.jpg b/data/2025/2503_13xxx/2503.13933/images/0e82a5c6d9e2756db10568be6ffab3861b55523fb118976e8de8b681c9bca3cf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3b5ec2ad9f5dbb60239de2218ee1cefeabebb660 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/0e82a5c6d9e2756db10568be6ffab3861b55523fb118976e8de8b681c9bca3cf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eaf8415fb697691d9ee1f241acaed2649cb08f141698040c0ad67c02af764954 +size 4427 diff --git a/data/2025/2503_13xxx/2503.13933/images/0f42907a3d6c93d278eb81dc581820a0260002f7e28abedb7a27b81e2d7be42a.jpg b/data/2025/2503_13xxx/2503.13933/images/0f42907a3d6c93d278eb81dc581820a0260002f7e28abedb7a27b81e2d7be42a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..aa94870bdf927817cb3764c9c72a5a3d681d8476 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/0f42907a3d6c93d278eb81dc581820a0260002f7e28abedb7a27b81e2d7be42a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e12bb79daf4846044a760109ae7d0bfe51ebdaac14113692f4bb80263a14f57 +size 22133 diff --git a/data/2025/2503_13xxx/2503.13933/images/121736df2595f3224598b727c8507c7b14a0a69c6722ac181695a58825c7f8e5.jpg 
b/data/2025/2503_13xxx/2503.13933/images/121736df2595f3224598b727c8507c7b14a0a69c6722ac181695a58825c7f8e5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..566849b69dc84083ff82e9acf08f4314cbd0c361 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/121736df2595f3224598b727c8507c7b14a0a69c6722ac181695a58825c7f8e5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d34f920acf8a7ba942905ec5828d3f0c0d5475e8c62e2aab24068a495c5c3642 +size 5010 diff --git a/data/2025/2503_13xxx/2503.13933/images/12677f4cd4093cc99589a613f6ce0b437602b37dec61882d8dff98d389a1e840.jpg b/data/2025/2503_13xxx/2503.13933/images/12677f4cd4093cc99589a613f6ce0b437602b37dec61882d8dff98d389a1e840.jpg new file mode 100644 index 0000000000000000000000000000000000000000..620c0867a5c0ad0cbe286fad769342740057c36a --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/12677f4cd4093cc99589a613f6ce0b437602b37dec61882d8dff98d389a1e840.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:da08b9ae32d078466b241805f4f640b37136e75297bf6d3a3d931878cf0fe086 +size 9913 diff --git a/data/2025/2503_13xxx/2503.13933/images/13672870ba3d3e65ac32f7b556569b2691beeea4c17ceda38811f12e48a4d9bc.jpg b/data/2025/2503_13xxx/2503.13933/images/13672870ba3d3e65ac32f7b556569b2691beeea4c17ceda38811f12e48a4d9bc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ccea3eaac616ac04cbefee5800043b9a9d1b551d --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/13672870ba3d3e65ac32f7b556569b2691beeea4c17ceda38811f12e48a4d9bc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f76081d82950a17fbfa9554ee0ffa91757b481f52adec7b2fffcb3b4e9ccdc9f +size 22532 diff --git a/data/2025/2503_13xxx/2503.13933/images/1631e568cf76f755a995c303cd8aadd671a1fe046ecd4536bcbb2227090f0a43.jpg b/data/2025/2503_13xxx/2503.13933/images/1631e568cf76f755a995c303cd8aadd671a1fe046ecd4536bcbb2227090f0a43.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..0fd91a517ce894315062873bf81c65d522325075 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/1631e568cf76f755a995c303cd8aadd671a1fe046ecd4536bcbb2227090f0a43.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cbd5cffc7823235402a912757b467a99e61a27539f988d58166d8533f69c1cd6 +size 15656 diff --git a/data/2025/2503_13xxx/2503.13933/images/18ab3d2686f8cae986e5a3e3ca82f2a21d5836b3069fa44a2d1c92029694b446.jpg b/data/2025/2503_13xxx/2503.13933/images/18ab3d2686f8cae986e5a3e3ca82f2a21d5836b3069fa44a2d1c92029694b446.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0163b0f7ea0c0b59f848a550348b99a853ceabeb --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/18ab3d2686f8cae986e5a3e3ca82f2a21d5836b3069fa44a2d1c92029694b446.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:979a5ab050be6fe0fd4e6a734c7721a5f2e6bb9adc21ac128e5671ec476faaa1 +size 23799 diff --git a/data/2025/2503_13xxx/2503.13933/images/1d955bb1a1f7889ad13801e8d5887d83d6b136b048d6cff4c4dd8a26eb41a37a.jpg b/data/2025/2503_13xxx/2503.13933/images/1d955bb1a1f7889ad13801e8d5887d83d6b136b048d6cff4c4dd8a26eb41a37a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6cf62f9f0f418aaaf09618d8b084efec5eb9b7ad --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/1d955bb1a1f7889ad13801e8d5887d83d6b136b048d6cff4c4dd8a26eb41a37a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c231ad50651f1dab1c29ba0e8b169b5d77bfd76f101eac0788b038743a73109 +size 4963 diff --git a/data/2025/2503_13xxx/2503.13933/images/1e0837c8a03c5dd7bb6ad48fde26cc3fea68510e35b307ac270e07be256a353a.jpg b/data/2025/2503_13xxx/2503.13933/images/1e0837c8a03c5dd7bb6ad48fde26cc3fea68510e35b307ac270e07be256a353a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..156f5b924e71d4b8f821e92121e28acb523b865a --- /dev/null +++ 
b/data/2025/2503_13xxx/2503.13933/images/1e0837c8a03c5dd7bb6ad48fde26cc3fea68510e35b307ac270e07be256a353a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9815d7416de3dd036aca0a9cb76dfd5e03fe6447cdc841993426cb54ee2cd746 +size 22239 diff --git a/data/2025/2503_13xxx/2503.13933/images/1ff792e01f6a4782e6a7a960c755bc1261ea125e23cd82a65e43e7b723d1f98f.jpg b/data/2025/2503_13xxx/2503.13933/images/1ff792e01f6a4782e6a7a960c755bc1261ea125e23cd82a65e43e7b723d1f98f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dee5242652f225e83e9e9a6542944dc6fe11ee2d --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/1ff792e01f6a4782e6a7a960c755bc1261ea125e23cd82a65e43e7b723d1f98f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e2b06231137fb13f12581aad2ee59d32570e8be34cc89735f813086d066945a1 +size 3642 diff --git a/data/2025/2503_13xxx/2503.13933/images/1ffca6770318f49f4d362fee97c27f8a0a54ef98fbc6d78dd5736e0864301f88.jpg b/data/2025/2503_13xxx/2503.13933/images/1ffca6770318f49f4d362fee97c27f8a0a54ef98fbc6d78dd5736e0864301f88.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c0faf2c50d3af51573cf2bef586cc7990402026e --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/1ffca6770318f49f4d362fee97c27f8a0a54ef98fbc6d78dd5736e0864301f88.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8dc8edcffb51918478a561ea4f38dedd74bf819184990a1dd5e6fa92c5936b10 +size 19086 diff --git a/data/2025/2503_13xxx/2503.13933/images/200be0b0946761ab17f9c2c59f91eed1c6bccd651b97031cd574a612f7e34b9a.jpg b/data/2025/2503_13xxx/2503.13933/images/200be0b0946761ab17f9c2c59f91eed1c6bccd651b97031cd574a612f7e34b9a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a1e7bb243cc2cc2f791033a9f5586417a30c521a --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/200be0b0946761ab17f9c2c59f91eed1c6bccd651b97031cd574a612f7e34b9a.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:3f4c0ff6b5e58d2c8fab5f600829964930396cd3a5d4e7cb273a39f7ee746862 +size 18269 diff --git a/data/2025/2503_13xxx/2503.13933/images/200e944ecb9a12948443abdf482c6009a2431b4b8b208dfd2e997da50f353ad7.jpg b/data/2025/2503_13xxx/2503.13933/images/200e944ecb9a12948443abdf482c6009a2431b4b8b208dfd2e997da50f353ad7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..046cd2416b4ac87c7adc7ae396eecc498eb732ef --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/200e944ecb9a12948443abdf482c6009a2431b4b8b208dfd2e997da50f353ad7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c0165680cdd2abf0d12aa362db967ba8255cd266907ef1d522b4fd22a538dd17 +size 10549 diff --git a/data/2025/2503_13xxx/2503.13933/images/2070e61fd49ddc5fc7f11d25203116c3b06771ac17d03dde98df5bdaae2bcd76.jpg b/data/2025/2503_13xxx/2503.13933/images/2070e61fd49ddc5fc7f11d25203116c3b06771ac17d03dde98df5bdaae2bcd76.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b2d74a5dd73be9ff7c7a34e5889ac4e1cbc5e260 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/2070e61fd49ddc5fc7f11d25203116c3b06771ac17d03dde98df5bdaae2bcd76.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ff180de8c4abdb08fa53cd0a26dc32ae1e554c166847b49dcfca2d3efb920811 +size 3975 diff --git a/data/2025/2503_13xxx/2503.13933/images/216c6378c7e02132273fd396d64406c154b8a9b745b589affd537bb35c61797d.jpg b/data/2025/2503_13xxx/2503.13933/images/216c6378c7e02132273fd396d64406c154b8a9b745b589affd537bb35c61797d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0629b4d9000bd99dd38bc26161e6f5f7fb3aaca3 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/216c6378c7e02132273fd396d64406c154b8a9b745b589affd537bb35c61797d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0f5cfb13bf61d524bef1dcba2a4a8ce76f3fc5c7d78c3de017130b440e8878d6 +size 4770 diff --git 
a/data/2025/2503_13xxx/2503.13933/images/2211d1927532087afa8c9ab6bf9f0e46ebf0f9fcc68e95f91179e9e07f54486f.jpg b/data/2025/2503_13xxx/2503.13933/images/2211d1927532087afa8c9ab6bf9f0e46ebf0f9fcc68e95f91179e9e07f54486f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..14171e1a3294b61b133d8d968bc0506ef8c8ea8f --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/2211d1927532087afa8c9ab6bf9f0e46ebf0f9fcc68e95f91179e9e07f54486f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:79ffdb4b44b4312c22e3758c83785328954f4159862ff04bfdacb2ce8d6d6fac +size 7733 diff --git a/data/2025/2503_13xxx/2503.13933/images/2345a49f326f69ec67725fc081645d1b37d8869821b2514f92f47f0a0b9cc7e8.jpg b/data/2025/2503_13xxx/2503.13933/images/2345a49f326f69ec67725fc081645d1b37d8869821b2514f92f47f0a0b9cc7e8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..873603df9f640b914936c7bff7ee2a9226f220fe --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/2345a49f326f69ec67725fc081645d1b37d8869821b2514f92f47f0a0b9cc7e8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:98a5096c3768b4bb6694f0ffb72565bc30aca8cdab55d647752ee41154f16ce4 +size 30600 diff --git a/data/2025/2503_13xxx/2503.13933/images/2356fbd6103d0f1224ab953d79b4dd1accc5dfa6c206f9f994a88a02796022c1.jpg b/data/2025/2503_13xxx/2503.13933/images/2356fbd6103d0f1224ab953d79b4dd1accc5dfa6c206f9f994a88a02796022c1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c38b79f61277c530e450ada755988a89332b0205 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/2356fbd6103d0f1224ab953d79b4dd1accc5dfa6c206f9f994a88a02796022c1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5bb29a560fabea1af927d6669e535635ccd7369c5695a1f5f95cb3733a63d147 +size 28707 diff --git a/data/2025/2503_13xxx/2503.13933/images/26d6d373a42682554486e271777c219d82e17016041bbe53dc8f96b1cfed8012.jpg 
b/data/2025/2503_13xxx/2503.13933/images/26d6d373a42682554486e271777c219d82e17016041bbe53dc8f96b1cfed8012.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f96e4fc7cc034d7060d470e973c5a93a0ca6b571 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/26d6d373a42682554486e271777c219d82e17016041bbe53dc8f96b1cfed8012.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1cd144fbc12b3d52ad33c6a069afae4a066674c84ef435f60741b2a887aa89b9 +size 19517 diff --git a/data/2025/2503_13xxx/2503.13933/images/27c9b9d37a59318116c0189817ddcf4d426935b9529d8ffcf8d94821ff8726ba.jpg b/data/2025/2503_13xxx/2503.13933/images/27c9b9d37a59318116c0189817ddcf4d426935b9529d8ffcf8d94821ff8726ba.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e9c44449a2ec4054fc3c86adec4005c9070f1ff8 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/27c9b9d37a59318116c0189817ddcf4d426935b9529d8ffcf8d94821ff8726ba.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad1501b15dfecc6fe6f01253872692ad40e4cdbd98d5a3b979792fa0f616f934 +size 7121 diff --git a/data/2025/2503_13xxx/2503.13933/images/27d2162fec1e0e4eba57ca6fdf9b4a421edbc273be308e22965966bcd12bf083.jpg b/data/2025/2503_13xxx/2503.13933/images/27d2162fec1e0e4eba57ca6fdf9b4a421edbc273be308e22965966bcd12bf083.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0598f8e950c1c5b3a57a5ab4d53a239dd582ba09 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/27d2162fec1e0e4eba57ca6fdf9b4a421edbc273be308e22965966bcd12bf083.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:47bfe6f3f296e6dc8ee8ba63efdb0347a11342e40bd7e1bb16a057b02d8fd96a +size 34120 diff --git a/data/2025/2503_13xxx/2503.13933/images/2850ea393a41fa50b8726a0787ed58a81109d8f74bcacd2cf3ae869fd55d92b6.jpg b/data/2025/2503_13xxx/2503.13933/images/2850ea393a41fa50b8726a0787ed58a81109d8f74bcacd2cf3ae869fd55d92b6.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..d3fa554e2f13ab34002fa826bee9390a9a97f60f --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/2850ea393a41fa50b8726a0787ed58a81109d8f74bcacd2cf3ae869fd55d92b6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:deea328ccce3b5291012520f9366fd8dafb2bfd00f9b9f4aef7e774a3d7786da +size 4332 diff --git a/data/2025/2503_13xxx/2503.13933/images/2887806201dfdc52940b4f29cfa9a76d0349e2e81f67aec52ce92d0e88fd9b3f.jpg b/data/2025/2503_13xxx/2503.13933/images/2887806201dfdc52940b4f29cfa9a76d0349e2e81f67aec52ce92d0e88fd9b3f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..871699d9093329b4729421e2a2dbca588ae94f6f --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/2887806201dfdc52940b4f29cfa9a76d0349e2e81f67aec52ce92d0e88fd9b3f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0c33fb287e9a715c053e0c0a570381a1135f9f4640a4c8fb5a59f2fa1a483f0b +size 2183 diff --git a/data/2025/2503_13xxx/2503.13933/images/295ddd836e79bc713015f9c58a7d2c6ffb9cb7a0b208645ae1e873293965bec6.jpg b/data/2025/2503_13xxx/2503.13933/images/295ddd836e79bc713015f9c58a7d2c6ffb9cb7a0b208645ae1e873293965bec6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5d9d139fd680f2f753ec945bd5d5d46fc0eb4887 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/295ddd836e79bc713015f9c58a7d2c6ffb9cb7a0b208645ae1e873293965bec6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6cc1e7ca6979754e119209218e0abe2c1aef835c918183914e0634d8756f259c +size 2675 diff --git a/data/2025/2503_13xxx/2503.13933/images/2b76570ff52e322aa151ab2d01b26bd55700698f7f22581a405d4ac31235ad8a.jpg b/data/2025/2503_13xxx/2503.13933/images/2b76570ff52e322aa151ab2d01b26bd55700698f7f22581a405d4ac31235ad8a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7856f04046f62b18b9fe5d9983497e14d8208e8b --- /dev/null +++ 
b/data/2025/2503_13xxx/2503.13933/images/2b76570ff52e322aa151ab2d01b26bd55700698f7f22581a405d4ac31235ad8a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f493efadafbbb9b262d6bbde7491c599aeb78a41ef5b56777182eb04a1204d36 +size 7204 diff --git a/data/2025/2503_13xxx/2503.13933/images/2bafb2a9d73b20b8974d709a877de33ff2d86e93482c01b687869081af82ca9e.jpg b/data/2025/2503_13xxx/2503.13933/images/2bafb2a9d73b20b8974d709a877de33ff2d86e93482c01b687869081af82ca9e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a525803a03257f29b5c4bb04f269c9f0296b53c3 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/2bafb2a9d73b20b8974d709a877de33ff2d86e93482c01b687869081af82ca9e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b832fb8217568467892fc5ecf6d332b7e6b5481357094f7797e1bf3683ae0280 +size 54232 diff --git a/data/2025/2503_13xxx/2503.13933/images/2f518732887fd82f432149602affcd4d525bcadbceb4b370fede13fc298c6cf3.jpg b/data/2025/2503_13xxx/2503.13933/images/2f518732887fd82f432149602affcd4d525bcadbceb4b370fede13fc298c6cf3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5f67efec7a79005212ac41a135940cebe0ecf993 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/2f518732887fd82f432149602affcd4d525bcadbceb4b370fede13fc298c6cf3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d70312719f39423f1afe3c1d33fc89b07732aee19b9b96cfdc909a36236111bd +size 2343 diff --git a/data/2025/2503_13xxx/2503.13933/images/3b85eb658518d72411e3ec79246a5857219aa58d19ca589a83cb951c71802013.jpg b/data/2025/2503_13xxx/2503.13933/images/3b85eb658518d72411e3ec79246a5857219aa58d19ca589a83cb951c71802013.jpg new file mode 100644 index 0000000000000000000000000000000000000000..02fe8a76ceec10ce0bdff1fcce80a98152a30acd --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/3b85eb658518d72411e3ec79246a5857219aa58d19ca589a83cb951c71802013.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:e7de93032c3848ea10069c0232beb44fc81ce0aec5c5997f7898dd485fae977b +size 27530 diff --git a/data/2025/2503_13xxx/2503.13933/images/3c2e43dbebb9daba3688af22ec37ecb4d7500b7352d31753f4e28373423f66c8.jpg b/data/2025/2503_13xxx/2503.13933/images/3c2e43dbebb9daba3688af22ec37ecb4d7500b7352d31753f4e28373423f66c8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..946ce1d2f886f51bd067e2d8075e0e5ae1224156 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/3c2e43dbebb9daba3688af22ec37ecb4d7500b7352d31753f4e28373423f66c8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4410824524000d12c61ca500591da7aacd906478d9bec7d759b722bb6ae08477 +size 5004 diff --git a/data/2025/2503_13xxx/2503.13933/images/3d5930d344ad41e50bbdf9aa4fa400ae67373da8806b2d086f22076597db5e79.jpg b/data/2025/2503_13xxx/2503.13933/images/3d5930d344ad41e50bbdf9aa4fa400ae67373da8806b2d086f22076597db5e79.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3eeabe9c051fb1b7994d1833025014b37d8809a2 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/3d5930d344ad41e50bbdf9aa4fa400ae67373da8806b2d086f22076597db5e79.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5335014974b873d97bfba77134023a45616694b2282a7c36bc54116f6b67ec08 +size 27220 diff --git a/data/2025/2503_13xxx/2503.13933/images/3eaf36b2f65add23c007434e828372dc2c739c3a4cae5f057eede6b298198cc2.jpg b/data/2025/2503_13xxx/2503.13933/images/3eaf36b2f65add23c007434e828372dc2c739c3a4cae5f057eede6b298198cc2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d303d3c2ec3f99a8ad69c22388528ea9c6a4de4f --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/3eaf36b2f65add23c007434e828372dc2c739c3a4cae5f057eede6b298198cc2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4546a78ff8e13fc794a8929bafa051deada10ef60c80b981fab9f2eb785cde0f +size 13109 diff --git 
a/data/2025/2503_13xxx/2503.13933/images/436e24e7870b3692253c8bb03616ef37d9e6600fc07fc58cdc25ddba38c6aade.jpg b/data/2025/2503_13xxx/2503.13933/images/436e24e7870b3692253c8bb03616ef37d9e6600fc07fc58cdc25ddba38c6aade.jpg new file mode 100644 index 0000000000000000000000000000000000000000..793e60fba154977b7fd4ab5608a77b5d87445450 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/436e24e7870b3692253c8bb03616ef37d9e6600fc07fc58cdc25ddba38c6aade.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5a3f74176e28db8e740766bfc21a5b15a6beae84ef79e8d55e2ad1b3b4e0d3dd +size 104488 diff --git a/data/2025/2503_13xxx/2503.13933/images/43c0b7c65e4bba26c746c9a922247c0398fa273e4791579a9699590c69d1bac1.jpg b/data/2025/2503_13xxx/2503.13933/images/43c0b7c65e4bba26c746c9a922247c0398fa273e4791579a9699590c69d1bac1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dc85a9204fcb0fb1dbb4fff788e368ef04f892eb --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/43c0b7c65e4bba26c746c9a922247c0398fa273e4791579a9699590c69d1bac1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:edef8d6d429def21eb8bb1210486b366d7be1cbbacba3674c54506752ae0c800 +size 6872 diff --git a/data/2025/2503_13xxx/2503.13933/images/443ce4eba0f6dc0c99596fa2efb800af5593d2e111fec3b3a4777d6acdf943e3.jpg b/data/2025/2503_13xxx/2503.13933/images/443ce4eba0f6dc0c99596fa2efb800af5593d2e111fec3b3a4777d6acdf943e3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4464223e081b38c3f620a09886243dc41a2690f8 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/443ce4eba0f6dc0c99596fa2efb800af5593d2e111fec3b3a4777d6acdf943e3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f3cf02e57ac171ee9cb61502bf65357097deac26b311349eff6816681ebb5eb1 +size 3008 diff --git a/data/2025/2503_13xxx/2503.13933/images/455e449627117c21f40ecb4a6ab5bd0b4fee5a46f3b2c0e54b88fc6f4f3fa290.jpg 
b/data/2025/2503_13xxx/2503.13933/images/455e449627117c21f40ecb4a6ab5bd0b4fee5a46f3b2c0e54b88fc6f4f3fa290.jpg new file mode 100644 index 0000000000000000000000000000000000000000..88ae3e9f7d1987a926dc28174366f7b208c2b47e --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/455e449627117c21f40ecb4a6ab5bd0b4fee5a46f3b2c0e54b88fc6f4f3fa290.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:17f436f2720530f920451ed7108e7f0f3899157d5846d760b56c299068587a94 +size 3303 diff --git a/data/2025/2503_13xxx/2503.13933/images/4780699941f48ed622c9dc594a6387e86809e70653b7ccda7100991196fd833e.jpg b/data/2025/2503_13xxx/2503.13933/images/4780699941f48ed622c9dc594a6387e86809e70653b7ccda7100991196fd833e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..69b04c3e05cf236444e7bf079a96af81f45c7678 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/4780699941f48ed622c9dc594a6387e86809e70653b7ccda7100991196fd833e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ef6e8930a0e1ae860183dec07991f99c1a5ba750ea6d7061bc61c6ac33150745 +size 2669 diff --git a/data/2025/2503_13xxx/2503.13933/images/49cfe79290b02e9e6803436e489e02a9a13c2c0eb602d965d0b2a8f2d1022319.jpg b/data/2025/2503_13xxx/2503.13933/images/49cfe79290b02e9e6803436e489e02a9a13c2c0eb602d965d0b2a8f2d1022319.jpg new file mode 100644 index 0000000000000000000000000000000000000000..714799e14c2e12bdf994692c8128bbfa5ee9abb8 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/49cfe79290b02e9e6803436e489e02a9a13c2c0eb602d965d0b2a8f2d1022319.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1a9b3abb2033fc5977a2e7e2920c81bf6f854f457fb79013688e0d69cedc19ba +size 6008 diff --git a/data/2025/2503_13xxx/2503.13933/images/4f647f9527a5dd106e3c64c9d9a5f2ac9e311427cc75722f1e8cd157bd35a51d.jpg b/data/2025/2503_13xxx/2503.13933/images/4f647f9527a5dd106e3c64c9d9a5f2ac9e311427cc75722f1e8cd157bd35a51d.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..dfb9165d59fdd554f9ce589cb3d1aecdf472c57f --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/4f647f9527a5dd106e3c64c9d9a5f2ac9e311427cc75722f1e8cd157bd35a51d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8dc9a1afb43bdddac75a2f1b67acc4c8b5f162df1a79f90f442150d7e8d89dbc +size 19591 diff --git a/data/2025/2503_13xxx/2503.13933/images/510d3c85b34ccf42b968db87b2dc61e5a968979d6d9ee0f5bb108c984c52059b.jpg b/data/2025/2503_13xxx/2503.13933/images/510d3c85b34ccf42b968db87b2dc61e5a968979d6d9ee0f5bb108c984c52059b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..47a388fa4b6326620b763b56f06710f8798da35e --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/510d3c85b34ccf42b968db87b2dc61e5a968979d6d9ee0f5bb108c984c52059b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eeb466c4b75a9ab65c88e8139b2d46610fdc9284db00ca9d9e3249a3e175400e +size 8159 diff --git a/data/2025/2503_13xxx/2503.13933/images/51c1fa786235050f5172fb12b02d8471bc9e59b9720701806c1efb90ea6ce7f8.jpg b/data/2025/2503_13xxx/2503.13933/images/51c1fa786235050f5172fb12b02d8471bc9e59b9720701806c1efb90ea6ce7f8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7dcf440858bd707a962a8bb97af4fec7b27f0ff6 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/51c1fa786235050f5172fb12b02d8471bc9e59b9720701806c1efb90ea6ce7f8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bc16a6fec75e3f041deb48cc0dc61e5148285b46745c83c60cbb3f7755938813 +size 2012 diff --git a/data/2025/2503_13xxx/2503.13933/images/555f4436ae49bf816ef7e5a627c0b85a2e2e913055dd1ca634be549a8ad7c3e9.jpg b/data/2025/2503_13xxx/2503.13933/images/555f4436ae49bf816ef7e5a627c0b85a2e2e913055dd1ca634be549a8ad7c3e9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8009a08bb9f345d98f38c8f585a9463432c2533b --- /dev/null +++ 
b/data/2025/2503_13xxx/2503.13933/images/555f4436ae49bf816ef7e5a627c0b85a2e2e913055dd1ca634be549a8ad7c3e9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:94a774ae81c0930dd87a86eaa936bd52f40a2744b4354154f3c815fa751a6abe +size 6061 diff --git a/data/2025/2503_13xxx/2503.13933/images/57bcb0ff36070307aebbf1ca331a3212b7f214e6efa75ee050ee9d20916b0329.jpg b/data/2025/2503_13xxx/2503.13933/images/57bcb0ff36070307aebbf1ca331a3212b7f214e6efa75ee050ee9d20916b0329.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6669308d0ccbdf0cfbcb90b8eb53836a08b4063d --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/57bcb0ff36070307aebbf1ca331a3212b7f214e6efa75ee050ee9d20916b0329.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:72ba3762be567dcb21367c3b793b26188afee0430bb7aaadab232f8e2d69ad05 +size 18416 diff --git a/data/2025/2503_13xxx/2503.13933/images/58230b63f91aab316085467bba07586972fef6aba1811c02d700384871cb156e.jpg b/data/2025/2503_13xxx/2503.13933/images/58230b63f91aab316085467bba07586972fef6aba1811c02d700384871cb156e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..188695c9e29bb24c1aa24b5c1c3f789f30dd7afb --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/58230b63f91aab316085467bba07586972fef6aba1811c02d700384871cb156e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:29da833e91e47d1b534a0048949306acbf49b7b6c99f0bdb563a96ece5c6bbdd +size 4688 diff --git a/data/2025/2503_13xxx/2503.13933/images/5ac20a39f47685ebb9378a0c4e12e417a603d78fe820a3ac37f08ecdb86a0b5e.jpg b/data/2025/2503_13xxx/2503.13933/images/5ac20a39f47685ebb9378a0c4e12e417a603d78fe820a3ac37f08ecdb86a0b5e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..aaee2fd91c6b693c920cd551a5b4413dfc6e5b91 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/5ac20a39f47685ebb9378a0c4e12e417a603d78fe820a3ac37f08ecdb86a0b5e.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:c152d5b5616856b8c046e5d7c9fb3842d6e5dac942e728cd5ee6b9551c620cfa +size 54167 diff --git a/data/2025/2503_13xxx/2503.13933/images/5dd1f815b0f0d6ce05c783498743769c58a8c3fe64da410116c95bde0cf3ea5e.jpg b/data/2025/2503_13xxx/2503.13933/images/5dd1f815b0f0d6ce05c783498743769c58a8c3fe64da410116c95bde0cf3ea5e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7ca3b728a307d77d31d18c9a300e1391d01eb293 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/5dd1f815b0f0d6ce05c783498743769c58a8c3fe64da410116c95bde0cf3ea5e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5bbc4dc87d7c64f7d5bee7e47b98bf14efa74ddfe3da05111748cde86561ee01 +size 3479 diff --git a/data/2025/2503_13xxx/2503.13933/images/5e704b162ebdd448550d9186205e95bdb2ff6e27cf2d3fe5c63a122256540197.jpg b/data/2025/2503_13xxx/2503.13933/images/5e704b162ebdd448550d9186205e95bdb2ff6e27cf2d3fe5c63a122256540197.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3f4df123965c7d324ccf78a8e34d729864be2e17 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/5e704b162ebdd448550d9186205e95bdb2ff6e27cf2d3fe5c63a122256540197.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d35b14938de00599b1872f02e187106c4150226e7ab2ca721a2b2b928380a80a +size 22085 diff --git a/data/2025/2503_13xxx/2503.13933/images/601b382df5b516b1ee0757a77850a3c853d0c52e226159856b25d1d8853924a8.jpg b/data/2025/2503_13xxx/2503.13933/images/601b382df5b516b1ee0757a77850a3c853d0c52e226159856b25d1d8853924a8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c00fae152dacbc6310c5a29ff5f3d425022b089e --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/601b382df5b516b1ee0757a77850a3c853d0c52e226159856b25d1d8853924a8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6859b38c1b7fb02fcb92feefc893630f7ffaf1aa92be83b51766f33a9fc3db2e +size 11632 diff --git 
a/data/2025/2503_13xxx/2503.13933/images/6228caa736b34f1a766b1c32d5292f854237e8521a4f50f7ab19126515009330.jpg b/data/2025/2503_13xxx/2503.13933/images/6228caa736b34f1a766b1c32d5292f854237e8521a4f50f7ab19126515009330.jpg new file mode 100644 index 0000000000000000000000000000000000000000..206846a0e4beee57817378f16c7aa9e576eee0e8 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/6228caa736b34f1a766b1c32d5292f854237e8521a4f50f7ab19126515009330.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a52354bcabb581db052fd40e9fb59f743fa820ccfe73d3e6d63d21185ccf3253 +size 5716 diff --git a/data/2025/2503_13xxx/2503.13933/images/64d1ac02f489cfb26fbbdb049f3cb75bdc193bcd709aec42ebcd8aff2a91a9b3.jpg b/data/2025/2503_13xxx/2503.13933/images/64d1ac02f489cfb26fbbdb049f3cb75bdc193bcd709aec42ebcd8aff2a91a9b3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e622861d52424e8856905aff6e487f31063c9c96 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/64d1ac02f489cfb26fbbdb049f3cb75bdc193bcd709aec42ebcd8aff2a91a9b3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e9cc6ba9bd3e77fb2104f4e2dd0a74e279d5720c0f7a8f505540439b2fe90b4c +size 9581 diff --git a/data/2025/2503_13xxx/2503.13933/images/65318179eddcce82c105fefcf6d4e4fb81f40bde17ec3c84afebf28ebb9869b3.jpg b/data/2025/2503_13xxx/2503.13933/images/65318179eddcce82c105fefcf6d4e4fb81f40bde17ec3c84afebf28ebb9869b3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..54fb61bd4a889af4dbf8e91bee0e5e8cfc7ce992 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/65318179eddcce82c105fefcf6d4e4fb81f40bde17ec3c84afebf28ebb9869b3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c98474887ffc23b984f5be48e728a814248ecaabacb81954d67b0e15acb59839 +size 40143 diff --git a/data/2025/2503_13xxx/2503.13933/images/65f7d6cba907050260e3f59100990b220e18b3e661510dbe74314d86ce119fce.jpg 
b/data/2025/2503_13xxx/2503.13933/images/65f7d6cba907050260e3f59100990b220e18b3e661510dbe74314d86ce119fce.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f9f5453580ee25e87f8bf32de8996b00ea447ebc --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/65f7d6cba907050260e3f59100990b220e18b3e661510dbe74314d86ce119fce.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c390dd7057bab90386d1f04e209e3de826510768929740c33dfb2dbb26297619 +size 25743 diff --git a/data/2025/2503_13xxx/2503.13933/images/6ac465c63dfc67960586a79eb1aa82ad47e1316f3b86609bc249f380c60fe7ee.jpg b/data/2025/2503_13xxx/2503.13933/images/6ac465c63dfc67960586a79eb1aa82ad47e1316f3b86609bc249f380c60fe7ee.jpg new file mode 100644 index 0000000000000000000000000000000000000000..322c49a3c62bcdddb724093ebf84d2bdae7693b6 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/6ac465c63dfc67960586a79eb1aa82ad47e1316f3b86609bc249f380c60fe7ee.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:497f69ca796e6dd08e1a4ce1a6009072a48233f1ec573ac656c97b9b3ab8bd42 +size 8719 diff --git a/data/2025/2503_13xxx/2503.13933/images/6c2d52bb0f68ce1b2b23d130b8706969e5dd55fc54afb40e93be9e7a63dfc8a8.jpg b/data/2025/2503_13xxx/2503.13933/images/6c2d52bb0f68ce1b2b23d130b8706969e5dd55fc54afb40e93be9e7a63dfc8a8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..161f605881e10f3638031aad689cc622099bf37a --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/6c2d52bb0f68ce1b2b23d130b8706969e5dd55fc54afb40e93be9e7a63dfc8a8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:006584ba80c91bc1a79d5c30e9afda590d8d5e57f4c0a494ecab2e1343ad607d +size 19617 diff --git a/data/2025/2503_13xxx/2503.13933/images/6dccaac9c3bd5c80948b57cdbca225315a06fe5add804187394c7973ca6c6a07.jpg b/data/2025/2503_13xxx/2503.13933/images/6dccaac9c3bd5c80948b57cdbca225315a06fe5add804187394c7973ca6c6a07.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..b8edf74a1d819e894f856764e301b791d1fa232a --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/6dccaac9c3bd5c80948b57cdbca225315a06fe5add804187394c7973ca6c6a07.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:db1fcc7a608ee90fb44c78beb19eca8e7902e6a3a499a0061399375a22ee7be0 +size 9891 diff --git a/data/2025/2503_13xxx/2503.13933/images/716c83297ce005647d99e5f23cc46e6e3024853b336292259b2f47b9f8a009d8.jpg b/data/2025/2503_13xxx/2503.13933/images/716c83297ce005647d99e5f23cc46e6e3024853b336292259b2f47b9f8a009d8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..549d8815ba412a2716c4a6c42644bb9f67055689 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/716c83297ce005647d99e5f23cc46e6e3024853b336292259b2f47b9f8a009d8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:220f024ecf83850345db22de3f7343245fe214f7b0eab3ee182e14052be21ce5 +size 13199 diff --git a/data/2025/2503_13xxx/2503.13933/images/71b70cc86e81f29f17717ebc5e990a233cea4fd792c202ed7d02aa919c3be394.jpg b/data/2025/2503_13xxx/2503.13933/images/71b70cc86e81f29f17717ebc5e990a233cea4fd792c202ed7d02aa919c3be394.jpg new file mode 100644 index 0000000000000000000000000000000000000000..597ddd45b94b02b436b98d825e41642b0eacb79a --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/71b70cc86e81f29f17717ebc5e990a233cea4fd792c202ed7d02aa919c3be394.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:63d9d8ae55475ef0ccd89706449a9f79eb66291d11fb2db4ff038a6a66f99132 +size 17301 diff --git a/data/2025/2503_13xxx/2503.13933/images/76a66a1580835227f9b852894f9bfae35d9172a52506596461a341847024aa44.jpg b/data/2025/2503_13xxx/2503.13933/images/76a66a1580835227f9b852894f9bfae35d9172a52506596461a341847024aa44.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e28677117c99c0a4e401d19c355123134ef0f626 --- /dev/null +++ 
b/data/2025/2503_13xxx/2503.13933/images/76a66a1580835227f9b852894f9bfae35d9172a52506596461a341847024aa44.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:81b7bb560a9f19841bc6065d90bcad84a2a0ec3aa93c73beed63ec8fa8632ea3 +size 4846 diff --git a/data/2025/2503_13xxx/2503.13933/images/788d964946bd243c8ce37085ed0bd577906f91cee22b4fe5ceb8bc3ee147c7e1.jpg b/data/2025/2503_13xxx/2503.13933/images/788d964946bd243c8ce37085ed0bd577906f91cee22b4fe5ceb8bc3ee147c7e1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..36737a805bdd53828c40958903945c779ca3035b --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/788d964946bd243c8ce37085ed0bd577906f91cee22b4fe5ceb8bc3ee147c7e1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:638e3dbaf29078d3566afc4417b6a44c46f68cd5038845080fb18df92297458c +size 40282 diff --git a/data/2025/2503_13xxx/2503.13933/images/7941905e0df1d4b2af4263592fe2ecf1aa7ed89fb9abe6f6c67bb8b0be136701.jpg b/data/2025/2503_13xxx/2503.13933/images/7941905e0df1d4b2af4263592fe2ecf1aa7ed89fb9abe6f6c67bb8b0be136701.jpg new file mode 100644 index 0000000000000000000000000000000000000000..28ff717cf7446777564eac5949b8e22cea299ca9 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/7941905e0df1d4b2af4263592fe2ecf1aa7ed89fb9abe6f6c67bb8b0be136701.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:32c7c92f117866f24b16233951547da7b96878e142a63f77c179bf3a06546308 +size 6659 diff --git a/data/2025/2503_13xxx/2503.13933/images/7a2648b8416cf48a0862a7cd414488f41c8312b6904c2eb9ba16212e61367671.jpg b/data/2025/2503_13xxx/2503.13933/images/7a2648b8416cf48a0862a7cd414488f41c8312b6904c2eb9ba16212e61367671.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bf9abf2c3872d6ad10b33bb7f3161f85d52e7fb9 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/7a2648b8416cf48a0862a7cd414488f41c8312b6904c2eb9ba16212e61367671.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:32c7bf53c3cce19cfb6aefdaa002a285bfb7b14b6f8cb6c18ecb4b43be998083 +size 8550 diff --git a/data/2025/2503_13xxx/2503.13933/images/7caeaada43efeb0a507f555c9c0b240ec85768dcd8b9ddf7f5624cd32319ec5c.jpg b/data/2025/2503_13xxx/2503.13933/images/7caeaada43efeb0a507f555c9c0b240ec85768dcd8b9ddf7f5624cd32319ec5c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..997f47db78a47abd2306ba1207bb525f4769ab83 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/7caeaada43efeb0a507f555c9c0b240ec85768dcd8b9ddf7f5624cd32319ec5c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7e327e311d7f1518cfa2a2a5d4373e00c8665adf7e33807bdfc15e0b0882248b +size 45592 diff --git a/data/2025/2503_13xxx/2503.13933/images/7fb0c377642dbc2df6a083a06da8df232e8bdb8c9587d121c766dfaf94864ad3.jpg b/data/2025/2503_13xxx/2503.13933/images/7fb0c377642dbc2df6a083a06da8df232e8bdb8c9587d121c766dfaf94864ad3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b0f8fa05e2e628db2087c9d4df2244056146d71b --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/7fb0c377642dbc2df6a083a06da8df232e8bdb8c9587d121c766dfaf94864ad3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cb8bfdd2e6164b9700665cd524c5c9dee80b9cfb87a3a89eb6f61b66b65fbd2e +size 35129 diff --git a/data/2025/2503_13xxx/2503.13933/images/80785b87073ee4f01723a09c3d4df1c29bb044ecc6d5354a30d900b86e55bf5f.jpg b/data/2025/2503_13xxx/2503.13933/images/80785b87073ee4f01723a09c3d4df1c29bb044ecc6d5354a30d900b86e55bf5f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f15c7e851d9b213f7f3bf05ef4e0b343f13ca9f4 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/80785b87073ee4f01723a09c3d4df1c29bb044ecc6d5354a30d900b86e55bf5f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f33b39ad92823dc89ac973e37bd29a48e9df3ab297f169adabd9f08c97e4859b +size 9429 diff --git 
a/data/2025/2503_13xxx/2503.13933/images/842c666a38794f7be422c207d3fdb3897871159188f9f65baca25190b9f10be2.jpg b/data/2025/2503_13xxx/2503.13933/images/842c666a38794f7be422c207d3fdb3897871159188f9f65baca25190b9f10be2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..84af7548e2ba7016ff9c6cf110c49078800e1831 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/842c666a38794f7be422c207d3fdb3897871159188f9f65baca25190b9f10be2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9f06bec31ccc7038275fc96f2f7e65522bb723c838f0b85837c1cf542f545acd +size 5746 diff --git a/data/2025/2503_13xxx/2503.13933/images/86142d90d03cef39ede414943671a8caa0869ac0e8d886e003ff95b6ead14135.jpg b/data/2025/2503_13xxx/2503.13933/images/86142d90d03cef39ede414943671a8caa0869ac0e8d886e003ff95b6ead14135.jpg new file mode 100644 index 0000000000000000000000000000000000000000..38cc3f0ab843f7f6111ae4e1d529db8e450df530 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/86142d90d03cef39ede414943671a8caa0869ac0e8d886e003ff95b6ead14135.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0f49d7b8fd3f7cdd8d6802b5da38946c5353ffce482cfdf231ad8c5d4add8dc8 +size 6737 diff --git a/data/2025/2503_13xxx/2503.13933/images/88a6dfc3df4ae1b1e46ae11bb2c9bb454322138eaa5dae837daeb486544ab148.jpg b/data/2025/2503_13xxx/2503.13933/images/88a6dfc3df4ae1b1e46ae11bb2c9bb454322138eaa5dae837daeb486544ab148.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4bd5b5b00751912f66a04e60a3918c7b8ea08f09 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/88a6dfc3df4ae1b1e46ae11bb2c9bb454322138eaa5dae837daeb486544ab148.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aa1b915172f11b42ab05f1cf88eb230646a5a3d3260df62478b68383bbbd26db +size 3312 diff --git a/data/2025/2503_13xxx/2503.13933/images/8979a59922b7c2b633296235f76ce06d2f5c9ebe1c69f2365fd98667227a22a7.jpg 
b/data/2025/2503_13xxx/2503.13933/images/8979a59922b7c2b633296235f76ce06d2f5c9ebe1c69f2365fd98667227a22a7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..545d1029cbb586d06f697d9a7313f70b82d7e4ff --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/8979a59922b7c2b633296235f76ce06d2f5c9ebe1c69f2365fd98667227a22a7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:50939b6c48d17837bd2f622c45e52db02773649c1558d880f985deeb2c557cc4 +size 9766 diff --git a/data/2025/2503_13xxx/2503.13933/images/917627eb0b2157a4d53686719880b45a818bf05e0967a467fc9e17abc0ca611b.jpg b/data/2025/2503_13xxx/2503.13933/images/917627eb0b2157a4d53686719880b45a818bf05e0967a467fc9e17abc0ca611b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..075b8b22d068df31f2d0c0d28e6e1f9de9247919 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/917627eb0b2157a4d53686719880b45a818bf05e0967a467fc9e17abc0ca611b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3b5c4fe8f4985abd88105069ce1e779b38dbb87e454288fa3f414c4bac3e9881 +size 48068 diff --git a/data/2025/2503_13xxx/2503.13933/images/9ca13b889ad3a6cd7cf0bfd6b1e8aa69a5feeb8a4afe538f9672b908de4b2533.jpg b/data/2025/2503_13xxx/2503.13933/images/9ca13b889ad3a6cd7cf0bfd6b1e8aa69a5feeb8a4afe538f9672b908de4b2533.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a2416cc0505506559fb4c380352141fd5a7bd4d8 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/9ca13b889ad3a6cd7cf0bfd6b1e8aa69a5feeb8a4afe538f9672b908de4b2533.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d9aeb89501184676431672fe7a24009ba771eb6fc0568db4e96248d7b4181732 +size 7944 diff --git a/data/2025/2503_13xxx/2503.13933/images/a018defcc9f90fd14ccd98acbd00e8f58264429bc1da13c7878a25904266e95c.jpg b/data/2025/2503_13xxx/2503.13933/images/a018defcc9f90fd14ccd98acbd00e8f58264429bc1da13c7878a25904266e95c.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..34e2fbb6a6c3e64cdbab9b8dff0d3b9149456909 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/a018defcc9f90fd14ccd98acbd00e8f58264429bc1da13c7878a25904266e95c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a2b8515132ea6235b94da7ca2a508309bfebae76940bd44503d6fe07ed5eab40 +size 4208 diff --git a/data/2025/2503_13xxx/2503.13933/images/a1b8e9c50d38db24fd87e1a643e8bc202074d77880e788370527c8ee1e181d56.jpg b/data/2025/2503_13xxx/2503.13933/images/a1b8e9c50d38db24fd87e1a643e8bc202074d77880e788370527c8ee1e181d56.jpg new file mode 100644 index 0000000000000000000000000000000000000000..edfd5df04fa119120b3c133a17e17c3295e6ae69 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/a1b8e9c50d38db24fd87e1a643e8bc202074d77880e788370527c8ee1e181d56.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5bfff5445c7a14fb7a8bb8056c3c62df1e3a48215bc82570dc21113f0b0aa72a +size 23195 diff --git a/data/2025/2503_13xxx/2503.13933/images/a2e52b63c9dbf211244d1515b6c4a7734bfa87921e17aa4c26c87c44d597b05e.jpg b/data/2025/2503_13xxx/2503.13933/images/a2e52b63c9dbf211244d1515b6c4a7734bfa87921e17aa4c26c87c44d597b05e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ec166ab146e9612ce70c17d20931054d01a31492 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/a2e52b63c9dbf211244d1515b6c4a7734bfa87921e17aa4c26c87c44d597b05e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:72d7be120d5b30574648533d3738b5c121a22c44024d01f17b21122e9ce9408e +size 8248 diff --git a/data/2025/2503_13xxx/2503.13933/images/a2e65bdbb019ea2bd49aacb1c34071c2a67d11b645226f471a8cc7bf4e0dda71.jpg b/data/2025/2503_13xxx/2503.13933/images/a2e65bdbb019ea2bd49aacb1c34071c2a67d11b645226f471a8cc7bf4e0dda71.jpg new file mode 100644 index 0000000000000000000000000000000000000000..09301b6b8831178ee024c3addb5074be2ddf053e --- /dev/null +++ 
b/data/2025/2503_13xxx/2503.13933/images/a2e65bdbb019ea2bd49aacb1c34071c2a67d11b645226f471a8cc7bf4e0dda71.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bd1b85f4f4507ac56826b84eb069dce319a084317ce40c22bfffde8704541dc4 +size 16449 diff --git a/data/2025/2503_13xxx/2503.13933/images/a36972000cd774cc5ab556e0900c90f4c6eea0a3a42c11d6c7322779ba30c687.jpg b/data/2025/2503_13xxx/2503.13933/images/a36972000cd774cc5ab556e0900c90f4c6eea0a3a42c11d6c7322779ba30c687.jpg new file mode 100644 index 0000000000000000000000000000000000000000..263b05607d7b914269ec824f79fade4517c68ce1 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/a36972000cd774cc5ab556e0900c90f4c6eea0a3a42c11d6c7322779ba30c687.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b1b5f4483bc363856a8ff54ed36d9625a38ae43bcd1ca2f8a5d1967797be8fa7 +size 21818 diff --git a/data/2025/2503_13xxx/2503.13933/images/a46bd46fc638c2bedfa906d64ab3b8c25c051a5c1d8b29b1a3d52119d168cd44.jpg b/data/2025/2503_13xxx/2503.13933/images/a46bd46fc638c2bedfa906d64ab3b8c25c051a5c1d8b29b1a3d52119d168cd44.jpg new file mode 100644 index 0000000000000000000000000000000000000000..89d8614dba25c5b0a6c1c8d32d636cbd18ee6f9c --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/a46bd46fc638c2bedfa906d64ab3b8c25c051a5c1d8b29b1a3d52119d168cd44.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a57f08c1e4561c6f50b9f2bebb404ff30c8ec3f87210e508551cb0029f2a1dd1 +size 13778 diff --git a/data/2025/2503_13xxx/2503.13933/images/a7a3c23c768388495f3481c5af7e9b4de7d239d576ea7b66c1c6800cf8232ab0.jpg b/data/2025/2503_13xxx/2503.13933/images/a7a3c23c768388495f3481c5af7e9b4de7d239d576ea7b66c1c6800cf8232ab0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d7e85b6eedc3935595170473b3881ba48ff2e960 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/a7a3c23c768388495f3481c5af7e9b4de7d239d576ea7b66c1c6800cf8232ab0.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:ccb89452516ccd25e6f19698c1921d1e3e274d96f66052b8f2f295d98e453db7 +size 4501 diff --git a/data/2025/2503_13xxx/2503.13933/images/a855b7b5af10595c3b507ce75c396e8ed53ba715f08759f5c0f86d5d5ce978ec.jpg b/data/2025/2503_13xxx/2503.13933/images/a855b7b5af10595c3b507ce75c396e8ed53ba715f08759f5c0f86d5d5ce978ec.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4191ec0d412c48298b57b01ea3243428961405b2 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/a855b7b5af10595c3b507ce75c396e8ed53ba715f08759f5c0f86d5d5ce978ec.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6e3a8bc0c21314afb4aec6709fd135e7c0b07fdfcddd1e15776b8d782676b2a6 +size 40959 diff --git a/data/2025/2503_13xxx/2503.13933/images/aa5c83a28c6329ef2d94f536ced88460656363be508afd476967c069343f0768.jpg b/data/2025/2503_13xxx/2503.13933/images/aa5c83a28c6329ef2d94f536ced88460656363be508afd476967c069343f0768.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e2e93601545276de23920ad09572d7ff12b44377 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/aa5c83a28c6329ef2d94f536ced88460656363be508afd476967c069343f0768.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6c4d06155b5efe995d2b5c281ec2b9e6d4f4c7e3dc20d9d2a7b1988ed3486f6a +size 26728 diff --git a/data/2025/2503_13xxx/2503.13933/images/ab9e37f336fe58f990f7a2f5ec598fd37f49400dbe516bbd8e978f446befc945.jpg b/data/2025/2503_13xxx/2503.13933/images/ab9e37f336fe58f990f7a2f5ec598fd37f49400dbe516bbd8e978f446befc945.jpg new file mode 100644 index 0000000000000000000000000000000000000000..30dbbef8e75c08627abb7ad9994154422c01c7f6 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/ab9e37f336fe58f990f7a2f5ec598fd37f49400dbe516bbd8e978f446befc945.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b991d7f321f8c120e5a2a45195ca9aa7ae590ce970ad141ccbff77e6ff67b164 +size 6012 diff --git 
a/data/2025/2503_13xxx/2503.13933/images/af36e8b78f8671a9d8dbb5b6d0a5894167bc9a3b14b2f2e6073c308d17cc95a0.jpg b/data/2025/2503_13xxx/2503.13933/images/af36e8b78f8671a9d8dbb5b6d0a5894167bc9a3b14b2f2e6073c308d17cc95a0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..12a9a21ccbddb6fa82d124b207da4f83fb0d2628 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/af36e8b78f8671a9d8dbb5b6d0a5894167bc9a3b14b2f2e6073c308d17cc95a0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e98f8e135cfed09df2cd48f3e64398632aa1a1e7f2e03ee5d4c2f94446eedc8a +size 22133 diff --git a/data/2025/2503_13xxx/2503.13933/images/b1257e053086d2dd69596cd534767d87c78a5bcb112c31e469f474d08790a53b.jpg b/data/2025/2503_13xxx/2503.13933/images/b1257e053086d2dd69596cd534767d87c78a5bcb112c31e469f474d08790a53b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1d3fb0f6f1165f40df23d563322b7d6d03073049 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/b1257e053086d2dd69596cd534767d87c78a5bcb112c31e469f474d08790a53b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fcf1a6b27988818ed44f867168f6321edc65744f8b906d451506d60e70fa8195 +size 21008 diff --git a/data/2025/2503_13xxx/2503.13933/images/b3c36a8388f62333cee2a3ef19cf1719f149c01e357cf8dd2259290f51072a49.jpg b/data/2025/2503_13xxx/2503.13933/images/b3c36a8388f62333cee2a3ef19cf1719f149c01e357cf8dd2259290f51072a49.jpg new file mode 100644 index 0000000000000000000000000000000000000000..23a76db00ad9040455af7a962ccf7fd9e6fbac90 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/b3c36a8388f62333cee2a3ef19cf1719f149c01e357cf8dd2259290f51072a49.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ef013db6578e3cae8e9462a1f1ef1af6c27dd64eb5673b8a3a4ea0be2ad54896 +size 22160 diff --git a/data/2025/2503_13xxx/2503.13933/images/b884ab7f47eb2ab953e6ac700afe4526438c7953f5553e2fc74b45b168828a0a.jpg 
b/data/2025/2503_13xxx/2503.13933/images/b884ab7f47eb2ab953e6ac700afe4526438c7953f5553e2fc74b45b168828a0a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f17dbae18042246cd774a965e7a1489546cbd56a --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/b884ab7f47eb2ab953e6ac700afe4526438c7953f5553e2fc74b45b168828a0a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5127ea7a57be92ec09347cf6044ec1269b6bfd514654f85fad5daa3a3abd3367 +size 10936 diff --git a/data/2025/2503_13xxx/2503.13933/images/ba32edcab434e8ad5e0767139009d16908e9de00ed4f1dfddd99493d69460436.jpg b/data/2025/2503_13xxx/2503.13933/images/ba32edcab434e8ad5e0767139009d16908e9de00ed4f1dfddd99493d69460436.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e6dd00c924e199a614561ece5cb6fd3639417da8 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/ba32edcab434e8ad5e0767139009d16908e9de00ed4f1dfddd99493d69460436.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9198ef4f8ba399b9db20bd9183e77417b5a1890106b74be2c7210dc3de6043c1 +size 38531 diff --git a/data/2025/2503_13xxx/2503.13933/images/be137d8c1c5541279284411afccbfff48b12633210ebbd9a063a2b7d974825c4.jpg b/data/2025/2503_13xxx/2503.13933/images/be137d8c1c5541279284411afccbfff48b12633210ebbd9a063a2b7d974825c4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..77ca89d855391bcda5866a8f8bf6c2232f0a3514 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/be137d8c1c5541279284411afccbfff48b12633210ebbd9a063a2b7d974825c4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b93c4df366e997938d87a4831e0577184d264e1f63cec2204a3942c5ac2aae78 +size 4682 diff --git a/data/2025/2503_13xxx/2503.13933/images/bfc7ed9b115c7244283bc7bd1bb21a1d5f6f4a7f42e6be7d33f9f30e7ab59b32.jpg b/data/2025/2503_13xxx/2503.13933/images/bfc7ed9b115c7244283bc7bd1bb21a1d5f6f4a7f42e6be7d33f9f30e7ab59b32.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..c408e2a73aa9f1e8a527ca3d0f5b43ac5d494c05 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/bfc7ed9b115c7244283bc7bd1bb21a1d5f6f4a7f42e6be7d33f9f30e7ab59b32.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:216d048c3b38f6b3e2ba4555c722b2a437e3db403bcd187e1fe3c954e82d19df +size 9819 diff --git a/data/2025/2503_13xxx/2503.13933/images/c2e33388cf4eff2cfecbd23aa72b77c80f3c2433581d947423a327c19d842950.jpg b/data/2025/2503_13xxx/2503.13933/images/c2e33388cf4eff2cfecbd23aa72b77c80f3c2433581d947423a327c19d842950.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9d4f9604a660b946e43371200433e3ffabb17c6e --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/c2e33388cf4eff2cfecbd23aa72b77c80f3c2433581d947423a327c19d842950.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a244e07047eee342c80d625e492e113f39c878133d82fa77ea57f98f9fb4d539 +size 5323 diff --git a/data/2025/2503_13xxx/2503.13933/images/c48d2e14614aa736de092b464d98e4b7c011a3e23c358ce4e3e04756f9406bdc.jpg b/data/2025/2503_13xxx/2503.13933/images/c48d2e14614aa736de092b464d98e4b7c011a3e23c358ce4e3e04756f9406bdc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..989723f3ab56b07e2808f4b66050d9fd94a5cf0e --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/c48d2e14614aa736de092b464d98e4b7c011a3e23c358ce4e3e04756f9406bdc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:796753bd04d05e63217c32d3c020e605c09d93388db8f7d332aafdc95fa18076 +size 8230 diff --git a/data/2025/2503_13xxx/2503.13933/images/cc72b3743dea1acce6b7f995d1ed32b295f09a088f75ac84fbe7696ee90234f1.jpg b/data/2025/2503_13xxx/2503.13933/images/cc72b3743dea1acce6b7f995d1ed32b295f09a088f75ac84fbe7696ee90234f1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bd7f65a4c67f12fbd4f1b576d5d5f218c45adebc --- /dev/null +++ 
b/data/2025/2503_13xxx/2503.13933/images/cc72b3743dea1acce6b7f995d1ed32b295f09a088f75ac84fbe7696ee90234f1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:44d13f6a3cfd5310a7e8e8b6b58048aff38da0730563ab112b9665c7e652715d +size 19396 diff --git a/data/2025/2503_13xxx/2503.13933/images/d04c9dfbd6fb40636267c6489da8d2b6f53e22a434eca91a1ca5341806ec0fcf.jpg b/data/2025/2503_13xxx/2503.13933/images/d04c9dfbd6fb40636267c6489da8d2b6f53e22a434eca91a1ca5341806ec0fcf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..52714b9f8cfecebce3cf4fcacc75ab8d4d0ec7c7 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/d04c9dfbd6fb40636267c6489da8d2b6f53e22a434eca91a1ca5341806ec0fcf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6d3041e50ec2358d10d1ede7e497136fe45c622c21c6770708e826c99b18f729 +size 15628 diff --git a/data/2025/2503_13xxx/2503.13933/images/d1d0eb95867b86e555e3a95df3d5b5a14cd34d608385d9234dcdbecea9446986.jpg b/data/2025/2503_13xxx/2503.13933/images/d1d0eb95867b86e555e3a95df3d5b5a14cd34d608385d9234dcdbecea9446986.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b342ba8dace15d7984f03630cc55e9719c2df18e --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/d1d0eb95867b86e555e3a95df3d5b5a14cd34d608385d9234dcdbecea9446986.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:823e2d8e6079e4e8ce7d2d6e659abd3f6ce665149df1dad7c9053f8d3841ffc7 +size 26487 diff --git a/data/2025/2503_13xxx/2503.13933/images/d4bdcf9536a28885f75ba497e0709c0aa67add7be58df316b5adee9c8e270d11.jpg b/data/2025/2503_13xxx/2503.13933/images/d4bdcf9536a28885f75ba497e0709c0aa67add7be58df316b5adee9c8e270d11.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c057454fc2cfad60073cca0c86a7bb90b1ea9b06 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/d4bdcf9536a28885f75ba497e0709c0aa67add7be58df316b5adee9c8e270d11.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:382a89bf82f51e979bdd55af40a03b8f9e3f8b81fcd91a9ac0cebcf50151a88a +size 6545 diff --git a/data/2025/2503_13xxx/2503.13933/images/db080711355133edb1442b6cf73a6f27394199d020bb9339f1f18dfa0304bcae.jpg b/data/2025/2503_13xxx/2503.13933/images/db080711355133edb1442b6cf73a6f27394199d020bb9339f1f18dfa0304bcae.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6aaa4c2f98667fc34366594cbf93293eae7cc32b --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/db080711355133edb1442b6cf73a6f27394199d020bb9339f1f18dfa0304bcae.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d681ae7b896864caa4005c10efb18653694ffc0335ee49bb82bb5edb76742a8b +size 13468 diff --git a/data/2025/2503_13xxx/2503.13933/images/dbfc73f7e6cb30bc3b1ff26752803cc253e565c63170aa9d1a505aec26fde8ef.jpg b/data/2025/2503_13xxx/2503.13933/images/dbfc73f7e6cb30bc3b1ff26752803cc253e565c63170aa9d1a505aec26fde8ef.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e41e61a3d368fbf0fb72b9d121ac582e605cc327 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/dbfc73f7e6cb30bc3b1ff26752803cc253e565c63170aa9d1a505aec26fde8ef.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:595300debca3a1c1aff3afca4d826659c10db52b55f86cd272eb4d4e0c151b70 +size 7748 diff --git a/data/2025/2503_13xxx/2503.13933/images/e26a208925086b40e365433211679472b0588b4faef6f759f0fae6134d0dc341.jpg b/data/2025/2503_13xxx/2503.13933/images/e26a208925086b40e365433211679472b0588b4faef6f759f0fae6134d0dc341.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a9dbb833934fd849c834f4f812f055e5ba0c080a --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/e26a208925086b40e365433211679472b0588b4faef6f759f0fae6134d0dc341.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9a1bbc6d326b3d30fa125767f9b542e5e353818ef34e37493556c0f2e0dbbb82 +size 37194 diff --git 
a/data/2025/2503_13xxx/2503.13933/images/e27bd24a7a36c6bfeba0deaabfea42edc50a012e970ff94dc077f09f3eea2588.jpg b/data/2025/2503_13xxx/2503.13933/images/e27bd24a7a36c6bfeba0deaabfea42edc50a012e970ff94dc077f09f3eea2588.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9f755dfccee7322bff597ee9a43dac9440139361 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/e27bd24a7a36c6bfeba0deaabfea42edc50a012e970ff94dc077f09f3eea2588.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:32644379f885318198ee6f2ceadc8f020c0edd2107395043837a34b358274a7e +size 8164 diff --git a/data/2025/2503_13xxx/2503.13933/images/e86f363bb7a81eae1c35edcbec092851a40b92859376773a56238be9207e674a.jpg b/data/2025/2503_13xxx/2503.13933/images/e86f363bb7a81eae1c35edcbec092851a40b92859376773a56238be9207e674a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..18f863cf6af764c8894fe0bed7ba8b593a5596fd --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/e86f363bb7a81eae1c35edcbec092851a40b92859376773a56238be9207e674a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ddba761dcae29de6beafad63c8193e54de19238bdfbe605fed861c15d3454b40 +size 6352 diff --git a/data/2025/2503_13xxx/2503.13933/images/ed6e0b362bd77ee3e09eb64dc8b2577b95d3d53dc5f5668730c8cb50d347d5fe.jpg b/data/2025/2503_13xxx/2503.13933/images/ed6e0b362bd77ee3e09eb64dc8b2577b95d3d53dc5f5668730c8cb50d347d5fe.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d4bd146a0fcde0eb6fe95a46702dff49253a131e --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/ed6e0b362bd77ee3e09eb64dc8b2577b95d3d53dc5f5668730c8cb50d347d5fe.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0515b571f4b9d2d0eb4b0e8cc784fa4bff488208ac88963edad3792f36df80e9 +size 7001 diff --git a/data/2025/2503_13xxx/2503.13933/images/f062c9e634cd5ae0bb9acc8714a2f98360a99e12636e133d41fb746e7105791c.jpg 
b/data/2025/2503_13xxx/2503.13933/images/f062c9e634cd5ae0bb9acc8714a2f98360a99e12636e133d41fb746e7105791c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f0bbb02030134a75e5e5493e9f88fab1d358ed57 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/f062c9e634cd5ae0bb9acc8714a2f98360a99e12636e133d41fb746e7105791c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:68a855538af770ab086b32fac990a457d5923969d5e599188eaff21adcb14633 +size 3336 diff --git a/data/2025/2503_13xxx/2503.13933/images/f365e38f426f2de7a48594f85771730534fee89a6b10a71c5d57f9955b1535a3.jpg b/data/2025/2503_13xxx/2503.13933/images/f365e38f426f2de7a48594f85771730534fee89a6b10a71c5d57f9955b1535a3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..56ff43778fc5e6e5b0ae3056e0521701d2776999 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/f365e38f426f2de7a48594f85771730534fee89a6b10a71c5d57f9955b1535a3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4214686d9278250289daeba193d0ccf95dfb5e02b7cc497c0e34764b560e6cf2 +size 4507 diff --git a/data/2025/2503_13xxx/2503.13933/images/f3f7794d4b1f511a476b118ab520af24a3d418751eb4b82caa0df37700b24280.jpg b/data/2025/2503_13xxx/2503.13933/images/f3f7794d4b1f511a476b118ab520af24a3d418751eb4b82caa0df37700b24280.jpg new file mode 100644 index 0000000000000000000000000000000000000000..44b049b40e200b674311db1a91ef1ef054b44bcc --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/f3f7794d4b1f511a476b118ab520af24a3d418751eb4b82caa0df37700b24280.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:db353e1dad3e4fe8e2cad0d60d849de9aa18ae7ca8ba6a5d6d7d0a7d3a78a702 +size 5302 diff --git a/data/2025/2503_13xxx/2503.13933/images/f87028d5954edd3bf3a3bd90ca3f21b0a37fa1e24a8f443b8e35ccee6feead39.jpg b/data/2025/2503_13xxx/2503.13933/images/f87028d5954edd3bf3a3bd90ca3f21b0a37fa1e24a8f443b8e35ccee6feead39.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..aae944721abc90fb33fb488cd7539cf798e53002 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/f87028d5954edd3bf3a3bd90ca3f21b0a37fa1e24a8f443b8e35ccee6feead39.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9753b8ccccfb4513a00c29724922dc355ab69f6fcfe6f70121ce47d78cbd76e8 +size 3740 diff --git a/data/2025/2503_13xxx/2503.13933/images/f98896853f42e61200a7ed8271432ba859d822dd2ac2059074917201aeacecd4.jpg b/data/2025/2503_13xxx/2503.13933/images/f98896853f42e61200a7ed8271432ba859d822dd2ac2059074917201aeacecd4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a0c6f8e17d8e6851e43c0ed67a2d3205b109d431 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/f98896853f42e61200a7ed8271432ba859d822dd2ac2059074917201aeacecd4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1eae764766b2e32d867f9ef73c7701d157848d3ad2a93277885442072197eeda +size 7049 diff --git a/data/2025/2503_13xxx/2503.13933/images/f9ccd59eb06fa06952a41ddbeabb1229514b05ff8f3cebdcc5d4513deee979da.jpg b/data/2025/2503_13xxx/2503.13933/images/f9ccd59eb06fa06952a41ddbeabb1229514b05ff8f3cebdcc5d4513deee979da.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fd238611b00d3cd498ab3c345b2eaede4f24df31 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/f9ccd59eb06fa06952a41ddbeabb1229514b05ff8f3cebdcc5d4513deee979da.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d934e8600872c481934d02957822147e82af4d8582f3e59f775470580f432270 +size 19386 diff --git a/data/2025/2503_13xxx/2503.13933/images/f9df96ab1e94c3f197c125cc26bdae791e3769bfcd4aa7fddc2a762fde3a337b.jpg b/data/2025/2503_13xxx/2503.13933/images/f9df96ab1e94c3f197c125cc26bdae791e3769bfcd4aa7fddc2a762fde3a337b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2c9d2ce10137e1f0a0122f64fd58de6c51f72935 --- /dev/null +++ 
b/data/2025/2503_13xxx/2503.13933/images/f9df96ab1e94c3f197c125cc26bdae791e3769bfcd4aa7fddc2a762fde3a337b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4ab9aefcf4acd1f2e2d6f7082bdd89527b6a99c8538be0d2c84da04d897dfc63 +size 8339 diff --git a/data/2025/2503_13xxx/2503.13933/images/fa2ca97b6f3903dd29bd21860b65642768d7856f00c975369e0e7129f9aa8323.jpg b/data/2025/2503_13xxx/2503.13933/images/fa2ca97b6f3903dd29bd21860b65642768d7856f00c975369e0e7129f9aa8323.jpg new file mode 100644 index 0000000000000000000000000000000000000000..634048c3a31e8f99946063e5d576c9584e0aa411 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/fa2ca97b6f3903dd29bd21860b65642768d7856f00c975369e0e7129f9aa8323.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:022ae99d9aeb8c3d3206cc08960c63d2c0bb10857807cb967d434f0d8c5f111f +size 17989 diff --git a/data/2025/2503_13xxx/2503.13933/images/fc1a326bac6804be45fa1c9b3cf27c427a62e03aa83dfcf816fe3cdb17e70be1.jpg b/data/2025/2503_13xxx/2503.13933/images/fc1a326bac6804be45fa1c9b3cf27c427a62e03aa83dfcf816fe3cdb17e70be1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a86a4f4d0bf72b3194526e7e73874a7ca49598fb --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/fc1a326bac6804be45fa1c9b3cf27c427a62e03aa83dfcf816fe3cdb17e70be1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:73cdd6cd39a7be1760ce8bffb37222c20fc3ce9630e3c1bb4de40044386cfdc2 +size 22050 diff --git a/data/2025/2503_13xxx/2503.13933/images/fcdf1794fb5d67c5bdbc473ff1b75e5a1a997c2b94024e8b3ffb6d00ac66f12a.jpg b/data/2025/2503_13xxx/2503.13933/images/fcdf1794fb5d67c5bdbc473ff1b75e5a1a997c2b94024e8b3ffb6d00ac66f12a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8ddcc27fb3fdab7a700d0c044ed6012b893cdadb --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/images/fcdf1794fb5d67c5bdbc473ff1b75e5a1a997c2b94024e8b3ffb6d00ac66f12a.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:eac52b8fc7de6e6190e428bc880f0d3d88903b4f1a7161e77fe2d643b5700788 +size 237487 diff --git a/data/2025/2503_13xxx/2503.13933/layout.json b/data/2025/2503_13xxx/2503.13933/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..212d174e29040dc60293de8e9250797b7dac879d --- /dev/null +++ b/data/2025/2503_13xxx/2503.13933/layout.json @@ -0,0 +1,22045 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 80, + 106, + 516, + 143 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 106, + 516, + 143 + ], + "spans": [ + { + "bbox": [ + 80, + 106, + 516, + 143 + ], + "type": "text", + "content": "Tensor-decomposition-based A Priori Surrogate (TAPS) modeling for ultra large-scale simulations" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 158, + 524, + 183 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 158, + 524, + 183 + ], + "spans": [ + { + "bbox": [ + 69, + 158, + 524, + 183 + ], + "type": "text", + "content": "Jiachen Guo" + }, + { + "bbox": [ + 69, + 158, + 524, + 183 + ], + "type": "inline_equation", + "content": "^{a}" + }, + { + "bbox": [ + 69, + 158, + 524, + 183 + ], + "type": "text", + "content": ", Gino Domel" + }, + { + "bbox": [ + 69, + 158, + 524, + 183 + ], + "type": "inline_equation", + "content": "^{b}" + }, + { + "bbox": [ + 69, + 158, + 524, + 183 + ], + "type": "text", + "content": ", Chanwook Park" + }, + { + "bbox": [ + 69, + 158, + 524, + 183 + ], + "type": "inline_equation", + "content": "^{b}" + }, + { + "bbox": [ + 69, + 158, + 524, + 183 + ], + "type": "text", + "content": ", Hantao Zhang" + }, + { + "bbox": [ + 69, + 158, + 524, + 183 + ], + "type": "inline_equation", + "content": "^{a}" + }, + { + "bbox": [ + 69, + 158, + 524, + 183 + ], + "type": "text", + "content": ", Ozgur Can Gumus" + }, + { + "bbox": [ + 69, + 158, + 524, + 183 + ], + "type": "inline_equation", + "content": "^{b}" + }, + { + "bbox": [ 
+ 69, + 158, + 524, + 183 + ], + "type": "text", + "content": ", Ye Lu" + }, + { + "bbox": [ + 69, + 158, + 524, + 183 + ], + "type": "inline_equation", + "content": "^{c}" + }, + { + "bbox": [ + 69, + 158, + 524, + 183 + ], + "type": "text", + "content": ", Gregory J. Wagner" + }, + { + "bbox": [ + 69, + 158, + 524, + 183 + ], + "type": "inline_equation", + "content": "^{b}" + }, + { + "bbox": [ + 69, + 158, + 524, + 183 + ], + "type": "text", + "content": ", Dong Qian" + }, + { + "bbox": [ + 69, + 158, + 524, + 183 + ], + "type": "inline_equation", + "content": "^{d,e}" + }, + { + "bbox": [ + 69, + 158, + 524, + 183 + ], + "type": "text", + "content": ", Jian Cao" + }, + { + "bbox": [ + 69, + 158, + 524, + 183 + ], + "type": "inline_equation", + "content": "^{b}" + }, + { + "bbox": [ + 69, + 158, + 524, + 183 + ], + "type": "text", + "content": ", Thomas J.R. Hughes" + }, + { + "bbox": [ + 69, + 158, + 524, + 183 + ], + "type": "inline_equation", + "content": "^{f}" + }, + { + "bbox": [ + 69, + 158, + 524, + 183 + ], + "type": "text", + "content": ", Wing Kam Liu" + }, + { + "bbox": [ + 69, + 158, + 524, + 183 + ], + "type": "inline_equation", + "content": "^{b,e}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 100, + 191, + 490, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 100, + 191, + 490, + 201 + ], + "spans": [ + { + "bbox": [ + 100, + 191, + 490, + 201 + ], + "type": "text", + "content": "aTheoretical and Applied Mechanics Program, Northwestern University, 2145 Sheridan Road, Evanston, 60201, IL, USA" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 110, + 201, + 483, + 211 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 201, + 483, + 211 + ], + "spans": [ + { + "bbox": [ + 110, + 201, + 483, + 211 + ], + "type": "inline_equation", + "content": "^{b}" + }, + { + "bbox": [ + 110, + 201, + 483, + 211 + ], + "type": "text", + "content": "Department of Mechanical Engineering, Northwestern 
University, 2145 Sheridan Road, Evanston, IL, USA" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 81, + 211, + 512, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 211, + 512, + 220 + ], + "spans": [ + { + "bbox": [ + 81, + 211, + 512, + 220 + ], + "type": "inline_equation", + "content": "^{c}" + }, + { + "bbox": [ + 81, + 211, + 512, + 220 + ], + "type": "text", + "content": "Department of Mechanical Engineering, University of Maryland, Baltimore County, 1000 Hilltop Circle, Baltimore, 21250, MD, USA" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 98, + 220, + 495, + 230 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 220, + 495, + 230 + ], + "spans": [ + { + "bbox": [ + 98, + 220, + 495, + 230 + ], + "type": "inline_equation", + "content": "^{d}" + }, + { + "bbox": [ + 98, + 220, + 495, + 230 + ], + "type": "text", + "content": "Department of Mechanical Engineering, University of Texas, Dallas, 800 W. Campbell Road, Richardson, 75080, TX, USA" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 167, + 230, + 425, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 230, + 425, + 239 + ], + "spans": [ + { + "bbox": [ + 167, + 230, + 425, + 239 + ], + "type": "inline_equation", + "content": "{}^{e}" + }, + { + "bbox": [ + 167, + 230, + 425, + 239 + ], + "type": "text", + "content": " Co-Founders of HIDENN-AI,LLC,1801 Maple Ave,Evanston,60201,IL,USA" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 110, + 239, + 483, + 248 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 239, + 483, + 248 + ], + "spans": [ + { + "bbox": [ + 110, + 239, + 483, + 248 + ], + "type": "text", + "content": "fInstitute for Computational Engineering and Sciences, The University of Texas at Austin, 201 East 24th Street, Stop" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 245, + 248, + 348, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": 
[ + 245, + 248, + 348, + 258 + ], + "spans": [ + { + "bbox": [ + 245, + 248, + 348, + 258 + ], + "type": "text", + "content": "C0200, Austin, 78712, TX, USA" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 61, + 303, + 102, + 314 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 303, + 102, + 314 + ], + "spans": [ + { + "bbox": [ + 61, + 303, + 102, + 314 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 60, + 320, + 533, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 320, + 533, + 477 + ], + "spans": [ + { + "bbox": [ + 60, + 320, + 533, + 477 + ], + "type": "text", + "content": "A data-free, predictive scientific AI model, Tensor-decomposition-based A Priori Surrogate (TAPS), is proposed for tackling ultra large-scale engineering simulations with significant speedup, memory savings, and storage gain. TAPS can effectively obtain surrogate models for high-dimensional parametric problems with equivalent zetta-scale " + }, + { + "bbox": [ + 60, + 320, + 533, + 477 + ], + "type": "inline_equation", + "content": "(10^{21})" + }, + { + "bbox": [ + 60, + 320, + 533, + 477 + ], + "type": "text", + "content": " degrees of freedom (DoFs). TAPS achieves this by directly obtaining reduced-order models through solving governing equations with multiple independent variables such as spatial coordinates, parameters, and time. The paper first introduces an AI-enhanced finite element-type interpolation function called convolution hierarchical deep-learning neural network (C-HiDeNN) with tensor decomposition (TD). Subsequently, the generalized space-parameter-time Galerkin weak form and the corresponding matrix form are derived. Through the choice of TAPS hyperparameters, an arbitrary convergence rate can be achieved. 
To show the capabilities of this framework, TAPS is then used to simulate a large-scale additive manufacturing process as an example and achieves around 1,370x speedup, 14.8x memory savings, and 955x storage gain compared to the finite difference method with 3.46 billion spatial degrees of freedom (DoFs). As a result, the TAPS framework opens a new avenue for many challenging ultra large-scale engineering problems, such as additive manufacturing and integrated circuit design, among others." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 61, + 483, + 106, + 494 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 483, + 106, + 494 + ], + "spans": [ + { + "bbox": [ + 61, + 483, + 106, + 494 + ], + "type": "text", + "content": "Keywords:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 61, + 495, + 520, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 495, + 520, + 519 + ], + "spans": [ + { + "bbox": [ + 61, + 495, + 520, + 519 + ], + "type": "text", + "content": "Predictive scientific AI, hierarchical neural network finite element interpolation, generalized Galerkin formulation for parametric PDEs, large-scale simulation, additive manufacturing" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 61, + 555, + 132, + 567 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 555, + 132, + 567 + ], + "spans": [ + { + "bbox": [ + 61, + 555, + 132, + 567 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 60, + 576, + 533, + 721 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 576, + 533, + 721 + ], + "spans": [ + { + "bbox": [ + 60, + 576, + 533, + 721 + ], + "type": "text", + "content": "Precision is a fundamental aspect of scientific and engineering applications, especially in advanced industries such as semiconductor manufacturing. 
The capability to perform accurate computational simulations for these applications is essential for advancing these fields. Precise simulations enable the optimization of design and manufacturing processes by utilizing virtual prototypes and process simulations. This reduces the need for expensive physical prototypes and tests and provides virtual prototypes in circumstances where physical ones are impractical. Traditional computational methods for engineering simulations, however, suffer from prohibitive computational costs when attempting to accurately predict responses across multiple length and time scales (typically done by increasing mesh resolution), making achieving high precision for large-scale problems challenging. In fact, the random-access memory (RAM) requirement can be far beyond the capability of typical workstations and may require massive parallelization on supercomputers. In other industries, such as additive manufacturing (a term encompassing all forms of 3D printing), the vast design space further exacerbates these limitations, as numerous expensive simulations are required to thoroughly explore the effects of different design parameters." 
+ } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 13, + 257, + 36, + 608 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 13, + 257, + 36, + 608 + ], + "spans": [ + { + "bbox": [ + 13, + 257, + 36, + 608 + ], + "type": "text", + "content": "arXiv:2503.13933v1 [cs.CE] 18 Mar 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 61, + 740, + 161, + 750 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 740, + 161, + 750 + ], + "spans": [ + { + "bbox": [ + 61, + 740, + 161, + 750 + ], + "type": "text", + "content": "Preprint submitted to Elsevier" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 477, + 740, + 531, + 749 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 477, + 740, + 531, + 749 + ], + "spans": [ + { + "bbox": [ + 477, + 740, + 531, + 749 + ], + "type": "text", + "content": "March 19, 2025" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 63, + 126, + 531, + 524 + ], + "blocks": [ + { + "bbox": [ + 259, + 118, + 334, + 126 + ], + "lines": [ + { + "bbox": [ + 259, + 118, + 334, + 126 + ], + "spans": [ + { + "bbox": [ + 259, + 118, + 334, + 126 + ], + "type": "text", + "content": "Table 1: Nomenclature" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 63, + 126, + 531, + 524 + ], + "lines": [ + { + "bbox": [ + 63, + 126, + 531, + 524 + ], + "spans": [ + { + "bbox": [ + 63, + 126, + 531, + 524 + ], + "type": "table", + "html": "
VariablesDescription
uh(x)Interpolated scalar field defined inside of an element
AeNodes within element e
AsNodes within patch domain of element e
WiS,a,p,j(x)Convolution patch function at node j for i-th nodal patch with hyperparameters s, a, and p
MTotal number of modes in tensor decomposition (TD)
mIndex for mode
DTotal number of dimensions
dIndex for dimension
xIndependent variable which includes spatial variable xs, parametric variable xp and temporal variable xt
Nd(xd;ad,sd,pd)Global C-HiDeNN shape function for dimension d with dilation parameter ad, patch size sd and reproducing polynomial order pd
bSource function in laser powder bed fusion process
uTDApproximation of the solution field expressed via TD
TTime slab index for space-parameter-time problem
kThermal conductivity
ρMaterial density
cpHeat capacity
ηMaterial absorptivity
PLaser power
rStandard deviation that characterizes the width of the heat source
qHeat flux
qconvHeat flux from convection
qradHeat flux from radiation
qevapHeat flux from evaporative cooling
hconvConvection coefficient
σSBStefan-Boltzman constant
mevapMass evaporation flux
LevapHeat of evaporation
BndShape function derivative
UdSolution matrix (Rnd×M) for dimension d that contains all the modes
", + "image_path": "fcdf1794fb5d67c5bdbc473ff1b75e5a1a997c2b94024e8b3ffb6d00ac66f12a.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 60, + 544, + 531, + 711 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 544, + 531, + 711 + ], + "spans": [ + { + "bbox": [ + 60, + 544, + 531, + 711 + ], + "type": "text", + "content": "To fulfill the ever-growing challenges in predictive scientific models, data-driven surrogates, especially artificial intelligence (AI)-based models, present an alternative to conventional numerical models by significantly reducing the forward prediction time. These models can be treated as a reasonably accurate, reduced representation of real physics. Once trained properly, they can be used for fast prediction on unseen parameters [1, 2]. However, it is still uncertain whether a data-driven surrogate model can be trained to achieve the level of accuracy required in engineering design. Recently, it has been pointed out by Wolfram Research that standard AI models cannot easily fulfill the high accuracy requirement of predictive scientific tasks [3]. Furthermore, as suggested by Google Deepmind, the real potential of AI models lies in enhancing, rather than thoroughly replacing, well-established classical numerical algorithms [4]. In addition, the current standard data-driven approaches follow an offline-online scheme, where the offline stage involves a huge amount of training data, which can again be prohibitive. For problems with known physics, this data can be obtained by running multiple expensive simulations relying on standard numerical algorithms. 
In scenarios involving high-dimensional design spaces governed by parameterized partial differential equations (PDEs), such as in additive manufacturing (AM), conducting repetitive simulations with varying parameters in this offline stage becomes exceedingly expensive both in terms of computation time and data storage." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 76, + 712, + 531, + 724 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 712, + 531, + 724 + ], + "spans": [ + { + "bbox": [ + 76, + 712, + 531, + 724 + ], + "type": "text", + "content": "To avoid the prohibitive offline stage, one can try to obtain a surrogate model directly from governing equations" + } + ] + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 740, + 300, + 748 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 740, + 300, + 748 + ], + "spans": [ + { + "bbox": [ + 293, + 740, + 300, + 748 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 112, + 110, + 486, + 206 + ], + "blocks": [ + { + "bbox": [ + 112, + 110, + 486, + 206 + ], + "lines": [ + { + "bbox": [ + 112, + 110, + 486, + 206 + ], + "spans": [ + { + "bbox": [ + 112, + 110, + 486, + 206 + ], + "type": "image", + "image_path": "ba32edcab434e8ad5e0767139009d16908e9de00ed4f1dfddd99493d69460436.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 60, + 219, + 531, + 268 + ], + "lines": [ + { + "bbox": [ + 60, + 219, + 531, + 268 + ], + "spans": [ + { + "bbox": [ + 60, + 219, + 531, + 268 + ], + "type": "text", + "content": "Figure 1: The parameterized PDE is a PDE that includes parameters " + }, + { + "bbox": [ + 60, + 219, + 531, + 268 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_p" + }, + { + "bbox": [ + 60, + 219, + 531, + 268 + ], + "type": 
"text", + "content": " that can vary and influence the solution " + }, + { + "bbox": [ + 60, + 219, + 531, + 268 + ], + "type": "inline_equation", + "content": "\\mathbf{u}(\\mathbf{x}_s, \\mathbf{x}_p, x_t)" + }, + { + "bbox": [ + 60, + 219, + 531, + 268 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 60, + 219, + 531, + 268 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_s" + }, + { + "bbox": [ + 60, + 219, + 531, + 268 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 60, + 219, + 531, + 268 + ], + "type": "inline_equation", + "content": "x_t" + }, + { + "bbox": [ + 60, + 219, + 531, + 268 + ], + "type": "text", + "content": " are the spatial and time variables, respectively. The a priori approach directly finds a surrogate model from the governing parameterized PDE, whereas the data-driven approach has to solve the parameter-fixed PDE on sampled parameters to generate simulation data, followed by training tasks. FEM: Finite Element Method [5], C-HiDeNN: Convolution Hierarchical Deep-learning Neural Network [6], TAPS: Tensor-decomposition-based A Priori Surrogate, PINN: Physics Informed Neural Network [7]." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 60, + 287, + 531, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 287, + 531, + 360 + ], + "spans": [ + { + "bbox": [ + 60, + 287, + 531, + 360 + ], + "type": "text", + "content": "without generating any data. As shown in Fig. 1 denoted by the words \"A Priori\", this approach aims to find the surrogate model before actually \"seeing\" any data. For example, multilayer perceptron (MLP) architectures have been vastly used in physics-informed neural networks (PINNs) and their variations to approximate solutions to PDEs without requiring data [7, 8]. 
However, the results of these efforts are underwhelming, as it has been shown that PINN results have often been compared to weak baselines [9], and it is unclear if they guarantee convergence. Moreover, this method is still susceptible to high computational costs for both large-scale and high-dimensional problems [10]." + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 66, + 368, + 530, + 540 + ], + "blocks": [ + { + "bbox": [ + 66, + 368, + 530, + 540 + ], + "lines": [ + { + "bbox": [ + 66, + 368, + 530, + 540 + ], + "spans": [ + { + "bbox": [ + 66, + 368, + 530, + 540 + ], + "type": "image", + "image_path": "436e24e7870b3692253c8bb03616ef37d9e6600fc07fc58cdc25ddba38c6aade.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 61, + 550, + 531, + 570 + ], + "lines": [ + { + "bbox": [ + 61, + 550, + 531, + 570 + ], + "spans": [ + { + "bbox": [ + 61, + 550, + 531, + 570 + ], + "type": "text", + "content": "Figure 2: Development history of INN [11]. Figures are borrowed from references: HiDeNN [12], HiDeNN-TD [13], C-HiDeNN [14], C-HiDeNN-TD [6]." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 60, + 581, + 532, + 724 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 581, + 532, + 724 + ], + "spans": [ + { + "bbox": [ + 60, + 581, + 532, + 724 + ], + "type": "text", + "content": "Instead of developing solvers solely based on machine learning concepts, a new class of Hierarchical Deep-learning Neural Networks (HiDeNN) has been developed recently. This network architecture incorporates principles from the finite element method (FEM) to construct their architecture [15, 12]. Originally designed to advance FEM as opposed to solve parameterized PDEs, this approach significantly enhances computational accuracy and efficiency for both linear and nonlinear problems compared to standard FEM [16]. 
HiDeNN was then enhanced by adding an additional hidden layer in the form of a nonlinear convolutional filter, formulating a new neural network architecture named Convolutional HiDeNN (C-HiDeNN) [6, 14]. C-HiDeNN mimics the structure of the generalized finite element method but leverages machine learning to optimize its hyperparameters to further improve accuracy and efficiency. Arbitrary orders of convergence have been observed for C-HiDeNN despite utilizing a linear finite element mesh [14]. Although these methods offers greater accuracy with fewer DoFs, like FEM, they still encounter computational challenges such as balancing memory usage against mesh resolution, which limits their efficiency in modeling ultra large-scale and high-dimensional problems. Therefore, it becomes necessary to employ model order reduction" + } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 740, + 300, + 748 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 740, + 300, + 748 + ], + "spans": [ + { + "bbox": [ + 293, + 740, + 300, + 748 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 61, + 111, + 221, + 122 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 111, + 221, + 122 + ], + "spans": [ + { + "bbox": [ + 61, + 111, + 221, + 122 + ], + "type": "text", + "content": "techniques to address these limitations." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 60, + 123, + 545, + 264 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 123, + 545, + 264 + ], + "spans": [ + { + "bbox": [ + 60, + 123, + 545, + 264 + ], + "type": "text", + "content": "Model order reduction techniques have been widely used to tackle the ever-growing challenges from high-dimensional and large-scale problems. 
For example, proper generalized decomposition (PGD) [17, 18, 19] has been proposed to efficiently solve high-dimensional PDEs. Recently, tensor decomposition (TD) has been successfully leveraged within the HiDeNN framework. For example, Zhang showed that HiDeNN combined with TD (HiDeNN-TD) significantly improved the speed of HiDeNN while maintaining higher accuracy [13]. Li proposed C-HiDeNN combined with TD (C-HiDeNN-TD) for extremely large-scale nested topology optimization problems [20]. Recently, Park generalized the HiDeNN-family networks under the umbrella of Interpolating Neural Networks (INNs) and demonstrated that the network can be used for both data-driven learning and data-free (i.e., a priori) solving [11]. The development history of HiDeNN family networks and INN is summarized in Fig. 2. While INN clearly explains how to construct the network architecture, an efficient optimization scheme for solving ultra large-scale and high-dimensional problems remains underdeveloped. In this paper, ultra large-scale problems refer to problems on the zetta-scale " + }, + { + "bbox": [ + 60, + 123, + 545, + 264 + ], + "type": "inline_equation", + "content": "(10^{21})" + }, + { + "bbox": [ + 60, + 123, + 545, + 264 + ], + "type": "text", + "content": " in terms of DoFs." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 60, + 266, + 531, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 266, + 531, + 386 + ], + "spans": [ + { + "bbox": [ + 60, + 266, + 531, + 386 + ], + "type": "text", + "content": "The demand for high-precision engineering simulations and efficient solution schemes highlights the need for innovative modeling approaches that swiftly solve large-scale problems while optimizing the design space. 
This research aims to fulfill this need by developing tensor-decomposition-based A Priori Surrogate (TAPS), a data-free predictive AI model, which aims to enhance high-resolution capabilities while simultaneously optimizing computational efficiency with a minimal memory footprint, low data storage needs, and fast prediction. The proposed comprehensive framework sets a foundation for scalable, adaptable, and future-proof solutions to counter the ever-growing complexity in simulation-driven advanced industries. TAPS is particularly well-suited for engineering challenges where: 1) the finite element method and other conventional methods are unsuitable due to excessively long simulation times or high RAM and storage demands needed to achieve high accuracy, 2) the model must accommodate design parameters as inputs, or 3) fast prediction is required once the model is obtained." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 60, + 386, + 531, + 446 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 386, + 531, + 446 + ], + "spans": [ + { + "bbox": [ + 60, + 386, + 531, + 446 + ], + "type": "text", + "content": "This paper is structured as follows. We first introduce the formulation of TAPS in section 2. In section 3, we examine the numerical convergence of TAPS for both space-time (S-T) and space-parameter-time (S-P-T) problems (i.e., problems that are dependent on spatial, parametric, and temporal inputs). In section 4, TAPS is applied to large-scale additive manufacturing problems that are considered intractable with standard numerical algorithms. This application effectively demonstrates TAPS's capability to address all of the three identified challenges." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 61, + 462, + 110, + 475 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 462, + 110, + 475 + ], + "spans": [ + { + "bbox": [ + 61, + 462, + 110, + 475 + ], + "type": "text", + "content": "2. 
Theory" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 61, + 482, + 254, + 495 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 482, + 254, + 495 + ], + "spans": [ + { + "bbox": [ + 61, + 482, + 254, + 495 + ], + "type": "text", + "content": "2.1. Review of C-HiDeNN interpolation theory" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 60, + 497, + 531, + 580 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 497, + 531, + 580 + ], + "spans": [ + { + "bbox": [ + 60, + 497, + 531, + 580 + ], + "type": "text", + "content": "Leveraging the universal approximation theorem, multilayer perceptrons (MLPs) have been successfully applied as global basis functions in deep learning-based solvers [7]. However, as shown in Table 2, MLPs have a few potential caveats when approximating PDE solutions. To overcome these limitations, we leverage the Convolutional HiDeNN (C-HiDeNN) interpolation function, which leverages the merits of both locally supported finite element shape functions and the flexibility of machine learning. Note that C-HiDeNN also belongs to the INN category as shown in Fig. 2. C-HiDeNN maintains all the essential finite element approximation properties such as Kronecker delta and partition of unity [14]." + } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 103, + 608, + 490, + 666 + ], + "blocks": [ + { + "bbox": [ + 148, + 598, + 444, + 608 + ], + "lines": [ + { + "bbox": [ + 148, + 598, + 444, + 608 + ], + "spans": [ + { + "bbox": [ + 148, + 598, + 444, + 608 + ], + "type": "text", + "content": "Table 2: Comparison of MLP and C-HiDeNN as approximation functions of PDE solutions." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 103, + 608, + 490, + 666 + ], + "lines": [ + { + "bbox": [ + 103, + 608, + 490, + 666 + ], + "spans": [ + { + "bbox": [ + 103, + 608, + 490, + 666 + ], + "type": "table", + "html": "
MLPC-HiDeNN
Boundary/initial conditionPenalty term in the loss function [7]Automatic satisfaction [6]
Convergence and stabilityStochastic in nature and not guaranteedShown for different PDEs [6]
Numerical integrationQuasi-Monte Carlo integration [21]Gaussian integration [22]
InterpretabilityBlack-box modelInterpretable [11]
", + "image_path": "043303db4e5c82962bde585e2e5978667fa6324d753cd893b2e7c4471a16091e.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 61, + 677, + 531, + 701 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 677, + 531, + 701 + ], + "spans": [ + { + "bbox": [ + 61, + 677, + 531, + 701 + ], + "type": "text", + "content": "We first review the C-HiDeNN formulation as illustrated in Fig. 3 (a) [14]. A scalar field " + }, + { + "bbox": [ + 61, + 677, + 531, + 701 + ], + "type": "inline_equation", + "content": "u(\\pmb{x})" + }, + { + "bbox": [ + 61, + 677, + 531, + 701 + ], + "type": "text", + "content": " defined in each element within a domain " + }, + { + "bbox": [ + 61, + 677, + 531, + 701 + ], + "type": "inline_equation", + "content": "\\Omega_{\\pmb{x}}" + }, + { + "bbox": [ + 61, + 677, + 531, + 701 + ], + "type": "text", + "content": " can be approximated using C-HiDeNN interpolation as:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 181, + 707, + 531, + 734 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 181, + 707, + 531, + 734 + ], + "spans": [ + { + "bbox": [ + 181, + 707, + 531, + 734 + ], + "type": "interline_equation", + "content": "u _ {e} ^ {h} (\\boldsymbol {x}) = \\sum_ {i \\in A ^ {e}} N _ {i} (\\boldsymbol {x}) \\sum_ {j \\in A _ {s} ^ {i}} \\mathcal {W} _ {s, a, p, j} ^ {i} (\\boldsymbol {x}) u _ {j} = \\sum_ {k \\in A _ {s} ^ {e}} \\widetilde {N} _ {k} (\\boldsymbol {x}; s, a, p) u _ {k} \\tag {1}", + "image_path": "bfc7ed9b115c7244283bc7bd1bb21a1d5f6f4a7f42e6be7d33f9f30e7ab59b32.jpg" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 740, + 300, + 748 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 740, + 300, + 748 + ], + "spans": [ + { + "bbox": [ + 293, + 740, + 300, + 748 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 
11 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 113, + 113, + 454, + 158 + ], + "blocks": [ + { + "bbox": [ + 113, + 113, + 454, + 158 + ], + "lines": [ + { + "bbox": [ + 113, + 113, + 454, + 158 + ], + "spans": [ + { + "bbox": [ + 113, + 113, + 454, + 158 + ], + "type": "image", + "image_path": "26d6d373a42682554486e271777c219d82e17016041bbe53dc8f96b1cfed8012.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 118, + 160, + 187, + 167 + ], + "lines": [ + { + "bbox": [ + 118, + 160, + 187, + 167 + ], + "spans": [ + { + "bbox": [ + 118, + 160, + 187, + 167 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 118, + 160, + 187, + 167 + ], + "type": "inline_equation", + "content": "A^e" + }, + { + "bbox": [ + 118, + 160, + 187, + 167 + ], + "type": "text", + "content": ": nodes at element " + }, + { + "bbox": [ + 118, + 160, + 187, + 167 + ], + "type": "inline_equation", + "content": "e" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 309, + 163, + 372, + 169 + ], + "lines": [ + { + "bbox": [ + 309, + 163, + 372, + 169 + ], + "spans": [ + { + "bbox": [ + 309, + 163, + 372, + 169 + ], + "type": "text", + "content": "- s: patch size, integer" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 309, + 171, + 373, + 177 + ], + "lines": [ + { + "bbox": [ + 309, + 171, + 373, + 177 + ], + "spans": [ + { + "bbox": [ + 309, + 171, + 373, + 177 + ], + "type": "text", + "content": "a: dilation parameter" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_footnote" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 112, + 191, + 286, + 306 + ], + "blocks": [ + { + "bbox": [ + 119, + 168, + 241, + 174 + ], + "lines": [ + { + "bbox": [ + 119, + 168, + 241, + 174 + ], + "spans": [ + { + "bbox": [ + 119, + 168, + 241, + 174 + ], + "type": 
"text", + "content": "- " + }, + { + "bbox": [ + 119, + 168, + 241, + 174 + ], + "type": "inline_equation", + "content": "A_{\\mathrm{s}}^{i}" + }, + { + "bbox": [ + 119, + 168, + 241, + 174 + ], + "type": "text", + "content": ": patch domain at node " + }, + { + "bbox": [ + 119, + 168, + 241, + 174 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 119, + 168, + 241, + 174 + ], + "type": "text", + "content": " with patch size " + }, + { + "bbox": [ + 119, + 168, + 241, + 174 + ], + "type": "inline_equation", + "content": "s" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 112, + 191, + 286, + 306 + ], + "lines": [ + { + "bbox": [ + 112, + 191, + 286, + 306 + ], + "spans": [ + { + "bbox": [ + 112, + 191, + 286, + 306 + ], + "type": "image", + "image_path": "788d964946bd243c8ce37085ed0bd577906f91cee22b4fe5ceb8bc3ee147c7e1.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 113, + 315, + 231, + 324 + ], + "lines": [ + { + "bbox": [ + 113, + 315, + 231, + 324 + ], + "spans": [ + { + "bbox": [ + 113, + 315, + 231, + 324 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 113, + 315, + 231, + 324 + ], + "type": "inline_equation", + "content": "W_{a,p,j}^{i}" + }, + { + "bbox": [ + 113, + 315, + 231, + 324 + ], + "type": "text", + "content": ": convolution interpolant for node " + }, + { + "bbox": [ + 113, + 315, + 231, + 324 + ], + "type": "inline_equation", + "content": "j" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 113, + 324, + 245, + 331 + ], + "lines": [ + { + "bbox": [ + 113, + 324, + 245, + 331 + ], + "spans": [ + { + "bbox": [ + 113, + 324, + 245, + 331 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 113, + 324, + 245, + 331 + ], + "type": "inline_equation", + "content": "R_{i}(x)" + }, + { + "bbox": [ + 113, + 324, + 245, + 331 + ], + "type": "text", + "content": ": 
radial basis function centered at node " + }, + { + "bbox": [ + 113, + 324, + 245, + 331 + ], + "type": "inline_equation", + "content": "i" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 113, + 332, + 172, + 338 + ], + "lines": [ + { + "bbox": [ + 113, + 332, + 172, + 338 + ], + "spans": [ + { + "bbox": [ + 113, + 332, + 172, + 338 + ], + "type": "text", + "content": "1 " + }, + { + "bbox": [ + 113, + 332, + 172, + 338 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 113, + 332, + 172, + 338 + ], + "type": "text", + "content": " : moment matrix" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 113, + 339, + 223, + 347 + ], + "lines": [ + { + "bbox": [ + 113, + 339, + 223, + 347 + ], + "spans": [ + { + "bbox": [ + 113, + 339, + 223, + 347 + ], + "type": "text", + "content": "1 " + }, + { + "bbox": [ + 113, + 339, + 223, + 347 + ], + "type": "inline_equation", + "content": "\\cdot x^{A_s^i}" + }, + { + "bbox": [ + 113, + 339, + 223, + 347 + ], + "type": "text", + "content": " : nodal coordinates of nodes in " + }, + { + "bbox": [ + 113, + 339, + 223, + 347 + ], + "type": "inline_equation", + "content": "A_{s}^{i}" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_footnote" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 287, + 190, + 480, + 351 + ], + "blocks": [ + { + "bbox": [ + 119, + 175, + 278, + 184 + ], + "lines": [ + { + "bbox": [ + 119, + 175, + 278, + 184 + ], + "spans": [ + { + "bbox": [ + 119, + 175, + 278, + 184 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 119, + 175, + 278, + 184 + ], + "type": "inline_equation", + "content": "A_{s}^{e} = \\bigcup_{i\\in A^{e}}A_{s}^{i}" + }, + { + "bbox": [ + 119, + 175, + 278, + 184 + ], + "type": "text", + "content": ": patch nodes of element " + }, + { + "bbox": [ + 119, + 175, + 278, + 184 + ], + "type": "inline_equation", + "content": "e" + }, + { 
+ "bbox": [ + 119, + 175, + 278, + 184 + ], + "type": "text", + "content": " with patch size " + }, + { + "bbox": [ + 119, + 175, + 278, + 184 + ], + "type": "inline_equation", + "content": "s" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 309, + 179, + 423, + 185 + ], + "lines": [ + { + "bbox": [ + 309, + 179, + 423, + 185 + ], + "spans": [ + { + "bbox": [ + 309, + 179, + 423, + 185 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 309, + 179, + 423, + 185 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 309, + 179, + 423, + 185 + ], + "type": "text", + "content": " : reproducing polynomial order,integer" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 287, + 190, + 480, + 351 + ], + "lines": [ + { + "bbox": [ + 287, + 190, + 480, + 351 + ], + "spans": [ + { + "bbox": [ + 287, + 190, + 480, + 351 + ], + "type": "image", + "image_path": "2bafb2a9d73b20b8974d709a877de33ff2d86e93482c01b687869081af82ca9e.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 61, + 365, + 531, + 386 + ], + "lines": [ + { + "bbox": [ + 61, + 365, + 531, + 386 + ], + "spans": [ + { + "bbox": [ + 61, + 365, + 531, + 386 + ], + "type": "text", + "content": "Figure 3: (a) Covolution patch in 1D C-HiDeNN shape function (b) Construction of convolution patch function (c) C-HiDeNN shape function as MLP with 3 hidden layers" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "bbox": [ + 61, + 402, + 531, + 476 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 402, + 531, + 476 + ], + "spans": [ + { + "bbox": [ + 61, + 402, + 531, + 476 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 61, + 402, + 531, + 476 + ], + "type": "inline_equation", + "content": "u_{j}" + }, + { + "bbox": [ + 61, + 402, + 531, + 476 + ], + "type": 
"text", + "content": " is the nodal value and " + }, + { + "bbox": [ + 61, + 402, + 531, + 476 + ], + "type": "inline_equation", + "content": "u_{j} = u(\\pmb{x}_{j})" + }, + { + "bbox": [ + 61, + 402, + 531, + 476 + ], + "type": "text", + "content": "; " + }, + { + "bbox": [ + 61, + 402, + 531, + 476 + ], + "type": "inline_equation", + "content": "N_{i}" + }, + { + "bbox": [ + 61, + 402, + 531, + 476 + ], + "type": "text", + "content": " is the linear finite element shape function at node " + }, + { + "bbox": [ + 61, + 402, + 531, + 476 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 61, + 402, + 531, + 476 + ], + "type": "text", + "content": " centered in " + }, + { + "bbox": [ + 61, + 402, + 531, + 476 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 61, + 402, + 531, + 476 + ], + "type": "text", + "content": "-th nodal patch; " + }, + { + "bbox": [ + 61, + 402, + 531, + 476 + ], + "type": "inline_equation", + "content": "\\mathcal{W}_{s,a,p,j}^{i}" + }, + { + "bbox": [ + 61, + 402, + 531, + 476 + ], + "type": "text", + "content": " is the convolution patch function at node " + }, + { + "bbox": [ + 61, + 402, + 531, + 476 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 61, + 402, + 531, + 476 + ], + "type": "text", + "content": " that can be represented with a partially connected MLP as illustrated in Fig. 3 (b). 
The convolution patch functions are controlled by three hyperparameters: patch size " + }, + { + "bbox": [ + 61, + 402, + 531, + 476 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 61, + 402, + 531, + 476 + ], + "type": "text", + "content": " that controls nodal connectivity, dilation parameter " + }, + { + "bbox": [ + 61, + 402, + 531, + 476 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 61, + 402, + 531, + 476 + ], + "type": "text", + "content": " that normalizes distances between patch nodes, and reproducing order " + }, + { + "bbox": [ + 61, + 402, + 531, + 476 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 61, + 402, + 531, + 476 + ], + "type": "text", + "content": " that defines types/orders of activation functions to be reproduced by the patch functions. Due to the inherent local support nature of both " + }, + { + "bbox": [ + 61, + 402, + 531, + 476 + ], + "type": "inline_equation", + "content": "N_{i}" + }, + { + "bbox": [ + 61, + 402, + 531, + 476 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 61, + 402, + 531, + 476 + ], + "type": "inline_equation", + "content": "\\mathcal{W}_{s,a,p,j}^{i}" + }, + { + "bbox": [ + 61, + 402, + 531, + 476 + ], + "type": "text", + "content": ", the C-HiDeNN shape function " + }, + { + "bbox": [ + 61, + 402, + 531, + 476 + ], + "type": "inline_equation", + "content": "\\widetilde{N}_k(\\pmb{x};s,a,p)" + }, + { + "bbox": [ + 61, + 402, + 531, + 476 + ], + "type": "text", + "content": " is also locally supported." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 76, + 475, + 446, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 475, + 446, + 486 + ], + "spans": [ + { + "bbox": [ + 76, + 475, + 446, + 486 + ], + "type": "text", + "content": "Similar to standard finite element, the approximation for the solution field can be written as:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 233, + 491, + 531, + 523 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 491, + 531, + 523 + ], + "spans": [ + { + "bbox": [ + 233, + 491, + 531, + 523 + ], + "type": "interline_equation", + "content": "u ^ {h} (\\boldsymbol {x}) = \\sum_ {k} ^ {n n o d e} \\widetilde {N} _ {k} (\\boldsymbol {x}; s _ {k}, a _ {k}, p _ {k}) u _ {k} \\tag {2}", + "image_path": "49cfe79290b02e9e6803436e489e02a9a13c2c0eb602d965d0b2a8f2d1022319.jpg" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 61, + 528, + 531, + 575 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 528, + 531, + 575 + ], + "spans": [ + { + "bbox": [ + 61, + 528, + 531, + 575 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 61, + 528, + 531, + 575 + ], + "type": "inline_equation", + "content": "nnode" + }, + { + "bbox": [ + 61, + 528, + 531, + 575 + ], + "type": "text", + "content": " is the total number of nodes and " + }, + { + "bbox": [ + 61, + 528, + 531, + 575 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 61, + 528, + 531, + 575 + ], + "type": "text", + "content": " is the nodal index. 
It should be noted that the hyperparameters " + }, + { + "bbox": [ + 61, + 528, + 531, + 575 + ], + "type": "inline_equation", + "content": "s, a, p" + }, + { + "bbox": [ + 61, + 528, + 531, + 575 + ], + "type": "text", + "content": " can vary across nodes since C-HiDeNN can optimize these hyperparameters like machine learning parameters, rendering an adaptable functional space without altering the number of global nodes or hidden layers. This clearly distinguishes C-HiDeNN from MLP, where the activation functions and network architectures are mostly fixed." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 61, + 575, + 530, + 598 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 575, + 530, + 598 + ], + "spans": [ + { + "bbox": [ + 61, + 575, + 530, + 598 + ], + "type": "text", + "content": "The C-HiDeNN shape function " + }, + { + "bbox": [ + 61, + 575, + 530, + 598 + ], + "type": "inline_equation", + "content": "\\widetilde{N}_k(\\pmb{x})" + }, + { + "bbox": [ + 61, + 575, + 530, + 598 + ], + "type": "text", + "content": " satisfies Kronecker-delta property at nodal positions [6] (hyperparameters " + }, + { + "bbox": [ + 61, + 575, + 530, + 598 + ], + "type": "inline_equation", + "content": "s, a, p" + }, + { + "bbox": [ + 61, + 575, + 530, + 598 + ], + "type": "text", + "content": " are dropped for brevity):" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 271, + 597, + 530, + 612 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 271, + 597, + 530, + 612 + ], + "spans": [ + { + "bbox": [ + 271, + 597, + 530, + 612 + ], + "type": "interline_equation", + "content": "\\widetilde {N} _ {k} \\left(\\boldsymbol {x} _ {l}\\right) = \\delta_ {k l} \\tag {3}", + "image_path": "295ddd836e79bc713015f9c58a7d2c6ffb9cb7a0b208645ae1e873293965bec6.jpg" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 61, + 614, + 224, + 625 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 614, + 
224, + 625 + ], + "spans": [ + { + "bbox": [ + 61, + 614, + 224, + 625 + ], + "type": "text", + "content": "where the Kronecker delta is defined as:" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 246, + 623, + 531, + 650 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 246, + 623, + 531, + 650 + ], + "spans": [ + { + "bbox": [ + 246, + 623, + 531, + 650 + ], + "type": "interline_equation", + "content": "\\delta_ {k l} = \\left\\{ \\begin{array}{l l} 0 & \\text {i f} k \\neq l, \\\\ 1 & \\text {i f} k = l. \\end{array} \\right. \\tag {4}", + "image_path": "58230b63f91aab316085467bba07586972fef6aba1811c02d700384871cb156e.jpg" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 61, + 651, + 530, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 651, + 530, + 673 + ], + "spans": [ + { + "bbox": [ + 61, + 651, + 530, + 673 + ], + "type": "text", + "content": "Thus, at the Dirichlet boundary node " + }, + { + "bbox": [ + 61, + 651, + 530, + 673 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_b" + }, + { + "bbox": [ + 61, + 651, + 530, + 673 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 61, + 651, + 530, + 673 + ], + "type": "inline_equation", + "content": "u(\\mathbf{x}_b) = u_b" + }, + { + "bbox": [ + 61, + 651, + 530, + 673 + ], + "type": "text", + "content": ", C-HiDeNN automatically satisfies the Dirichlet boundary condition:" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 238, + 673, + 531, + 704 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 238, + 673, + 531, + 704 + ], + "spans": [ + { + "bbox": [ + 238, + 673, + 531, + 704 + ], + "type": "interline_equation", + "content": "u ^ {h} \\left(\\boldsymbol {x} _ {b}\\right) = \\sum_ {k} ^ {n n o d e} \\widetilde {N} _ {k} \\left(\\boldsymbol {x} _ {b}\\right) u _ {k} = u _ {b} \\tag {5}", + "image_path": 
"842c666a38794f7be422c207d3fdb3897871159188f9f65baca25190b9f10be2.jpg" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 60, + 708, + 531, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 708, + 531, + 733 + ], + "spans": [ + { + "bbox": [ + 60, + 708, + 531, + 733 + ], + "type": "text", + "content": "Going forward, we will employ the C-HiDeNN shape function " + }, + { + "bbox": [ + 60, + 708, + 531, + 733 + ], + "type": "inline_equation", + "content": "\\widetilde{N}_k(\\pmb{x})" + }, + { + "bbox": [ + 60, + 708, + 531, + 733 + ], + "type": "text", + "content": " as the locally supported basis function for the interpolation." + } + ] + } + ], + "index": 27 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 739, + 300, + 748 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 739, + 300, + 748 + ], + "spans": [ + { + "bbox": [ + 293, + 739, + 300, + 748 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 61, + 111, + 208, + 123 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 111, + 208, + 123 + ], + "spans": [ + { + "bbox": [ + 61, + 111, + 208, + 123 + ], + "type": "text", + "content": "2.2. Discrete Tensor decomposition" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 60, + 126, + 531, + 162 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 126, + 531, + 162 + ], + "spans": [ + { + "bbox": [ + 60, + 126, + 531, + 162 + ], + "type": "text", + "content": "Tensor decomposition is a mathematical technique used to break down a high-dimensional tensor, such as a 3D finite element solution, into a set of simpler components, making it easier to analyze, store, and process [23]. It generalizes matrix decomposition methods like singular value decomposition (SVD) to higher-order tensors." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 60, + 163, + 531, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 163, + 531, + 247 + ], + "spans": [ + { + "bbox": [ + 60, + 163, + 531, + 247 + ], + "type": "text", + "content": "Consider a cubic spatial domain " + }, + { + "bbox": [ + 60, + 163, + 531, + 247 + ], + "type": "inline_equation", + "content": "\\Omega_{x}" + }, + { + "bbox": [ + 60, + 163, + 531, + 247 + ], + "type": "text", + "content": " discretized with a regular Cartesian grid where each grid point (or node) stores a scalar value (see Fig. 4). The discrete nodal values can be represented as a 3rd order tensor " + }, + { + "bbox": [ + 60, + 163, + 531, + 247 + ], + "type": "inline_equation", + "content": "u_{JK}" + }, + { + "bbox": [ + 60, + 163, + 531, + 247 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 60, + 163, + 531, + 247 + ], + "type": "inline_equation", + "content": "I = 1,..,n_1;J = 1,\\dots,n_2;K = 1,\\dots,n_3" + }, + { + "bbox": [ + 60, + 163, + 531, + 247 + ], + "type": "text", + "content": ". The number of DoFs for this structured mesh is " + }, + { + "bbox": [ + 60, + 163, + 531, + 247 + ], + "type": "inline_equation", + "content": "n_1\\times n_2\\times n_3" + }, + { + "bbox": [ + 60, + 163, + 531, + 247 + ], + "type": "text", + "content": ". When high resolution is required for the analysis, as the case in AM simulations, the number of DoFs can be extremely large. To effectively reduce the DoFs, different discrete tensor decomposition methods can be used to project the original 3rd order tensor into lower order tensors. 
In this paper, we focus on CANDECOMP/PARAFAC (CP) decomposition, where the higher-order tensors are approximated using a finite sum of products of 1D vectors [23]:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 236, + 254, + 531, + 286 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 236, + 254, + 531, + 286 + ], + "spans": [ + { + "bbox": [ + 236, + 254, + 531, + 286 + ], + "type": "interline_equation", + "content": "u _ {I J K} \\approx u _ {I J K} ^ {T D} = \\sum_ {m = 1} ^ {M} u _ {I m} ^ {[ 1 ]} u _ {J m} ^ {[ 2 ]} u _ {K m} ^ {[ 3 ]} \\tag {6}", + "image_path": "e86f363bb7a81eae1c35edcbec092851a40b92859376773a56238be9207e674a.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 60, + 296, + 531, + 333 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 296, + 531, + 333 + ], + "spans": [ + { + "bbox": [ + 60, + 296, + 531, + 333 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 60, + 296, + 531, + 333 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 60, + 296, + 531, + 333 + ], + "type": "text", + "content": " is defined as the total number of modes in CP decomposition; " + }, + { + "bbox": [ + 60, + 296, + 531, + 333 + ], + "type": "inline_equation", + "content": "u_{lm}^{[1]}" + }, + { + "bbox": [ + 60, + 296, + 531, + 333 + ], + "type": "text", + "content": " refers to the projected 1D vector in the first dimension and " + }, + { + "bbox": [ + 60, + 296, + 531, + 333 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 60, + 296, + 531, + 333 + ], + "type": "text", + "content": "-th mode; the superscript " + }, + { + "bbox": [ + 60, + 296, + 531, + 333 + ], + "type": "inline_equation", + "content": "[d]" + }, + { + "bbox": [ + 60, + 296, + 531, + 333 + ], + "type": "text", + "content": " represents the dimension index and " + }, + { + "bbox": [ + 60, + 296, + 531, + 333 + ], + "type": "inline_equation", + "content": "d = 1,2,3" 
+ }, + { + "bbox": [ + 60, + 296, + 531, + 333 + ], + "type": "text", + "content": "; the 1st subscript " + }, + { + "bbox": [ + 60, + 296, + 531, + 333 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 60, + 296, + 531, + 333 + ], + "type": "text", + "content": " is the nodal index, and the 2nd subscript " + }, + { + "bbox": [ + 60, + 296, + 531, + 333 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 60, + 296, + 531, + 333 + ], + "type": "text", + "content": " refers to the modal index." + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 112, + 362, + 259, + 489 + ], + "blocks": [ + { + "bbox": [ + 112, + 362, + 259, + 489 + ], + "lines": [ + { + "bbox": [ + 112, + 362, + 259, + 489 + ], + "spans": [ + { + "bbox": [ + 112, + 362, + 259, + 489 + ], + "type": "image", + "image_path": "65f7d6cba907050260e3f59100990b220e18b3e661510dbe74314d86ce119fce.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 177, + 497, + 190, + 509 + ], + "lines": [ + { + "bbox": [ + 177, + 497, + 190, + 509 + ], + "spans": [ + { + "bbox": [ + 177, + 497, + 190, + 509 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 156, + 523, + 436, + 534 + ], + "lines": [ + { + "bbox": [ + 156, + 523, + 436, + 534 + ], + "spans": [ + { + "bbox": [ + 156, + 523, + 436, + 534 + ], + "type": "text", + "content": "Figure 4: (a) 3D Cartesian mesh. (b) Nodal values can be treated as a 3rd order tensor." 
+ } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 284, + 346, + 482, + 481 + ], + "blocks": [ + { + "bbox": [ + 284, + 346, + 482, + 481 + ], + "lines": [ + { + "bbox": [ + 284, + 346, + 482, + 481 + ], + "spans": [ + { + "bbox": [ + 284, + 346, + 482, + 481 + ], + "type": "image", + "image_path": "3b85eb658518d72411e3ec79246a5857219aa58d19ca589a83cb951c71802013.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 343, + 500, + 357, + 512 + ], + "lines": [ + { + "bbox": [ + 343, + 500, + 357, + 512 + ], + "spans": [ + { + "bbox": [ + 343, + 500, + 357, + 512 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 60, + 546, + 531, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 546, + 531, + 594 + ], + "spans": [ + { + "bbox": [ + 60, + 546, + 531, + 594 + ], + "type": "text", + "content": "As can be seen from Eq. 6, with CP decomposition, the total number of DoFs can be reduced from " + }, + { + "bbox": [ + 60, + 546, + 531, + 594 + ], + "type": "inline_equation", + "content": "n_1 \\times n_2 \\times n_3" + }, + { + "bbox": [ + 60, + 546, + 531, + 594 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 60, + 546, + 531, + 594 + ], + "type": "inline_equation", + "content": "M \\times (n_1 + n_2 + n_3)" + }, + { + "bbox": [ + 60, + 546, + 531, + 594 + ], + "type": "text", + "content": ". 
Assuming " + }, + { + "bbox": [ + 60, + 546, + 531, + 594 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 60, + 546, + 531, + 594 + ], + "type": "text", + "content": " does not increase when the mesh is refined along each dimension, then the solution matrix " + }, + { + "bbox": [ + 60, + 546, + 531, + 594 + ], + "type": "inline_equation", + "content": "u_{IJK}" + }, + { + "bbox": [ + 60, + 546, + 531, + 594 + ], + "type": "text", + "content": " will have cubic growth, whereas CP decomposition " + }, + { + "bbox": [ + 60, + 546, + 531, + 594 + ], + "type": "inline_equation", + "content": "\\sum_{m=1}^{M} u_{Im}^{[1]} u_{Jm}^{[2]} u_{Km}^{[3]}" + }, + { + "bbox": [ + 60, + 546, + 531, + 594 + ], + "type": "text", + "content": " only exhibits linear growth, as shown in Fig. 5 (a). This reduction is paramount to making large-scale simulation achievable." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 60, + 594, + 531, + 690 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 594, + 531, + 690 + ], + "spans": [ + { + "bbox": [ + 60, + 594, + 531, + 690 + ], + "type": "text", + "content": "As an extension of the previous case, we consider " + }, + { + "bbox": [ + 60, + 594, + 531, + 690 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 60, + 594, + 531, + 690 + ], + "type": "text", + "content": " dimensional general time-dependent parametric problems where the independent variables " + }, + { + "bbox": [ + 60, + 594, + 531, + 690 + ], + "type": "inline_equation", + "content": "(x_{1},x_{2},\\ldots ,x_{D})" + }, + { + "bbox": [ + 60, + 594, + 531, + 690 + ], + "type": "text", + "content": " can be classified into 3 different categories, namely, spatial variables " + }, + { + "bbox": [ + 60, + 594, + 531, + 690 + ], + "type": "inline_equation", + "content": "\\pmb{x}_s" + }, + { + "bbox": [ + 60, + 594, + 531, + 690 + ], + "type": "text", + "content": ", parametric variables " + }, + { + "bbox": 
[ + 60, + 594, + 531, + 690 + ], + "type": "inline_equation", + "content": "\\pmb{x}_p" + }, + { + "bbox": [ + 60, + 594, + 531, + 690 + ], + "type": "text", + "content": ", and temporal variable " + }, + { + "bbox": [ + 60, + 594, + 531, + 690 + ], + "type": "inline_equation", + "content": "x_{t}" + }, + { + "bbox": [ + 60, + 594, + 531, + 690 + ], + "type": "text", + "content": ". Spatial variables " + }, + { + "bbox": [ + 60, + 594, + 531, + 690 + ], + "type": "inline_equation", + "content": "\\pmb{x}_s" + }, + { + "bbox": [ + 60, + 594, + 531, + 690 + ], + "type": "text", + "content": " describe the spatial coordinates of the problem. Parametric variables " + }, + { + "bbox": [ + 60, + 594, + 531, + 690 + ], + "type": "inline_equation", + "content": "\\pmb{x}_p" + }, + { + "bbox": [ + 60, + 594, + 531, + 690 + ], + "type": "text", + "content": " can represent any PDE coefficients, initial/boundary conditions, or geometry descriptors as extra-coordinates. The temporal variable " + }, + { + "bbox": [ + 60, + 594, + 531, + 690 + ], + "type": "inline_equation", + "content": "x_{t}" + }, + { + "bbox": [ + 60, + 594, + 531, + 690 + ], + "type": "text", + "content": " represents time. 
Assuming the spatial domain " + }, + { + "bbox": [ + 60, + 594, + 531, + 690 + ], + "type": "inline_equation", + "content": "\\Omega_{\\pmb{x}_s}" + }, + { + "bbox": [ + 60, + 594, + 531, + 690 + ], + "type": "text", + "content": " is cubic, the parametric domain " + }, + { + "bbox": [ + 60, + 594, + 531, + 690 + ], + "type": "inline_equation", + "content": "\\Omega_{\\pmb{x}_p}" + }, + { + "bbox": [ + 60, + 594, + 531, + 690 + ], + "type": "text", + "content": " is hypercubic and Cartesian grids are used for discretization, then the nodal solution to these problems can be written as a discrete " + }, + { + "bbox": [ + 60, + 594, + 531, + 690 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 60, + 594, + 531, + 690 + ], + "type": "text", + "content": "-th order tensor " + }, + { + "bbox": [ + 60, + 594, + 531, + 690 + ], + "type": "inline_equation", + "content": "u_{I_1I_2,\\dots,I_D}" + }, + { + "bbox": [ + 60, + 594, + 531, + 690 + ], + "type": "text", + "content": ". Similarly, CP decomposition can be used to effectively decompose higher-order tensors into a finite sum of tensor products of 1D vectors." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 214, + 698, + 531, + 729 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 214, + 698, + 531, + 729 + ], + "spans": [ + { + "bbox": [ + 214, + 698, + 531, + 729 + ], + "type": "interline_equation", + "content": "u _ {I _ {1} I _ {2}, \\dots , I _ {D}} \\approx u _ {I _ {1} I _ {2}, \\dots , I _ {D}} ^ {T D} = \\sum_ {m = 1} ^ {M} u _ {I _ {1} m} ^ {[ 1 ]} u _ {I _ {2} m} ^ {[ 2 ]} \\dots u _ {I _ {D} m} ^ {[ D ]} \\tag {7}", + "image_path": "2b76570ff52e322aa151ab2d01b26bd55700698f7f22581a405d4ac31235ad8a.jpg" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 740, + 300, + 748 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 740, + 300, + 748 + ], + "spans": [ + { + "bbox": [ + 293, + 740, + 300, + 748 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 112, + 110, + 291, + 268 + ], + "blocks": [ + { + "bbox": [ + 112, + 110, + 291, + 268 + ], + "lines": [ + { + "bbox": [ + 112, + 110, + 291, + 268 + ], + "spans": [ + { + "bbox": [ + 112, + 110, + 291, + 268 + ], + "type": "image", + "image_path": "27d2162fec1e0e4eba57ca6fdf9b4a421edbc273be308e22965966bcd12bf083.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 211, + 269, + 224, + 278 + ], + "lines": [ + { + "bbox": [ + 211, + 269, + 224, + 278 + ], + "spans": [ + { + "bbox": [ + 211, + 269, + 224, + 278 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 122, + 293, + 471, + 304 + ], + "lines": [ + { + "bbox": [ + 122, + 293, + 471, + 304 + ], + "spans": [ + { + "bbox": [ + 122, + 293, + 471, + 304 + ], + "type": "text", + "content": "Figure 5: Comparison of number of DoFs, (a) in terms of 
mesh size " + }, + { + "bbox": [ + 122, + 293, + 471, + 304 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 122, + 293, + 471, + 304 + ], + "type": "text", + "content": ", (b) in terms of problem dimension " + }, + { + "bbox": [ + 122, + 293, + 471, + 304 + ], + "type": "inline_equation", + "content": "D" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 293, + 110, + 482, + 268 + ], + "blocks": [ + { + "bbox": [ + 293, + 110, + 482, + 268 + ], + "lines": [ + { + "bbox": [ + 293, + 110, + 482, + 268 + ], + "spans": [ + { + "bbox": [ + 293, + 110, + 482, + 268 + ], + "type": "image", + "image_path": "2356fbd6103d0f1224ab953d79b4dd1accc5dfa6c206f9f994a88a02796022c1.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 400, + 269, + 414, + 280 + ], + "lines": [ + { + "bbox": [ + 400, + 269, + 414, + 280 + ], + "spans": [ + { + "bbox": [ + 400, + 269, + 414, + 280 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 60, + 323, + 531, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 323, + 531, + 360 + ], + "spans": [ + { + "bbox": [ + 60, + 323, + 531, + 360 + ], + "type": "text", + "content": "If every dimension is discretized into " + }, + { + "bbox": [ + 60, + 323, + 531, + 360 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 60, + 323, + 531, + 360 + ], + "type": "text", + "content": " grid points, then a " + }, + { + "bbox": [ + 60, + 323, + 531, + 360 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 60, + 323, + 531, + 360 + ], + "type": "text", + "content": "-th order tensor will have DoFs of " + }, + { + "bbox": [ + 60, + 323, + 531, + 360 + ], + "type": "inline_equation", + "content": "n^D" + }, + { + "bbox": [ + 60, + 323, + 
531, + 360 + ], + "type": "text", + "content": ", whereas CP decomposition only requires " + }, + { + "bbox": [ + 60, + 323, + 531, + 360 + ], + "type": "inline_equation", + "content": "M \\times D \\times n" + }, + { + "bbox": [ + 60, + 323, + 531, + 360 + ], + "type": "text", + "content": " DoFs. Consequently, CP decomposition can dramatically reduce the total DoFs of general high-dimensional parametric problems, as shown in Fig. 5 (b)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 61, + 371, + 188, + 383 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 371, + 188, + 383 + ], + "spans": [ + { + "bbox": [ + 61, + 371, + 188, + 383 + ], + "type": "text", + "content": "2.3. TD interpolation in TAPS" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 60, + 386, + 532, + 444 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 386, + 532, + 444 + ], + "spans": [ + { + "bbox": [ + 60, + 386, + 532, + 444 + ], + "type": "text", + "content": "Assume that the " + }, + { + "bbox": [ + 60, + 386, + 532, + 444 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 60, + 386, + 532, + 444 + ], + "type": "text", + "content": "-th order tensor " + }, + { + "bbox": [ + 60, + 386, + 532, + 444 + ], + "type": "inline_equation", + "content": "u_{I_1I_2,\\dots,I_D}" + }, + { + "bbox": [ + 60, + 386, + 532, + 444 + ], + "type": "text", + "content": " represents a " + }, + { + "bbox": [ + 60, + 386, + 532, + 444 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 60, + 386, + 532, + 444 + ], + "type": "text", + "content": "-input one-output continuous function " + }, + { + "bbox": [ + 60, + 386, + 532, + 444 + ], + "type": "inline_equation", + "content": "u(\\pmb{x})" + }, + { + "bbox": [ + 60, + 386, + 532, + 444 + ], + "type": "text", + "content": " measured at a Cartesian grid discretized with " + }, + { + "bbox": [ + 60, + 386, + 532, + 444 + ], + "type": "inline_equation", + 
"content": "I_1, I_2, \\dots, I_D" + }, + { + "bbox": [ + 60, + 386, + 532, + 444 + ], + "type": "text", + "content": " grid points in each input dimension. The discrete tensor decomposition " + }, + { + "bbox": [ + 60, + 386, + 532, + 444 + ], + "type": "inline_equation", + "content": "u_{I_1I_2,\\dots,I_D}^{TD}" + }, + { + "bbox": [ + 60, + 386, + 532, + 444 + ], + "type": "text", + "content": " can only approximate the function " + }, + { + "bbox": [ + 60, + 386, + 532, + 444 + ], + "type": "inline_equation", + "content": "u(\\pmb{x})" + }, + { + "bbox": [ + 60, + 386, + 532, + 444 + ], + "type": "text", + "content": " at these grid points. In this case, how can we measure the value of the function on an arbitrary input " + }, + { + "bbox": [ + 60, + 386, + 532, + 444 + ], + "type": "inline_equation", + "content": "\\pmb{x}" + }, + { + "bbox": [ + 60, + 386, + 532, + 444 + ], + "type": "text", + "content": " with tensor decomposition? A natural answer is using C-HiDeNN interpolation functions." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 60, + 445, + 531, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 445, + 531, + 483 + ], + "spans": [ + { + "bbox": [ + 60, + 445, + 531, + 483 + ], + "type": "text", + "content": "Similar to standard finite element shape functions, for a 3D spatial problem discretized with a Cartesian grid, a 3D C-HiDeNN interpolation function can be rewritten as a tensor product of one-dimensional C-HiDeNN interpolation functions (hyperparameters " + }, + { + "bbox": [ + 60, + 445, + 531, + 483 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 60, + 445, + 531, + 483 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 60, + 445, + 531, + 483 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 60, + 445, + 531, + 483 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 60, + 445, + 531, + 483 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 60, + 445, + 531, + 483 + ], + "type": "text", + "content": " will be dropped from now on for brevity):" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 214, + 489, + 531, + 506 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 214, + 489, + 531, + 506 + ], + "spans": [ + { + "bbox": [ + 214, + 489, + 531, + 506 + ], + "type": "interline_equation", + "content": "\\widetilde {N} _ {k} \\left(x _ {1}, x _ {2}, x _ {3}\\right) = \\widetilde {N} _ {I} ^ {[ 1 ]} \\left(x _ {1}\\right) \\widetilde {N} _ {J} ^ {[ 2 ]} \\left(x _ {2}\\right) \\widetilde {N} _ {K} ^ {[ 3 ]} \\left(x _ {3}\\right) \\tag {8}", + "image_path": "d4bdcf9536a28885f75ba497e0709c0aa67add7be58df316b5adee9c8e270d11.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 60, + 513, + 531, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 513, + 531, + 536 + ], + "spans": [ + { + "bbox": [ + 60, + 513, + 531, + 536 + ], + "type": 
"text", + "content": "where the superscript refers to the dimension of the 1D C-HiDeNN shape function. Therefore, we can rewrite Eq. 2 as:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 197, + 546, + 531, + 571 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 197, + 546, + 531, + 571 + ], + "spans": [ + { + "bbox": [ + 197, + 546, + 531, + 571 + ], + "type": "interline_equation", + "content": "u ^ {h} \\left(\\boldsymbol {x} _ {s}\\right) = \\sum_ {I} \\sum_ {J} \\sum_ {K} \\widetilde {N} _ {I} ^ {[ 1 ]} \\left(x _ {1}\\right) \\widetilde {N} _ {J} ^ {[ 2 ]} \\left(x _ {2}\\right) \\widetilde {N} _ {K} ^ {[ 3 ]} \\left(x _ {3}\\right) u _ {I J K} \\tag {9}", + "image_path": "7a2648b8416cf48a0862a7cd414488f41c8312b6904c2eb9ba16212e61367671.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 61, + 576, + 531, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 576, + 531, + 602 + ], + "spans": [ + { + "bbox": [ + 61, + 576, + 531, + 602 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 61, + 576, + 531, + 602 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_s = [x_1, x_2, x_3]" + }, + { + "bbox": [ + 61, + 576, + 531, + 602 + ], + "type": "text", + "content": " is the spatial variable. Plugging the CP decomposition form of the tensor " + }, + { + "bbox": [ + 61, + 576, + 531, + 602 + ], + "type": "inline_equation", + "content": "u_{IJK}^{TD}" + }, + { + "bbox": [ + 61, + 576, + 531, + 602 + ], + "type": "text", + "content": " into Eq. 6 into Eq. 
9 and rearranging the terms, we have:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 161, + 609, + 531, + 641 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 161, + 609, + 531, + 641 + ], + "spans": [ + { + "bbox": [ + 161, + 609, + 531, + 641 + ], + "type": "interline_equation", + "content": "u ^ {T D} \\left(\\boldsymbol {x} _ {s}\\right) = \\sum_ {m = 1} ^ {M} \\left[ \\sum_ {I} \\widetilde {N} _ {I} ^ {[ 1 ]} \\left(x _ {1}\\right) u _ {I m} ^ {[ 1 ]} \\right] \\left[ \\sum_ {J} \\widetilde {N} _ {J} ^ {[ 2 ]} \\left(x _ {2}\\right) u _ {J m} ^ {[ 2 ]} \\right] \\left[ \\sum_ {K} \\widetilde {N} _ {K} ^ {[ 3 ]} \\left(x _ {3}\\right) u _ {K m} ^ {[ 3 ]} \\right] \\tag {10}", + "image_path": "3eaf36b2f65add23c007434e828372dc2c739c3a4cae5f057eede6b298198cc2.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 60, + 645, + 531, + 670 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 645, + 531, + 670 + ], + "spans": [ + { + "bbox": [ + 60, + 645, + 531, + 670 + ], + "type": "text", + "content": "Eq. 10 represents the TD interpolation (with C-HiDeNN) for a 3D spatial problem. Extending this framework to a general " + }, + { + "bbox": [ + 60, + 645, + 531, + 670 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 60, + 645, + 531, + 670 + ], + "type": "text", + "content": "-dimensional space-parameter-time (S-P-T) problem with independent variables defined in Eq. 
11:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 223, + 682, + 531, + 697 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 223, + 682, + 531, + 697 + ], + "spans": [ + { + "bbox": [ + 223, + 682, + 531, + 697 + ], + "type": "interline_equation", + "content": "\\boldsymbol {x} = \\left( \\underbrace {x _ {1}, \\dots , x _ {S}} _ {\\text {spatial variables}}, \\underbrace {x _ {S + 1}, \\dots , x _ {P}} _ {\\text {parametric variables}}, x _ {t}\\right) \\tag {11}", + "image_path": "216c6378c7e02132273fd396d64406c154b8a9b745b589affd537bb35c61797d.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 246, + 698, + 354, + 707 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 246, + 698, + 354, + 707 + ], + "spans": [ + { + "bbox": [ + 246, + 698, + 354, + 707 + ], + "type": "text", + "content": "spatial variables parametric variables" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 740, + 299, + 748 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 740, + 299, + 748 + ], + "spans": [ + { + "bbox": [ + 293, + 740, + 299, + 748 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 61, + 111, + 380, + 123 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 111, + 380, + 123 + ], + "spans": [ + { + "bbox": [ + 61, + 111, + 380, + 123 + ], + "type": "text", + "content": "Then the TD interpolation to the S-P-T solution field can be written as follows:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 130, + 531, + 174 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 130, + 531, + 174 + ], + "spans": [ + { + "bbox": [ + 67, + 130, + 531, + 174 + ], + "type": "interline_equation", + "content": "u ^ {T D} \\left(\\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, x _ {t}\\right) 
= \\sum_ {m = 1} ^ {M} \\underbrace {\\left[ \\sum_ {I _ {1}} \\widetilde {N} _ {I _ {1}} ^ {[ 1 ]} \\left(x _ {1}\\right) u _ {I _ {1} m} ^ {[ 1 ]} \\right] \\cdots \\left[ \\sum_ {I _ {S}} \\widetilde {N} _ {I _ {S}} ^ {[ S ]} \\left(x _ {I _ {S}}\\right) u _ {I _ {S} m} ^ {[ S ]} \\right]} _ {\\text {s p a t i a l}} \\underbrace {\\left[ \\sum_ {I _ {S + 1}} \\widetilde {N} _ {I _ {S + 1}} ^ {[ S + 1 ]} \\left(x _ {S + 1}\\right) u _ {I _ {S + 1} m} ^ {[ S + 1 ]} \\right] \\cdots \\left[ \\sum_ {P} \\widetilde {N} _ {I _ {P}} ^ {[ P ]} \\left(x _ {P}\\right) u _ {I _ {P} m} ^ {[ P ]} \\right]} _ {\\text {p a r a m e t r i c}} \\underbrace {\\left[ \\sum_ {I _ {D}} \\widetilde {N} _ {I _ {D}} ^ {[ D ]} (t) u _ {I _ {D}} ^ {[ D ]} \\right]} _ {\\text {t e m p o r a l}} \\tag {12}", + "image_path": "18ab3d2686f8cae986e5a3e3ca82f2a21d5836b3069fa44a2d1c92029694b446.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 61, + 180, + 292, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 180, + 292, + 193 + ], + "spans": [ + { + "bbox": [ + 61, + 180, + 292, + 193 + ], + "type": "text", + "content": "This can be further simplified using the product notation:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 213, + 200, + 531, + 234 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 213, + 200, + 531, + 234 + ], + "spans": [ + { + "bbox": [ + 213, + 200, + 531, + 234 + ], + "type": "interline_equation", + "content": "u ^ {T D} \\left(\\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, x _ {t}\\right) = \\sum_ {m = 1} ^ {M} \\prod_ {d = 1} ^ {D} \\sum_ {I _ {d}} \\widetilde {N} _ {I _ {d}} ^ {[ d ]} \\left(x _ {d}\\right) u _ {I _ {d} m} ^ {[ d ]} \\tag {13}", + "image_path": "a2e52b63c9dbf211244d1515b6c4a7734bfa87921e17aa4c26c87c44d597b05e.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 60, + 243, + 531, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 243, + 531, + 
269 + ], + "spans": [ + { + "bbox": [ + 60, + 243, + 531, + 269 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 60, + 243, + 531, + 269 + ], + "type": "inline_equation", + "content": "\\widetilde{N}_{I_d}^{[d]}(x_d)" + }, + { + "bbox": [ + 60, + 243, + 531, + 269 + ], + "type": "text", + "content": " refers to the 1D C-HiDeNN shape function in the " + }, + { + "bbox": [ + 60, + 243, + 531, + 269 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 60, + 243, + 531, + 269 + ], + "type": "text", + "content": "-th dimension; " + }, + { + "bbox": [ + 60, + 243, + 531, + 269 + ], + "type": "inline_equation", + "content": "u_{I_d m}^{[d]}" + }, + { + "bbox": [ + 60, + 243, + 531, + 269 + ], + "type": "text", + "content": " is the nodal solution for dimension " + }, + { + "bbox": [ + 60, + 243, + 531, + 269 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 60, + 243, + 531, + 269 + ], + "type": "text", + "content": " and mode " + }, + { + "bbox": [ + 60, + 243, + 531, + 269 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 60, + 243, + 531, + 269 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 61, + 280, + 255, + 293 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 280, + 255, + 293 + ], + "spans": [ + { + "bbox": [ + 61, + 280, + 255, + 293 + ], + "type": "text", + "content": "2.4. The General S-P-T Galerkin form of TAPS" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 75, + 296, + 511, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 296, + 511, + 307 + ], + "spans": [ + { + "bbox": [ + 75, + 296, + 511, + 307 + ], + "type": "text", + "content": "Similar to FEM, TAPS adopts the weighted-sum formulation to solve PDEs. 
Consider a general S-P-T PDE:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 261, + 319, + 531, + 332 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 261, + 319, + 531, + 332 + ], + "spans": [ + { + "bbox": [ + 261, + 319, + 531, + 332 + ], + "type": "interline_equation", + "content": "\\mathcal {L} (u (\\boldsymbol {x})) = f (\\boldsymbol {x}), \\tag {14}", + "image_path": "5dd1f815b0f0d6ce05c783498743769c58a8c3fe64da410116c95bde0cf3ea5e.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 61, + 338, + 530, + 362 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 338, + 530, + 362 + ], + "spans": [ + { + "bbox": [ + 61, + 338, + 530, + 362 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 61, + 338, + 530, + 362 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 61, + 338, + 530, + 362 + ], + "type": "text", + "content": " is the differential operator; the independent variable vector " + }, + { + "bbox": [ + 61, + 338, + 530, + 362 + ], + "type": "inline_equation", + "content": "\\pmb{x} = (x_{s}, x_{p}, x_{t})" + }, + { + "bbox": [ + 61, + 338, + 530, + 362 + ], + "type": "text", + "content": "; " + }, + { + "bbox": [ + 61, + 338, + 530, + 362 + ], + "type": "inline_equation", + "content": "f(\\pmb{x})" + }, + { + "bbox": [ + 61, + 338, + 530, + 362 + ], + "type": "text", + "content": " is the forcing function. Table 3 lists different examples of operator " + }, + { + "bbox": [ + 61, + 338, + 530, + 362 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 61, + 338, + 530, + 362 + ], + "type": "text", + "content": " and corresponding dependent and independent variables." 
+ } + ] + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 62, + 390, + 539, + 449 + ], + "blocks": [ + { + "bbox": [ + 164, + 380, + 428, + 390 + ], + "lines": [ + { + "bbox": [ + 164, + 380, + 428, + 390 + ], + "spans": [ + { + "bbox": [ + 164, + 380, + 428, + 390 + ], + "type": "text", + "content": "Table 3: Examples for differential operators, dependent and independent variables" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 62, + 390, + 539, + 449 + ], + "lines": [ + { + "bbox": [ + 62, + 390, + 539, + 449 + ], + "spans": [ + { + "bbox": [ + 62, + 390, + 539, + 449 + ], + "type": "table", + "html": "
PDEDifferential operator LDependent variablexsxpxt
∂2u/∂x12 + ∂2u/∂x22 + ... + ∂2u/∂x2D = f(x)∂2/∂x12 + ∂2/∂x22 + ... + ∂2/∂x2Du(x1, x2, ...,xD)--
μui, jj + (μ + λ)uj,ij + Fi = e(x12+x22+x32)μ(·)i,jj + (μ + λ)(·)j,ijui, i = 1, 2, 3(x1, x2, x3)(λ,μ)-
ρcp,du/dt + k(∂2u/∂x12 + ∂2u/∂x22 + ∂2u/∂x32) = Pe(x12+x22+x32)ρcp,du/dt + k(∂2/∂x12 + ∂2/∂x22 + ∂2/∂x32)u(x1, x2, x3)(ρ, cp, k, P)t
", + "image_path": "7caeaada43efeb0a507f555c9c0b240ec85768dcd8b9ddf7f5624cd32319ec5c.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 76, + 460, + 418, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 460, + 418, + 471 + ], + "spans": [ + { + "bbox": [ + 76, + 460, + 418, + 471 + ], + "type": "text", + "content": "The weighted-sum residual form of the PDE with TD interpolation can be written as:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 215, + 480, + 531, + 506 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 215, + 480, + 531, + 506 + ], + "spans": [ + { + "bbox": [ + 215, + 480, + 531, + 506 + ], + "type": "interline_equation", + "content": "\\int_ {\\Omega} \\delta u ^ {T D} (\\boldsymbol {x}) \\left[ \\mathcal {L} \\left(u ^ {T D} (\\boldsymbol {x})\\right) - f (\\boldsymbol {x}) \\right] d \\Omega = 0 \\tag {15}", + "image_path": "86142d90d03cef39ede414943671a8caa0869ac0e8d886e003ff95b6ead14135.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 61, + 509, + 530, + 522 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 509, + 530, + 522 + ], + "spans": [ + { + "bbox": [ + 61, + 509, + 530, + 522 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 61, + 509, + 530, + 522 + ], + "type": "inline_equation", + "content": "u^{TD}" + }, + { + "bbox": [ + 61, + 509, + 530, + 522 + ], + "type": "text", + "content": " is the approximation of the solution (i.e., trial function), " + }, + { + "bbox": [ + 61, + 509, + 530, + 522 + ], + "type": "inline_equation", + "content": "\\delta u^{TD}" + }, + { + "bbox": [ + 61, + 509, + 530, + 522 + ], + "type": "text", + "content": " is the test function, and " + }, + { + "bbox": [ + 61, + 509, + 530, + 522 + ], + "type": "inline_equation", + "content": "d\\Omega = d\\Omega_{x_s}d\\Omega_{x_p}d\\Omega_{x_t}" + }, + { + "bbox": [ + 61, + 509, 
+ 530, + 522 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 60, + 522, + 531, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 522, + 531, + 592 + ], + "spans": [ + { + "bbox": [ + 60, + 522, + 531, + 592 + ], + "type": "text", + "content": "Depending on how " + }, + { + "bbox": [ + 60, + 522, + 531, + 592 + ], + "type": "inline_equation", + "content": "\\delta u^{TD}" + }, + { + "bbox": [ + 60, + 522, + 531, + 592 + ], + "type": "text", + "content": " is adopted, different mathematical formulations can be obtained. If the test function resides in the same function space as the trial function, it becomes the Galerkin formulation. When the test function space differs from the trial function space, it becomes the Petrov-Galerkin formulation [22]. If the Dirac delta function is used for the test function, then Eq. 15 corresponds to the collocation method [24]. In this paper, we employ the Galerkin formulation. However, the proposed framework is versatile and can be extended to accommodate other formulations as well." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 60, + 594, + 531, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 594, + 531, + 665 + ], + "spans": [ + { + "bbox": [ + 60, + 594, + 531, + 665 + ], + "type": "text", + "content": "In Eq. 12, the entire S-P-T domain is approximated using TD interpolation. However, this approach may result in a large system of equations due to the rapid increase in the number of TD modes for certain cases. For example, if the forcing function represents a moving source function in Eq. 14, this complexity may arise. To maintain computational efficiency, we can partition the temporal domain into a series of time slabs. As illustrated in Fig. 
6(a), the S-P-T continuum is divided into S-P-T slabs " + }, + { + "bbox": [ + 60, + 594, + 531, + 665 + ], + "type": "inline_equation", + "content": "\\mathcal{T}_1,\\mathcal{T}_2,\\dots ,\\mathcal{T}_T" + }, + { + "bbox": [ + 60, + 594, + 531, + 665 + ], + "type": "text", + "content": " . The solution within each time slab is then approximated individually using the TD interpolation." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 60, + 666, + 531, + 702 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 666, + 531, + 702 + ], + "spans": [ + { + "bbox": [ + 60, + 666, + 531, + 702 + ], + "type": "text", + "content": "Between consecutive S-P-T slabs, either a continuous or discontinuous formulation can be employed. As shown in Fig. 6(b) for the continuous Galerkin scheme, the continuity of the solution in time is enforced by imposing the solution at the end of slab " + }, + { + "bbox": [ + 60, + 666, + 531, + 702 + ], + "type": "inline_equation", + "content": "\\mathcal{T}_{i-1}" + }, + { + "bbox": [ + 60, + 666, + 531, + 702 + ], + "type": "text", + "content": " as the initial condition of " + }, + { + "bbox": [ + 60, + 666, + 531, + 702 + ], + "type": "inline_equation", + "content": "\\mathcal{T}_i" + }, + { + "bbox": [ + 60, + 666, + 531, + 702 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 223, + 711, + 531, + 727 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 223, + 711, + 531, + 727 + ], + "spans": [ + { + "bbox": [ + 223, + 711, + 531, + 727 + ], + "type": "interline_equation", + "content": "{ } ^ { [ \\mathcal { T } + 1 ] } u ( \\boldsymbol { x } _ { s } , \\boldsymbol { x } _ { p } , 0 ) = { } ^ { [ \\mathcal { T } ] } u ( \\boldsymbol { x } _ { s } , \\boldsymbol { x } _ { p } , x _ { t } ^ { m a x } ) \\tag {16}", + "image_path": "3c2e43dbebb9daba3688af22ec37ecb4d7500b7352d31753f4e28373423f66c8.jpg" + } + ] + } + ], + "index": 17 + } + ], + 
"discarded_blocks": [ + { + "bbox": [ + 293, + 740, + 299, + 748 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 740, + 299, + 748 + ], + "spans": [ + { + "bbox": [ + 293, + 740, + 299, + 748 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 61, + 111, + 531, + 136 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 111, + 531, + 136 + ], + "spans": [ + { + "bbox": [ + 61, + 111, + 531, + 136 + ], + "type": "text", + "content": "Discontinuous Galerkin method can be used when a discontinuity is allowed between S-P-T slabs, as illustrated in Fig. 6(c). Discontinuity in time can be modeled using the jump operator " + }, + { + "bbox": [ + 61, + 111, + 531, + 136 + ], + "type": "inline_equation", + "content": "\\llbracket \\dots \\rrbracket" + }, + { + "bbox": [ + 61, + 111, + 531, + 136 + ], + "type": "text", + "content": " [25]." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 173, + 144, + 531, + 164 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 173, + 144, + 531, + 164 + ], + "spans": [ + { + "bbox": [ + 173, + 144, + 531, + 164 + ], + "type": "interline_equation", + "content": "\\llbracket u \\left(\\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, t\\right) \\rrbracket = \\lim _ {\\epsilon \\rightarrow 0 ^ {+}} \\left(^ {\\mathcal {T} + 1} u \\left(\\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, \\epsilon\\right) - ^ {\\mathcal {T}} u \\left(\\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, x _ {t} ^ {\\max } - \\epsilon\\right)\\right) \\tag {17}", + "image_path": "0c0195a00540cb3bf109305d5fb451bbe379f9f22a7f1b85b10592b6f1b26c16.jpg" + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 87, + 180, + 243, + 284 + ], + "blocks": [ + { + "bbox": [ + 87, + 180, + 243, + 284 + ], + "lines": [ + { + "bbox": [ + 87, + 180, + 243, + 284 + ], + "spans": [ + { + "bbox": [ + 87, + 180, + 243, + 284 + ], + "type": "image", + "image_path": "6c2d52bb0f68ce1b2b23d130b8706969e5dd55fc54afb40e93be9e7a63dfc8a8.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 150, + 286, + 161, + 296 + ], + "lines": [ + { + "bbox": [ + 150, + 286, + 161, + 296 + ], + "spans": [ + { + "bbox": [ + 150, + 286, + 161, + 296 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 243, + 179, + 367, + 271 + ], + "blocks": [ + { + "bbox": [ + 243, + 179, + 367, + 271 + ], + "lines": [ + { + "bbox": [ + 243, + 179, + 367, + 271 + ], + "spans": [ + { + "bbox": [ + 243, + 179, + 367, + 271 + ], + "type": "image", + "image_path": "2211d1927532087afa8c9ab6bf9f0e46ebf0f9fcc68e95f91179e9e07f54486f.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 299, + 285, + 310, + 296 + 
], + "lines": [ + { + "bbox": [ + 299, + 285, + 310, + 296 + ], + "spans": [ + { + "bbox": [ + 299, + 285, + 310, + 296 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 61, + 308, + 531, + 327 + ], + "lines": [ + { + "bbox": [ + 61, + 308, + 531, + 327 + ], + "spans": [ + { + "bbox": [ + 61, + 308, + 531, + 327 + ], + "type": "text", + "content": "Figure 6: (a) Multiple S-P-T slabs along the temporal dimension. (b) Continuous Galerkin: the solution is continuous across different S-P-T slabs. (c) Discontinuous Galerkin: jumps are allowed across the slab boundaries" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 381, + 179, + 505, + 271 + ], + "blocks": [ + { + "bbox": [ + 381, + 179, + 505, + 271 + ], + "lines": [ + { + "bbox": [ + 381, + 179, + 505, + 271 + ], + "spans": [ + { + "bbox": [ + 381, + 179, + 505, + 271 + ], + "type": "image", + "image_path": "f9df96ab1e94c3f197c125cc26bdae791e3769bfcd4aa7fddc2a762fde3a337b.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 438, + 286, + 449, + 296 + ], + "lines": [ + { + "bbox": [ + 438, + 286, + 449, + 296 + ], + "spans": [ + { + "bbox": [ + 438, + 286, + 449, + 296 + ], + "type": "text", + "content": "(c)" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 61, + 337, + 531, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 337, + 531, + 373 + ], + "spans": [ + { + "bbox": [ + 61, + 337, + 531, + 373 + ], + "type": "text", + "content": "Keeping in mind that this approach can be applied generally to a range of engineering problems, we will demonstrate an example of the Galerkin formulation using a single space-parameter-time partition (S-P-T) slab in the remainder of this section. 
For illustrative purposes, the transient heat transfer equation will be utilized:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 220, + 380, + 531, + 393 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 220, + 380, + 531, + 393 + ], + "spans": [ + { + "bbox": [ + 220, + 380, + 531, + 393 + ], + "type": "interline_equation", + "content": "\\rho c _ {p} \\nabla_ {x _ {t}} u - \\nabla_ {\\boldsymbol {x} _ {s}} \\cdot k \\nabla_ {\\boldsymbol {x} _ {s}} u = f (\\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, x _ {t}) \\tag {18}", + "image_path": "121736df2595f3224598b727c8507c7b14a0a69c6722ac181695a58825c7f8e5.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 61, + 400, + 530, + 423 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 400, + 530, + 423 + ], + "spans": [ + { + "bbox": [ + 61, + 400, + 530, + 423 + ], + "type": "text", + "content": "as we focus on the example of modeling the laser powder bed fusion (LPBF) process in additive manufacturing (AM). 
In an LPBF simulation, we adopt the following time-dependent moving heat source function:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 161, + 429, + 531, + 463 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 161, + 429, + 531, + 463 + ], + "spans": [ + { + "bbox": [ + 161, + 429, + 531, + 463 + ], + "type": "interline_equation", + "content": "f \\left(\\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, x _ {t}\\right) = \\frac {2 \\eta P}{\\pi r ^ {2} d _ {\\nu}} \\exp \\left(- \\frac {2 \\left((x - x _ {0} (t)) ^ {2} + (y - y _ {0} (t)) ^ {2}\\right)}{r ^ {2}}\\right) \\cdot \\mathbf {1} _ {\\left(x _ {3} \\geq d _ {\\nu}\\right)} \\tag {19}", + "image_path": "b884ab7f47eb2ab953e6ac700afe4526438c7953f5553e2fc74b45b168828a0a.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 61, + 469, + 531, + 551 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 469, + 531, + 551 + ], + "spans": [ + { + "bbox": [ + 61, + 469, + 531, + 551 + ], + "type": "text", + "content": "Summarizing the independent variables in Eq. 18, there are spatial variables " + }, + { + "bbox": [ + 61, + 469, + 531, + 551 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_s = (x_1, x_2, x_3)" + }, + { + "bbox": [ + 61, + 469, + 531, + 551 + ], + "type": "text", + "content": "; parametric variables " + }, + { + "bbox": [ + 61, + 469, + 531, + 551 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_p = (k, \\rho, c_p, \\eta, P, r, d_v)" + }, + { + "bbox": [ + 61, + 469, + 531, + 551 + ], + "type": "text", + "content": "; and a temporal variable " + }, + { + "bbox": [ + 61, + 469, + 531, + 551 + ], + "type": "inline_equation", + "content": "x_t = t" + }, + { + "bbox": [ + 61, + 469, + 531, + 551 + ], + "type": "text", + "content": ". 
Among the parametric variables, " + }, + { + "bbox": [ + 61, + 469, + 531, + 551 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 61, + 469, + 531, + 551 + ], + "type": "text", + "content": " is conductivity; " + }, + { + "bbox": [ + 61, + 469, + 531, + 551 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 61, + 469, + 531, + 551 + ], + "type": "text", + "content": " is the material density; " + }, + { + "bbox": [ + 61, + 469, + 531, + 551 + ], + "type": "inline_equation", + "content": "c_p" + }, + { + "bbox": [ + 61, + 469, + 531, + 551 + ], + "type": "text", + "content": " is heat capacity; " + }, + { + "bbox": [ + 61, + 469, + 531, + 551 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 61, + 469, + 531, + 551 + ], + "type": "text", + "content": " is the material absorptivity; " + }, + { + "bbox": [ + 61, + 469, + 531, + 551 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 61, + 469, + 531, + 551 + ], + "type": "text", + "content": " represents laser power; " + }, + { + "bbox": [ + 61, + 469, + 531, + 551 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 61, + 469, + 531, + 551 + ], + "type": "text", + "content": " is the standard deviation that characterizes the width of the heat source; " + }, + { + "bbox": [ + 61, + 469, + 531, + 551 + ], + "type": "inline_equation", + "content": "d_v" + }, + { + "bbox": [ + 61, + 469, + 531, + 551 + ], + "type": "text", + "content": " is the penetration depth of the heat source. In Eq. 
19, " + }, + { + "bbox": [ + 61, + 469, + 531, + 551 + ], + "type": "inline_equation", + "content": "[x_0(t), y_0(t)]" + }, + { + "bbox": [ + 61, + 469, + 531, + 551 + ], + "type": "text", + "content": " represents the center of the moving heat source; " + }, + { + "bbox": [ + 61, + 469, + 531, + 551 + ], + "type": "inline_equation", + "content": "\\mathbf{1}_{(x_3 \\geq d_v)}" + }, + { + "bbox": [ + 61, + 469, + 531, + 551 + ], + "type": "text", + "content": " is the indicator function where " + }, + { + "bbox": [ + 61, + 469, + 531, + 551 + ], + "type": "inline_equation", + "content": "\\mathbf{1}_{(x_3 \\geq d_v)} = 1" + }, + { + "bbox": [ + 61, + 469, + 531, + 551 + ], + "type": "text", + "content": " if " + }, + { + "bbox": [ + 61, + 469, + 531, + 551 + ], + "type": "inline_equation", + "content": "x_3 \\geq d_v" + }, + { + "bbox": [ + 61, + 469, + 531, + 551 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 61, + 469, + 531, + 551 + ], + "type": "inline_equation", + "content": "\\mathbf{1}_{(x_3 \\geq d_v)} = 0" + }, + { + "bbox": [ + 61, + 469, + 531, + 551 + ], + "type": "text", + "content": " if " + }, + { + "bbox": [ + 61, + 469, + 531, + 551 + ], + "type": "inline_equation", + "content": "x_3 < d_v" + }, + { + "bbox": [ + 61, + 469, + 531, + 551 + ], + "type": "text", + "content": ". Note that the discretization of the material parameters, in particular, in a random field setting, has been previously proposed by Liu et al. [26, 27]." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 61, + 553, + 530, + 576 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 553, + 530, + 576 + ], + "spans": [ + { + "bbox": [ + 61, + 553, + 530, + 576 + ], + "type": "text", + "content": "As shown in the schematic below, we classify the boundary surfaces into 2 categories: the Dirichlet boundary surface " + }, + { + "bbox": [ + 61, + 553, + 530, + 576 + ], + "type": "inline_equation", + "content": "\\Gamma_{D}" + }, + { + "bbox": [ + 61, + 553, + 530, + 576 + ], + "type": "text", + "content": " and the Neumann boundary surface " + }, + { + "bbox": [ + 61, + 553, + 530, + 576 + ], + "type": "inline_equation", + "content": "\\Gamma_{N}" + }, + { + "bbox": [ + 61, + 553, + 530, + 576 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 61, + 576, + 531, + 611 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 576, + 531, + 611 + ], + "spans": [ + { + "bbox": [ + 61, + 576, + 531, + 611 + ], + "type": "text", + "content": "A uniform ambient temperature is used as the initial condition. The bottom of the powder bed is subject to the Dirichlet boundary condition and the Neumann boundary conditions are prescribed on the other surfaces. 
The initial and boundary conditions are:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 236, + 613, + 315, + 624 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 236, + 613, + 315, + 624 + ], + "spans": [ + { + "bbox": [ + 236, + 613, + 315, + 624 + ], + "type": "interline_equation", + "content": "u (\\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, 0) | _ {\\Omega} = u _ {0},", + "image_path": "4780699941f48ed622c9dc594a6387e86809e70653b7ccda7100991196fd833e.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 236, + 626, + 530, + 640 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 236, + 626, + 530, + 640 + ], + "spans": [ + { + "bbox": [ + 236, + 626, + 530, + 640 + ], + "type": "interline_equation", + "content": "\\left. u \\left(\\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, x _ {t}\\right) \\right| _ {\\Gamma_ {D}} = u _ {0}, \\tag {20}", + "image_path": "f87028d5954edd3bf3a3bd90ca3f21b0a37fa1e24a8f443b8e35ccee6feead39.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 236, + 642, + 355, + 655 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 236, + 642, + 355, + 655 + ], + "spans": [ + { + "bbox": [ + 236, + 642, + 355, + 655 + ], + "type": "interline_equation", + "content": "\\boldsymbol {n} \\cdot \\boldsymbol {q} | _ {\\Gamma_ {N}} = q _ {\\text {c o n v}} + q _ {\\text {r a d}} + q _ {\\text {e v a p}}", + "image_path": "f062c9e634cd5ae0bb9acc8714a2f98360a99e12636e133d41fb746e7105791c.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 61, + 658, + 531, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 658, + 531, + 682 + ], + "spans": [ + { + "bbox": [ + 61, + 658, + 531, + 682 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 61, + 658, + 531, + 682 + ], + "type": "inline_equation", + "content": "u_{0}" + }, + { + "bbox": [ + 61, + 658, + 531, + 682 + ], + "type": "text", + 
"content": " is the ambient temperature, " + }, + { + "bbox": [ + 61, + 658, + 531, + 682 + ], + "type": "inline_equation", + "content": "q_{conv}" + }, + { + "bbox": [ + 61, + 658, + 531, + 682 + ], + "type": "text", + "content": " accounts for free convection, " + }, + { + "bbox": [ + 61, + 658, + 531, + 682 + ], + "type": "inline_equation", + "content": "q_{rad}" + }, + { + "bbox": [ + 61, + 658, + 531, + 682 + ], + "type": "text", + "content": " accounts for radiation, and " + }, + { + "bbox": [ + 61, + 658, + 531, + 682 + ], + "type": "inline_equation", + "content": "q_{evap}" + }, + { + "bbox": [ + 61, + 658, + 531, + 682 + ], + "type": "text", + "content": " imposes evaporative cooling when any material surface reaches the evaporation temperature [28]. Each flux is defined as:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 234, + 691, + 340, + 703 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 234, + 691, + 340, + 703 + ], + "spans": [ + { + "bbox": [ + 234, + 691, + 340, + 703 + ], + "type": "interline_equation", + "content": "q _ {c o n v} = h _ {c o n v} [ u (x, t) - u _ {0} ],", + "image_path": "443ce4eba0f6dc0c99596fa2efb800af5593d2e111fec3b3a4777d6acdf943e3.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 235, + 705, + 530, + 719 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 235, + 705, + 530, + 719 + ], + "spans": [ + { + "bbox": [ + 235, + 705, + 530, + 719 + ], + "type": "interline_equation", + "content": "q _ {r a d} = - \\sigma_ {S B} \\epsilon \\left(u ^ {4} \\left(\\boldsymbol {x} _ {s}, x _ {t}\\right) - u _ {0} ^ {4}\\right), \\tag {21}", + "image_path": "0e82a5c6d9e2756db10568be6ffab3861b55523fb118976e8de8b681c9bca3cf.jpg" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 235, + 722, + 318, + 735 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 235, + 722, + 318, + 735 + ], + "spans": [ + { + "bbox": [ + 235, + 722, + 
318, + 735 + ], + "type": "interline_equation", + "content": "q _ {e v a p} = - m _ {e v a p} L _ {e v a p}.", + "image_path": "2f518732887fd82f432149602affcd4d525bcadbceb4b370fede13fc298c6cf3.jpg" + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 740, + 300, + 748 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 740, + 300, + 748 + ], + "spans": [ + { + "bbox": [ + 293, + 740, + 300, + 748 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 207, + 114, + 385, + 220 + ], + "blocks": [ + { + "bbox": [ + 207, + 114, + 385, + 220 + ], + "lines": [ + { + "bbox": [ + 207, + 114, + 385, + 220 + ], + "spans": [ + { + "bbox": [ + 207, + 114, + 385, + 220 + ], + "type": "image", + "image_path": "a36972000cd774cc5ab556e0900c90f4c6eea0a3a42c11d6c7322779ba30c687.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 167, + 231, + 425, + 241 + ], + "lines": [ + { + "bbox": [ + 167, + 231, + 425, + 241 + ], + "spans": [ + { + "bbox": [ + 167, + 231, + 425, + 241 + ], + "type": "text", + "content": "Figure 7: Transient heat transfer with initial condition and boundary conditions." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 61, + 261, + 531, + 309 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 261, + 531, + 309 + ], + "spans": [ + { + "bbox": [ + 61, + 261, + 531, + 309 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 61, + 261, + 531, + 309 + ], + "type": "inline_equation", + "content": "\\sigma_{SB}" + }, + { + "bbox": [ + 61, + 261, + 531, + 309 + ], + "type": "text", + "content": " is the Stefan-Boltzmann constant; " + }, + { + "bbox": [ + 61, + 261, + 531, + 309 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 61, + 261, + 531, + 309 + ], + "type": "text", + "content": " is the material emissivity; " + }, + { + "bbox": [ + 61, + 261, + 531, + 309 + ], + "type": "inline_equation", + "content": "u_0" + }, + { + "bbox": [ + 61, + 261, + 531, + 309 + ], + "type": "text", + "content": " is the ambient temperature; " + }, + { + "bbox": [ + 61, + 261, + 531, + 309 + ], + "type": "inline_equation", + "content": "h_{conv}" + }, + { + "bbox": [ + 61, + 261, + 531, + 309 + ], + "type": "text", + "content": " is the convection coefficient of the surrounding gas, " + }, + { + "bbox": [ + 61, + 261, + 531, + 309 + ], + "type": "inline_equation", + "content": "m_{evap}" + }, + { + "bbox": [ + 61, + 261, + 531, + 309 + ], + "type": "text", + "content": " is the mass evaporation flux and " + }, + { + "bbox": [ + 61, + 261, + 531, + 309 + ], + "type": "inline_equation", + "content": "L_{evap}" + }, + { + "bbox": [ + 61, + 261, + 531, + 309 + ], + "type": "text", + "content": " is the heat of evaporation. In the following numerical examples, we only consider the free convection term in the Neumann boundary condition. The solution to Eq. 
18 is approximated using TD interpolation function:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 206, + 317, + 531, + 349 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 206, + 317, + 531, + 349 + ], + "spans": [ + { + "bbox": [ + 206, + 317, + 531, + 349 + ], + "type": "interline_equation", + "content": "u ^ {T D} \\left(\\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, x _ {t}\\right) = \\sum_ {m = 1} ^ {M} u _ {\\boldsymbol {x} _ {s}} ^ {(m)} \\left(\\boldsymbol {x} _ {s}\\right) u _ {\\boldsymbol {x} _ {p}} ^ {(m)} \\left(\\boldsymbol {x} _ {p}\\right) u _ {x _ {t}} ^ {(m)} \\left(x _ {t}\\right) \\tag {22}", + "image_path": "510d3c85b34ccf42b968db87b2dc61e5a968979d6d9ee0f5bb108c984c52059b.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 61, + 357, + 531, + 393 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 357, + 531, + 393 + ], + "spans": [ + { + "bbox": [ + 61, + 357, + 531, + 393 + ], + "type": "text", + "content": "Here a general notation is employed to represent different types of components in Eq. 22. For example, the spatial component " + }, + { + "bbox": [ + 61, + 357, + 531, + 393 + ], + "type": "inline_equation", + "content": "u_{\\boldsymbol{x}_s}^{(m)}(\\boldsymbol{x}_s)" + }, + { + "bbox": [ + 61, + 357, + 531, + 393 + ], + "type": "text", + "content": " is equivalent to " + }, + { + "bbox": [ + 61, + 357, + 531, + 393 + ], + "type": "inline_equation", + "content": "u_{x_1}^{(m)}(x_1)u_{x_2}^{(m)}(x_2)u_{x_3}^{(m)}(x_3)" + }, + { + "bbox": [ + 61, + 357, + 531, + 393 + ], + "type": "text", + "content": ". 
The corresponding test function can be obtained using the variational principle:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 83, + 410, + 531, + 443 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 410, + 531, + 443 + ], + "spans": [ + { + "bbox": [ + 83, + 410, + 531, + 443 + ], + "type": "interline_equation", + "content": "\\delta u ^ {T D} \\left(\\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, x _ {t}\\right) = \\sum_ {m = 1} ^ {M} \\left[ \\delta u _ {\\boldsymbol {x} _ {s}} ^ {(m)} \\left(\\boldsymbol {x} _ {s}\\right) u _ {\\boldsymbol {x} _ {p}} ^ {(m)} \\left(\\boldsymbol {x} _ {p}\\right) u _ {x _ {t}} ^ {(m)} \\left(x _ {t}\\right) + u _ {\\boldsymbol {x} _ {s}} ^ {(m)} \\left(\\boldsymbol {x} _ {s}\\right) \\delta u _ {\\boldsymbol {x} _ {p}} ^ {(m)} \\left(\\boldsymbol {x} _ {p}\\right) u _ {x _ {t}} ^ {(m)} \\left(x _ {t}\\right) + u _ {\\boldsymbol {x} _ {s}} ^ {(m)} \\left(\\boldsymbol {x} _ {s}\\right) u _ {\\boldsymbol {x} _ {p}} ^ {(m)} \\left(\\boldsymbol {x} _ {p}\\right) \\delta u _ {x _ {t}} ^ {(m)} \\left(x _ {t}\\right) \\right] \\tag {23}", + "image_path": "d04c9dfbd6fb40636267c6489da8d2b6f53e22a434eca91a1ca5341806ec0fcf.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 61, + 450, + 499, + 463 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 450, + 499, + 463 + ], + "spans": [ + { + "bbox": [ + 61, + 450, + 499, + 463 + ], + "type": "text", + "content": "Plugging the trial and test functions, the S-P-T Galerkin form of Eq. 18 can be obtained by following Eq. 
15:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 196, + 470, + 531, + 496 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 196, + 470, + 531, + 496 + ], + "spans": [ + { + "bbox": [ + 196, + 470, + 531, + 496 + ], + "type": "interline_equation", + "content": "\\int_ {\\Omega} \\delta u ^ {T D} \\left[ \\rho c _ {p} \\nabla_ {x _ {t}} u ^ {T D} - \\nabla_ {x _ {s}} \\cdot k \\nabla_ {x _ {s}} u ^ {T D} - f \\right] d \\Omega = 0 \\tag {24}", + "image_path": "27c9b9d37a59318116c0189817ddcf4d426935b9529d8ffcf8d94821ff8726ba.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 61, + 499, + 530, + 522 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 499, + 530, + 522 + ], + "spans": [ + { + "bbox": [ + 61, + 499, + 530, + 522 + ], + "type": "text", + "content": "Using integration by parts on the diffusion term, we get the corresponding general S-P-T Galerkin weak form in TAPS formulation:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 66, + 540, + 531, + 569 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 540, + 531, + 569 + ], + "spans": [ + { + "bbox": [ + 66, + 540, + 531, + 569 + ], + "type": "interline_equation", + "content": "\\int_ {\\Omega} \\delta u ^ {T D} \\rho c _ {p} \\nabla_ {x _ {t}} u ^ {T D} d \\Omega + \\int_ {\\Omega} \\nabla_ {\\boldsymbol {x} _ {s}} \\delta u ^ {T D} \\cdot k \\nabla_ {\\boldsymbol {x} _ {s}} u ^ {T D} d \\Omega - \\int_ {\\partial \\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, t} \\delta u ^ {T D} \\boldsymbol {n} \\cdot \\boldsymbol {q} | _ {\\Gamma_ {N}} d s d \\Omega_ {\\boldsymbol {x} _ {p}} d \\Omega_ {t} - \\int_ {\\Omega} \\delta u ^ {T D} f (\\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, x _ {t}) d \\Omega = 0 \\tag {25}", + "image_path": "0075aeaf6cca885996621046020cbd1c77f74ed6eae9c69767afa526cd023f66.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 61, + 576, + 268, + 587 + ], + "type": 
"text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 576, + 268, + 587 + ], + "spans": [ + { + "bbox": [ + 61, + 576, + 268, + 587 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 61, + 576, + 268, + 587 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 61, + 576, + 268, + 587 + ], + "type": "text", + "content": " is the heat flux on the Neumann boundary." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 61, + 599, + 180, + 611 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 599, + 180, + 611 + ], + "spans": [ + { + "bbox": [ + 61, + 599, + 180, + 611 + ], + "type": "text", + "content": "2.5. Discretized matrix form" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 61, + 613, + 531, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 613, + 531, + 673 + ], + "spans": [ + { + "bbox": [ + 61, + 613, + 531, + 673 + ], + "type": "text", + "content": "The S-P-T Galerkin weak form shown in Eq. 25 is nonlinear in nature due to the tensor product structure in TD interpolation, necessitating efficient solution schemes. To illustrate the detailed solution approach for the general S-P-T weak form, we simplify the governing equation Eq. 18 by considering a one-dimensional spatial problem where " + }, + { + "bbox": [ + 61, + 613, + 531, + 673 + ], + "type": "inline_equation", + "content": "x_{s} = x" + }, + { + "bbox": [ + 61, + 613, + 531, + 673 + ], + "type": "text", + "content": ". We assume that the product of density and specific heat capacity " + }, + { + "bbox": [ + 61, + 613, + 531, + 673 + ], + "type": "inline_equation", + "content": "\\rho c_{p}" + }, + { + "bbox": [ + 61, + 613, + 531, + 673 + ], + "type": "text", + "content": " is equal to 1. 
Additionally, the forcing term is solely dependent on " + }, + { + "bbox": [ + 61, + 613, + 531, + 673 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 61, + 613, + 531, + 673 + ], + "type": "text", + "content": ". Therefore, the simplified governing equation for this example is given by:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 250, + 681, + 531, + 705 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 250, + 681, + 531, + 705 + ], + "spans": [ + { + "bbox": [ + 250, + 681, + 531, + 705 + ], + "type": "interline_equation", + "content": "\\frac {\\partial u}{\\partial t} - \\frac {\\partial u}{\\partial x} \\cdot k \\frac {\\partial x}{\\partial x} = f (x) \\tag {26}", + "image_path": "f3f7794d4b1f511a476b118ab520af24a3d418751eb4b82caa0df37700b24280.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 61, + 708, + 531, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 708, + 531, + 733 + ], + "spans": [ + { + "bbox": [ + 61, + 708, + 531, + 733 + ], + "type": "text", + "content": "subject to homogeneous boundary conditions and initial conditions. 
This equation has 3 independent variables (" + }, + { + "bbox": [ + 61, + 708, + 531, + 733 + ], + "type": "inline_equation", + "content": "D = 3" + }, + { + "bbox": [ + 61, + 708, + 531, + 733 + ], + "type": "text", + "content": "), i.e., spatial variable " + }, + { + "bbox": [ + 61, + 708, + 531, + 733 + ], + "type": "inline_equation", + "content": "x_{s} = x_{1} = x" + }, + { + "bbox": [ + 61, + 708, + 531, + 733 + ], + "type": "text", + "content": ", parametric variable " + }, + { + "bbox": [ + 61, + 708, + 531, + 733 + ], + "type": "inline_equation", + "content": "x_{p} = x_{2} = k" + }, + { + "bbox": [ + 61, + 708, + 531, + 733 + ], + "type": "text", + "content": " and temporal variable " + }, + { + "bbox": [ + 61, + 708, + 531, + 733 + ], + "type": "inline_equation", + "content": "x_{t} = x_{3} = t" + }, + { + "bbox": [ + 61, + 708, + 531, + 733 + ], + "type": "text", + "content": ". The S-P-T" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 291, + 739, + 303, + 748 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 739, + 303, + 748 + ], + "spans": [ + { + "bbox": [ + 291, + 739, + 303, + 748 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 60, + 111, + 531, + 136 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 111, + 531, + 136 + ], + "spans": [ + { + "bbox": [ + 60, + 111, + 531, + 136 + ], + "type": "text", + "content": "Galerkin weak form of this problem can be written as follows according to Eq. 25 (the superscripts \"TD\" for both trial and test functions are omitted for brevity)." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 192, + 143, + 531, + 169 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 192, + 143, + 531, + 169 + ], + "spans": [ + { + "bbox": [ + 192, + 143, + 531, + 169 + ], + "type": "interline_equation", + "content": "\\int_ {\\Omega} \\delta u \\nabla_ {t} u d \\Omega + \\int_ {\\Omega} \\nabla_ {x} \\delta u \\cdot k \\nabla_ {x} u d \\Omega - \\int_ {\\Omega} \\delta u f d \\Omega = 0 \\tag {27}", + "image_path": "dbfc73f7e6cb30bc3b1ff26752803cc253e565c63170aa9d1a505aec26fde8ef.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 61, + 172, + 370, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 172, + 370, + 185 + ], + "spans": [ + { + "bbox": [ + 61, + 172, + 370, + 185 + ], + "type": "text", + "content": "The corresponding trial and test functions can be obtained using Eqs. 22-23:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 226, + 192, + 531, + 224 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 226, + 192, + 531, + 224 + ], + "spans": [ + { + "bbox": [ + 226, + 192, + 531, + 224 + ], + "type": "interline_equation", + "content": "u (x, k, t) = \\sum_ {m = 1} ^ {M} u _ {x} ^ {(m)} (x) u _ {k} ^ {(m)} (k) u _ {t} ^ {(m)} (t) \\tag {28}", + "image_path": "f98896853f42e61200a7ed8271432ba859d822dd2ac2059074917201aeacecd4.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 114, + 232, + 531, + 281 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 232, + 531, + 281 + ], + "spans": [ + { + "bbox": [ + 114, + 232, + 531, + 281 + ], + "type": "interline_equation", + "content": "\\delta u (x, k, t) = \\underbrace {\\sum_ {m = 1} ^ {M} \\delta u _ {x} ^ {(m)} (x) u _ {k} ^ {(m)} (k) u _ {t} ^ {(m)} (t)} _ {\\text {s p a t i a l v a r i a t i o n}} + \\underbrace {\\sum_ {m = 1} ^ {M} u _ {x} ^ {(m)} (x) \\delta u _ {k} ^ {(m)} (k) u _ {t} ^ {(m)} (t)} _ {\\text {p a r a 
m e t r i c v a r i a t i o n}} + \\underbrace {\\sum_ {m = 1} ^ {M} u _ {x} ^ {(m)} (x) u _ {k} ^ {(m)} (k) \\delta u _ {t} ^ {(m)} (t)} _ {\\text {t e m p o r a l v a r i a t i o n}} \\tag {29}", + "image_path": "a1b8e9c50d38db24fd87e1a643e8bc202074d77880e788370527c8ee1e181d56.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 60, + 285, + 531, + 321 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 285, + 531, + 321 + ], + "spans": [ + { + "bbox": [ + 60, + 285, + 531, + 321 + ], + "type": "text", + "content": "As shown in Eq. 29, the test function is further split into " + }, + { + "bbox": [ + 60, + 285, + 531, + 321 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 60, + 285, + 531, + 321 + ], + "type": "text", + "content": " variational terms for a general " + }, + { + "bbox": [ + 60, + 285, + 531, + 321 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 60, + 285, + 531, + 321 + ], + "type": "text", + "content": " dimensional problem (in the current example, " + }, + { + "bbox": [ + 60, + 285, + 531, + 321 + ], + "type": "inline_equation", + "content": "D = 3" + }, + { + "bbox": [ + 60, + 285, + 531, + 321 + ], + "type": "text", + "content": "). As an example, we first plug Eq. 28 and the spatial variation term of Eq. 29 into the Galerkin weak form in Eq. 
27 to obtain the S-P-T weak form terms corresponding to spatial variation:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 150, + 329, + 442, + 375 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 150, + 329, + 442, + 375 + ], + "spans": [ + { + "bbox": [ + 150, + 329, + 442, + 375 + ], + "type": "interline_equation", + "content": "\\underbrace {\\int_ {\\Omega} \\sum_ {m = 1} ^ {M} \\sum_ {n = 1} ^ {M} \\left[ \\nabla \\delta u _ {x} ^ {(m)} (x) \\nabla u _ {x} ^ {(n)} (x) d x \\right] \\cdot \\left[ u _ {k} ^ {(m)} (k) k u _ {k} ^ {(n)} (k) d k \\right] \\cdot \\left[ u _ {t} ^ {(m)} (t) u _ {t} ^ {(n)} (t) d t \\right]} _ {\\text {d i f f u s i o n t e r m}} +", + "image_path": "1631e568cf76f755a995c303cd8aadd671a1fe046ecd4536bcbb2227090f0a43.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 155, + 377, + 531, + 424 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 155, + 377, + 531, + 424 + ], + "spans": [ + { + "bbox": [ + 155, + 377, + 531, + 424 + ], + "type": "interline_equation", + "content": "\\underbrace {\\int_ {\\Omega} \\sum_ {m = 1} ^ {M} \\sum_ {n = 1} ^ {M} \\left[ \\delta u _ {x} ^ {(m)} (x) u _ {x} ^ {(n)} (x) d x \\right] \\cdot \\left[ u _ {k} ^ {(m)} (k) u _ {k} ^ {(n)} (k) d k \\right] \\cdot \\left[ u _ {t} ^ {(m)} (t) \\nabla_ {t} u _ {t} ^ {(n)} (t) d t \\right]} _ {\\text {t i m e d e r i v a t i v e t e r m}} - \\tag {30}", + "image_path": "a2e65bdbb019ea2bd49aacb1c34071c2a67d11b645226f471a8cc7bf4e0dda71.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 199, + 425, + 393, + 471 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 199, + 425, + 393, + 471 + ], + "spans": [ + { + "bbox": [ + 199, + 425, + 393, + 471 + ], + "type": "interline_equation", + "content": "\\underbrace {\\int_ {\\Omega} \\sum_ {m = 1} ^ {M} \\left[ \\delta u _ {x} ^ {(m)} (x) f (x) d x \\right] \\cdot \\left[ u _ {k} ^ {(m)} (k) d k \\right] \\cdot 
\\left[ u _ {t} ^ {(m)} (t) d t \\right]} _ {\\text {f o r c i n g t e r m}}", + "image_path": "200e944ecb9a12948443abdf482c6009a2431b4b8b208dfd2e997da50f353ad7.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 61, + 476, + 384, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 476, + 384, + 487 + ], + "spans": [ + { + "bbox": [ + 61, + 476, + 384, + 487 + ], + "type": "text", + "content": "We use 1D C-HiDeNN shape functions to approximate each univariate function:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 214, + 507, + 371, + 524 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 214, + 507, + 371, + 524 + ], + "spans": [ + { + "bbox": [ + 214, + 507, + 371, + 524 + ], + "type": "interline_equation", + "content": "u _ {d} ^ {(n)} (x _ {d}) = \\widetilde {N} _ {n _ {d} ^ {\\prime}} ^ {[ d ]} (x _ {d}) u _ {n _ {d} ^ {\\prime} n} ^ {[ d ]} \\quad (\\text {n o s u m o n} d)", + "image_path": "c2e33388cf4eff2cfecbd23aa72b77c80f3c2433581d947423a327c19d842950.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 202, + 526, + 531, + 544 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 202, + 526, + 531, + 544 + ], + "spans": [ + { + "bbox": [ + 202, + 526, + 531, + 544 + ], + "type": "interline_equation", + "content": "\\delta u _ {d} ^ {(m)} \\left(x _ {d}\\right) = \\widetilde {N} _ {n _ {d}} ^ {[ d ]} \\left(x _ {d}\\right) \\delta u _ {n _ {d} m} ^ {[ d ]} \\quad (\\text {n o s u m o n} d) \\tag {31}", + "image_path": "7941905e0df1d4b2af4263592fe2ecf1aa7ed89fb9abe6f6c67bb8b0be136701.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 61, + 550, + 531, + 580 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 550, + 531, + 580 + ], + "spans": [ + { + "bbox": [ + 61, + 550, + 531, + 580 + ], + "type": "text", + "content": "where Einstein summation is used. 
The free index " + }, + { + "bbox": [ + 61, + 550, + 531, + 580 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 61, + 550, + 531, + 580 + ], + "type": "text", + "content": " refers to dimension and " + }, + { + "bbox": [ + 61, + 550, + 531, + 580 + ], + "type": "inline_equation", + "content": "d = x,k" + }, + { + "bbox": [ + 61, + 550, + 531, + 580 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 61, + 550, + 531, + 580 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 61, + 550, + 531, + 580 + ], + "type": "text", + "content": ". The gradient of the interpolated variable can be computed using the shape function derivative " + }, + { + "bbox": [ + 61, + 550, + 531, + 580 + ], + "type": "inline_equation", + "content": "\\widetilde{B}_{n_d}^{[d]}(x_d) = \\frac{d\\widetilde{N}_{n_d}^{[d]}(x_d)}{dx_d}" + }, + { + "bbox": [ + 61, + 550, + 531, + 580 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 207, + 596, + 378, + 614 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 207, + 596, + 378, + 614 + ], + "spans": [ + { + "bbox": [ + 207, + 596, + 378, + 614 + ], + "type": "interline_equation", + "content": "\\nabla_ {x _ {d}} u _ {d} ^ {(n)} (x _ {d}) = \\widetilde {B} _ {n _ {d} ^ {\\prime}} ^ {[ d ]} (x _ {d}) u _ {n _ {d} ^ {\\prime} n} ^ {[ d ]} \\quad (\\text {n o s u m o n} d)", + "image_path": "6228caa736b34f1a766b1c32d5292f854237e8521a4f50f7ab19126515009330.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 195, + 616, + 530, + 633 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 195, + 616, + 530, + 633 + ], + "spans": [ + { + "bbox": [ + 195, + 616, + 530, + 633 + ], + "type": "interline_equation", + "content": "\\nabla_ {x _ {d}} \\delta u _ {d} ^ {(m)} (x _ {d}) = \\widetilde {B} _ {n _ {d}} ^ {[ d ]} (x _ {d}) \\delta u _ {n _ {d} m} ^ {[ d ]} \\quad (\\text {n o s u m o n} d) 
\\tag {32}", + "image_path": "43c0b7c65e4bba26c746c9a922247c0398fa273e4791579a9699590c69d1bac1.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 61, + 640, + 351, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 640, + 351, + 651 + ], + "spans": [ + { + "bbox": [ + 61, + 640, + 351, + 651 + ], + "type": "text", + "content": "Plugging Eq. 31 - 32 into Eq. 30, the diffusion term can be rewritten as:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 110, + 661, + 531, + 705 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 661, + 531, + 705 + ], + "spans": [ + { + "bbox": [ + 110, + 661, + 531, + 705 + ], + "type": "interline_equation", + "content": "\\sum_ {m = 1} ^ {M} \\sum_ {n = 1} ^ {M} \\underbrace {\\int_ {\\Omega_ {x}} \\widetilde {B} _ {n _ {x}} (x) \\delta u _ {n _ {x} m} ^ {[ x ]} \\widetilde {B} _ {n _ {x} ^ {\\prime}} (x) u _ {n _ {x} ^ {\\prime} n} ^ {[ x ]} d x} _ {\\text {s p a t i a l t e r m}} \\cdot \\underbrace {\\int_ {\\Omega_ {k}} \\widetilde {N} _ {n _ {k}} (k) u _ {n _ {k} m} ^ {[ k ]} k \\widetilde {N} _ {n _ {k} ^ {\\prime}} (k) u _ {n _ {k} ^ {\\prime} n} ^ {[ k ]} d k} _ {\\text {p a r a m e t r i c t e r m}} \\cdot \\underbrace {\\int_ {\\Omega_ {t}} \\widetilde {N} _ {n _ {t}} (t) u _ {n _ {t} m} ^ {[ t ]} \\widetilde {N} _ {n _ {t} ^ {\\prime}} (t) u _ {n _ {t} ^ {\\prime} n} ^ {[ t ]} d t} _ {\\text {t e m p o r a l t e r m}} \\tag {33}", + "image_path": "af36e8b78f8671a9d8dbb5b6d0a5894167bc9a3b14b2f2e6073c308d17cc95a0.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 60, + 708, + 531, + 731 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 708, + 531, + 731 + ], + "spans": [ + { + "bbox": [ + 60, + 708, + 531, + 731 + ], + "type": "text", + "content": "As can be readily seen from Eq. 
33, after doing 1D integration of each term, the parametric and temporal terms can be treated as coefficient matrices:" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 291, + 739, + 302, + 748 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 739, + 302, + 748 + ], + "spans": [ + { + "bbox": [ + 291, + 739, + 302, + 748 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 214, + 128, + 358, + 169 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 214, + 128, + 358, + 169 + ], + "spans": [ + { + "bbox": [ + 214, + 128, + 358, + 169 + ], + "type": "interline_equation", + "content": "C _ {m n} ^ {[ k ]} = \\underbrace {\\int_ {\\Omega_ {k}} \\widetilde {N} _ {n _ {k}} (k) u _ {n _ {k} m} ^ {[ k ]} k \\widetilde {N} _ {n _ {k} ^ {\\prime}} (k) u _ {n _ {k} ^ {\\prime} n} ^ {[ k ]} d k} _ {\\text {p a r a m e t r i c t e r m}}", + "image_path": "c48d2e14614aa736de092b464d98e4b7c011a3e23c358ce4e3e04756f9406bdc.jpg" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 228, + 171, + 531, + 211 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 228, + 171, + 531, + 211 + ], + "spans": [ + { + "bbox": [ + 228, + 171, + 531, + 211 + ], + "type": "interline_equation", + "content": "C _ {m n} ^ {[ t ]} = \\underbrace {\\int_ {\\Omega_ {t}} \\widetilde {N} _ {n _ {t}} (t) u _ {n _ {t} m} ^ {[ t ]} \\widetilde {N} _ {n _ {t} ^ {\\prime}} (t) u _ {n _ {t} ^ {\\prime} n} ^ {[ t ]} d t} _ {\\text {t e m p o r a l t e r m}} \\tag {34}", + "image_path": "6ac465c63dfc67960586a79eb1aa82ad47e1316f3b86609bc249f380c60fe7ee.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 61, + 220, + 531, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 220, + 531, + 243 + ], + "spans": [ + { + "bbox": [ + 61, + 220, 
+ 531, + 243 + ], + "type": "text", + "content": "as the only free indices are " + }, + { + "bbox": [ + 61, + 220, + 531, + 243 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 61, + 220, + 531, + 243 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 61, + 220, + 531, + 243 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 61, + 220, + 531, + 243 + ], + "type": "text", + "content": ". Substituting the coefficient matrices and rearranging different terms in Eq. 33, we have:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 195, + 252, + 531, + 285 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 195, + 252, + 531, + 285 + ], + "spans": [ + { + "bbox": [ + 195, + 252, + 531, + 285 + ], + "type": "interline_equation", + "content": "\\sum_ {m = 1} ^ {M} \\delta u _ {n _ {x} m} ^ {[ x ]} \\sum_ {n = 1} ^ {M} \\left[ \\int_ {\\Omega_ {x}} \\widetilde {B} _ {n _ {x}} (x) \\widetilde {B} _ {n _ {x} ^ {\\prime}} (x) d x \\right] \\cdot C _ {m n} ^ {[ k ]} C _ {m n} ^ {[ t ]} \\cdot u _ {n _ {x} ^ {\\prime} n} ^ {[ x ]} \\tag {35}", + "image_path": "12677f4cd4093cc99589a613f6ce0b437602b37dec61882d8dff98d389a1e840.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 61, + 289, + 531, + 318 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 289, + 531, + 318 + ], + "spans": [ + { + "bbox": [ + 61, + 289, + 531, + 318 + ], + "type": "text", + "content": "Like standard FEM, we can define " + }, + { + "bbox": [ + 61, + 289, + 531, + 318 + ], + "type": "inline_equation", + "content": "\\int_{x}\\widetilde{B}_{n_x}(x)\\widetilde{B}_{n_x'}(x)dx" + }, + { + "bbox": [ + 61, + 289, + 531, + 318 + ], + "type": "text", + "content": " as the 1D stiffness matrix " + }, + { + "bbox": [ + 61, + 289, + 531, + 318 + ], + "type": "inline_equation", + "content": "K_{n_x n_x'}^{[x]}" + }, + { + "bbox": [ + 61, + 289, + 531, + 318 + ], + "type": "text", + "content": " 
of " + }, + { + "bbox": [ + 61, + 289, + 531, + 318 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 61, + 289, + 531, + 318 + ], + "type": "text", + "content": " dimension in Eq. 35. We let " + }, + { + "bbox": [ + 61, + 289, + 531, + 318 + ], + "type": "inline_equation", + "content": "C_{mn}^{[x]} = C_{mn}^{[k]}C_{mn}^{[t]}" + }, + { + "bbox": [ + 61, + 289, + 531, + 318 + ], + "type": "text", + "content": " with no summation on " + }, + { + "bbox": [ + 61, + 289, + 531, + 318 + ], + "type": "inline_equation", + "content": "(m,n)" + }, + { + "bbox": [ + 61, + 289, + 531, + 318 + ], + "type": "text", + "content": ". Furthermore, let us define the following 4-th order tensor:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 256, + 327, + 531, + 344 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 256, + 327, + 531, + 344 + ], + "spans": [ + { + "bbox": [ + 256, + 327, + 531, + 344 + ], + "type": "interline_equation", + "content": "A _ {n _ {x} n _ {x} ^ {\\prime} m n} ^ {[ x ]} = K _ {n _ {x} n _ {x} ^ {\\prime}} ^ {[ x ] ^ {\\prime}} C _ {m n} ^ {[ x ]} \\tag {36}", + "image_path": "a018defcc9f90fd14ccd98acbd00e8f58264429bc1da13c7878a25904266e95c.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 61, + 349, + 531, + 398 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 349, + 531, + 398 + ], + "spans": [ + { + "bbox": [ + 61, + 349, + 531, + 398 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 61, + 349, + 531, + 398 + ], + "type": "inline_equation", + "content": "A_{n_s n_s'mn}^{[x]}" + }, + { + "bbox": [ + 61, + 349, + 531, + 398 + ], + "type": "text", + "content": " is a function of solution vectors " + }, + { + "bbox": [ + 61, + 349, + 531, + 398 + ], + "type": "inline_equation", + "content": "u_{n_k m}^{[k]}" + }, + { + "bbox": [ + 61, + 349, + 531, + 398 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 61, + 349, + 531, + 398 + 
], + "type": "inline_equation", + "content": "u_{n_m m}^{[t]}" + }, + { + "bbox": [ + 61, + 349, + 531, + 398 + ], + "type": "text", + "content": " since the coefficient matrix " + }, + { + "bbox": [ + 61, + 349, + 531, + 398 + ], + "type": "inline_equation", + "content": "C_{mn}^{[x]}" + }, + { + "bbox": [ + 61, + 349, + 531, + 398 + ], + "type": "text", + "content": " depends on these solution vectors as shown in Eq. 34. This dependency reflects the interconnected nature of the variables across different dimensions in the S-P-T framework, highlighting how the spatial, parameter, and temporal components influence each other through the coefficients. As a result, Eq. 33 can be further simplified as follows:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 261, + 405, + 531, + 423 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 261, + 405, + 531, + 423 + ], + "spans": [ + { + "bbox": [ + 261, + 405, + 531, + 423 + ], + "type": "interline_equation", + "content": "\\delta u _ {n _ {x} m} ^ {[ x ]} A _ {n _ {x} n _ {x} ^ {\\prime} m n} ^ {[ x ]} u _ {n _ {x} ^ {\\prime} n} ^ {[ x ]} \\tag {37}", + "image_path": "2850ea393a41fa50b8726a0787ed58a81109d8f74bcacd2cf3ae869fd55d92b6.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 61, + 431, + 531, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 431, + 531, + 471 + ], + "spans": [ + { + "bbox": [ + 61, + 431, + 531, + 471 + ], + "type": "text", + "content": "where the summation signs are neglected since " + }, + { + "bbox": [ + 61, + 431, + 531, + 471 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 61, + 431, + 531, + 471 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 61, + 431, + 531, + 471 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 61, + 431, + 531, + 471 + ], + "type": "text", + "content": " become dummy variables. 
The 4-th order tensor " + }, + { + "bbox": [ + 61, + 431, + 531, + 471 + ], + "type": "inline_equation", + "content": "A_{n_x n_x' mn}^{[x]}" + }, + { + "bbox": [ + 61, + 431, + 531, + 471 + ], + "type": "text", + "content": " can be reshaped as a 2nd order tensor " + }, + { + "bbox": [ + 61, + 431, + 531, + 471 + ], + "type": "inline_equation", + "content": "\\mathbb{A}_{IJ}^{[x]}" + }, + { + "bbox": [ + 61, + 431, + 531, + 471 + ], + "type": "text", + "content": ": the indices " + }, + { + "bbox": [ + 61, + 431, + 531, + 471 + ], + "type": "inline_equation", + "content": "n_x" + }, + { + "bbox": [ + 61, + 431, + 531, + 471 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 61, + 431, + 531, + 471 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 61, + 431, + 531, + 471 + ], + "type": "text", + "content": " are combined into a single composite index " + }, + { + "bbox": [ + 61, + 431, + 531, + 471 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 61, + 431, + 531, + 471 + ], + "type": "text", + "content": ", and the indices " + }, + { + "bbox": [ + 61, + 431, + 531, + 471 + ], + "type": "inline_equation", + "content": "n_x'" + }, + { + "bbox": [ + 61, + 431, + 531, + 471 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 61, + 431, + 531, + 471 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 61, + 431, + 531, + 471 + ], + "type": "text", + "content": " are combined into a single composite index " + }, + { + "bbox": [ + 61, + 431, + 531, + 471 + ], + "type": "inline_equation", + "content": "J" + }, + { + "bbox": [ + 61, + 431, + 531, + 471 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 266, + 481, + 530, + 497 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 266, + 481, + 530, + 497 + ], + "spans": [ + { + "bbox": [ + 266, + 481, + 530, + 497 + ], + "type": "interline_equation", + "content": "A _ {n _ {x} n _ {x} ^ {\\prime} m n} ^ {[ x ]} = \\mathbb {A} _ {I J} ^ {[ x ]} \\tag {38}", + "image_path": "88a6dfc3df4ae1b1e46ae11bb2c9bb454322138eaa5dae837daeb486544ab148.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 61, + 501, + 204, + 512 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 501, + 204, + 512 + ], + "spans": [ + { + "bbox": [ + 61, + 501, + 204, + 512 + ], + "type": "text", + "content": "Define the following vectorization:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 240, + 531, + 330, + 549 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 240, + 531, + 330, + 549 + ], + "spans": [ + { + "bbox": [ + 240, + 531, + 330, + 549 + ], + "type": "interline_equation", + "content": "\\delta \\mathbb {U} _ {I} ^ {[ x ]} = \\left[ \\operatorname {v e c} \\left(\\delta u _ {n _ {x} m} ^ {[ x ]}\\right) \\right] _ {I}", + "image_path": "1ff792e01f6a4782e6a7a960c755bc1261ea125e23cd82a65e43e7b723d1f98f.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 253, + 550, + 530, + 567 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 253, + 550, + 530, + 567 + ], + "spans": [ + { + "bbox": [ + 253, + 550, + 530, + 567 + ], + "type": "interline_equation", + "content": "\\mathbb {U} _ {J} ^ {[ x ]} = \\left[ \\operatorname {v e c} \\left(u _ {n _ {x} ^ {\\prime} n} ^ {[ x ]}\\right) \\right] _ {J} \\tag {39}", + "image_path": "2070e61fd49ddc5fc7f11d25203116c3b06771ac17d03dde98df5bdaae2bcd76.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 61, + 575, + 204, + 587 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 575, + 204, + 587 
+ ], + "spans": [ + { + "bbox": [ + 61, + 575, + 204, + 587 + ], + "type": "text", + "content": "As a result, Eq. 37 is equivalent to:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 267, + 597, + 530, + 611 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 267, + 597, + 530, + 611 + ], + "spans": [ + { + "bbox": [ + 267, + 597, + 530, + 611 + ], + "type": "interline_equation", + "content": "\\delta \\mathbb {U} ^ {[ x ] T} \\mathbb {A} ^ {[ x ]} \\mathbb {U} ^ {[ x ]} \\tag {40}", + "image_path": "455e449627117c21f40ecb4a6ab5bd0b4fee5a46f3b2c0e54b88fc6f4f3fa290.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 60, + 615, + 531, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 615, + 531, + 665 + ], + "spans": [ + { + "bbox": [ + 60, + 615, + 531, + 665 + ], + "type": "text", + "content": "Following the same procedure, we can obtain matrix forms corresponding to the time derivative term " + }, + { + "bbox": [ + 60, + 615, + 531, + 665 + ], + "type": "inline_equation", + "content": "\\delta \\mathbb{U}^{[x]^T}\\mathbb{B}^{[x]}\\mathbb{U}^{[x]}" + }, + { + "bbox": [ + 60, + 615, + 531, + 665 + ], + "type": "text", + "content": ", and the forcing term " + }, + { + "bbox": [ + 60, + 615, + 531, + 665 + ], + "type": "inline_equation", + "content": "\\delta \\mathbb{U}^{[x]^T}\\mathbb{Q}^{[x]}" + }, + { + "bbox": [ + 60, + 615, + 531, + 665 + ], + "type": "text", + "content": " for the spatial variational part of Eq. 30. Similar structures can also be obtained for the parametric and temporal variational parts of the test function in Eq. 29. Denoting " + }, + { + "bbox": [ + 60, + 615, + 531, + 665 + ], + "type": "inline_equation", + "content": "\\mathbb{K}^{[d]} = \\mathbb{A}^{[d]} + \\mathbb{B}^{[d]}" + }, + { + "bbox": [ + 60, + 615, + 531, + 665 + ], + "type": "text", + "content": ", the matrix form of the generalized S-P-T Galerkin form in Eq. 
27 can be written as:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 118, + 671, + 530, + 702 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 671, + 530, + 702 + ], + "spans": [ + { + "bbox": [ + 118, + 671, + 530, + 702 + ], + "type": "interline_equation", + "content": "\\underbrace {\\delta \\mathbb {U} ^ {[ x ] ^ {T}} \\mathbb {K} ^ {[ x ]} \\mathbb {U} ^ {[ x ]} - \\delta \\mathbb {U} ^ {[ x ] ^ {T}} \\mathbb {Q} ^ {[ x ]}} _ {\\text {s p a t i a l v a r i a t i o n a l p a r t}} + \\underbrace {\\delta \\mathbb {U} ^ {[ k ] ^ {T}} \\mathbb {K} ^ {[ k ]} \\mathbb {U} ^ {[ k ]} - \\delta \\mathbb {U} ^ {[ k ] ^ {T}} \\mathbb {Q} ^ {[ k ]}} _ {\\text {p a r a m e t r i c v a r i a t i o n a l p a r t}} + \\underbrace {\\delta \\mathbb {U} ^ {[ t ] ^ {T}} \\mathbb {K} ^ {[ t ]} \\mathbb {U} ^ {[ t ]} - \\delta \\mathbb {U} ^ {[ t ] ^ {T}} \\mathbb {Q} ^ {[ t ]}} _ {\\text {t e m p o r a l v a r i a t i o n a l p a r t}} = 0 \\tag {41}", + "image_path": "1ffca6770318f49f4d362fee97c27f8a0a54ef98fbc6d78dd5736e0864301f88.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 60, + 708, + 531, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 708, + 531, + 732 + ], + "spans": [ + { + "bbox": [ + 60, + 708, + 531, + 732 + ], + "type": "text", + "content": "Eq. 41 is equivalent to the following nonlinear system of equations. 
Note that the nonlinearity comes from the fact that " + }, + { + "bbox": [ + 60, + 708, + 531, + 732 + ], + "type": "inline_equation", + "content": "\\mathbb{K}^{[d]}" + }, + { + "bbox": [ + 60, + 708, + 531, + 732 + ], + "type": "text", + "content": " is solution dependent:" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 291, + 739, + 302, + 748 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 739, + 302, + 748 + ], + "spans": [ + { + "bbox": [ + 291, + 739, + 302, + 748 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 78, + 129, + 531, + 169 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 129, + 531, + 169 + ], + "spans": [ + { + "bbox": [ + 78, + 129, + 531, + 169 + ], + "type": "interline_equation", + "content": "\\left[ \\delta \\mathbb {U} ^ {[ x ] ^ {T}}, \\delta \\mathbb {U} ^ {[ k ] ^ {T}}, \\delta \\mathbb {U} ^ {[ t ] ^ {T}} \\right] \\left\\{\\left[ \\begin{array}{c c c} \\mathbb {K} ^ {[ x ]} (\\mathbb {U} ^ {[ k ]}, \\mathbb {U} ^ {[ t ]}) & 0 & 0 \\\\ 0 & \\mathbb {K} ^ {[ k ]} (\\mathbb {U} ^ {[ x ]}, \\mathbb {U} ^ {[ t ]}) & 0 \\\\ 0 & 0 & \\mathbb {K} ^ {[ t ]} (\\mathbb {U} ^ {[ x ]}, \\mathbb {U} ^ {[ k ]}) \\end{array} \\right] \\left[ \\begin{array}{l} \\mathbb {U} ^ {[ x ]} \\\\ \\mathbb {U} ^ {[ k ]} \\\\ \\mathbb {U} ^ {[ t ]} \\end{array} \\right] - \\left[ \\begin{array}{l} \\mathbb {Q} ^ {[ x ]} (\\mathbb {U} ^ {[ k ]}, \\mathbb {U} ^ {[ t ]}) \\\\ \\mathbb {Q} ^ {[ k ]} (\\mathbb {U} ^ {[ x ]}, \\mathbb {U} ^ {[ t ]}) \\\\ \\mathbb {Q} ^ {[ t ]} (\\mathbb {U} ^ {[ x ]}, \\mathbb {U} ^ {[ k ]}) \\end{array} \\right] \\right\\} = 0 \\tag {42}", + "image_path": "b3c36a8388f62333cee2a3ef19cf1719f149c01e357cf8dd2259290f51072a49.jpg" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 60, + 177, + 531, + 
251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 177, + 531, + 251 + ], + "spans": [ + { + "bbox": [ + 60, + 177, + 531, + 251 + ], + "type": "text", + "content": "where we can treat the solution vector " + }, + { + "bbox": [ + 60, + 177, + 531, + 251 + ], + "type": "inline_equation", + "content": "\\left[\\mathbb{U}^{[x]^T},\\mathbb{U}^{[k]^T},\\mathbb{U}^{[t]^T}\\right]" + }, + { + "bbox": [ + 60, + 177, + 531, + 251 + ], + "type": "text", + "content": " as generalized DoFs like standard FEM. There are many ways to solve Eq. 42. For example, standard linearization schemes such as Newton's method have been used [29]. However, this method may suffer from ill-conditioning since the mismatch of scales for different dimensions can be significant. In this paper, we use the concept of subspace iteration to efficiently approximate the solution by iterating in the subspace of the test function space until a convergence criteria is met [19]. A similar counterpart has been widely adopted as the gold standard in discrete tensor decomposition [23]." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 61, + 263, + 263, + 275 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 263, + 263, + 275 + ], + "spans": [ + { + "bbox": [ + 61, + 263, + 263, + 275 + ], + "type": "text", + "content": "2.6. 
Solution scheme of TAPS: subspace iteration" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 60, + 277, + 531, + 349 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 277, + 531, + 349 + ], + "spans": [ + { + "bbox": [ + 60, + 277, + 531, + 349 + ], + "type": "text", + "content": "For subspace iteration in " + }, + { + "bbox": [ + 60, + 277, + 531, + 349 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 60, + 277, + 531, + 349 + ], + "type": "text", + "content": "-th dimension, only the solution matrix " + }, + { + "bbox": [ + 60, + 277, + 531, + 349 + ], + "type": "inline_equation", + "content": "\\mathbb{U}^{[d]}" + }, + { + "bbox": [ + 60, + 277, + 531, + 349 + ], + "type": "text", + "content": " is treated as unknown while all other functions are considered as known constants. Consequently, the variations of the univariate functions other than " + }, + { + "bbox": [ + 60, + 277, + 531, + 349 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 60, + 277, + 531, + 349 + ], + "type": "text", + "content": "-th dimension will vanish. From Eq. 42, it can be seen that this will lead to a linear system of equations for the unknowns in the " + }, + { + "bbox": [ + 60, + 277, + 531, + 349 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 60, + 277, + 531, + 349 + ], + "type": "text", + "content": "-th dimension. 
The updated solution matrix " + }, + { + "bbox": [ + 60, + 277, + 531, + 349 + ], + "type": "inline_equation", + "content": "\\mathbb{U}^{[d]}" + }, + { + "bbox": [ + 60, + 277, + 531, + 349 + ], + "type": "text", + "content": " from this process is then used in the next subspace iteration for dimension " + }, + { + "bbox": [ + 60, + 277, + 531, + 349 + ], + "type": "inline_equation", + "content": "d + 1" + }, + { + "bbox": [ + 60, + 277, + 531, + 349 + ], + "type": "text", + "content": " (when " + }, + { + "bbox": [ + 60, + 277, + 531, + 349 + ], + "type": "inline_equation", + "content": "d = D" + }, + { + "bbox": [ + 60, + 277, + 531, + 349 + ], + "type": "text", + "content": ", we come back to the first dimension " + }, + { + "bbox": [ + 60, + 277, + 531, + 349 + ], + "type": "inline_equation", + "content": "d = 1" + }, + { + "bbox": [ + 60, + 277, + 531, + 349 + ], + "type": "text", + "content": "). The complete solution scheme for subspace iteration is shown in Algorithm 1." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 63, + 360, + 290, + 372 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 360, + 290, + 372 + ], + "spans": [ + { + "bbox": [ + 63, + 360, + 290, + 372 + ], + "type": "text", + "content": "Algorithm 1 TAPS solution scheme (subspace iteration)" + } + ] + } + ], + "index": 4, + "type": "text" + }, + { + "bbox": [ + 67, + 374, + 465, + 387 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 374, + 465, + 387 + ], + "spans": [ + { + "bbox": [ + 67, + 374, + 465, + 387 + ], + "type": "text", + "content": "1: Initialize solution vector " + }, + { + "bbox": [ + 67, + 374, + 465, + 387 + ], + "type": "inline_equation", + "content": "\\mathbb{U}^{[x_1][0]}" + }, + { + "bbox": [ + 67, + 374, + 465, + 387 + ], + "type": "text", + "content": " ..., " + }, + { + "bbox": [ + 67, + 374, + 465, + 387 + ], + "type": "inline_equation", + "content": "\\mathbb{U}^{[x_D][0]}" + }, + { + "bbox": [ + 67, + 374, + 465, + 387 + ], + 
"type": "text", + "content": " with random values and compute " + }, + { + "bbox": [ + 67, + 374, + 465, + 387 + ], + "type": "inline_equation", + "content": "\\mathbb{K}^{[x_1][0]}" + }, + { + "bbox": [ + 67, + 374, + 465, + 387 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 67, + 374, + 465, + 387 + ], + "type": "inline_equation", + "content": "\\mathbb{Q}^{[x_1][0]}" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 374, + 325, + 481 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 67, + 388, + 180, + 400 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 388, + 180, + 400 + ], + "spans": [ + { + "bbox": [ + 67, + 388, + 180, + 400 + ], + "type": "text", + "content": "2: for iter = 0 to iter_max do" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 400, + 167, + 410 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 400, + 167, + 410 + ], + "spans": [ + { + "bbox": [ + 67, + 400, + 167, + 410 + ], + "type": "text", + "content": "3: for " + }, + { + "bbox": [ + 67, + 400, + 167, + 410 + ], + "type": "inline_equation", + "content": "d = 1" + }, + { + "bbox": [ + 67, + 400, + 167, + 410 + ], + "type": "text", + "content": " to D do" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 411, + 283, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 411, + 283, + 422 + ], + "spans": [ + { + "bbox": [ + 67, + 411, + 283, + 422 + ], + "type": "text", + "content": "4: Update iteration number " + }, + { + "bbox": [ + 67, + 411, + 283, + 422 + ], + "type": "inline_equation", + "content": "\\mathcal{K} = iter\\times D + d" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 422, + 325, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 422, + 325, + 434 + ], + "spans": [ + { + "bbox": [ + 67, + 422, + 325, + 434 + ], + "type": "text", + "content": "5: Solve TD linear system " + }, + { + 
"bbox": [ + 67, + 422, + 325, + 434 + ], + "type": "inline_equation", + "content": "\\mathbb{K}^{[x_d][\\mathcal{K} - 1]}\\mathbb{U}^{[x_d][\\mathcal{K}]} = \\mathbb{Q}^{[x_d][\\mathcal{K} - 1]}" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 434, + 320, + 447 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 434, + 320, + 447 + ], + "spans": [ + { + "bbox": [ + 67, + 434, + 320, + 447 + ], + "type": "text", + "content": "6: Update matrices " + }, + { + "bbox": [ + 67, + 434, + 320, + 447 + ], + "type": "inline_equation", + "content": "\\mathbb{K}^{[x_{d + 1}][\\mathcal{K}]}" + }, + { + "bbox": [ + 67, + 434, + 320, + 447 + ], + "type": "text", + "content": " and force vector " + }, + { + "bbox": [ + 67, + 434, + 320, + 447 + ], + "type": "inline_equation", + "content": "\\mathbb{Q}^{[x_{d + 1}][\\mathcal{K}]}" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 448, + 127, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 448, + 127, + 458 + ], + "spans": [ + { + "bbox": [ + 67, + 448, + 127, + 458 + ], + "type": "text", + "content": "7: end for" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 67, + 459, + 174, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 459, + 174, + 471 + ], + "spans": [ + { + "bbox": [ + 67, + 459, + 174, + 471 + ], + "type": "text", + "content": "8: Check convergence" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 67, + 471, + 112, + 481 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 471, + 112, + 481 + ], + "spans": [ + { + "bbox": [ + 67, + 471, + 112, + 481 + ], + "type": "text", + "content": "9: end for" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 60, + 497, + 531, + 557 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 497, + 531, + 557 + ], + "spans": [ + { + "bbox": [ + 60, + 497, + 531, + 557 + ], + "type": "text", + 
"content": "To illustrate the details of the subspace iteration algorithm, we consider the " + }, + { + "bbox": [ + 60, + 497, + 531, + 557 + ], + "type": "inline_equation", + "content": "\\mathcal{K}" + }, + { + "bbox": [ + 60, + 497, + 531, + 557 + ], + "type": "text", + "content": "-th subspace iteration (which is on spatial variable " + }, + { + "bbox": [ + 60, + 497, + 531, + 557 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 60, + 497, + 531, + 557 + ], + "type": "text", + "content": "). Here, we assume that the parametric and temporal solutions have been updated from the previous " + }, + { + "bbox": [ + 60, + 497, + 531, + 557 + ], + "type": "inline_equation", + "content": "(\\mathcal{K} - 1)" + }, + { + "bbox": [ + 60, + 497, + 531, + 557 + ], + "type": "text", + "content": "-th iteration, leaving the spatial solution as unknown to be solved in " + }, + { + "bbox": [ + 60, + 497, + 531, + 557 + ], + "type": "inline_equation", + "content": "\\mathcal{K}" + }, + { + "bbox": [ + 60, + 497, + 531, + 557 + ], + "type": "text", + "content": "-th iteration. Moreover, instead of the full variation form of the test function as in Eq. 42, we only consider the subspace " + }, + { + "bbox": [ + 60, + 497, + 531, + 557 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 60, + 497, + 531, + 557 + ], + "type": "text", + "content": " of the test function by setting the parametric and temporal variational parts as 0. 
As a result, we have:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 158, + 566, + 530, + 584 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 158, + 566, + 530, + 584 + ], + "spans": [ + { + "bbox": [ + 158, + 566, + 530, + 584 + ], + "type": "interline_equation", + "content": "\\mathbb {K} ^ {[ x ] [ \\mathcal {K} - 1 ]} \\left(\\mathbb {U} ^ {[ k ] [ \\mathcal {K} - 1 ]}, \\mathbb {U} ^ {[ t ] [ \\mathcal {K} - 1 ]}\\right) \\mathbb {U} ^ {[ x ] [ \\mathcal {K} ]} = \\mathbb {Q} ^ {[ x ] [ \\mathcal {K} - 1 ]} \\left(\\mathbb {U} ^ {[ k ] [ \\mathcal {K} - 1 ]}, \\mathbb {U} ^ {[ t ] [ \\mathcal {K} - 1 ]}\\right) \\tag {43}", + "image_path": "64d1ac02f489cfb26fbbdb049f3cb75bdc193bcd709aec42ebcd8aff2a91a9b3.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 60, + 589, + 531, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 589, + 531, + 661 + ], + "spans": [ + { + "bbox": [ + 60, + 589, + 531, + 661 + ], + "type": "text", + "content": "which is a linear system of equations with unknown " + }, + { + "bbox": [ + 60, + 589, + 531, + 661 + ], + "type": "inline_equation", + "content": "\\mathbb{U}^{[x][\\mathcal{K}]}" + }, + { + "bbox": [ + 60, + 589, + 531, + 661 + ], + "type": "text", + "content": ". This is a general Sylvester equation which can be solved using many efficient solution schemes [30, 31]. In this paper, sparse direct solvers based on fast diagonalization/complex Schur decomposition methods are adopted [32]. 
The computational complexity of the sparse direct solver is " + }, + { + "bbox": [ + 60, + 589, + 531, + 661 + ], + "type": "inline_equation", + "content": "O(M^3 + M^2 n_d + C_c(n_d))" + }, + { + "bbox": [ + 60, + 589, + 531, + 661 + ], + "type": "text", + "content": " for the " + }, + { + "bbox": [ + 60, + 589, + 531, + 661 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 60, + 589, + 531, + 661 + ], + "type": "text", + "content": "-th dimension subspace iteration, where " + }, + { + "bbox": [ + 60, + 589, + 531, + 661 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 60, + 589, + 531, + 661 + ], + "type": "text", + "content": " is the total number of modes; " + }, + { + "bbox": [ + 60, + 589, + 531, + 661 + ], + "type": "inline_equation", + "content": "n_d" + }, + { + "bbox": [ + 60, + 589, + 531, + 661 + ], + "type": "text", + "content": " is the number of grid points for " + }, + { + "bbox": [ + 60, + 589, + 531, + 661 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 60, + 589, + 531, + 661 + ], + "type": "text", + "content": "-th dimension; " + }, + { + "bbox": [ + 60, + 589, + 531, + 661 + ], + "type": "inline_equation", + "content": "C_c(n_d)" + }, + { + "bbox": [ + 60, + 589, + 531, + 661 + ], + "type": "text", + "content": " refers to the computational cost of the banded sparse mass/stiffness matrix for " + }, + { + "bbox": [ + 60, + 589, + 531, + 661 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 60, + 589, + 531, + 661 + ], + "type": "text", + "content": "-th dimension with a shape of " + }, + { + "bbox": [ + 60, + 589, + 531, + 661 + ], + "type": "inline_equation", + "content": "(n_d \\times n_d)" + }, + { + "bbox": [ + 60, + 589, + 531, + 661 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 60, + 660, + 531, + 697 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 660, + 531, + 697 + ], + "spans": [ + { + "bbox": [ + 60, + 660, + 531, + 697 + ], + "type": "text", + "content": "Once " + }, + { + "bbox": [ + 60, + 660, + 531, + 697 + ], + "type": "inline_equation", + "content": "\\mathbb{U}^{[x][\\mathcal{K}]}" + }, + { + "bbox": [ + 60, + 660, + 531, + 697 + ], + "type": "text", + "content": " is obtained, we then update matrix " + }, + { + "bbox": [ + 60, + 660, + 531, + 697 + ], + "type": "inline_equation", + "content": "\\mathbb{K}^{[k][\\mathcal{K}]}(\\mathbb{U}^{[x][\\mathcal{K}]},\\mathbb{U}^{[t][\\mathcal{K}]})" + }, + { + "bbox": [ + 60, + 660, + 531, + 697 + ], + "type": "text", + "content": " and forcing vector " + }, + { + "bbox": [ + 60, + 660, + 531, + 697 + ], + "type": "inline_equation", + "content": "\\mathbb{Q}^{[k][\\mathcal{K}]}(\\mathbb{U}^{[x][\\mathcal{K}]},\\mathbb{U}^{[t][\\mathcal{K}]})" + }, + { + "bbox": [ + 60, + 660, + 531, + 697 + ], + "type": "text", + "content": ". In the next iteration (for dimension " + }, + { + "bbox": [ + 60, + 660, + 531, + 697 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 60, + 660, + 531, + 697 + ], + "type": "text", + "content": "), we treat " + }, + { + "bbox": [ + 60, + 660, + 531, + 697 + ], + "type": "inline_equation", + "content": "\\mathbb{U}^{[k][\\mathcal{K} + 1]}" + }, + { + "bbox": [ + 60, + 660, + 531, + 697 + ], + "type": "text", + "content": " as the only unknown. Subspace iteration will continue unless the relative change of all solution matrices (for example, " + }, + { + "bbox": [ + 60, + 660, + 531, + 697 + ], + "type": "inline_equation", + "content": "L_{2}" + }, + { + "bbox": [ + 60, + 660, + 531, + 697 + ], + "type": "text", + "content": " norm) is within the tolerance." 
+ } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 291, + 739, + 302, + 748 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 739, + 302, + 748 + ], + "spans": [ + { + "bbox": [ + 291, + 739, + 302, + 748 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 61, + 111, + 183, + 122 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 111, + 183, + 122 + ], + "spans": [ + { + "bbox": [ + 61, + 111, + 183, + 122 + ], + "type": "text", + "content": "2.7. Error estimates of TAPS" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 61, + 126, + 531, + 161 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 126, + 531, + 161 + ], + "spans": [ + { + "bbox": [ + 61, + 126, + 531, + 161 + ], + "type": "text", + "content": "Since the TAPS solution is based on the C-HiDeNN-TD approximation and the generalized Galerkin formulation, we can have the following theoretical results on the error bounds, as demonstrated in our previous work on C-HiDeNN [6]:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 191, + 161, + 530, + 174 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 191, + 161, + 530, + 174 + ], + "spans": [ + { + "bbox": [ + 191, + 161, + 530, + 174 + ], + "type": "interline_equation", + "content": "\\left\\| u ^ {\\mathrm {C} - \\mathrm {H i D e N N}} - u ^ {\\mathrm {e x}} \\right\\| _ {E} \\leq \\left\\| u ^ {\\mathrm {T A P S}} - u ^ {\\mathrm {e x}} \\right\\| _ {E} \\leq \\left\\| u ^ {\\mathrm {F E M}} - u ^ {\\mathrm {e x}} \\right\\| _ {E} \\tag {44}", + "image_path": "ed6e0b362bd77ee3e09eb64dc8b2577b95d3d53dc5f5668730c8cb50d347d5fe.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 61, + 180, + 531, + 264 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 180, + 
531, + 264 + ], + "spans": [ + { + "bbox": [ + 61, + 180, + 531, + 264 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 61, + 180, + 531, + 264 + ], + "type": "inline_equation", + "content": "\\| \\cdot \\|_E" + }, + { + "bbox": [ + 61, + 180, + 531, + 264 + ], + "type": "text", + "content": " denotes the energy norm, " + }, + { + "bbox": [ + 61, + 180, + 531, + 264 + ], + "type": "inline_equation", + "content": "u^{\\mathrm{ex}}" + }, + { + "bbox": [ + 61, + 180, + 531, + 264 + ], + "type": "text", + "content": " denotes the exact solution, " + }, + { + "bbox": [ + 61, + 180, + 531, + 264 + ], + "type": "inline_equation", + "content": "u^{\\mathrm{C - HiDeNN}}" + }, + { + "bbox": [ + 61, + 180, + 531, + 264 + ], + "type": "text", + "content": " denotes the solution obtained by the full C-HiDeNN method without tensor decomposition, " + }, + { + "bbox": [ + 61, + 180, + 531, + 264 + ], + "type": "inline_equation", + "content": "u^{\\mathrm{TAPS}}" + }, + { + "bbox": [ + 61, + 180, + 531, + 264 + ], + "type": "text", + "content": " denotes the TAPS solution with a sufficient number of modes, " + }, + { + "bbox": [ + 61, + 180, + 531, + 264 + ], + "type": "inline_equation", + "content": "u^{\\mathrm{FEM}}" + }, + { + "bbox": [ + 61, + 180, + 531, + 264 + ], + "type": "text", + "content": " denotes the FEM solution. The proof of the above results is based on the fact that the full C-HiDeNN approximation can provide a larger function space and therefore more accurate solutions than conventional FEM [6]. The subspace iteration can be considered as a local (directional) version of the Galerkin formulation and is expected to enable an optimized solution for the tensor decomposition that will converge to the Galerkin-based full C-HiDeNN method." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 61, + 282, + 108, + 293 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 282, + 108, + 293 + ], + "spans": [ + { + "bbox": [ + 61, + 282, + 108, + 293 + ], + "type": "text", + "content": "3. Results" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 61, + 303, + 254, + 316 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 303, + 254, + 316 + ], + "spans": [ + { + "bbox": [ + 61, + 303, + 254, + 316 + ], + "type": "text", + "content": "3.1. Convergence study for moving heat source" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 61, + 318, + 531, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 318, + 531, + 365 + ], + "spans": [ + { + "bbox": [ + 61, + 318, + 531, + 365 + ], + "type": "text", + "content": "In this section, we first analyze the convergence of the TAPS solver for a space-time (S-T) transient heat transfer problem. A single NVIDIA RTX A6000 GPU is used for all the following analyses. In Eq. 18, we let " + }, + { + "bbox": [ + 61, + 318, + 531, + 365 + ], + "type": "inline_equation", + "content": "\\rho c_{p} = 1" + }, + { + "bbox": [ + 61, + 318, + 531, + 365 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 61, + 318, + 531, + 365 + ], + "type": "inline_equation", + "content": "k = 1" + }, + { + "bbox": [ + 61, + 318, + 531, + 365 + ], + "type": "text", + "content": ", and replace the heat source as shown in Eq. 45. In this example, we have the spatial variable " + }, + { + "bbox": [ + 61, + 318, + 531, + 365 + ], + "type": "inline_equation", + "content": "x_{s} = (x,y,z)" + }, + { + "bbox": [ + 61, + 318, + 531, + 365 + ], + "type": "text", + "content": " and the temporal variable " + }, + { + "bbox": [ + 61, + 318, + 531, + 365 + ], + "type": "inline_equation", + "content": "x_{t} = t" + }, + { + "bbox": [ + 61, + 318, + 531, + 365 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 158, + 375, + 530, + 439 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 158, + 375, + 530, + 439 + ], + "spans": [ + { + "bbox": [ + 158, + 375, + 530, + 439 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} f \\left(\\boldsymbol {x} _ {s}, x _ {t}\\right) = 2 (1 - 2 y ^ {2}) \\left(1 - e ^ {- 1 5 t}\\right) e ^ {- y ^ {2} - (1 0 0 t - x - 5) ^ {2}} \\\\ + 2 (1 - 2 (1 0 0 t - x - 5) ^ {2}) \\left(1 - e ^ {- 1 5 t}\\right) e ^ {- y ^ {2} - (1 0 0 t - x - 5) ^ {2}} + (1 \\tag {45} \\\\ - e ^ {- 1 5 t}) (2 0 0 x + 1 0 0 0 - 2 0 0 0 0 t) e ^ {- y ^ {2} - (1 0 0 t - x - 5) ^ {2}} \\\\ - 1 5 e ^ {- 1 5 t} e ^ {- y ^ {2} - (1 0 0 t - x - 5) ^ {2}} \\\\ \\end{array}", + "image_path": "57bcb0ff36070307aebbf1ca331a3212b7f214e6efa75ee050ee9d20916b0329.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 61, + 447, + 315, + 459 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 447, + 315, + 459 + ], + "spans": [ + { + "bbox": [ + 61, + 447, + 315, + 459 + ], + "type": "text", + "content": "The analytical solution to the PDE is inherently non-separable." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 222, + 468, + 530, + 482 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 222, + 468, + 530, + 482 + ], + "spans": [ + { + "bbox": [ + 222, + 468, + 530, + 482 + ], + "type": "interline_equation", + "content": "u ^ {\\mathrm {e x}} \\left(\\boldsymbol {x} _ {s}, x _ {t}\\right) = (1 - e ^ {- 1 5 t}) e ^ {- y ^ {2} - (x - 1 0 0 t - 5) ^ {2}} \\tag {46}", + "image_path": "1d955bb1a1f7889ad13801e8d5887d83d6b136b048d6cff4c4dd8a26eb41a37a.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 61, + 492, + 224, + 503 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 492, + 224, + 503 + ], + "spans": [ + { + "bbox": [ + 61, + 492, + 224, + 503 + ], + "type": "text", + "content": "The initial and boundary conditions are:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 242, + 515, + 292, + 525 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 242, + 515, + 292, + 525 + ], + "spans": [ + { + "bbox": [ + 242, + 515, + 292, + 525 + ], + "type": "interline_equation", + "content": "u \\left(x _ {s}, 0\\right) = 0,", + "image_path": "51c1fa786235050f5172fb12b02d8471bc9e59b9720701806c1efb90ea6ce7f8.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 242, + 524, + 530, + 544 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 242, + 524, + 530, + 544 + ], + "spans": [ + { + "bbox": [ + 242, + 524, + 530, + 544 + ], + "type": "interline_equation", + "content": "\\left. u \\left(\\boldsymbol {x} _ {s}, x _ {t}\\right) \\right| _ {\\partial \\Omega} = \\left. u ^ {\\mathrm {e x}} \\left(\\boldsymbol {x} _ {s}, x _ {t}\\right) \\right| _ {\\partial \\Omega}. 
\\tag {47}", + "image_path": "76a66a1580835227f9b852894f9bfae35d9172a52506596461a341847024aa44.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 61, + 548, + 224, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 548, + 224, + 559 + ], + "spans": [ + { + "bbox": [ + 61, + 548, + 224, + 559 + ], + "type": "text", + "content": "The relative " + }, + { + "bbox": [ + 61, + 548, + 224, + 559 + ], + "type": "inline_equation", + "content": "L_{2}" + }, + { + "bbox": [ + 61, + 548, + 224, + 559 + ], + "type": "text", + "content": " norm error is defined as:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 214, + 568, + 530, + 597 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 214, + 568, + 530, + 597 + ], + "spans": [ + { + "bbox": [ + 214, + 568, + 530, + 597 + ], + "type": "interline_equation", + "content": "\\epsilon_ {L _ {2}} = \\frac {\\| u ^ {T D} \\left(\\boldsymbol {x} _ {s} , x _ {t}\\right) - u ^ {\\mathrm {e x}} \\left(\\boldsymbol {x} _ {s} , x _ {t}\\right) \\| _ {L _ {2} \\left(\\Omega_ {\\boldsymbol {x} _ {s}} \\otimes \\Omega_ {x _ {t}}\\right)}}{\\| u ^ {\\mathrm {e x}} \\left(\\boldsymbol {x} _ {s} , x _ {t}\\right) \\| _ {L _ {2} \\left(\\Omega_ {\\boldsymbol {x} _ {s}} \\otimes \\Omega_ {x _ {t}}\\right)}} \\tag {48}", + "image_path": "80785b87073ee4f01723a09c3d4df1c29bb044ecc6d5354a30d900b86e55bf5f.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 61, + 604, + 531, + 724 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 604, + 531, + 724 + ], + "spans": [ + { + "bbox": [ + 61, + 604, + 531, + 724 + ], + "type": "text", + "content": "First, we investigate the influence of the number of subspace iterations. As shown in Fig. 8(a), 3 iterations are enough to obtain an accurate result. Next, we investigate the convergence in terms of the number of modes. 
Here we compare the relative " + }, + { + "bbox": [ + 61, + 604, + 531, + 724 + ], + "type": "inline_equation", + "content": "L_{2}" + }, + { + "bbox": [ + 61, + 604, + 531, + 724 + ], + "type": "text", + "content": " norm error for both TAPS and proper generalized decomposition (PGD) methods [17, 18]. To this aim, we use the same discretization for the space-time domain with each dimension discretized by 100 grid points, the same reproducing polynomial order " + }, + { + "bbox": [ + 61, + 604, + 531, + 724 + ], + "type": "inline_equation", + "content": "p = 1" + }, + { + "bbox": [ + 61, + 604, + 531, + 724 + ], + "type": "text", + "content": " and convolution patch size " + }, + { + "bbox": [ + 61, + 604, + 531, + 724 + ], + "type": "inline_equation", + "content": "s = 1" + }, + { + "bbox": [ + 61, + 604, + 531, + 724 + ], + "type": "text", + "content": ". As can be seen from Fig. 8(b), TAPS requires a much smaller number of modes than PGD. For TAPS, when the number of modes equals 25, the relative " + }, + { + "bbox": [ + 61, + 604, + 531, + 724 + ], + "type": "inline_equation", + "content": "L_{2}" + }, + { + "bbox": [ + 61, + 604, + 531, + 724 + ], + "type": "text", + "content": " norm error decreases to " + }, + { + "bbox": [ + 61, + 604, + 531, + 724 + ], + "type": "inline_equation", + "content": "2.5 \\times 10^{-3}" + }, + { + "bbox": [ + 61, + 604, + 531, + 724 + ], + "type": "text", + "content": ". The total solution time is 15.2 s. However, PGD requires 1,000 modes which takes 60.6 s solution time to reach the same level of accuracy. This is because the test function space in PGD is a subspace of TAPS [29]. Furthermore, the modal decomposition obtained from PGD is not optimal and thus requires a larger storage requirement due to the increased number of modes." 
+ } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 291, + 740, + 302, + 748 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 740, + 302, + 748 + ], + "spans": [ + { + "bbox": [ + 291, + 740, + 302, + 748 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 109, + 112, + 301, + 237 + ], + "blocks": [ + { + "bbox": [ + 109, + 112, + 301, + 237 + ], + "lines": [ + { + "bbox": [ + 109, + 112, + 301, + 237 + ], + "spans": [ + { + "bbox": [ + 109, + 112, + 301, + 237 + ], + "type": "image", + "image_path": "71b70cc86e81f29f17717ebc5e990a233cea4fd792c202ed7d02aa919c3be394.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 223, + 240, + 235, + 250 + ], + "lines": [ + { + "bbox": [ + 223, + 240, + 235, + 250 + ], + "spans": [ + { + "bbox": [ + 223, + 240, + 235, + 250 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 305, + 111, + 482, + 239 + ], + "blocks": [ + { + "bbox": [ + 305, + 111, + 482, + 239 + ], + "lines": [ + { + "bbox": [ + 305, + 111, + 482, + 239 + ], + "spans": [ + { + "bbox": [ + 305, + 111, + 482, + 239 + ], + "type": "image", + "image_path": "fc1a326bac6804be45fa1c9b3cf27c427a62e03aa83dfcf816fe3cdb17e70be1.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 404, + 241, + 415, + 250 + ], + "lines": [ + { + "bbox": [ + 404, + 241, + 415, + 250 + ], + "spans": [ + { + "bbox": [ + 404, + 241, + 415, + 250 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 117, + 264, + 475, + 275 + ], + "lines": [ + { + "bbox": [ + 117, + 264, + 475, + 275 + ], + "spans": [ 
+ { + "bbox": [ + 117, + 264, + 475, + 275 + ], + "type": "text", + "content": "Figure 8: Relative L2 norm error with respect to (a) the number of subspace iterations (b) the number of modes" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 60, + 292, + 531, + 364 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 292, + 531, + 364 + ], + "spans": [ + { + "bbox": [ + 60, + 292, + 531, + 364 + ], + "type": "text", + "content": "The spatial and temporal convergence are also studied. In Fig. 9(a), the number of temporal nodes is fixed as 500, and the spatial mesh is refined. It shows the relative " + }, + { + "bbox": [ + 60, + 292, + 531, + 364 + ], + "type": "inline_equation", + "content": "L_{2}" + }, + { + "bbox": [ + 60, + 292, + 531, + 364 + ], + "type": "text", + "content": " norm error with respect to the number of nodes along each spatial dimension. As can be readily seen from the figure, larger patch size " + }, + { + "bbox": [ + 60, + 292, + 531, + 364 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 60, + 292, + 531, + 364 + ], + "type": "text", + "content": " leads to smaller error given the same reproducing polynomial orders " + }, + { + "bbox": [ + 60, + 292, + 531, + 364 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 60, + 292, + 531, + 364 + ], + "type": "text", + "content": ". Moreover, we can adjust " + }, + { + "bbox": [ + 60, + 292, + 531, + 364 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 60, + 292, + 531, + 364 + ], + "type": "text", + "content": " to control the spatial convergence rate. Similarly, Fig. 9(b) demonstrates the convergence rate in the temporal domain where we fix the spatial discretization as 500 along each spatial dimension. 
By adjusting " + }, + { + "bbox": [ + 60, + 292, + 531, + 364 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 60, + 292, + 531, + 364 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 60, + 292, + 531, + 364 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 60, + 292, + 531, + 364 + ], + "type": "text", + "content": ", we can obtain different temporal convergence rates." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 60, + 364, + 531, + 400 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 364, + 531, + 400 + ], + "spans": [ + { + "bbox": [ + 60, + 364, + 531, + 400 + ], + "type": "text", + "content": "Finally, we refine the spatial and temporal mesh simultaneously and study the spatio-temporal convergence rate in Fig. 9(c). As can be observed from the figure, higher reproducing polynomial order " + }, + { + "bbox": [ + 60, + 364, + 531, + 400 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 60, + 364, + 531, + 400 + ], + "type": "text", + "content": " will lead to a higher-order convergence rate." 
+ } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 66, + 412, + 227, + 543 + ], + "blocks": [ + { + "bbox": [ + 66, + 412, + 227, + 543 + ], + "lines": [ + { + "bbox": [ + 66, + 412, + 227, + 543 + ], + "spans": [ + { + "bbox": [ + 66, + 412, + 227, + 543 + ], + "type": "image", + "image_path": "2345a49f326f69ec67725fc081645d1b37d8869821b2514f92f47f0a0b9cc7e8.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 154, + 544, + 164, + 552 + ], + "lines": [ + { + "bbox": [ + 154, + 544, + 164, + 552 + ], + "spans": [ + { + "bbox": [ + 154, + 544, + 164, + 552 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 230, + 412, + 369, + 544 + ], + "blocks": [ + { + "bbox": [ + 230, + 412, + 369, + 544 + ], + "lines": [ + { + "bbox": [ + 230, + 412, + 369, + 544 + ], + "spans": [ + { + "bbox": [ + 230, + 412, + 369, + 544 + ], + "type": "image", + "image_path": "aa5c83a28c6329ef2d94f536ced88460656363be508afd476967c069343f0768.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 544, + 314, + 552 + ], + "lines": [ + { + "bbox": [ + 304, + 544, + 314, + 552 + ], + "spans": [ + { + "bbox": [ + 304, + 544, + 314, + 552 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 371, + 412, + 526, + 543 + ], + "blocks": [ + { + "bbox": [ + 371, + 412, + 526, + 543 + ], + "lines": [ + { + "bbox": [ + 371, + 412, + 526, + 543 + ], + "spans": [ + { + "bbox": [ + 371, + 412, + 526, + 543 + ], + "type": "image", + "image_path": "d1d0eb95867b86e555e3a95df3d5b5a14cd34d608385d9234dcdbecea9446986.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 454, + 543, + 464, + 551 + ], + "lines": [ + { + 
"bbox": [ + 454, + 543, + 464, + 551 + ], + "spans": [ + { + "bbox": [ + 454, + 543, + 464, + 551 + ], + "type": "text", + "content": "(c)" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 61, + 565, + 531, + 586 + ], + "lines": [ + { + "bbox": [ + 61, + 565, + 531, + 586 + ], + "spans": [ + { + "bbox": [ + 61, + 565, + 531, + 586 + ], + "type": "text", + "content": "Figure 9: Relative " + }, + { + "bbox": [ + 61, + 565, + 531, + 586 + ], + "type": "inline_equation", + "content": "L_{2}" + }, + { + "bbox": [ + 61, + 565, + 531, + 586 + ], + "type": "text", + "content": " norm error with respect to the number of grid points (a) spatial convergence (b) temporal convergence (c) spatio-temporal convergence" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "bbox": [ + 61, + 606, + 420, + 618 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 606, + 420, + 618 + ], + "spans": [ + { + "bbox": [ + 61, + 606, + 420, + 618 + ], + "type": "text", + "content": "3.2. Convergence study of " + }, + { + "bbox": [ + 61, + 606, + 420, + 618 + ], + "type": "inline_equation", + "content": "S-P-T" + }, + { + "bbox": [ + 61, + 606, + 420, + 618 + ], + "type": "text", + "content": " problems up to equivalent zetta-scale (10" + }, + { + "bbox": [ + 61, + 606, + 420, + 618 + ], + "type": "inline_equation", + "content": "^{21}" + }, + { + "bbox": [ + 61, + 606, + 420, + 618 + ], + "type": "text", + "content": ") full models" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 60, + 619, + 531, + 655 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 619, + 531, + 655 + ], + "spans": [ + { + "bbox": [ + 60, + 619, + 531, + 655 + ], + "type": "text", + "content": "In this example, we study the convergence of the TAPS solver for the time-dependent parametric heat transfer problem in a S-P-T setting. In Eq. 
18, we adopt the heat source as shown in Eq. 49. In this example, we have spatial variable " + }, + { + "bbox": [ + 60, + 619, + 531, + 655 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_s = (x,y,z)" + }, + { + "bbox": [ + 60, + 619, + 531, + 655 + ], + "type": "text", + "content": ", parametric variable " + }, + { + "bbox": [ + 60, + 619, + 531, + 655 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_p = (k,P,\\rho ,c_p)" + }, + { + "bbox": [ + 60, + 619, + 531, + 655 + ], + "type": "text", + "content": " and temporal variable " + }, + { + "bbox": [ + 60, + 619, + 531, + 655 + ], + "type": "inline_equation", + "content": "x_{t} = t" + }, + { + "bbox": [ + 60, + 619, + 531, + 655 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 143, + 663, + 530, + 699 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 663, + 530, + 699 + ], + "spans": [ + { + "bbox": [ + 143, + 663, + 530, + 699 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} f \\left(\\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, x _ {t}\\right) = 1 5 \\rho^ {2} c _ {p} ^ {2} k p e ^ {- 1 5 k t} e ^ {- 2 5. 0 x ^ {2} - 2 5. 0 y ^ {2}} \\\\ + 5 0 \\rho c _ {p} k p \\left(1 - e ^ {- 1 5 k t}\\right) e ^ {- 2 5. 0 x ^ {2} - 2 5. 0 y ^ {2}} \\left[ \\left(1 - 5 0 x ^ {2}\\right) + \\left(1 - 5 0 y ^ {2}\\right) \\right] \\tag {49} \\\\ \\end{array}", + "image_path": "a46bd46fc638c2bedfa906d64ab3b8c25c051a5c1d8b29b1a3d52119d168cd44.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 61, + 700, + 315, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 700, + 315, + 713 + ], + "spans": [ + { + "bbox": [ + 61, + 700, + 315, + 713 + ], + "type": "text", + "content": "The analytical solution to the PDE is inherently non-separable." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 204, + 718, + 530, + 734 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 204, + 718, + 530, + 734 + ], + "spans": [ + { + "bbox": [ + 204, + 718, + 530, + 734 + ], + "type": "interline_equation", + "content": "u ^ {\\mathrm {e x}} \\left(\\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, x _ {t}\\right) = \\rho c _ {p} P \\left(1 - e ^ {- 1 5 k t}\\right) e ^ {- 2 5. 0 x ^ {2} - 2 5. 0 y ^ {2}} \\tag {50}", + "image_path": "ab9e37f336fe58f990f7a2f5ec598fd37f49400dbe516bbd8e978f446befc945.jpg" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 292, + 739, + 302, + 748 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 739, + 302, + 748 + ], + "spans": [ + { + "bbox": [ + 292, + 739, + 302, + 748 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 61, + 111, + 224, + 123 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 111, + 224, + 123 + ], + "spans": [ + { + "bbox": [ + 61, + 111, + 224, + 123 + ], + "type": "text", + "content": "The initial and boundary conditions are:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 224, + 132, + 291, + 145 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 224, + 132, + 291, + 145 + ], + "spans": [ + { + "bbox": [ + 224, + 132, + 291, + 145 + ], + "type": "interline_equation", + "content": "u \\left(\\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, 0\\right) = 0", + "image_path": "2887806201dfdc52940b4f29cfa9a76d0349e2e81f67aec52ce92d0e88fd9b3f.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 226, + 148, + 367, + 164 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 226, + 148, + 367, + 164 + ], + "spans": [ + { + "bbox": [ + 226, + 148, + 367, + 164 + ], + 
"type": "interline_equation", + "content": "u \\left(\\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, x _ {t}\\right) | _ {\\partial \\Omega} = u ^ {\\mathrm {e x}} \\left(\\boldsymbol {x} _ {s}, \\boldsymbol {x} _ {p}, x _ {t}\\right) | _ {\\partial \\Omega}", + "image_path": "a7a3c23c768388495f3481c5af7e9b4de7d239d576ea7b66c1c6800cf8232ab0.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 61, + 169, + 226, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 169, + 226, + 179 + ], + "spans": [ + { + "bbox": [ + 61, + 169, + 226, + 179 + ], + "type": "text", + "content": "The relative " + }, + { + "bbox": [ + 61, + 169, + 226, + 179 + ], + "type": "inline_equation", + "content": "L_{2}" + }, + { + "bbox": [ + 61, + 169, + 226, + 179 + ], + "type": "text", + "content": " norm error is defined as:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 189, + 187, + 531, + 219 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 189, + 187, + 531, + 219 + ], + "spans": [ + { + "bbox": [ + 189, + 187, + 531, + 219 + ], + "type": "interline_equation", + "content": "\\epsilon_ {L _ {2}} = \\frac {\\left\\| u ^ {T D} \\left(\\boldsymbol {x} _ {s} , \\boldsymbol {x} _ {p} , x _ {t}\\right) - u ^ {\\mathrm {e x}} \\left(\\boldsymbol {x} _ {s} , \\boldsymbol {x} _ {p} , x _ {t}\\right) \\right\\| _ {L _ {2} \\left(\\Omega_ {x _ {s}} \\otimes \\Omega_ {x _ {p}} \\otimes \\Omega_ {x _ {t}}\\right)}}{\\left\\| u ^ {\\mathrm {e x}} \\left(\\boldsymbol {x} _ {s} , \\boldsymbol {x} _ {p} , x _ {t}\\right) \\right\\| _ {L _ {2} \\left(\\Omega_ {x _ {s}} \\otimes \\Omega_ {x _ {p}} \\otimes \\Omega_ {x _ {t}}\\right)}} \\tag {51}", + "image_path": "601b382df5b516b1ee0757a77850a3c853d0c52e226159856b25d1d8853924a8.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 60, + 226, + 531, + 322 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 226, + 531, + 322 + ], + "spans": [ + { + "bbox": [ + 
60, + 226, + 531, + 322 + ], + "type": "text", + "content": "To study the convergence of TAPS for S-P-T problems, the number of grid points is refined simultaneously in each dimension and corresponding relative " + }, + { + "bbox": [ + 60, + 226, + 531, + 322 + ], + "type": "inline_equation", + "content": "L_{2}" + }, + { + "bbox": [ + 60, + 226, + 531, + 322 + ], + "type": "text", + "content": " norm errors are computed as shown in Fig. 10. When the number of grid points in each dimension is 450, the equivalent DoFs of a full model achieves " + }, + { + "bbox": [ + 60, + 226, + 531, + 322 + ], + "type": "inline_equation", + "content": "450^{8} = 1.68 \\times 10^{21}" + }, + { + "bbox": [ + 60, + 226, + 531, + 322 + ], + "type": "text", + "content": ". Consequently, it is equivalent to a zetta-scale " + }, + { + "bbox": [ + 60, + 226, + 531, + 322 + ], + "type": "inline_equation", + "content": "(10^{21})" + }, + { + "bbox": [ + 60, + 226, + 531, + 322 + ], + "type": "text", + "content": " full problem. As can be seen from the figure, a larger patch size " + }, + { + "bbox": [ + 60, + 226, + 531, + 322 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 60, + 226, + 531, + 322 + ], + "type": "text", + "content": " leads to a smaller error and faster convergence. A higher reproducing polynomial order " + }, + { + "bbox": [ + 60, + 226, + 531, + 322 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 60, + 226, + 531, + 322 + ], + "type": "text", + "content": " also leads to a higher convergence rate. 
It can be noticed that the convergence rate for " + }, + { + "bbox": [ + 60, + 226, + 531, + 322 + ], + "type": "inline_equation", + "content": "p = 3" + }, + { + "bbox": [ + 60, + 226, + 531, + 322 + ], + "type": "text", + "content": " case is smaller than expected " + }, + { + "bbox": [ + 60, + 226, + 531, + 322 + ], + "type": "inline_equation", + "content": "p + 1 = 4" + }, + { + "bbox": [ + 60, + 226, + 531, + 322 + ], + "type": "text", + "content": ". This is attributed to the fact that the S-P-T mesh is not fine enough. However, due to the rounding error in computing the relative " + }, + { + "bbox": [ + 60, + 226, + 531, + 322 + ], + "type": "inline_equation", + "content": "L_{2}" + }, + { + "bbox": [ + 60, + 226, + 531, + 322 + ], + "type": "text", + "content": " norm error, we can only accurately compute the error up to 450 grid points per dimension." + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 205, + 333, + 390, + 502 + ], + "blocks": [ + { + "bbox": [ + 205, + 333, + 390, + 502 + ], + "lines": [ + { + "bbox": [ + 205, + 333, + 390, + 502 + ], + "spans": [ + { + "bbox": [ + 205, + 333, + 390, + 502 + ], + "type": "image", + "image_path": "7fb0c377642dbc2df6a083a06da8df232e8bdb8c9587d121c766dfaf94864ad3.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 144, + 513, + 448, + 524 + ], + "lines": [ + { + "bbox": [ + 144, + 513, + 448, + 524 + ], + "spans": [ + { + "bbox": [ + 144, + 513, + 448, + 524 + ], + "type": "text", + "content": "Figure 10: Relative " + }, + { + "bbox": [ + 144, + 513, + 448, + 524 + ], + "type": "inline_equation", + "content": "L_{2}" + }, + { + "bbox": [ + 144, + 513, + 448, + 524 + ], + "type": "text", + "content": " norm error with respect to the number of grid points in each dimension" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 60, + 535, + 532, + 630 + ], + "type": "text", + "angle": 0, 
+ "lines": [ + { + "bbox": [ + 60, + 535, + 532, + 630 + ], + "spans": [ + { + "bbox": [ + 60, + 535, + 532, + 630 + ], + "type": "text", + "content": "In summary, we have the flexibility to choose different " + }, + { + "bbox": [ + 60, + 535, + 532, + 630 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 60, + 535, + 532, + 630 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 60, + 535, + 532, + 630 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 60, + 535, + 532, + 630 + ], + "type": "text", + "content": " to control the accuracy of TAPS by directly solving the S-P-T PDE. This is different from other data-driven modeling approaches (for instance, neural networks-based data-driven methods) in two notable ways. First, unlike a black-box neural network interpolator where the accuracy of the model is not guaranteed, our method is built upon the AI-enhanced finite element method, and we can control the convergence rate by choosing suitable hyperparameters " + }, + { + "bbox": [ + 60, + 535, + 532, + 630 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 60, + 535, + 532, + 630 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 60, + 535, + 532, + 630 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 60, + 535, + 532, + 630 + ], + "type": "text", + "content": ". Second, unlike most data-driven reduced-order models for physical problems, our method directly solves the governing PDE by plugging in the TD interpolation without seeing any training data. As a result, we can avoid the most expensive offline data generation stage as opposed to data-driven methods." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 61, + 642, + 326, + 655 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 642, + 326, + 655 + ], + "spans": [ + { + "bbox": [ + 61, + 642, + 326, + 655 + ], + "type": "text", + "content": "3.3. 
Moving source with solution dependent material parameters" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 60, + 657, + 532, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 657, + 532, + 693 + ], + "spans": [ + { + "bbox": [ + 60, + 657, + 532, + 693 + ], + "type": "text", + "content": "In this section, we model moving heat sources using temperature-dependent material parameters. The solution scheme of this problem is provided in detail in Appendix A. Figure 11(a) illustrates a typical representation of temperature-dependent heat conductivity and capacity for Inconel 718 [33]." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 61, + 693, + 531, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 693, + 531, + 718 + ], + "spans": [ + { + "bbox": [ + 61, + 693, + 531, + 718 + ], + "type": "text", + "content": "Since the temperature dependency of " + }, + { + "bbox": [ + 61, + 693, + 531, + 718 + ], + "type": "inline_equation", + "content": "k(u)" + }, + { + "bbox": [ + 61, + 693, + 531, + 718 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 61, + 693, + 531, + 718 + ], + "type": "inline_equation", + "content": "\\rho c_{p}(u)" + }, + { + "bbox": [ + 61, + 693, + 531, + 718 + ], + "type": "text", + "content": " can be approximated using a linear relationship. As a result, we can directly rewrite " + }, + { + "bbox": [ + 61, + 693, + 531, + 718 + ], + "type": "inline_equation", + "content": "k(u(\\boldsymbol{x}_s,x_t))" + }, + { + "bbox": [ + 61, + 693, + 531, + 718 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 61, + 693, + 531, + 718 + ], + "type": "inline_equation", + "content": "\\rho c_{p}(u(\\boldsymbol{x}_{s},x_{t}))" + }, + { + "bbox": [ + 61, + 693, + 531, + 718 + ], + "type": "text", + "content": " in the TD format." 
+ } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 291, + 739, + 303, + 748 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 739, + 303, + 748 + ], + "spans": [ + { + "bbox": [ + 291, + 739, + 303, + 748 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 111, + 121, + 298, + 241 + ], + "blocks": [ + { + "bbox": [ + 111, + 121, + 298, + 241 + ], + "lines": [ + { + "bbox": [ + 111, + 121, + 298, + 241 + ], + "spans": [ + { + "bbox": [ + 111, + 121, + 298, + 241 + ], + "type": "image", + "image_path": "5e704b162ebdd448550d9186205e95bdb2ff6e27cf2d3fe5c63a122256540197.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 192, + 243, + 207, + 253 + ], + "lines": [ + { + "bbox": [ + 192, + 243, + 207, + 253 + ], + "spans": [ + { + "bbox": [ + 192, + 243, + 207, + 253 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 313, + 149, + 483, + 236 + ], + "blocks": [ + { + "bbox": [ + 313, + 149, + 483, + 236 + ], + "lines": [ + { + "bbox": [ + 313, + 149, + 483, + 236 + ], + "spans": [ + { + "bbox": [ + 313, + 149, + 483, + 236 + ], + "type": "image", + "image_path": "716c83297ce005647d99e5f23cc46e6e3024853b336292259b2f47b9f8a009d8.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 372, + 239, + 386, + 253 + ], + "lines": [ + { + "bbox": [ + 372, + 239, + 386, + 253 + ], + "spans": [ + { + "bbox": [ + 372, + 239, + 386, + 253 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 60, + 267, + 531, + 287 + ], + "lines": [ + { + "bbox": [ + 60, + 267, + 531, + 287 + ], + "spans": [ + 
{ + "bbox": [ + 60, + 267, + 531, + 287 + ], + "type": "text", + "content": "Figure 11: (a) Temperature dependent material properties for Inconel 718 [33] (b) Schematic of numerical simulation, where the solution along the center line is compared for FEM and TAPS." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 188, + 326, + 401, + 358 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 188, + 326, + 401, + 358 + ], + "spans": [ + { + "bbox": [ + 188, + 326, + 401, + 358 + ], + "type": "interline_equation", + "content": "k \\left(\\boldsymbol {x} _ {s}, x _ {t}\\right) \\approx \\sum_ {m = 1} ^ {M} m _ {k} u _ {x _ {1}} ^ {(m)} \\left(x _ {1}\\right) u _ {x _ {2}} ^ {(m)} \\left(x _ {2}\\right) u _ {x _ {3}} ^ {(m)} \\left(x _ {3}\\right) u _ {x _ {t}} ^ {(m)} \\left(x _ {t}\\right) + n _ {k}", + "image_path": "e27bd24a7a36c6bfeba0deaabfea42edc50a012e970ff94dc077f09f3eea2588.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 171, + 360, + 531, + 391 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 171, + 360, + 531, + 391 + ], + "spans": [ + { + "bbox": [ + 171, + 360, + 531, + 391 + ], + "type": "interline_equation", + "content": "\\rho c _ {p} \\left(\\boldsymbol {x} _ {s}, x _ {t}\\right) \\approx \\sum_ {m = 1} ^ {M} m _ {c _ {p}} u _ {x _ {1}} ^ {(m)} \\left(x _ {1}\\right) u _ {x _ {2}} ^ {(m)} \\left(x _ {2}\\right) u _ {x _ {3}} ^ {(m)} \\left(x _ {3}\\right) u _ {x _ {t}} ^ {(m)} \\left(x _ {t}\\right) + n _ {c _ {p}} \\tag {52}", + "image_path": "8979a59922b7c2b633296235f76ce06d2f5c9ebe1c69f2365fd98667227a22a7.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 61, + 401, + 531, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 401, + 531, + 426 + ], + "spans": [ + { + "bbox": [ + 61, + 401, + 531, + 426 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 61, + 401, + 
531, + 426 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 61, + 401, + 531, + 426 + ], + "type": "text", + "content": " is the decomposition modes of the TAPS solution; " + }, + { + "bbox": [ + 61, + 401, + 531, + 426 + ], + "type": "inline_equation", + "content": "m_{k} = 1.52 \\times 10^{-5} \\mathrm{~W} / (\\mathrm{mmK}^{2})" + }, + { + "bbox": [ + 61, + 401, + 531, + 426 + ], + "type": "text", + "content": "; " + }, + { + "bbox": [ + 61, + 401, + 531, + 426 + ], + "type": "inline_equation", + "content": "n_{k} = 5.29 \\times 10^{-3} \\mathrm{~W} / (\\mathrm{mmK})" + }, + { + "bbox": [ + 61, + 401, + 531, + 426 + ], + "type": "text", + "content": "; " + }, + { + "bbox": [ + 61, + 401, + 531, + 426 + ], + "type": "inline_equation", + "content": "m_{c_{p}} = 6.11 \\times 10^{-7} \\mathrm{~mm}^{-3} \\mathrm{~K}^{-2}" + }, + { + "bbox": [ + 61, + 401, + 531, + 426 + ], + "type": "text", + "content": "; " + }, + { + "bbox": [ + 61, + 401, + 531, + 426 + ], + "type": "inline_equation", + "content": "n_{c p} = 3.25 \\times 10^{-3} \\mathrm{~mm}^{-3} \\mathrm{~K}^{-1}" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 61, + 426, + 531, + 484 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 426, + 531, + 484 + ], + "spans": [ + { + "bbox": [ + 61, + 426, + 531, + 484 + ], + "type": "text", + "content": "The problem setup is shown in Fig. 11 (b). The spatial domain size is " + }, + { + "bbox": [ + 61, + 426, + 531, + 484 + ], + "type": "inline_equation", + "content": "10\\mathrm{mm} \\times 10\\mathrm{mm} \\times 1\\mathrm{mm}" + }, + { + "bbox": [ + 61, + 426, + 531, + 484 + ], + "type": "text", + "content": " where homogeneous Dirichlet boundary conditions are assumed for the left and right surfaces; homogeneous Neumann boundary conditions are applied to all other surfaces. As shown in Eq. 
19, a moving Gaussian source term " + }, + { + "bbox": [ + 61, + 426, + 531, + 484 + ], + "type": "inline_equation", + "content": "f(\\boldsymbol{x}_s, t)" + }, + { + "bbox": [ + 61, + 426, + 531, + 484 + ], + "type": "text", + "content": " is applied as a volumetric source term with a radius " + }, + { + "bbox": [ + 61, + 426, + 531, + 484 + ], + "type": "inline_equation", + "content": "r = 0.5\\mathrm{mm}" + }, + { + "bbox": [ + 61, + 426, + 531, + 484 + ], + "type": "text", + "content": " and moving velocity " + }, + { + "bbox": [ + 61, + 426, + 531, + 484 + ], + "type": "inline_equation", + "content": "500\\mathrm{mm/s}" + }, + { + "bbox": [ + 61, + 426, + 531, + 484 + ], + "type": "text", + "content": ". The diameter is discretized using 10 spatial elements." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 61, + 485, + 531, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 485, + 531, + 534 + ], + "spans": [ + { + "bbox": [ + 61, + 485, + 531, + 534 + ], + "type": "text", + "content": "Since there is no analytical solution available to this problem, we use implicit finite element analysis as the baseline for validation. JAX-FEM [34] is used to generate the nonlinear FEM solution. For ease of comparison, we use the same time increment as " + }, + { + "bbox": [ + 61, + 485, + 531, + 534 + ], + "type": "inline_equation", + "content": "1.60 \\times 10^{-4}" + }, + { + "bbox": [ + 61, + 485, + 531, + 534 + ], + "type": "text", + "content": " sec for both TAPS and FEM. The solution along the center line, as shown in Fig. 11 (b) is compared. As can be seen from Fig. 12, the result of the nonlinear TAPS solver agrees well with FEM." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 61, + 544, + 197, + 556 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 544, + 197, + 556 + ], + "spans": [ + { + "bbox": [ + 61, + 544, + 197, + 556 + ], + "type": "text", + "content": "3.4. 
Simulation of LPBF process" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 60, + 560, + 531, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 560, + 531, + 597 + ], + "spans": [ + { + "bbox": [ + 60, + 560, + 531, + 597 + ], + "type": "text", + "content": "In this section, we use TAPS to efficiently model the laser powder bed fusion process (LPBF) in additive manufacturing. Here we only consider the free convection term in the Neumann boundary condition. The initial condition can be considered by splitting the total solution as a summation of the homogeneous part and the inhomogeneous part." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 233, + 608, + 530, + 621 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 608, + 530, + 621 + ], + "spans": [ + { + "bbox": [ + 233, + 608, + 530, + 621 + ], + "type": "interline_equation", + "content": "u \\left(\\boldsymbol {x} _ {s}, x _ {t}\\right) = u _ {0} \\left(\\boldsymbol {x} _ {s}, x _ {t}\\right) + u _ {\\text {i n i t}} \\left(\\boldsymbol {x} _ {s}\\right) \\tag {53}", + "image_path": "be137d8c1c5541279284411afccbfff48b12633210ebbd9a063a2b7d974825c4.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 61, + 626, + 531, + 650 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 626, + 531, + 650 + ], + "spans": [ + { + "bbox": [ + 61, + 626, + 531, + 650 + ], + "type": "text", + "content": "As a result, " + }, + { + "bbox": [ + 61, + 626, + 531, + 650 + ], + "type": "inline_equation", + "content": "u_{0}(\\pmb{x}_{s}, x_{t})" + }, + { + "bbox": [ + 61, + 626, + 531, + 650 + ], + "type": "text", + "content": " is subject to homogeneous initial conditions. In this section, we assume Ti-6Al-4V is used as the powder bed materials. The detailed material parameters can be found in Table 4." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 61, + 661, + 185, + 673 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 661, + 185, + 673 + ], + "spans": [ + { + "bbox": [ + 61, + 661, + 185, + 673 + ], + "type": "text", + "content": "3.4.1. Single-track simulation" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 60, + 674, + 531, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 674, + 531, + 723 + ], + "spans": [ + { + "bbox": [ + 60, + 674, + 531, + 723 + ], + "type": "text", + "content": "In this example, we investigate the computational complexity numerically for single-track LPBF simulation with a single S-T slab, as shown in Fig. 13 (a). A single NVIDIA RTX A6000 GPU is used for all the following analyses. To ensure accuracy, the number of modes is adopted as 5 times larger than the number of time steps in the following examples. In the first case, within the S-T slab, the spatial mesh is refined uniformly along each spatial dimension" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 291, + 739, + 302, + 748 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 739, + 302, + 748 + ], + "spans": [ + { + "bbox": [ + 291, + 739, + 302, + 748 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 202, + 110, + 386, + 283 + ], + "blocks": [ + { + "bbox": [ + 202, + 110, + 386, + 283 + ], + "lines": [ + { + "bbox": [ + 202, + 110, + 386, + 283 + ], + "spans": [ + { + "bbox": [ + 202, + 110, + 386, + 283 + ], + "type": "image", + "image_path": "3d5930d344ad41e50bbdf9aa4fa400ae67373da8806b2d086f22076597db5e79.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 135, + 296, + 456, + 307 + ], + "lines": [ + { + "bbox": [ + 135, + 296, + 456, + 307 + ], + 
"spans": [ + { + "bbox": [ + 135, + 296, + 456, + 307 + ], + "type": "text", + "content": "Figure 12: Comparison of Nonlinear TAPS solution versus finite element solution at different times." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 63, + 334, + 531, + 409 + ], + "blocks": [ + { + "bbox": [ + 227, + 327, + 365, + 334 + ], + "lines": [ + { + "bbox": [ + 227, + 327, + 365, + 334 + ], + "spans": [ + { + "bbox": [ + 227, + 327, + 365, + 334 + ], + "type": "text", + "content": "Table 4: Parameters used in the simulation" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 63, + 334, + 531, + 409 + ], + "lines": [ + { + "bbox": [ + 63, + 334, + 531, + 409 + ], + "spans": [ + { + "bbox": [ + 63, + 334, + 531, + 409 + ], + "type": "table", + "html": "
ParameterVariableValueUnits
Thermal conductivityk22.0W m-1K-1
Densityρ4.27g cm-3
Specific heat capacitycp745J kg-1K-1
Ambient temperatureT0298.15K
Heat convection coefficienthconv14.73W m-2K-1
", + "image_path": "e26a208925086b40e365433211679472b0588b4faef6f759f0fae6134d0dc341.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 61, + 428, + 530, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 428, + 530, + 453 + ], + "spans": [ + { + "bbox": [ + 61, + 428, + 530, + 453 + ], + "type": "text", + "content": "while fixing the number of temporal grid points. The computational time for each subspace iteration is plotted in Fig. 13 (b). It can be seen that TAPS has a linear growth of computational complexity when refining the spatial mesh." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 60, + 453, + 531, + 547 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 453, + 531, + 547 + ], + "spans": [ + { + "bbox": [ + 60, + 453, + 531, + 547 + ], + "type": "text", + "content": "Similarly, we only refine the temporal mesh while fixing the spatial mesh in the second case and plot the computational time for each subspace iteration as in Fig. 13 (c). It can be readily observed that refining the temporal mesh has a much higher computational complexity than refining the spatial mesh. This is because increasing temporal elements will also lead to an increased number of modes " + }, + { + "bbox": [ + 60, + 453, + 531, + 547 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 60, + 453, + 531, + 547 + ], + "type": "text", + "content": ". 
As mentioned before, the computational cost for the sparse direct solver employed is " + }, + { + "bbox": [ + 60, + 453, + 531, + 547 + ], + "type": "inline_equation", + "content": "O(M^3 + M^2 n_d + C_c(n_d))" + }, + { + "bbox": [ + 60, + 453, + 531, + 547 + ], + "type": "text", + "content": " for the " + }, + { + "bbox": [ + 60, + 453, + 531, + 547 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 60, + 453, + 531, + 547 + ], + "type": "text", + "content": "-th dimension subproblem, where " + }, + { + "bbox": [ + 60, + 453, + 531, + 547 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 60, + 453, + 531, + 547 + ], + "type": "text", + "content": " represents total number of modes; " + }, + { + "bbox": [ + 60, + 453, + 531, + 547 + ], + "type": "inline_equation", + "content": "n_d" + }, + { + "bbox": [ + 60, + 453, + 531, + 547 + ], + "type": "text", + "content": " refers to the total number of grid points in " + }, + { + "bbox": [ + 60, + 453, + 531, + 547 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 60, + 453, + 531, + 547 + ], + "type": "text", + "content": "-th dimension; " + }, + { + "bbox": [ + 60, + 453, + 531, + 547 + ], + "type": "inline_equation", + "content": "C_c(n_d)" + }, + { + "bbox": [ + 60, + 453, + 531, + 547 + ], + "type": "text", + "content": " refers to the computational cost of a banded sparse matrix with a shape of " + }, + { + "bbox": [ + 60, + 453, + 531, + 547 + ], + "type": "inline_equation", + "content": "(n_d \\times n_d)" + }, + { + "bbox": [ + 60, + 453, + 531, + 547 + ], + "type": "text", + "content": ". Therefore, the increased number of modes leads to a cubic growth in computational time." 
+ } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 63, + 576, + 531, + 698 + ], + "blocks": [ + { + "bbox": [ + 207, + 566, + 385, + 576 + ], + "lines": [ + { + "bbox": [ + 207, + 566, + 385, + 576 + ], + "spans": [ + { + "bbox": [ + 207, + 566, + 385, + 576 + ], + "type": "text", + "content": "Table 5: Parameters used in the single-track simulation" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 63, + 576, + 531, + 698 + ], + "lines": [ + { + "bbox": [ + 63, + 576, + 531, + 698 + ], + "spans": [ + { + "bbox": [ + 63, + 576, + 531, + 698 + ], + "type": "table", + "html": "
ParameterVariableValueUnits
Laser powerP200W
Laser spot size radiusr50μm
Laser scan speedV500mm s-1
Absorptivityη0.251
LengthL1.5mm
WidthW1.5mm
HeightH1.5mm
Laser penetration depthd50μm
Mesh sizeh5μm
", + "image_path": "a855b7b5af10595c3b507ce75c396e8ed53ba715f08759f5c0f86d5d5ce978ec.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 292, + 739, + 302, + 748 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 739, + 302, + 748 + ], + "spans": [ + { + "bbox": [ + 292, + 739, + 302, + 748 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 88, + 112, + 196, + 237 + ], + "blocks": [ + { + "bbox": [ + 88, + 112, + 196, + 237 + ], + "lines": [ + { + "bbox": [ + 88, + 112, + 196, + 237 + ], + "spans": [ + { + "bbox": [ + 88, + 112, + 196, + 237 + ], + "type": "image", + "image_path": "06d93feedd76d1074ae325710d3e3e673d487dd19e7ca7144df3fe686afb12f4.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 61, + 250, + 531, + 269 + ], + "lines": [ + { + "bbox": [ + 61, + 250, + 531, + 269 + ], + "spans": [ + { + "bbox": [ + 61, + 250, + 531, + 269 + ], + "type": "text", + "content": "Figure 13: (a) Single-track simulation. (b) Computational time of subspace iteration in refining the spatial mesh: linear growth. 
(c) Computational time of subspace iteration in refining the temporal mesh in a single space-time slab: cubic growth due to the increased number of modes" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 206, + 111, + 358, + 239 + ], + "blocks": [ + { + "bbox": [ + 206, + 111, + 358, + 239 + ], + "lines": [ + { + "bbox": [ + 206, + 111, + 358, + 239 + ], + "spans": [ + { + "bbox": [ + 206, + 111, + 358, + 239 + ], + "type": "image", + "image_path": "4f647f9527a5dd106e3c64c9d9a5f2ac9e311427cc75722f1e8cd157bd35a51d.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 359, + 110, + 504, + 238 + ], + "blocks": [ + { + "bbox": [ + 359, + 110, + 504, + 238 + ], + "lines": [ + { + "bbox": [ + 359, + 110, + 504, + 238 + ], + "spans": [ + { + "bbox": [ + 359, + 110, + 504, + 238 + ], + "type": "image", + "image_path": "fa2ca97b6f3903dd29bd21860b65642768d7856f00c975369e0e7129f9aa8323.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 61, + 290, + 183, + 301 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 290, + 183, + 301 + ], + "spans": [ + { + "bbox": [ + 61, + 290, + 183, + 301 + ], + "type": "text", + "content": "3.4.2. Multi-track simulation" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 60, + 302, + 531, + 398 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 302, + 531, + 398 + ], + "spans": [ + { + "bbox": [ + 60, + 302, + 531, + 398 + ], + "type": "text", + "content": "A major challenge in simulating multiple tracks in LPBF is the substantial number of time steps needed. 
To circumvent the cubic growth associated with the increasing number of temporal grid points and modes for moving source problems, we can leverage multiple S-T slabs to break down the original problem with a large number of time steps into smaller slabs. Consequently, this method keeps the total number of modes required in each slab beneath a reasonable threshold, thereby optimizing computational efficiency. The detailed algorithm of simulating multiple space-time (S-T) slabs for LPBF process is shown in Appendix B. Using this method, we first simulate a multi-track LPBF problem and analyze how the total number of slabs influences computational cost. The detailed setup can be found in Table 6. Note that we only simulate the printing process of the final layer in this section." + } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 63, + 425, + 531, + 560 + ], + "blocks": [ + { + "bbox": [ + 208, + 417, + 384, + 426 + ], + "lines": [ + { + "bbox": [ + 208, + 417, + 384, + 426 + ], + "spans": [ + { + "bbox": [ + 208, + 417, + 384, + 426 + ], + "type": "text", + "content": "Table 6: Parameters used in the multi-track simulation" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 63, + 425, + 531, + 560 + ], + "lines": [ + { + "bbox": [ + 63, + 425, + 531, + 560 + ], + "spans": [ + { + "bbox": [ + 63, + 425, + 531, + 560 + ], + "type": "table", + "html": "
ParameterVariableValueUnits
Laser powerP200W
Laser spot size radiusr50μm
Laser scan speedV500mm s-1
Absorptivityη0.251
LengthL1.5mm
WidthW1.5mm
HeightH1.5mm
Laser penetration depthd50μm
Hatch space sizehs50μm
Mesh sizeh5μm
", + "image_path": "917627eb0b2157a4d53686719880b45a818bf05e0967a467fc9e17abc0ca611b.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 60, + 571, + 531, + 630 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 571, + 531, + 630 + ], + "spans": [ + { + "bbox": [ + 60, + 571, + 531, + 630 + ], + "type": "text", + "content": "We use different numbers of temporal grids within each S-T slab and compare the computation cost, as shown in Fig. 14. As can be seen from the figure, when each space-time slab contains around 20 temporal grid points, the computational efficiency is optimal. Hence, choosing the optimal number of temporal elements inside each space-time slab is crucial for the overall performance of the TAPS solver for modeling LPBF process. We will adopt 20 temporal grid points per S-T slab as the default for the following multi-track LPBF simulations." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 60, + 631, + 531, + 678 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 631, + 531, + 678 + ], + "spans": [ + { + "bbox": [ + 60, + 631, + 531, + 678 + ], + "type": "text", + "content": "Next, we compare the performance of TAPS versus the classical explicit finite difference method. To this aim, we use a GPU-accelerated and optimized finite difference code, GAMMA, to model the LPBF process [28]. In this example, we increase the size of the domain while maintaining all other process parameters, as shown in Table 6. The corresponding computation time, GPU memory usage, and required data storage space are plotted in Fig. 15." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 60, + 679, + 531, + 726 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 679, + 531, + 726 + ], + "spans": [ + { + "bbox": [ + 60, + 679, + 531, + 726 + ], + "type": "text", + "content": "Fig. 
15(a) highlights the significant speed advantage of TAPS over GAMMA, especially as the size of the simulation domain increases. GAMMA only can simulate powder bed size up to " + }, + { + "bbox": [ + 60, + 679, + 531, + 726 + ], + "type": "inline_equation", + "content": "4.5^{3}\\mathrm{mm}^{3}" + }, + { + "bbox": [ + 60, + 679, + 531, + 726 + ], + "type": "text", + "content": " since the GPU memory can only handle up to " + }, + { + "bbox": [ + 60, + 679, + 531, + 726 + ], + "type": "inline_equation", + "content": "7.31\\times 10^{8}" + }, + { + "bbox": [ + 60, + 679, + 531, + 726 + ], + "type": "text", + "content": " spatial DoFs. For the " + }, + { + "bbox": [ + 60, + 679, + 531, + 726 + ], + "type": "inline_equation", + "content": "4.5^{3}\\mathrm{mm}^{3}" + }, + { + "bbox": [ + 60, + 679, + 531, + 726 + ], + "type": "text", + "content": " case, TAPS is 85 times faster than GAMMA. On the other hand, TAPS is able to model " + }, + { + "bbox": [ + 60, + 679, + 531, + 726 + ], + "type": "inline_equation", + "content": "100^{3}\\mathrm{mm}^{3}" + }, + { + "bbox": [ + 60, + 679, + 531, + 726 + ], + "type": "text", + "content": " powder bed, with its speed benefits becoming even more evident for larger" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 291, + 739, + 302, + 748 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 739, + 302, + 748 + ], + "spans": [ + { + "bbox": [ + 291, + 739, + 302, + 748 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 135, + 110, + 286, + 256 + ], + "blocks": [ + { + "bbox": [ + 135, + 110, + 286, + 256 + ], + "lines": [ + { + "bbox": [ + 135, + 110, + 286, + 256 + ], + "spans": [ + { + "bbox": [ + 135, + 110, + 286, + 256 + ], + "type": "image", + "image_path": 
"cc72b3743dea1acce6b7f995d1ed32b295f09a088f75ac84fbe7696ee90234f1.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 195, + 263, + 210, + 274 + ], + "lines": [ + { + "bbox": [ + 195, + 263, + 210, + 274 + ], + "spans": [ + { + "bbox": [ + 195, + 263, + 210, + 274 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 296, + 120, + 461, + 257 + ], + "blocks": [ + { + "bbox": [ + 296, + 120, + 461, + 257 + ], + "lines": [ + { + "bbox": [ + 296, + 120, + 461, + 257 + ], + "spans": [ + { + "bbox": [ + 296, + 120, + 461, + 257 + ], + "type": "image", + "image_path": "13672870ba3d3e65ac32f7b556569b2691beeea4c17ceda38811f12e48a4d9bc.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 380, + 263, + 393, + 274 + ], + "lines": [ + { + "bbox": [ + 380, + 263, + 393, + 274 + ], + "spans": [ + { + "bbox": [ + 380, + 263, + 393, + 274 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 63, + 309, + 217, + 452 + ], + "blocks": [ + { + "bbox": [ + 87, + 285, + 504, + 296 + ], + "lines": [ + { + "bbox": [ + 87, + 285, + 504, + 296 + ], + "spans": [ + { + "bbox": [ + 87, + 285, + 504, + 296 + ], + "type": "text", + "content": "Figure 14: (a) Multi-track simulation. (b) Influence of number of temporal grid points in each S-T slab on the computational cost." 
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 63, + 309, + 217, + 452 + ], + "lines": [ + { + "bbox": [ + 63, + 309, + 217, + 452 + ], + "spans": [ + { + "bbox": [ + 63, + 309, + 217, + 452 + ], + "type": "image", + "image_path": "1e0837c8a03c5dd7bb6ad48fde26cc3fea68510e35b307ac270e07be256a353a.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 139, + 453, + 151, + 461 + ], + "lines": [ + { + "bbox": [ + 139, + 453, + 151, + 461 + ], + "spans": [ + { + "bbox": [ + 139, + 453, + 151, + 461 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 61, + 473, + 531, + 494 + ], + "lines": [ + { + "bbox": [ + 61, + 473, + 531, + 494 + ], + "spans": [ + { + "bbox": [ + 61, + 473, + 531, + 494 + ], + "type": "text", + "content": "Figure 15: Performance comparison of TAPS and GAMMA for powder bed with different sizes in terms of (a) computational time (b) GPU memory requirement (c) Data storage requirement for each time increment" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 217, + 309, + 366, + 452 + ], + "blocks": [ + { + "bbox": [ + 217, + 309, + 366, + 452 + ], + "lines": [ + { + "bbox": [ + 217, + 309, + 366, + 452 + ], + "spans": [ + { + "bbox": [ + 217, + 309, + 366, + 452 + ], + "type": "image", + "image_path": "f9ccd59eb06fa06952a41ddbeabb1229514b05ff8f3cebdcc5d4513deee979da.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 292, + 453, + 304, + 462 + ], + "lines": [ + { + "bbox": [ + 292, + 453, + 304, + 462 + ], + "spans": [ + { + "bbox": [ + 292, + 453, + 304, + 462 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 368, + 309, + 529, + 
451 + ], + "blocks": [ + { + "bbox": [ + 368, + 309, + 529, + 451 + ], + "lines": [ + { + "bbox": [ + 368, + 309, + 529, + 451 + ], + "spans": [ + { + "bbox": [ + 368, + 309, + 529, + 451 + ], + "type": "image", + "image_path": "b1257e053086d2dd69596cd534767d87c78a5bcb112c31e469f474d08790a53b.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 452, + 451, + 463, + 460 + ], + "lines": [ + { + "bbox": [ + 452, + 451, + 463, + 460 + ], + "spans": [ + { + "bbox": [ + 452, + 451, + 463, + 460 + ], + "type": "text", + "content": "(c)" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 60, + 513, + 531, + 596 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 513, + 531, + 596 + ], + "spans": [ + { + "bbox": [ + 60, + 513, + 531, + 596 + ], + "type": "text", + "content": "domains. Fig. 15(b) compares the memory requirements, where GAMMA experiences fast growth due to the cubic scaling of total spatial DoFs. In contrast, TAPS benefits from TD, requiring significantly less memory. TAPS uses 13 times smaller GPU memory compared to GAMMA for the " + }, + { + "bbox": [ + 60, + 513, + 531, + 596 + ], + "type": "inline_equation", + "content": "4.5^{3}\\mathrm{mm}^{3}" + }, + { + "bbox": [ + 60, + 513, + 531, + 596 + ], + "type": "text", + "content": " case. Additionally, TAPS can efficiently manage GPU memory usage for larger powder bed simulations by adopting different numbers of temporal grids in each S-T slab. Finally, Fig. 15(c) compares data storage needs where GAMMA's storage requirements grow cubically, whereas TAPS maintains a linear growth pattern. For the " + }, + { + "bbox": [ + 60, + 513, + 531, + 596 + ], + "type": "inline_equation", + "content": "4.5^{3}\\mathrm{mm}^{3}" + }, + { + "bbox": [ + 60, + 513, + 531, + 596 + ], + "type": "text", + "content": " case, the data storage of GAMMA is 2,700 times larger than TAPS." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 61, + 608, + 303, + 620 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 608, + 303, + 620 + ], + "spans": [ + { + "bbox": [ + 61, + 608, + 303, + 620 + ], + "type": "text", + "content": "3.4.3. Large-scale multi-layer multi-track LPBF simulation" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 60, + 622, + 531, + 681 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 622, + 531, + 681 + ], + "spans": [ + { + "bbox": [ + 60, + 622, + 531, + 681 + ], + "type": "text", + "content": "In this section, the proposed method is used to simulate a large-scale multi-layer multi-track LPBF process. Element birth is used to model newly added layers in the process. Details on element birth can be found in Appendix C. As shown in Fig. 16 (a), the run scenario is the production of a " + }, + { + "bbox": [ + 60, + 622, + 531, + 681 + ], + "type": "inline_equation", + "content": "10\\mathrm{mm}" + }, + { + "bbox": [ + 60, + 622, + 531, + 681 + ], + "type": "text", + "content": " cube within a " + }, + { + "bbox": [ + 60, + 622, + 531, + 681 + ], + "type": "inline_equation", + "content": "12\\mathrm{mm}" + }, + { + "bbox": [ + 60, + 622, + 531, + 681 + ], + "type": "text", + "content": " powder bed domain. The base plate height is " + }, + { + "bbox": [ + 60, + 622, + 531, + 681 + ], + "type": "inline_equation", + "content": "2\\mathrm{mm}" + }, + { + "bbox": [ + 60, + 622, + 531, + 681 + ], + "type": "text", + "content": ". The tool path follows the pattern shown on the top surface. Material parameters are taken from Ti-6Al-4V [35]. The detailed parameters for the simulation setup are shown in Table 7." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 60, + 682, + 531, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 682, + 531, + 717 + ], + "spans": [ + { + "bbox": [ + 60, + 682, + 531, + 717 + ], + "type": "text", + "content": "To showcase the capabilities of our approach using TAPS, we employ a fine spatial mesh for the simulation. The spatial element size is " + }, + { + "bbox": [ + 60, + 682, + 531, + 717 + ], + "type": "inline_equation", + "content": "10 \\times 10 \\times 5\\mu m^3" + }, + { + "bbox": [ + 60, + 682, + 531, + 717 + ], + "type": "text", + "content": ". In classical numerical algorithms, this corresponds to " + }, + { + "bbox": [ + 60, + 682, + 531, + 717 + ], + "type": "inline_equation", + "content": "3.46 \\times 10^{9}" + }, + { + "bbox": [ + 60, + 682, + 531, + 717 + ], + "type": "text", + "content": " spatial DoFs, which is unmanageable for typical workstations due to the prohibitive RAM requirements." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 76, + 717, + 531, + 729 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 717, + 531, + 729 + ], + "spans": [ + { + "bbox": [ + 76, + 717, + 531, + 729 + ], + "type": "text", + "content": "The simulation result is shown in Fig. 16 (b), where the temperature of the last layer is plotted. 
In total, it costs 60.7" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 290, + 739, + 302, + 748 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 739, + 302, + 748 + ], + "spans": [ + { + "bbox": [ + 290, + 739, + 302, + 748 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 139, + 124, + 286, + 250 + ], + "blocks": [ + { + "bbox": [ + 139, + 124, + 286, + 250 + ], + "lines": [ + { + "bbox": [ + 139, + 124, + 286, + 250 + ], + "spans": [ + { + "bbox": [ + 139, + 124, + 286, + 250 + ], + "type": "image", + "image_path": "0f42907a3d6c93d278eb81dc581820a0260002f7e28abedb7a27b81e2d7be42a.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 188, + 259, + 204, + 271 + ], + "lines": [ + { + "bbox": [ + 188, + 259, + 204, + 271 + ], + "spans": [ + { + "bbox": [ + 188, + 259, + 204, + 271 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 298, + 118, + 458, + 250 + ], + "blocks": [ + { + "bbox": [ + 298, + 118, + 458, + 250 + ], + "lines": [ + { + "bbox": [ + 298, + 118, + 458, + 250 + ], + "spans": [ + { + "bbox": [ + 298, + 118, + 458, + 250 + ], + "type": "image", + "image_path": "6dccaac9c3bd5c80948b57cdbca225315a06fe5add804187394c7973ca6c6a07.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 364, + 259, + 379, + 271 + ], + "lines": [ + { + "bbox": [ + 364, + 259, + 379, + 271 + ], + "spans": [ + { + "bbox": [ + 364, + 259, + 379, + 271 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 127, + 283, + 465, + 295 + ], + "lines": [ + { + "bbox": [ + 127, + 283, + 465, 
+ 295 + ], + "spans": [ + { + "bbox": [ + 127, + 283, + 465, + 295 + ], + "type": "text", + "content": "Figure 16: (a) Problem statement: LPBF simulation. (b) Temperature solution for printing the final layer" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 63, + 322, + 531, + 421 + ], + "blocks": [ + { + "bbox": [ + 198, + 312, + 394, + 322 + ], + "lines": [ + { + "bbox": [ + 198, + 312, + 394, + 322 + ], + "spans": [ + { + "bbox": [ + 198, + 312, + 394, + 322 + ], + "type": "text", + "content": "Table 7: Parameters used in the large-scale LPBF simulation" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 63, + 322, + 531, + 421 + ], + "lines": [ + { + "bbox": [ + 63, + 322, + 531, + 421 + ], + "spans": [ + { + "bbox": [ + 63, + 322, + 531, + 421 + ], + "type": "table", + "html": "
ParameterVariableValueUnits
Laser powerP200W
Laser spot size radiusr100μm
Laser scan speedV500mm s-1
Absorptivityη0.251
Laser penetration depthd50μm
Layer thicknesshl50μm
Hatch space sizehs200μm
", + "image_path": "65318179eddcce82c105fefcf6d4e4fb81f40bde17ec3c84afebf28ebb9869b3.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 60, + 440, + 531, + 501 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 440, + 531, + 501 + ], + "spans": [ + { + "bbox": [ + 60, + 440, + 531, + 501 + ], + "type": "text", + "content": "hrs to run the simulation. The maximum GPU memory usage is 8.11 GB. The final solution vector size is 1.35 GB. As a comparison, it's estimated that GAMMA will solve the same problem with the same spatial resolution in 3,485 days, with at least 120 GB GPU memory usage and 1.26 TB storage space to store the solution [35]. Consequently, TAPS achieves around 1,370 X speedup, 14.8 X memory footprint savings, and 955 X storage gain compared to the finite difference method." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 61, + 518, + 122, + 529 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 518, + 122, + 529 + ], + "spans": [ + { + "bbox": [ + 61, + 518, + 122, + 529 + ], + "type": "text", + "content": "4. Discussion" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 60, + 539, + 532, + 622 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 539, + 532, + 622 + ], + "spans": [ + { + "bbox": [ + 60, + 539, + 532, + 622 + ], + "type": "text", + "content": "In the previous sections, we have shown that TAPS tackles two drawbacks of data-driven surrogate modeling approaches which use offline data generated through direct numerical simulation (DNS). Firstly, the proposed TAPS is data-free, which means that it does not require any training data. This is of crucial importance for applications that require ultra high-resolution simulations because offline training data generation can be extremely costly. Our method circumvents expensive offline DNS data generation by directly solving the governing equation. 
Secondly, TAPS enables solving ultra large-scale problems with significant speedup, minimal memory requirement, and substantial storage gain as compared to standard DNS techniques." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 60, + 623, + 532, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 623, + 532, + 694 + ], + "spans": [ + { + "bbox": [ + 60, + 623, + 532, + 694 + ], + "type": "text", + "content": "The computational speed of the current method can be further improved with the state-of-the-art high-performance numerical solvers and parallel computing on multiple GPUs. Right now, the TAPS linear systems of equations are solved on CPUs which results in additional overhead. With more sparse direct solvers/iterative schemes becoming available on GPU, we expect a further speedup of the current program. Moreover, parallel computing using multiple GPUs can be achieved using Message Passing Interface (MPI) [36]. For ultra large-scale analysis where each dimension contains millions of nodes, an efficient iterative solver with a suitable preconditioner needs to be developed." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 60, + 694, + 532, + 731 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 694, + 532, + 731 + ], + "spans": [ + { + "bbox": [ + 60, + 694, + 532, + 731 + ], + "type": "text", + "content": "Variational multiscale methods can be used to further extend the capabilities of the current method to tackle zettascale space-time problems [37, 35]. Moreover, one major computational cost for the current method originates from the increased number of decomposition modes for a large number of time steps. 
This can be avoided by leveraging" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 290, + 739, + 302, + 748 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 739, + 302, + 748 + ], + "spans": [ + { + "bbox": [ + 290, + 739, + 302, + 748 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "bbox": [ + 60, + 111, + 531, + 159 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 111, + 531, + 159 + ], + "spans": [ + { + "bbox": [ + 60, + 111, + 531, + 159 + ], + "type": "text", + "content": "coordinate transformation techniques where the moving source can be transformed into a fixed one. As a result, we expect to greatly improve the computational performance of the current method. Irregular geometry can also be considered using immersed finite element techniques or the Solid Isotropic Material with Penalization (SIMP) method in topology optimization [20, 38, 39, 40, 41]." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 61, + 177, + 126, + 188 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 177, + 126, + 188 + ], + "spans": [ + { + "bbox": [ + 61, + 177, + 126, + 188 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 60, + 198, + 531, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 198, + 531, + 269 + ], + "spans": [ + { + "bbox": [ + 60, + 198, + 531, + 269 + ], + "type": "text", + "content": "In this paper, we propose TAPS as a data-free predictive scientific AI model to simulate ultra large-scale physical problems. 
This method eliminates the traditional necessity for offline training data generation, thereby exhibiting substantial speedup, memory efficiency, and storage gain as opposed to data-driven methods, making previously unsolvable large-scale and high-dimensional problems manageable. The convergence of the TAPS solver is numerically investigated. As a demonstration of the capabilities of TAPS, we showcase the application of the TAPS solver for a multi-layer multi-track additive manufacturing problem that is intractable with classical numerical algorithms." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 60, + 269, + 531, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 269, + 531, + 376 + ], + "spans": [ + { + "bbox": [ + 60, + 269, + 531, + 376 + ], + "type": "text", + "content": "TAPS is well suited for a broad range of science or engineering problems where: 1) the finite element method and other conventional numerical methods are unsuitable due to excessively long simulation times or high RAM and storage demands needed to achieve high accuracy, 2) the model must accommodate design parameters as inputs, or 3) fast prediction is required once the model is obtained. The INN hierarchical neural network interpolants, particularly C-HiDeNN used by TAPS, demonstrate superior performance compared to other machine learning models. For the solving tasks, it has shown superior performance compared to physics-informed neural network (PINN) [7], CP-PINN [42], and Kolmogorov-Arnold Networks (KAN) [43] with orders of magnitude faster solution time, higher accuracy, and better scalability to ultra large-scale and high-dimensional PDEs [44]. INN interpolants can also be effectively used in data-driven training tasks and show better training accuracy compared to MLP, SIREN [45] and KAN [11, 44]." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 60, + 377, + 531, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 377, + 531, + 483 + ], + "spans": [ + { + "bbox": [ + 60, + 377, + 531, + 483 + ], + "type": "text", + "content": "As illustrated in Fig. 17, the significance of this work in the area of predictive scientific AI models aligns with the trend in other areas in AI, such as language and vision AI models. The evolution of language models has seen dramatic growth, beginning with foundational models like BERT [46], followed by the GPT series [47], which expanded transformer architecture to hundreds of billions of parameters, showcasing powerful generative capabilities. In vision models, AlexNet [48] marked a breakthrough, while advancements like DIT-XL [49] and SORA [50] integrated diffusion models to handle more complex and challenging visual tasks. This trajectory of increasing scale and sophistication from its network architecture (i.e., transformer of language models and diffusion of vision models) is mirrored in predictive scientific AI where TAPS represents a significant advancement in its network architecture, INN." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 60, + 485, + 531, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 485, + 531, + 579 + ], + "spans": [ + { + "bbox": [ + 60, + 485, + 531, + 579 + ], + "type": "text", + "content": "A major critical issue in the emerging large AI models is a more sophisticated model will generally lead to a larger amount of training data, more expensive training costs, and longer inference time. The advent of DeepSeek R1 breaks this rule since it has fewer parameters, much less training cost, faster inference speed, yet still comparable accuracy compared to other state-of-the-art models due to its novel architecture and training techniques such as distillation methods [51]. 
For predictive scientific AI, we face even more pronounced challenges due to strict accuracy demands and the necessity for high-resolution physics for large-scale problems. As a result, the future of predictive scientific AI is still largely untapped. TAPS provides a promising solution to these emerging challenges by delivering a highly accurate, exceptionally fast, and memory and storage efficient scientific AI model." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 60, + 580, + 531, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 580, + 531, + 628 + ], + "spans": [ + { + "bbox": [ + 60, + 580, + 531, + 628 + ], + "type": "text", + "content": "In conclusion, the proposed TAPS computational framework offers substantial enhancements in computational efficiency, memory consumption, and storage demands for science and engineering simulations. As a result, TAPS paves a new path to address future challenges in ultra large-scale simulations pertinent to complex predictive scientific AI models." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 61, + 645, + 427, + 658 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 645, + 427, + 658 + ], + "spans": [ + { + "bbox": [ + 61, + 645, + 427, + 658 + ], + "type": "text", + "content": "Appendix A. Solving nonlinear S-P-T PDEs: solution dependent material properties" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 60, + 666, + 531, + 728 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 666, + 531, + 728 + ], + "spans": [ + { + "bbox": [ + 60, + 666, + 531, + 728 + ], + "type": "text", + "content": "The algorithm 1 works for linear PDEs where the PDE coefficients remain constant. However, in many engineering applications, the PDE coefficients can be solution-dependent. For instance, material properties such as heat conductivity and heat capacity can be a function of temperature in additive manufacturing. 
In these cases, the PDE becomes non-linear which requires an efficient solution scheme. In this section, we solely focus on the space-time problems formulation of a nonlinear PDE. As a result, the product of density and heat capacity " + }, + { + "bbox": [ + 60, + 666, + 531, + 728 + ], + "type": "inline_equation", + "content": "\\rho c_{p}(u)" + }, + { + "bbox": [ + 60, + 666, + 531, + 728 + ], + "type": "text", + "content": " and conductivity" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 290, + 739, + 304, + 749 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 739, + 304, + 749 + ], + "spans": [ + { + "bbox": [ + 290, + 739, + 304, + 749 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 135, + 111, + 461, + 306 + ], + "blocks": [ + { + "bbox": [ + 135, + 111, + 461, + 306 + ], + "lines": [ + { + "bbox": [ + 135, + 111, + 461, + 306 + ], + "spans": [ + { + "bbox": [ + 135, + 111, + 461, + 306 + ], + "type": "image", + "image_path": "5ac20a39f47685ebb9378a0c4e12e417a603d78fe820a3ac37f08ecdb86a0b5e.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 210, + 320, + 383, + 331 + ], + "lines": [ + { + "bbox": [ + 210, + 320, + 383, + 331 + ], + "spans": [ + { + "bbox": [ + 210, + 320, + 383, + 331 + ], + "type": "text", + "content": "Figure 17: Evolution of AI models for different tasks" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 60, + 350, + 532, + 375 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 350, + 532, + 375 + ], + "spans": [ + { + "bbox": [ + 60, + 350, + 532, + 375 + ], + "type": "inline_equation", + "content": "k(u)" + }, + { + "bbox": [ + 60, + 350, + 532, + 375 + ], + "type": "text", + "content": " are 
no longer temperature independent as in Eq. 18. Similar to the linear problem shown before, the generalized Galerkin weak form is used to solve this equation." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 121, + 381, + 531, + 410 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 381, + 531, + 410 + ], + "spans": [ + { + "bbox": [ + 121, + 381, + 531, + 410 + ], + "type": "interline_equation", + "content": "\\int_ {\\Omega} \\delta u \\nabla_ {x _ {t}} \\left[ \\rho c _ {p} (u) u \\right] d \\Omega - \\int_ {\\Omega} \\nabla_ {x _ {s}} \\delta u \\cdot k (u) \\nabla_ {x _ {s}} u d \\Omega + \\int_ {\\partial \\Omega_ {x _ {s}} \\otimes \\Omega_ {t}} \\delta u \\boldsymbol {q} \\cdot \\boldsymbol {n} d s d \\Omega_ {x _ {t}} = \\int_ {\\Omega} \\delta u b d \\Omega \\tag {A.1}", + "image_path": "db080711355133edb1442b6cf73a6f27394199d020bb9339f1f18dfa0304bcae.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 60, + 412, + 532, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 412, + 532, + 449 + ], + "spans": [ + { + "bbox": [ + 60, + 412, + 532, + 449 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 60, + 412, + 532, + 449 + ], + "type": "inline_equation", + "content": "\\mathbf{q}" + }, + { + "bbox": [ + 60, + 412, + 532, + 449 + ], + "type": "text", + "content": " is the heat flux on the Neumann boundary. Since Eq. A.1 is a space-time integral, classical time-stepping based methods can't be directly used to update material parameters. Here we propose a global-local approach similar to the Large Time Increment (LATIN) method to effectively solve the above equations [52]." 
+ } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 226, + 460, + 365, + 589 + ], + "blocks": [ + { + "bbox": [ + 226, + 460, + 365, + 589 + ], + "lines": [ + { + "bbox": [ + 226, + 460, + 365, + 589 + ], + "spans": [ + { + "bbox": [ + 226, + 460, + 365, + 589 + ], + "type": "image", + "image_path": "200be0b0946761ab17f9c2c59f91eed1c6bccd651b97031cd574a612f7e34b9a.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 195, + 600, + 399, + 611 + ], + "lines": [ + { + "bbox": [ + 195, + 600, + 399, + 611 + ], + "spans": [ + { + "bbox": [ + 195, + 600, + 399, + 611 + ], + "type": "text", + "content": "Figure A.18: Global-local approach for nonlinear TAPS solver" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 60, + 623, + 532, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 623, + 532, + 696 + ], + "spans": [ + { + "bbox": [ + 60, + 623, + 532, + 696 + ], + "type": "text", + "content": "As shown in Fig. A.18, we split the nonlinear problem into 2 stages, a linear global stage and a nonlinear local update stage. In the global stage, we assume the spatio-temporal " + }, + { + "bbox": [ + 60, + 623, + 532, + 696 + ], + "type": "inline_equation", + "content": "k(\\pmb{x}_s,x_t)" + }, + { + "bbox": [ + 60, + 623, + 532, + 696 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 60, + 623, + 532, + 696 + ], + "type": "inline_equation", + "content": "\\rho c_{p}(\\pmb{x}_{s},x_{t})" + }, + { + "bbox": [ + 60, + 623, + 532, + 696 + ], + "type": "text", + "content": " are known. As a result, we treat the global problem as a linear problem and obtain " + }, + { + "bbox": [ + 60, + 623, + 532, + 696 + ], + "type": "inline_equation", + "content": "u(x_{s},x_{t})" + }, + { + "bbox": [ + 60, + 623, + 532, + 696 + ], + "type": "text", + "content": " using the previously proposed method for linear problems. 
After " + }, + { + "bbox": [ + 60, + 623, + 532, + 696 + ], + "type": "inline_equation", + "content": "u(\\pmb{x}_s,x_t)" + }, + { + "bbox": [ + 60, + 623, + 532, + 696 + ], + "type": "text", + "content": " is updated in the global stage, we update " + }, + { + "bbox": [ + 60, + 623, + 532, + 696 + ], + "type": "inline_equation", + "content": "k(u)" + }, + { + "bbox": [ + 60, + 623, + 532, + 696 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 60, + 623, + 532, + 696 + ], + "type": "inline_equation", + "content": "\\rho c_{p}(u)" + }, + { + "bbox": [ + 60, + 623, + 532, + 696 + ], + "type": "text", + "content": " locally at each Gauss integration point according to material models " + }, + { + "bbox": [ + 60, + 623, + 532, + 696 + ], + "type": "inline_equation", + "content": "k(u)" + }, + { + "bbox": [ + 60, + 623, + 532, + 696 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 60, + 623, + 532, + 696 + ], + "type": "inline_equation", + "content": "\\rho c_{p}(u)" + }, + { + "bbox": [ + 60, + 623, + 532, + 696 + ], + "type": "text", + "content": ". We repeat the global-local iteration until the variation of " + }, + { + "bbox": [ + 60, + 623, + 532, + 696 + ], + "type": "inline_equation", + "content": "k(u)" + }, + { + "bbox": [ + 60, + 623, + 532, + 696 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 60, + 623, + 532, + 696 + ], + "type": "inline_equation", + "content": "\\rho c_{p}(u)" + }, + { + "bbox": [ + 60, + 623, + 532, + 696 + ], + "type": "text", + "content": " between consecutive iterations meets the convergence criteria. 
The algorithm is summarized in Algorithm 2:" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 290, + 739, + 302, + 748 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 739, + 302, + 748 + ], + "spans": [ + { + "bbox": [ + 290, + 739, + 302, + 748 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 67, + 124, + 531, + 269 + ], + "blocks": [ + { + "bbox": [ + 63, + 111, + 423, + 122 + ], + "lines": [ + { + "bbox": [ + 63, + 111, + 423, + 122 + ], + "spans": [ + { + "bbox": [ + 63, + 111, + 423, + 122 + ], + "type": "text", + "content": "Algorithm 2 Nonlinear TAPS solution scheme: PDE with solution dependent coefficients" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 67, + 124, + 531, + 269 + ], + "lines": [ + { + "bbox": [ + 67, + 124, + 531, + 269 + ], + "spans": [ + { + "bbox": [ + 67, + 124, + 531, + 269 + ], + "type": "text", + "content": "1: Initialize solution matrices with random values and update " + }, + { + "bbox": [ + 67, + 124, + 531, + 269 + ], + "type": "inline_equation", + "content": "\\rho_{c_p}(\\pmb{x}_s, x_t)" + }, + { + "bbox": [ + 67, + 124, + 531, + 269 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 124, + 531, + 269 + ], + "type": "inline_equation", + "content": "k(\\pmb{x}_s, x_t)" + }, + { + "bbox": [ + 67, + 124, + 531, + 269 + ], + "type": "text", + "content": ". 
\n2: for iter" + }, + { + "bbox": [ + 67, + 124, + 531, + 269 + ], + "type": "inline_equation", + "content": "_\\gamma" + }, + { + "bbox": [ + 67, + 124, + 531, + 269 + ], + "type": "text", + "content": " = 1 to iter" + }, + { + "bbox": [ + 67, + 124, + 531, + 269 + ], + "type": "inline_equation", + "content": "_\\gamma_{max}" + }, + { + "bbox": [ + 67, + 124, + 531, + 269 + ], + "type": "text", + "content": " do \n3: for iter = 1 to iter" + }, + { + "bbox": [ + 67, + 124, + 531, + 269 + ], + "type": "inline_equation", + "content": "_max" + }, + { + "bbox": [ + 67, + 124, + 531, + 269 + ], + "type": "text", + "content": " do \n4: Update " + }, + { + "bbox": [ + 67, + 124, + 531, + 269 + ], + "type": "inline_equation", + "content": "\\rho_{c_p}(\\pmb{x}_s, x_t)" + }, + { + "bbox": [ + 67, + 124, + 531, + 269 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 124, + 531, + 269 + ], + "type": "inline_equation", + "content": "k(\\pmb{x}_s, x_t)" + }, + { + "bbox": [ + 67, + 124, + 531, + 269 + ], + "type": "text", + "content": " \n5: Use Algorithm 1 to solve solution " + }, + { + "bbox": [ + 67, + 124, + 531, + 269 + ], + "type": "inline_equation", + "content": "u(\\pmb{x}_s, x_t)" + }, + { + "bbox": [ + 67, + 124, + 531, + 269 + ], + "type": "text", + "content": " \n6: for " + }, + { + "bbox": [ + 67, + 124, + 531, + 269 + ], + "type": "inline_equation", + "content": "i = 1" + }, + { + "bbox": [ + 67, + 124, + 531, + 269 + ], + "type": "text", + "content": " to integration points do \n7: " + }, + { + "bbox": [ + 67, + 124, + 531, + 269 + ], + "type": "inline_equation", + "content": "\\rho_{c_p}(\\pmb{x}_s, x_t) = \\rho_{c_p}[u(\\pmb{x}_s, x_t)]" + }, + { + "bbox": [ + 67, + 124, + 531, + 269 + ], + "type": "text", + "content": " \n8: " + }, + { + "bbox": [ + 67, + 124, + 531, + 269 + ], + "type": "inline_equation", + "content": "k(\\pmb{x}_s, x_t) = k[u(\\pmb{x}_s, x_t)]" + }, + { + "bbox": [ + 67, + 124, + 531, + 269 + ], + "type": "text", + 
"content": " \n9: end for \n10: Check convergence \n11: end for \n12: end for" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "code_body" + } + ], + "index": 1, + "sub_type": "algorithm" + }, + { + "bbox": [ + 61, + 289, + 205, + 301 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 289, + 205, + 301 + ], + "spans": [ + { + "bbox": [ + 61, + 289, + 205, + 301 + ], + "type": "text", + "content": "Appendix B. Mode compression" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 60, + 309, + 531, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 309, + 531, + 346 + ], + "spans": [ + { + "bbox": [ + 60, + 309, + 531, + 346 + ], + "type": "text", + "content": "One significant challenge in multi-track simulation in LPBF is the huge number of time steps required. It is impossible to resolve all the time steps with only a single space-time (S-T) slab. Hence, we split the whole layer scan into multiple S-T slabs and relate each S-T slab using the following equation." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 202, + 351, + 531, + 366 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 202, + 351, + 531, + 366 + ], + "spans": [ + { + "bbox": [ + 202, + 351, + 531, + 366 + ], + "type": "interline_equation", + "content": "{ } ^ { [ \\mathcal { T } + 1 ] } u ( \\boldsymbol { x } _ { s } , x _ { t } ) = { } ^ { [ \\mathcal { T } ] } u ( \\boldsymbol { x } _ { s } , x _ { t } ^ { m a x } ) + { } ^ { [ \\mathcal { T } + 1 ] } u _ { 0 } ( \\boldsymbol { x } _ { s } , x _ { t } ) \\tag {B.1}", + "image_path": "555f4436ae49bf816ef7e5a627c0b85a2e2e913055dd1ca634be549a8ad7c3e9.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 60, + 374, + 531, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 374, + 531, + 445 + ], + "spans": [ + { + "bbox": [ + 60, + 374, + 531, + 445 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 60, + 374, + 531, + 445 + ], + "type": "inline_equation", + "content": "[T + 1]u(\\pmb{x}_s, x_t)" + }, + { + "bbox": [ + 60, + 374, + 531, + 445 + ], + "type": "text", + "content": " refers to the solution at " + }, + { + "bbox": [ + 60, + 374, + 531, + 445 + ], + "type": "inline_equation", + "content": "(\\mathcal{T} + 1)" + }, + { + "bbox": [ + 60, + 374, + 531, + 445 + ], + "type": "text", + "content": "-th space-time slab; " + }, + { + "bbox": [ + 60, + 374, + 531, + 445 + ], + "type": "inline_equation", + "content": "[T + 1]u_0(\\pmb{x}_s, x_t)" + }, + { + "bbox": [ + 60, + 374, + 531, + 445 + ], + "type": "text", + "content": " refers to the solution of the homogeneous initial value problem of " + }, + { + "bbox": [ + 60, + 374, + 531, + 445 + ], + "type": "inline_equation", + "content": "(\\mathcal{T} + 1)" + }, + { + "bbox": [ + 60, + 374, + 531, + 445 + ], + "type": "text", + "content": "-th space-time slab; " + }, + { + "bbox": [ + 60, + 374, + 531, + 445 + ], + "type": "inline_equation", + "content": "[T]u(\\pmb{x}_s, 
x_t^{max})" + }, + { + "bbox": [ + 60, + 374, + 531, + 445 + ], + "type": "text", + "content": " is the solution of " + }, + { + "bbox": [ + 60, + 374, + 531, + 445 + ], + "type": "inline_equation", + "content": "\\mathcal{T}" + }, + { + "bbox": [ + 60, + 374, + 531, + 445 + ], + "type": "text", + "content": "-th space-time slab at the last time increment. As can be seen from Eq. B.1, we impose the last time increment solution of previous space-time slab as the initial condition for the next space-time slab. This is efficiently implemented by adding the TD form of the last increment as new modes in the current space-slab solution. However, for large-scale computations requiring thousands of slabs, directly concatenating modes can result in substantial storage demands." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 60, + 445, + 531, + 507 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 445, + 531, + 507 + ], + "spans": [ + { + "bbox": [ + 60, + 445, + 531, + 507 + ], + "type": "text", + "content": "In mode compression, we aim to compress the number of modes for " + }, + { + "bbox": [ + 60, + 445, + 531, + 507 + ], + "type": "inline_equation", + "content": "\\left[{}^{\\mathcal{T}}\\right]u(\\boldsymbol{x}_s,x_t^{max})" + }, + { + "bbox": [ + 60, + 445, + 531, + 507 + ], + "type": "text", + "content": " because of its spatial dependence and naturally low-dimensional structure. Consequently, it can be effectively decomposed using only a few modes. 
Denote the TD form of the last time step solution of the previous space-time slab as " + }, + { + "bbox": [ + 60, + 445, + 531, + 507 + ], + "type": "inline_equation", + "content": "\\left[{}^{\\mathcal{T}}\\right]u(\\boldsymbol{x}_s,x_t^{max})^{TD}" + }, + { + "bbox": [ + 60, + 445, + 531, + 507 + ], + "type": "text", + "content": ", we aim to find a compact form that can be represented with much fewer number of modes " + }, + { + "bbox": [ + 60, + 445, + 531, + 507 + ], + "type": "inline_equation", + "content": "\\left[{}^{\\mathcal{T}}\\right]u(\\boldsymbol{x}_s,x_t^{max})_F^{TD}" + }, + { + "bbox": [ + 60, + 445, + 531, + 507 + ], + "type": "text", + "content": ". For notation simplicity, we omit " + }, + { + "bbox": [ + 60, + 445, + 531, + 507 + ], + "type": "inline_equation", + "content": "x_{t}^{max}" + }, + { + "bbox": [ + 60, + 445, + 531, + 507 + ], + "type": "text", + "content": " in the following equations. Consequently, the mode compression problem can be written as:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 238, + 512, + 530, + 528 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 238, + 512, + 530, + 528 + ], + "spans": [ + { + "bbox": [ + 238, + 512, + 530, + 528 + ], + "type": "interline_equation", + "content": "{ } ^ { [ \\mathcal { T } ] } u ( \\boldsymbol { x } _ { s } ) _ { F } ^ { T D } - { } ^ { [ \\mathcal { T } ] } u ( \\boldsymbol { x } _ { s } ) ^ { T D } = 0 \\tag {B.2}", + "image_path": "f365e38f426f2de7a48594f85771730534fee89a6b10a71c5d57f9955b1535a3.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 61, + 534, + 352, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 534, + 352, + 548 + ], + "spans": [ + { + "bbox": [ + 61, + 534, + 352, + 548 + ], + "type": "text", + "content": "The weighted sum residual form is used to approximate " + }, + { + "bbox": [ + 61, + 534, + 352, + 548 + ], + "type": "inline_equation", + "content": "[T]u(\\pmb{x}_s,x_{t - 
1})_F^{TD}" + }, + { + "bbox": [ + 61, + 534, + 352, + 548 + ], + "type": "text", + "content": " .." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 191, + 555, + 531, + 581 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 191, + 555, + 531, + 581 + ], + "spans": [ + { + "bbox": [ + 191, + 555, + 531, + 581 + ], + "type": "interline_equation", + "content": "\\int_ {\\Omega_ {x}} \\delta^ {[ \\mathcal {T} ]} u \\left(\\boldsymbol {x} _ {s}\\right) _ {F} ^ {T D} \\cdot \\left[ ^ {[ \\mathcal {T} ]} u \\left(\\boldsymbol {x} _ {s}\\right) _ {F} ^ {T D} - ^ {[ \\mathcal {T} ]} u \\left(\\boldsymbol {x} _ {s}\\right) ^ {T D} \\right] d \\boldsymbol {x} _ {s} = 0 \\tag {B.3}", + "image_path": "9ca13b889ad3a6cd7cf0bfd6b1e8aa69a5feeb8a4afe538f9672b908de4b2533.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 61, + 587, + 271, + 600 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 587, + 271, + 600 + ], + "spans": [ + { + "bbox": [ + 61, + 587, + 271, + 600 + ], + "type": "text", + "content": "Eq. B.3 can be efficiently solved using Algorithm 1." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 61, + 616, + 185, + 629 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 616, + 185, + 629 + ], + "spans": [ + { + "bbox": [ + 61, + 616, + 185, + 629 + ], + "type": "text", + "content": "Appendix C. Element birth" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 60, + 636, + 531, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 636, + 531, + 708 + ], + "spans": [ + { + "bbox": [ + 60, + 636, + 531, + 708 + ], + "type": "text", + "content": "In the LPBF process, once the printing is finished for the current layer, a new layer of powder is deposited on top of the existing layer. This necessitates modeling the new layer with additional elements. Various studies have investigated different approaches for element birth techniques. 
While some researchers opt for activating small sections of geometry incrementally, others apply the technique by spreading the deposition across an entire layer or multiple layers simultaneously. The most widely adopted approach is to activate an entire layer and then scan the heat source over it [53]." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 60, + 708, + 531, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 708, + 531, + 733 + ], + "spans": [ + { + "bbox": [ + 60, + 708, + 531, + 733 + ], + "type": "text", + "content": "In TAPS, we propose a new scheme to generate new layers of elements. In this scheme, new elements are added only in the " + }, + { + "bbox": [ + 60, + 708, + 531, + 733 + ], + "type": "inline_equation", + "content": "x_{3}" + }, + { + "bbox": [ + 60, + 708, + 531, + 733 + ], + "type": "text", + "content": " direction, since the plan dimension doesn't change in the printing process. Therefore, as opposed to" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 290, + 739, + 302, + 748 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 739, + 302, + 748 + ], + "spans": [ + { + "bbox": [ + 290, + 739, + 302, + 748 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "bbox": [ + 60, + 111, + 532, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 111, + 532, + 148 + ], + "spans": [ + { + "bbox": [ + 60, + 111, + 532, + 148 + ], + "type": "text", + "content": "full-scale classical numerical methods, TAPS enables marginal overhead in generating new layers of elements with extra grid points added only in the " + }, + { + "bbox": [ + 60, + 111, + 532, + 148 + ], + "type": "inline_equation", + "content": "x_{3}" + }, + { + "bbox": [ + 60, + 111, + 532, + 148 + ], + "type": "text", + "content": " dimension. 
The solution scheme for multi-layer multi-track LPBF simulation using TAPS can be summarized in Algorithm 3." + } + ] + } + ], + "index": 0 + }, + { + "type": "code", + "bbox": [ + 67, + 173, + 390, + 364 + ], + "blocks": [ + { + "bbox": [ + 63, + 158, + 333, + 171 + ], + "lines": [ + { + "bbox": [ + 63, + 158, + 333, + 171 + ], + "spans": [ + { + "bbox": [ + 63, + 158, + 333, + 171 + ], + "type": "text", + "content": "Algorithm 3 Multi-layer multi-track LPBF simulation using TAPS" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 67, + 173, + 390, + 364 + ], + "lines": [ + { + "bbox": [ + 67, + 173, + 390, + 364 + ], + "spans": [ + { + "bbox": [ + 67, + 173, + 390, + 364 + ], + "type": "text", + "content": "1: for " + }, + { + "bbox": [ + 67, + 173, + 390, + 364 + ], + "type": "inline_equation", + "content": "n_{layer} = 1" + }, + { + "bbox": [ + 67, + 173, + 390, + 364 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 67, + 173, + 390, + 364 + ], + "type": "inline_equation", + "content": "n_{layerTotal}" + }, + { + "bbox": [ + 67, + 173, + 390, + 364 + ], + "type": "text", + "content": " do \n2: Initialize solution matrices with random values for the new layer \n3: Compute the updated stiffness matrix and force vector for the " + }, + { + "bbox": [ + 67, + 173, + 390, + 364 + ], + "type": "inline_equation", + "content": "x_{3}" + }, + { + "bbox": [ + 67, + 173, + 390, + 364 + ], + "type": "text", + "content": " direction \n4: for " + }, + { + "bbox": [ + 67, + 173, + 390, + 364 + ], + "type": "inline_equation", + "content": "n_{track} = 1" + }, + { + "bbox": [ + 67, + 173, + 390, + 364 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 67, + 173, + 390, + 364 + ], + "type": "inline_equation", + "content": "n_{tracktotal}" + }, + { + "bbox": [ + 67, + 173, + 390, + 364 + ], + "type": "text", + "content": " do \n5: for " + }, + { + "bbox": [ + 67, + 173, + 390, + 364 + ], + "type": 
"inline_equation", + "content": "iter = 1" + }, + { + "bbox": [ + 67, + 173, + 390, + 364 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 67, + 173, + 390, + 364 + ], + "type": "inline_equation", + "content": "iter_{max}" + }, + { + "bbox": [ + 67, + 173, + 390, + 364 + ], + "type": "text", + "content": " do \n6: for " + }, + { + "bbox": [ + 67, + 173, + 390, + 364 + ], + "type": "inline_equation", + "content": "d = 1" + }, + { + "bbox": [ + 67, + 173, + 390, + 364 + ], + "type": "text", + "content": " to dimension do \n7: Compute solution vectors according to Algorithm 1 or 2 \n8: end for \n9: Check convergence \n10: end for \n11: Compress modes \n12: Concatenate compressed modes to previous tracks as new modes \n13: end for \n14: Compress modes \n15: Concatenate compressed modes to previous layers as new modes \n16: end for" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "code_body" + } + ], + "index": 2, + "sub_type": "algorithm" + }, + { + "bbox": [ + 62, + 395, + 111, + 407 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 395, + 111, + 407 + ], + "spans": [ + { + "bbox": [ + 62, + 395, + 111, + 407 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 63, + 415, + 532, + 728 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 66, + 415, + 531, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 415, + 531, + 434 + ], + "spans": [ + { + "bbox": [ + 66, + 415, + 531, + 434 + ], + "type": "text", + "content": "[1] Zongyi Li, Nikola Kovachki, Kamyar Azizzadenesheli, Burigede Liu, Kaushik Bhattacharya, Andrew Stuart, and Anima Anandkumar. Fourier neural operator for parametric partial differential equations. arXiv preprint arXiv:2010.08895, 2020." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 66, + 435, + 531, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 435, + 531, + 453 + ], + "spans": [ + { + "bbox": [ + 66, + 435, + 531, + 453 + ], + "type": "text", + "content": "[2] Owen Huang, Sourav Saha, Jiachen Guo, and Wing Kam Liu. An introduction to kernel and operator learning methods for homogenization by self-consistent clustering analysis. Computational Mechanics, 72(1):195-219, 2023." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 66, + 454, + 518, + 463 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 454, + 518, + 463 + ], + "spans": [ + { + "bbox": [ + 66, + 454, + 518, + 463 + ], + "type": "text", + "content": "[3] Can AI Solve Science? https://writings.sthenwolfram.com/2024/03/can-ai-solve-science/. [Accessed 03-04-2025]." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 66, + 464, + 531, + 481 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 464, + 531, + 481 + ], + "spans": [ + { + "bbox": [ + 66, + 464, + 531, + 481 + ], + "type": "text", + "content": "[4] A new golden age of discovery — deepmind.google. https://deepmind.google/public-policy/ai-for-science/. [Accessed 03-04-2025]." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 66, + 482, + 532, + 501 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 482, + 532, + 501 + ], + "spans": [ + { + "bbox": [ + 66, + 482, + 532, + 501 + ], + "type": "text", + "content": "[5] Wing Kam Liu, Shaofan Li, and Harold S Park. Eighty years of the finite element method: Birth, evolution, and future. Archives of Computational Methods in Engineering, 29(6):4431-4453, 2022." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 66, + 502, + 531, + 528 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 502, + 531, + 528 + ], + "spans": [ + { + "bbox": [ + 66, + 502, + 531, + 528 + ], + "type": "text", + "content": "[6] Ye Lu, Hengyang Li, Lei Zhang, Chanwook Park, Satyajit Mojumder, Stefan Knapik, Zhongsheng Sang, Shaoqiang Tang, Daniel W Apley, Gregory J Wagner, et al. Convolution hierarchical deep-learning neural networks (c-hidenn): finite elements, isogeometric analysis, tensor decomposition, and beyond. Computational Mechanics, 72(2):333-362, 2023." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 66, + 529, + 531, + 548 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 529, + 531, + 548 + ], + "spans": [ + { + "bbox": [ + 66, + 529, + 531, + 548 + ], + "type": "text", + "content": "[7] Maziar Raissi, Paris Perdikaris, and George E Karniadakis. Physics-informed neural networks: A deep learning framework for solving forward and inverse problems involving nonlinear partial differential equations. Journal of Computational physics, 378:686-707, 2019." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 66, + 549, + 531, + 566 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 549, + 531, + 566 + ], + "spans": [ + { + "bbox": [ + 66, + 549, + 531, + 566 + ], + "type": "text", + "content": "[8] Enrui Zhang, Ming Dao, George Em Karniadakis, and Subra Suresh. Analyses of internal structures and defects in materials using physics-informed neural networks. Science advances, 8(7):eabk0644, 2022." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 66, + 567, + 531, + 586 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 567, + 531, + 586 + ], + "spans": [ + { + "bbox": [ + 66, + 567, + 531, + 586 + ], + "type": "text", + "content": "[9] Nick McGreavy and Ammar Hakim. 
Weak baselines and reporting biases lead to overoptimism in machine learning for fluid-related partial differential equations. Nature Machine Intelligence, 6(10):1256-1269, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 63, + 587, + 531, + 604 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 587, + 531, + 604 + ], + "spans": [ + { + "bbox": [ + 63, + 587, + 531, + 604 + ], + "type": "text", + "content": "[10] Junwoo Cho, Seungtae Nam, Hyunmo Yang, Seok-Bae Yun, Youngjoon Hong, and Eunbyung Park. Separable physics-informed neural networks. Advances in Neural Information Processing Systems, 36, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 63, + 605, + 531, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 605, + 531, + 632 + ], + "spans": [ + { + "bbox": [ + 63, + 605, + 531, + 632 + ], + "type": "text", + "content": "[11] Chanwook Park, Sourav Saha, Jiachen Guo, Hantao Zhang, Xiaoyu Xie, Miguel A Bessa, Dong Qian, Wei Chen, Gregory J Wagner, Jian Cao, et al. Engineering software 2.0 by interpolating neural networks: unifying training, solving, and calibration. arXiv preprint arXiv:2404.10296, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 63, + 633, + 531, + 651 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 633, + 531, + 651 + ], + "spans": [ + { + "bbox": [ + 63, + 633, + 531, + 651 + ], + "type": "text", + "content": "[12] Lei Zhang, Lin Cheng, Hengyang Li, Jiaying Gao, Cheng Yu, Reno Domel, Yang Yang, Shaoqiang Tang, and Wing Kam Liu. Hierarchical deep-learning neural networks: finite elements and beyond. Computational Mechanics, 67:207-230, 2021." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 63, + 652, + 531, + 671 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 652, + 531, + 671 + ], + "spans": [ + { + "bbox": [ + 63, + 652, + 531, + 671 + ], + "type": "text", + "content": "[13] Lei Zhang, Ye Lu, Shaoqiang Tang, and Wing Kam Liu. Hidenn-td: reduced-order hierarchical deep learning neural networks. Computer Methods in Applied Mechanics and Engineering, 389:114414, 2022." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 63, + 671, + 531, + 698 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 671, + 531, + 698 + ], + "spans": [ + { + "bbox": [ + 63, + 671, + 531, + 698 + ], + "type": "text", + "content": "[14] Chanwook Park, Ye Lu, Sourav Saha, Tianju Xue, Jiachen Guo, Satyajit Mojumder, Daniel W Apley, Gregory J Wagner, and Wing Kam Liu. Convolution hierarchical deep-learning neural network (c-hidenn) with graphics processing unit (gpu) acceleration. Computational Mechanics, 72(2):383-409, 2023." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 63, + 699, + 531, + 728 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 699, + 531, + 728 + ], + "spans": [ + { + "bbox": [ + 63, + 699, + 531, + 728 + ], + "type": "text", + "content": "[15] Sourav Saha, Zhengtao Gan, Lin Cheng, Jiaying Gao, Orion L Kafka, Xiaoyu Xie, Hengyang Li, Mahsa Tajdari, H Alicia Kim, and Wing Kam Liu. Hierarchical deep learning neural network (hidenn): an artificial intelligence (ai) framework for computational science and engineering. Computer Methods in Applied Mechanics and Engineering, 373:113452, 2021." 
+ } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 290, + 739, + 302, + 748 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 739, + 302, + 748 + ], + "spans": [ + { + "bbox": [ + 290, + 739, + 302, + 748 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "bbox": [ + 63, + 112, + 531, + 728 + ], + "type": "list", + "angle": 0, + "index": 31, + "blocks": [ + { + "bbox": [ + 63, + 112, + 531, + 132 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 112, + 531, + 132 + ], + "spans": [ + { + "bbox": [ + 63, + 112, + 531, + 132 + ], + "type": "text", + "content": "[16] Yingjian Liu, Chanwook Park, Ye Lu, Satyajit Mojumder, Wing Kam Liu, and Dong Qian. Hidenn-fem: a seamless machine learning approach to nonlinear finite element analysis. Computational Mechanics, 72(1):173-194, 2023." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 63, + 133, + 530, + 151 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 133, + 530, + 151 + ], + "spans": [ + { + "bbox": [ + 63, + 133, + 530, + 151 + ], + "type": "text", + "content": "[17] Francisco Chinesta, Amine Ammar, Adrien Leygue, and Roland Keunings. An overview of the proper generalized decomposition with applications in computational rheology. Journal of Non-Newtonian Fluid Mechanics, 166(11):578-592, 2011." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 63, + 152, + 530, + 169 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 152, + 530, + 169 + ], + "spans": [ + { + "bbox": [ + 63, + 152, + 530, + 169 + ], + "type": "text", + "content": "[18] Francisco Chinesta, Roland Keunings, and Adrien Leygue. The proper generalized decomposition for advanced numerical simulations: a primer. Springer Science & Business Media, 2013." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 63, + 170, + 530, + 188 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 170, + 530, + 188 + ], + "spans": [ + { + "bbox": [ + 63, + 170, + 530, + 188 + ], + "type": "text", + "content": "[19] Anthony Nouy. A priori model reduction through proper generalized decomposition for solving time-dependent partial differential equations. Computer Methods in Applied Mechanics and Engineering, 199(23-24):1603-1626, 2010." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 63, + 189, + 530, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 189, + 530, + 217 + ], + "spans": [ + { + "bbox": [ + 63, + 189, + 530, + 217 + ], + "type": "text", + "content": "[20] Hengyang Li, Stefan Knapik, Yangfan Li, Chanwook Park, Jiachen Guo, Satyajit Mojumder, Ye Lu, Wei Chen, Daniel W Apley, and Wing Kam Liu. Convolution hierarchical deep-learning neural network tensor decomposition (c-hidenn-td) for high-resolution topology optimization. Computational Mechanics, 72(2):363-382, 2023." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 63, + 218, + 530, + 236 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 218, + 530, + 236 + ], + "spans": [ + { + "bbox": [ + 63, + 218, + 530, + 236 + ], + "type": "text", + "content": "[21] Ehsan Kharazmi, Zhongqiang Zhang, and George Em Karniadakis. hp-vpinns: Variational physics-informed neural networks with domain decomposition. Computer Methods in Applied Mechanics and Engineering, 374:113547, 2021." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 63, + 237, + 530, + 245 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 237, + 530, + 245 + ], + "spans": [ + { + "bbox": [ + 63, + 237, + 530, + 245 + ], + "type": "text", + "content": "[22] Thomas JR Hughes. The finite element method: linear static and dynamic finite element analysis. Courier Corporation, 2003." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 63, + 246, + 448, + 254 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 246, + 448, + 254 + ], + "spans": [ + { + "bbox": [ + 63, + 246, + 448, + 254 + ], + "type": "text", + "content": "[23] Tamara G Kolda and Brett W Bader. Tensor decompositions and applications. SIAM review, 51(3):455-500, 2009." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 63, + 255, + 455, + 264 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 255, + 455, + 264 + ], + "spans": [ + { + "bbox": [ + 63, + 255, + 455, + 264 + ], + "type": "text", + "content": "[24] Junuthula Narasimha Reddy. An introduction to the finite element method, volume 3. McGraw-Hill New York, 2005." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 63, + 264, + 530, + 283 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 264, + 530, + 283 + ], + "spans": [ + { + "bbox": [ + 63, + 264, + 530, + 283 + ], + "type": "text", + "content": "[25] Thomas JR Hughes and Gregory M Hulbert. Space-time finite element methods for elastodynamics: formulations and error estimates. Computer methods in applied mechanics and engineering, 66(3):339-363, 1988." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 63, + 284, + 530, + 302 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 284, + 530, + 302 + ], + "spans": [ + { + "bbox": [ + 63, + 284, + 530, + 302 + ], + "type": "text", + "content": "[26] Wing Kam Liu, Ted Belytschko, and A. Mani. Probabilistic finite elements for nonlinear structural dynamics. Computer Methods in Applied Mechanics and Engineering, 56(1):61-81, 1986." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 63, + 303, + 530, + 320 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 303, + 530, + 320 + ], + "spans": [ + { + "bbox": [ + 63, + 303, + 530, + 320 + ], + "type": "text", + "content": "[27] Wing Kam Liu, Ted Belytschko, and A. Mani. Random field finite elements. International Journal for Numerical Methods in Engineering, 23(3):1831-1845, 1986." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 63, + 321, + 530, + 339 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 321, + 530, + 339 + ], + "spans": [ + { + "bbox": [ + 63, + 321, + 530, + 339 + ], + "type": "text", + "content": "[28] Shuheng Liao, Ashkan Golgoon, Mojtaba Mozaffar, and Jian Cao. Efficientgpu-accelerated thermomechanical solver for residual stress prediction in additive manufacturing. Computational Mechanics, 71(5):879-893, 2023." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 63, + 340, + 530, + 367 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 340, + 530, + 367 + ], + "spans": [ + { + "bbox": [ + 63, + 340, + 530, + 367 + ], + "type": "text", + "content": "[29] Amine Ammar, Bechir Mokdad, Francisco Chinesta, and Roland Keunings. A new family of solvers for some classes of multidimensional partial differential equations encountered in kinetic theory modeling of complex fluids. Journal of non-Newtonian fluid Mechanics, 139(3): 153-176, 2006." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 63, + 368, + 530, + 386 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 368, + 530, + 386 + ], + "spans": [ + { + "bbox": [ + 63, + 368, + 530, + 386 + ], + "type": "text", + "content": "[30] Abderrahman Bouhamidi and Khalide Jbilou. A note on the numerical approximate solutions for generalized sylvester matrix equations with applications. Applied Mathematics and Computation, 206(2):687-694, 2008." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 63, + 387, + 530, + 406 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 387, + 530, + 406 + ], + "spans": [ + { + "bbox": [ + 63, + 387, + 530, + 406 + ], + "type": "text", + "content": "[31] Ya-Jun Xie and Chang-Feng Ma. The scaling conjugate gradient iterative method for two types of linear matrix equations. Computers & Mathematics with Applications, 70(5):1098-1113, 2015." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 63, + 407, + 530, + 425 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 407, + 530, + 425 + ], + "spans": [ + { + "bbox": [ + 63, + 407, + 530, + 425 + ], + "type": "text", + "content": "[32] Ulrich Langer and Marco Zank. Efficient direct space-time finite element solvers for parabolic initial-boundary value problems in anisotropic sobolev spaces. SIAM Journal on Scientific Computing, 43(4):A2714-A2736, 2021." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 63, + 426, + 530, + 444 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 426, + 530, + 444 + ], + "spans": [ + { + "bbox": [ + 63, + 426, + 530, + 444 + ], + "type": "text", + "content": "[33] A Sh Agazhanov, DA Samoshkin, and Yu M Kozlovskii. Thermophysical properties of inconel 718 alloy. In Journal of Physics: Conference Series, volume 1382, page 012175. IOP Publishing, 2019." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 63, + 444, + 530, + 471 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 444, + 530, + 471 + ], + "spans": [ + { + "bbox": [ + 63, + 444, + 530, + 471 + ], + "type": "text", + "content": "[34] Tianju Xue, Shuheng Liao, Zhengtao Gan, Chanwook Park, Xiaoyu Xie, Wing Kam Liu, and Jian Cao. Jax-fem: A differentiablegpu-accelerated 3d finite element solver for automatic inverse design and mechanistic data science. Computer Physics Communications, 291: 108802, 2023." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 63, + 473, + 530, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 473, + 530, + 491 + ], + "spans": [ + { + "bbox": [ + 63, + 473, + 530, + 491 + ], + "type": "text", + "content": "[35] Joseph P Leonor and Gregory J Wagner. Go-melt: GPU-optimized multilevel execution of lpbf thermal simulations. Computer Methods in Applied Mechanics and Engineering, 426:116977, 2024." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 63, + 491, + 530, + 518 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 491, + 530, + 518 + ], + "spans": [ + { + "bbox": [ + 63, + 491, + 530, + 518 + ], + "type": "text", + "content": "[36] Dana Jacobsen, Julien Thibault, and Inanc Senocak. An mpi-cuda implementation for massively parallel incompressible flow computations on multi-gpu clusters. In 48th AIAA Aerospace Sciences Meeting Including the New Horizons Forum and Aerospace Exposition, page 522, 2010." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 63, + 519, + 530, + 538 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 519, + 530, + 538 + ], + "spans": [ + { + "bbox": [ + 63, + 519, + 530, + 538 + ], + "type": "text", + "content": "[37] Thomas JR Hughes, Gonzalo R Feijóo, Luca Mazzei, and Jean-Baptiste Quincy. The variational multiscale method—a paradigm for computational mechanics. Computer methods in applied mechanics and engineering, 166(1-2):3-24, 1998." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 63, + 539, + 530, + 557 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 539, + 530, + 557 + ], + "spans": [ + { + "bbox": [ + 63, + 539, + 530, + 557 + ], + "type": "text", + "content": "[38] Xingshi Wang and Lucy T Zhang. Modified immersed finite element method for fully-coupled fluid-structure interactions. Computer methods in applied mechanics and engineering, 267:150–169, 2013." 
+ } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 63, + 558, + 530, + 586 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 558, + 530, + 586 + ], + "spans": [ + { + "bbox": [ + 63, + 558, + 530, + 586 + ], + "type": "text", + "content": "[39] Wing Kam Liu, Yaling Liu, David Farrell, Lucy Zhang, X Sheldon Wang, Yoshio Fukui, Neelesh Patankar, Yongjie Zhang, Chandrajit Bajaj, Junghoon Lee, et al. Immersed finite element method and its applications to biological systems. Computer methods in applied mechanics and engineering, 195(13-16):1722-1749, 2006." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 63, + 587, + 530, + 603 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 587, + 530, + 603 + ], + "spans": [ + { + "bbox": [ + 63, + 587, + 530, + 603 + ], + "type": "text", + "content": "[40] Wing Kam Liu, Do Wan Kim, and Shaoqiang Tang. Mathematical foundations of the immersed finite element method. Computational Mechanics, 39:211-222, 2007." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 63, + 605, + 530, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 605, + 530, + 624 + ], + "spans": [ + { + "bbox": [ + 63, + 605, + 530, + 624 + ], + "type": "text", + "content": "[41] Adrian M Kopacz, Woon-Hong Yeo, Jae-Hyun Chung, and Wing Kam Liu. Nanoscale sensor analysis using the immersed molecular electrokinetic finite element method. Nanoscale, 4(16):5189-5194, 2012." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 63, + 624, + 530, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 624, + 530, + 643 + ], + "spans": [ + { + "bbox": [ + 63, + 624, + 530, + 643 + ], + "type": "text", + "content": "[42] Sai Karthikeya Vemuri, Tim Büchner, Julia Niebling, and Joachim Denzler. Functional tensor decompositions for physics-informed neural networks. In International Conference on Pattern Recognition, pages 32-46. Springer, 2025." 
+ } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 63, + 644, + 530, + 661 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 644, + 530, + 661 + ], + "spans": [ + { + "bbox": [ + 63, + 644, + 530, + 661 + ], + "type": "text", + "content": "[43] Ziming Liu, Yixuan Wang, Sachin Vaidya, Fabian Ruehle, James Halverson, Marin Soljačić, Thomas Y Hou, and Max Tegmark. Kan: Kolmogorov-arnold networks. arXiv preprint arXiv:2404.19756, 2024." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 63, + 662, + 530, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 662, + 530, + 689 + ], + "spans": [ + { + "bbox": [ + 63, + 662, + 530, + 689 + ], + "type": "text", + "content": "[44] Jiachen Guo, Xiaoyu Xie, Chanwook Park, Hantao Zhang, Matthew Politis, Gino Domel, T.J.R Hughes, and Wing Kam Liu. Interpolation neural network-tensor decomposition (inn-td): a scalable and interpretable approach for large-scale physics-based problems. arXiv preprint arXiv:2503.02041, 2025." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 63, + 690, + 530, + 708 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 690, + 530, + 708 + ], + "spans": [ + { + "bbox": [ + 63, + 690, + 530, + 708 + ], + "type": "text", + "content": "[45] Vincent Sitzmann, Julien Martel, Alexander Bergman, David Lindell, and Gordon Wetzstein. Implicit neural representations with periodic activation functions. Advances in neural information processing systems, 33:7462-7473, 2020." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 63, + 709, + 530, + 728 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 709, + 530, + 728 + ], + "spans": [ + { + "bbox": [ + 63, + 709, + 530, + 728 + ], + "type": "text", + "content": "[46] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. 
In Proceedings of the 2019 conference of the North American chapter of the association for computational linguistics: human" + } + ] + } + ], + "index": 30 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 291, + 740, + 302, + 748 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 740, + 302, + 748 + ], + "spans": [ + { + "bbox": [ + 291, + 740, + 302, + 748 + ], + "type": "text", + "content": "26" + } + ] + } + ], + "index": 32 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 25 + }, + { + "para_blocks": [ + { + "bbox": [ + 63, + 112, + 531, + 236 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 80, + 112, + 347, + 122 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 112, + 347, + 122 + ], + "spans": [ + { + "bbox": [ + 80, + 112, + 347, + 122 + ], + "type": "text", + "content": "language technologies, volume 1 (long and short papers), pages 4171-4186, 2019." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 63, + 123, + 531, + 132 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 123, + 531, + 132 + ], + "spans": [ + { + "bbox": [ + 63, + 123, + 531, + 132 + ], + "type": "text", + "content": "[47] Alec Radford, Karthik Narasimhan, Tim Salimans, Ilya Sutskever, et al. Improving language understanding by generative pre-training. 2018." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 63, + 133, + 530, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 133, + 530, + 150 + ], + "spans": [ + { + "bbox": [ + 63, + 133, + 530, + 150 + ], + "type": "text", + "content": "[48] Alex Krizhevsky, Ilya Sutskever, and Geoffrey E Hinton. Imagenet classification with deep convolutional neural networks. Advances in neural information processing systems, 25, 2012." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 63, + 152, + 530, + 169 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 152, + 530, + 169 + ], + "spans": [ + { + "bbox": [ + 63, + 152, + 530, + 169 + ], + "type": "text", + "content": "[49] William Peebles and Saining Xie. Scalable diffusion models with transformers. In Proceedings of the IEEE/CVF international conference on computer vision, pages 4195-4205, 2023." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 63, + 170, + 530, + 188 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 170, + 530, + 188 + ], + "spans": [ + { + "bbox": [ + 63, + 170, + 530, + 188 + ], + "type": "text", + "content": "[50] Yixin Liu, Kai Zhang, Yuan Li, Zhiling Yan, Chujie Gao, Ruoxi Chen, Zhengqing Yuan, Yue Huang, Hanchi Sun, Jianfeng Gao, et al. Sora: A review on background, technology, limitations, and opportunities of large vision models. arXiv preprint arXiv:2402.17177, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 63, + 189, + 530, + 207 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 189, + 530, + 207 + ], + "spans": [ + { + "bbox": [ + 63, + 189, + 530, + 207 + ], + "type": "text", + "content": "[51] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 63, + 208, + 494, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 208, + 494, + 217 + ], + "spans": [ + { + "bbox": [ + 63, + 208, + 494, + 217 + ], + "type": "text", + "content": "[52] Pierre Ladevèze. On reduced models in nonlinear solid mechanics. European Journal of Mechanics-A/Solids, 60:227-237, 2016." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 63, + 218, + 530, + 236 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 218, + 530, + 236 + ], + "spans": [ + { + "bbox": [ + 63, + 218, + 530, + 236 + ], + "type": "text", + "content": "[53] Richard J Williams, Catrin M Davies, and Paul A Hooper. A pragmatic part scale model for residual stress and distortion prediction in powder bed fusion. Additive Manufacturing, 22:416-425, 2018." + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 291, + 739, + 302, + 748 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 739, + 302, + 748 + ], + "spans": [ + { + "bbox": [ + 291, + 739, + 302, + 748 + ], + "type": "text", + "content": "27" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 26 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2503_14xxx/2503.14350/f51fef62-e3ca-4f33-b47c-8b3a779fe535_content_list.json b/data/2025/2503_14xxx/2503.14350/f51fef62-e3ca-4f33-b47c-8b3a779fe535_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..41c3fe084d91889fc1ea638058a64eae53b61a5f --- /dev/null +++ b/data/2025/2503_14xxx/2503.14350/f51fef62-e3ca-4f33-b47c-8b3a779fe535_content_list.json @@ -0,0 +1,2164 @@ +[ + { + "type": "text", + "text": "VEGGIE: Instructional Editing and Reasoning Video Concepts with Grounded Generation", + "text_level": 1, + "bbox": [ + 250, + 128, + 746, + 175 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Shoubin Yu $^{1,3*}$ Difan Liu $^{1*}$ Ziqiao Ma $^{1,2*}$ Yicong Hong $^{1}$ Yang Zhou $^{1}$ Hao Tan $^{1}$ Joyce Chai $^{2}$ Mohit Bansal $^{3}$", + "bbox": [ + 202, + 202, + 794, + 241 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ Adobe Research $^{2}$ University of Michigan $^{3}$ UNC Chapel Hill", + "bbox": [ + 235, 
+ 255, + 759, + 273 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "https://veggie-gen.github.io/", + "bbox": [ + 351, + 276, + 640, + 292 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/248f43d2a8a5d0f6aaf3651344b22c259b8cc08d13d17fa22f292d515bdb478e.jpg", + "image_caption": [ + "Figure 1. We propose VEGGIE, a unified and versatile video generative model that handles various tasks for both video concept grounding and editing according to user instructions. With VEGGIE, users can locate, add, delete, and change concepts in a given video through diverse instruction formats (direct referring instruction or reasoning-demanding questions). Users can also edit videos with multimodal instruction empowered by MLLM, enabling applications like video editing from a reference image." + ], + "image_footnote": [], + "bbox": [ + 132, + 308, + 857, + 556 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 246, + 635, + 326, + 651 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "While recent video diffusion models enable video editing, unifying diverse instructional editing tasks (e.g., add, remove, modify) under a single framework remains a significant challenge. In this paper, we introduce VEGGIE, a Video Editor with Grounded Generation from Instructions, a simple end-to-end framework that unifies video concept editing, grounding, and reasoning based on diverse user instructions. Specifically, given a video and text query, VEGGIE first utilizes an MLLM to interpret user intentions in instructions and ground them to the video contexts, generating framespecific grounded task queries for pixel-space responses. A diffusion model then renders these plans and generates edited videos that align with user intent. 
To support diverse", + "bbox": [ + 88, + 667, + 485, + 864 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "tasks and complex instructions, we employ a curriculum learning strategy: first aligning the MLLM and video diffusion model with large-scale instructional image editing data, followed by end-to-end fine-tuning on high-quality multitask video data. Additionally, we introduce a novel data synthesis pipeline to generate paired instructional video editing data for model training. It transforms static image data into diverse, high-quality video editing samples by leveraging Image-to-Video models to inject dynamics. VEGGIE shows strong performance in instructional video editing with different editing skills, outperforming the best instructional baseline as a versatile model, while other models struggle with multi-tasking. VEGGIE also excels in video object grounding and reasoning segmentation, where other baselines fail. We further reveal how the multiple tasks help each other and highlight promising applications like zero-shot multimodal instructional and in-context video editing.", + "bbox": [ + 511, + 637, + 908, + 895 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Equal contribution.", + "bbox": [ + 107, + 875, + 220, + 887 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "$\\dagger$ Work done during internship at Adobe Research.", + "bbox": [ + 109, + 887, + 372, + 898 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2503.14350v3 [cs.CV] 25 Oct 2025", + "bbox": [ + 22, + 277, + 58, + 719 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. 
Introduction", + "text_level": 1, + "bbox": [ + 91, + 89, + 222, + 106 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Building on the advances in Video Diffusion Models (VIDDMs) [2, 4, 24, 25, 74], video editing methods have emerged as video design tools, allowing users to manipulate video concepts such as adding, removing, altering objects and style translation [20, 53, 62, 79, 85]. To enhance user experiences, instructional video editing methods [54, 86] have been developed, using triples of text prompts, source videos, and target videos for training. Due to their limited performance in understanding user intent and multimodal semantics [17], several methods have incorporated multimodal large language models (MLLMs) to handle complex instructions/reasoning [17, 29, 75].", + "bbox": [ + 89, + 114, + 485, + 296 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "However, existing methods fall short of the goal of a simple, versatile video concept editor, facing three primary challenges. First, most methods are not end-to-end, requiring intermediate layout/mask/human or model caption guidance [35, 53, 63, 75], which adds workload on users and disrupts a seamless editing experience. Second, existing pipelines connecting MLLMs to VidDMs require multiple training objectives beyond simple pixel-space diffusion loss, such as language loss [75] or mask losses [66]. This increases optimization difficulty and often requires additional hyperparameter tuning or annotations. Third, existing video editing models, both instructional and non-instructional, struggle with handling other diverse editing tasks, ranging from addition, and deletion to stylization. For example, LGVI [66] fails in global edits such as stylization and color change, while VidToMe [41] struggles with local edits such as adding or removing objects. 
These methods also struggle with input videos that contain multiple objects or when user instructions require complex reasoning.", + "bbox": [ + 89, + 296, + 485, + 583 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "These challenges result from two limitations: First, there is a lack of multitasking fine-tuning on well-curated instructional video editing datasets that span a broad range of skills. Second, models often lack two critical capabilities needed to interpret user intentions and accurately locate concepts: multimodal reasoning to infer the intended modification from the user's instruction; and grounding language to the input video to precisely identify the region or object to be edited. For example, in Figure 1, one can effortlessly locate the girl given \"identify the little girl.\" When asked to \"add a hat to the little girl,\" we intuitively imagine the hat placed on her head from commonsense, even without seeing an actual hat.", + "bbox": [ + 89, + 584, + 485, + 763 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To address these challenges, we introduce VEGGIE, a Video Editor with Grounded Generation from Instructions. VEGGIE unifies video concept grounding and editing without relying on additional layout, mask guidance, or intermediate caption [35, 39, 42, 75, 78, 79]. Instead, we formulate the problem as end-to-end grounded generation in pixel space, using only a diffusion loss. Specifically, given a video and a text query, VEGGIE first leverages an MLLM to interpret complex instructions, generating frame-wise con", + "bbox": [ + 89, + 765, + 485, + 901 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "ditions. Unlike prior methods [43, 75] that use discrete text tokens as conditions, which disconnect the pipeline and block gradient propagation, VEGGIE employs continuous, learnable task query embeddings per frame. 
This enables end-to-end training and effectively captures grounded task representations for diffusion model conditioning. To handle diverse tasks and accurately interpret complex queries, we employ a curriculum learning strategy that begins by aligning MLLMs with diffusion models using massive paired instructional image editing data and then fine-tuning the model end-to-end on high-quality multitask video data to adapt video. Unlike tool-use methods [16, 35, 75], VEGGIE formulates both video grounding and instructional editing in the same video-to-video task formulation, enabling efficient handling through a unified single model. To further support end-to-end training, we introduce a novel automatic instructional video data generation pipeline that lifts high-quality instructional image editing data into the video domain using image-to-video and video evaluation tools.", + "bbox": [ + 511, + 90, + 906, + 378 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Existing video editing benchmarks do not provide wide and uniform coverage of diverse editing skills [59, 68]. To address this gap, we contribute VEG-Bench, an instructional video editing benchmark that spans 8 editing skills: concept addition, removal, object changing, environment background changing, visual feature changing, stylization, object grounding, and reasoning segmentation. Each skill is evaluated using a dedicated suite of metrics. We assess the proposed VEGGIE alongside 6 baselines on VEG-Bench. VEGGIE demonstrates strong performance across diverse editing skills, outperforming the best instructional baseline as a versatile, all-in-one model, while other models struggle with multi-tasking. Additionally, VEGGIE excels in video object grounding and reasoning segmentation tasks, where other baselines fall short. We show further analysis of how multi-task learning enhances our framework and highlight applications such as zero-shot multimodal instructional following (Fig. 
2) and few-shot in-context editing (Fig. 3). Our contributions are summarized as follows:", + "bbox": [ + 511, + 381, + 908, + 667 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We propose VEGGIE, an end-to-end model that integrates an MLLM and a VidDM. VEGGIE is a versatile framework that handles diverse instructional requests for editing and grounding various video concepts. Unlike existing work that achieves multitasking via tool use, VEGGIE unifies diverse tasks in a single model, thus simplifying the training with only diffusion loss.", + "- We propose a data synthesis pipeline, scaling high-quality instructional video editing data for future work.", + "- We propose VEG-Bench, an instructional video editing benchmark that spans 8 editing skills with dedicated metrics for each skill.", + "- VEGGIE achieves strong performance across diverse editing skills compared with SoTA methods, and shows potentials for multimodal instruction and in-context following." + ], + "bbox": [ + 516, + 674, + 908, + 900 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/63aee7531a8352ad2237d744edc2c4244b764cdceaf3ac7d28b1d1aaff353c32.jpg", + "image_caption": [ + "Reference" + ], + "image_footnote": [], + "bbox": [ + 161, + 89, + 243, + 142 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/8ebc41e6884b7f069ed5ee7ac23e1d302d303b3d20ae0c2837436d84bb8719e2.jpg", + "image_caption": [ + "Transfer the style in the reference image.", + "Add the object in the reference image on the women." 
+ ], + "image_footnote": [], + "bbox": [ + 248, + 90, + 493, + 218 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/82fc676d2e8301dd302c138b4fb2e8579575fc1c2873220621aa9287a9876ed6.jpg", + "image_caption": [ + "Reference" + ], + "image_footnote": [], + "bbox": [ + 500, + 99, + 583, + 142 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/a52aa43228df85914871541a07c98e72868322f9fb8f14dbe66c5c1a9614cc60.jpg", + "image_caption": [ + "Figure 2. Multimodal instruction following emerges in VEGGIE, allowing for style transfer or object addition from reference images." + ], + "image_footnote": [], + "bbox": [ + 586, + 90, + 831, + 217 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/15a599d498c84b1cb1fee7c11dacd6d52aee7bacf1257730ce3353e7016fc924.jpg", + "image_caption": [ + "Figure 3. In-context editing emerges in VEGGIE, allowing for few-shot learning of editing tasks with paired image demonstrations." + ], + "image_footnote": [], + "bbox": [ + 161, + 262, + 831, + 393 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 89, + 448, + 230, + 463 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Instructional Video Editing Video Diffusion Models (VidDMs) [2, 4, 6, 7, 15, 26, 49, 67, 71], enable high-quality video generation across a wide range of video concepts. Building on these advances, video editing methods have emerged as tools for video design, allowing users to manipulate video concepts such as adding, removing, altering objects and style translation [20, 53, 62, 79, 85]. To enhance user experiences, instructional video editing methods [54, 70, 86] have been developed, using triples of video instructions, source videos, and target videos for training. These methods demonstrate limited performance when complex multimodal reasoning is required, as noted by previous research on instructional image editing [17]. 
Moreover, they struggle with diverse editing tasks, from addition and deletion to stylization. For example, LGVI [66] is primarily designed for removal tasks, while TokenFlow [20] struggles with local edits such as adding, removing, or changing objects. We address this limitation with pixel-level multitasking fine-tuning on well-curated instructional video editing datasets covering various grounding and editing skills.", + "bbox": [ + 89, + 474, + 483, + 777 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Video Grounding and Segmentation Visual grounding requires models to connect language to its corresponding visual concept in the visual context [40, 47]. This is commonly evaluated via the language-guided semantic localization tasks, ranging from simple referring expressions in RefCOCO series [48, 77] and their generalized variant [44] that takes no-target and multi-target into account. Recently, grounded multimodal large lan", + "bbox": [ + 89, + 779, + 483, + 901 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "guage models (MLLMs) are trained for object grounding to bounding boxes [8, 51, 52, 76, 82, 87] and segmentation masks [56, 69, 81, 84] using text-image pairs with fine-grained annotations linking phrases to entities. These models unlock the potential of reasoning segmentation [11, 36], bringing language-informed reasoning into semantic segmentation. Instead of using dedicated object detection or segmentation modules, we achieve video grounding through end-to-end training with only diffusion loss.", + "bbox": [ + 511, + 450, + 906, + 585 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Our Method: VEGGIE", + "text_level": 1, + "bbox": [ + 511, + 599, + 725, + 614 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this paper, we introduce a Video Editor with Grounded Generation from Instructions (VEGGIE), a unified and versatile generative video model. 
It combines the complex instruction understanding and reasoning capabilities of MLLMs with the generative capacity of VidDMs. The model is trained end-to-end with diffusion loss only. VEGGIE efficiently handles diverse user inputs, including direct instructions, complex questions requiring in-depth reasoning, and multimodal conditioning. It performs various pixel-level manipulations, enabling tasks such as video concept addition, removal, changing, stylization, grounding, and reasoning segmentation based on user instructions. We elaborate on the model design (Sec. 3.1), training and inference process (Sec. 3.2), and data curation (Sec. 3.3).", + "bbox": [ + 511, + 626, + 908, + 837 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Model Architecture", + "text_level": 1, + "bbox": [ + 511, + 848, + 700, + 862 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "VEGGIE consists of four main components (see Fig. 4): (1) a multimodal large language model, (2) a set of learnable", + "bbox": [ + 511, + 869, + 906, + 901 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/a5d3f638a01dbbb52a6cc9b53f3d35262c056bd0b75f46a28b987aa706886c70.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 200, + 92, + 225, + 109 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/b55dc2a0ef4463e2084d920193d930a31ab28d573d9f0d5ccfef321fdc867476.jpg", + "image_caption": [ + "Figure 4. Overview of our proposed end-to-end VEGGIE framework. Our Multimodal Large Language Model first understands input video frames and diverse user instructions, then it generates frame-wise reasoning queries that maintain per-frame editing conditions for the video diffusion model. The video diffusion model will render the MLLM-generated conditions to the pixel space for diverse tasks, including video editing, video grounding, and video reasoning segmentation with questions. We only apply diffusion loss for the whole pipeline training." 
+ ], + "image_footnote": [], + "bbox": [ + 112, + 118, + 883, + 354 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "grounded task queries, (3) an alignment network (single-layer MLP) that projects the MLLM output into the condition space of the diffusion model, and (4) a video diffusion model initialized from an instructional image editing model [83]. Our model first generates latent conditions for target video frames by querying multimodal context using an MLLM, then renders these conditions at the pixel level through a video diffusion model, as detailed below.", + "bbox": [ + 88, + 443, + 485, + 563 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "MLLM for Generating Grounded Task Guidance. As illustrated in the left of Fig. 4, given a video consisting of a sequence of frames $V = [f_{1}, \\dots, f_{n}]$ , where $n$ is the frame number of the given video, a user instruction/question $I$ , our goal is to obtain the response $\\widehat{V} = [\\widehat{f}_{1}, \\dots, \\widehat{f}_{n}]$ at pixel space that faithfully reflects user instruction about the given video. The MLLM module processes both the input video $V$ and a user instruction $I$ to generate a sequence of grounded task tokens per frame: $C = [c_{1}, \\dots, c_{n}]$ , which are input and output in parallel. These tokens serve as task guidance and implicitly encode the target manipulation, such as object attributes, spatial relationships, or style transfer parameters. The MLLM ensures the model captures both explicit user instructions and implicit reasoning needs.", + "bbox": [ + 88, + 565, + 485, + 779 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "VidDM for Rendering MLLM Guidance at Pixel Space. As illustrated in the right of Table 4, the VidDM takes the original video $V$ and the grounded task tokens $C$ as conditions to synthesize the target video $\\widehat{V}$ . 
The original video is concatenated with the noise volume, and the task tokens are input to the cross-attention. With grounded task guidance in denoising steps, the generation process ensures that the output faithfully follows user instructions while preserving the", + "bbox": [ + 88, + 779, + 485, + 901 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "video's structure and motion dynamics. Through iterative denoising, it refines each frame while maintaining temporal consistency, applying pixel modifications coherently for a smooth and visually consistent output video $\\widehat{V}$ .", + "bbox": [ + 511, + 443, + 906, + 505 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. Curriculum Learning from Images to Videos", + "text_level": 1, + "bbox": [ + 511, + 515, + 893, + 531 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Training the model directly on video tasks presents two key challenges: (1) misalignment between MLLM and diffusion model representations, making it difficult for the diffusion model to interpret MLLM-generated task queries with limited fine-tuning data, and (2) the diffusion model's lack of multitasking capability, even for image tasks, due to insufficient training on diverse tasks. Our initial experiments also found the model collapsed when the whole pipeline was directly trained with all data. These challenges/observations underscore the need for pre-alignment between MLLM and the diffusion model to enable seamless adaptation from language-space task queries to pixel-space modifications. To this end, we adopt a two-stage curriculum learning strategy for the proposed VEGGIE framework.", + "bbox": [ + 511, + 537, + 908, + 748 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Stage 1: Aligning Diffusion and Language Spaces. In the first stage, we align the diffusion model with the MLLM using large-scale image-level instructional editing data. 
The MLLM remains frozen while we update the alignment network, grounded task queries, and diffusion UNet. This process fine-tunes the diffusion model weights to align with the language space, enabling the model to interpret MLLM-generated guidance and translate user instructions into pixel-level edits while preserving the MLLM's strong ability to understand instructions and user intentions.", + "bbox": [ + 511, + 750, + 910, + 900 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "VEGGIE: A Unified and Versatile Instructional Video Generative Model", + "bbox": [ + 225, + 92, + 761, + 108 + ], + "page_idx": 3 + }, + { + "type": "table", + "img_path": "images/c6886a9d5898513643b38cb38f91e2f9594828a09e7f5582e736d37ec64105ca.jpg", + "table_caption": [], + "table_footnote": [ + "Table 1. Summary of our data for training. R.: Reasoning, E.: Editing, G.: Grounding. #Img/Vid: the number of images/videos, and #Ins.: the number of instruction-image/video pairs." + ], + "table_body": "
TypeSourceR.E.G.# Img./Vid.# Ins.
VideoROVI [66]4.3K27.4K
VPLM [75]4.3K5.5K
GroundMoRe [12]1.3K5.5K
RVoS [60]1.9K6.1K
MeViS [14]1.5K17.1K
InstructV2V [9]68.3K68.3K
VEG-Edit (Ours)4.0K6.2K
Total136.1K
ImageSeed-Data-Edit [18]3M3M
LISA [37]0.2K1.3K
gRefCoCo [44]13.6K73.4K
PhraseCut [65]310.8K310.8K
EraseDraw [5]64.9K42.4K
MagicBrush [83]9.3K9.3K
SmartEdit [29]0.5K0.9K
Total3438.1K
", + "bbox": [ + 93, + 88, + 478, + 311 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Stage 2: Enhancing Temporal Consistency and Dynamics. With the MLLM and diffusion model aligned, fine-tuning diverse instructional video editing data becomes more effective for improved instruction following at pixel-space including temporal consistency, dynamic coherence, and editing faithfulness. In this stage, we fine-tune the framework with the MLLM, including the alignment network, grounded task queries, and all 3 dimensions in diffusion UNet, end-to-end with carefully curated multitasking instructional video editing data. Following prior work [22, 67], we inflated the 2D UNet from Stage 1 with temporal attention layers for video adaptation. For both stages 1 and 2, we optimize the framework with a single diffusion loss, enabling unified learning for improved instructional video editing performance while maintaining simplicity and efficiency.", + "bbox": [ + 88, + 383, + 485, + 609 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Classifier-Free Guidance during Testing. We employ classifier-free guidance to balance quality and diversity in diffusion-generated samples. Following prior work [3, 17], we apply classifier-free guidance to instructional visual editing considering two conditions: the grounded task tokens and the original video. To obtain unconditional guidance, we set null values $(\\varnothing)$ for both task tokens and input video. 
In this case, our score estimate is:", + "bbox": [ + 88, + 614, + 486, + 734 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\tilde {e _ {\\theta}} (z _ {t}, c _ {T}, c _ {V}) = e _ {\\theta} (z _ {t}, \\varnothing , \\varnothing) \\\\ + g _ {T} \\cdot \\left(e _ {\\theta} \\left(z _ {t}, c _ {V}, c _ {T}\\right) - e _ {\\theta} \\left(z _ {t}, c _ {V}, \\varnothing\\right)\\right) \\\\ + g _ {V} \\cdot \\left(e _ {\\theta} \\left(z _ {t}, c _ {V}, \\varnothing\\right) - e _ {\\theta} \\left(z _ {t}, \\varnothing , \\varnothing\\right)\\right), \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 99, + 750, + 472, + 805 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\theta$ represents the model parameters, $C_T$ and $C_V$ denote the task tokens and video conditions, $\\varnothing$ is the null value, $z_t$ is the noised latent at timestamp $t$ , and $g_T$ and $g_V$ are the task guidance and video guidance scales, respectively. More training details are included later in Appendix.", + "bbox": [ + 88, + 824, + 483, + 902 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3. Data Curation Pipeline", + "text_level": 1, + "bbox": [ + 513, + 90, + 728, + 107 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Existing video editing models, both instructional and non-instructional, struggle with diverse editing skills due to the lack of high-quality multitasking fine-tuning data. In this section, we introduce our data curation strategy to support VEGGIE in achieving versatile video editing skills. As listed in Tab. 1, we collect 3.4M image and 133.9K video data from diverse sources to support our VEGGIE curriculum learning as discussed in Sec. 3.2. 
We create our training dataset from two sources: (1) collecting existing image and video data and converting it into an instructional editing format, and (2) synthesizing new instructional video editing samples using existing datasets and generative models.", + "bbox": [ + 511, + 112, + 906, + 294 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Collecting Diverse Multitask Image and Video Data. We bring together instructional editing data from both image (Seed-Data-Edit [18], MagicBrush [83], EraseDraw [5]) and video (InstructV2V [9], VPLM [75]) sources. These datasets provide pairs of original and edited visual contents with user instructions. The tasks include adding, removing, and changing objects, stylizing, and performing global/local edits. Beyond editing datasets, we incorporate segmentation data at both the image level (gRefCoCo [44] and Phrase-Cut [65]) and the video level (RVoS and MeViS). These segmentation tasks are reformulated as color-filling challenges, which guide the model in learning referring grounding (i.e., understanding which object or region to edit) and strengthen its conceptual learning. To further unlock complex instruction understanding via MLLM, we include data that requires more advanced reasoning and implicit referencing. Specifically, we include: reasoning segmentation (LISA [37]), reasoning editing (SmartEdit [29]), interactive video inpainting (LGVI [66]), and motion-grounded video reasoning (GroundMoRe [12]). These tasks help VEGGIE learn implicit references and reasoning.", + "bbox": [ + 511, + 295, + 908, + 613 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Synthesizing Instructional Video Editing Data via Image-to-Video Animation. Recent methods [3, 54] generate synthetic instructional video-editing data by first creating text instructions with LLM, then getting edited videos via T2V models and prompt-to-prompt editing [23]. 
While these methods adapt image-based editing pipelines [9] to videos, the generated data suffer from temporal-consistency issues. To address this gap, we propose a novel image-to-video animation strategy that leverages the abundance of high-quality image-level instructional editing datasets [64, 83], which provide well-annotated instructions, paired edited images, and well-organized editing skill categories. As illustrated in Fig. 5, given an original image $I$ , an edited image $\\bar{I}$ , and an instruction from an instructional image editing dataset [64], our approach involves three key steps. First, we use an offline MLLM [61, 72] to generate an image caption and an animation prompt that describes plausible motion within the image. Next, an image-to-video (I2V) model animates the image into a video $V$ . Finally,", + "bbox": [ + 511, + 613, + 908, + 901 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/e788f4f4b9713f7aa04e3b0723c0bd508449a3c41f0753126f94891fa627fb93.jpg", + "image_caption": [ + "Figure 5. Our data generation pipeline for synthetic instructional video editing data. It injects dynamics into well-constructed instructional image editing datasets via the Image-to-Video (I2V) Model, and generates paired video data for instruction editing." + ], + "image_footnote": [], + "bbox": [ + 119, + 87, + 450, + 357 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "we generate the corresponding edited video $\\bar{V}$ using a first-frame-conditioned video editing model [35], leveraging $\\bar{I}$ as a strong prior to ensure consistent edits across frames. Finally, to ensure data quality, we evaluate each original-edited video pair with automatic video quality evaluation metrics [30], which assess the generated videos from diverse dimensions, e.g., motion smoothness, image quality, and background consistency. 
This pipeline transforms carefully curated image-based datasets into instructional video-editing resources while preserving the precision of the original edits. As a result, our data method expands the availability of high-quality synthetic video-editing data, supporting a wider range of editing tasks in our end-to-end unified framework. More details on data generation, prompting, examples, and pre/post-processing are in the Appendix.", + "bbox": [ + 88, + 430, + 483, + 657 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 89, + 672, + 223, + 690 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We first introduce the VEG-Bench Benchmark and then demonstrate the superiority of VEGGIE across diverse video instructional editing skills. More experiments, visualization, and implementation details are in the Appendix.", + "bbox": [ + 89, + 699, + 485, + 761 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1. VEG-Bench and Metrics", + "text_level": 1, + "bbox": [ + 89, + 771, + 313, + 786 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "As no existing benchmark is designed for fine-grained instructional video editing skills, we manually collect and annotate VEG-Bench, containing 132 video-instruction pairs that balanced cover 8 different video generative skills (15-20 for each). Beyond standard metrics, including text-to-video alignment (CLIP-Text [55]), video smoothness (CLIP-F [55]), and image quality (MUSIQ [32]), we also", + "bbox": [ + 89, + 794, + 485, + 902 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "first introduce MLLM-as-a-Judge to give a holistic evaluation score according to the given original video, edited video, and user instruction. It is achieved by prompting GPT4o [21] to evaluate whether the requested semantic change has been fulfilled, using a scale from 1 to 10. 
For addition and removal, we also introduce an object detector (GroundingDiNo [45]) to detect if the object is added/removed faithfully. For grounding and reasoning segmentation, we following video grounding tasks [12, 14, 33, 58] and adopt the Jaccard index $(\\mathcal{I})$ [31], F-measure $(\\mathcal{F})$ [13], and their mean $(\\mathcal{J} \\& \\mathcal{F})$ . We also compute SSIM between the generated video and the original video masked with GT masks. More evaluation/metrics details are included in Appendix.", + "bbox": [ + 511, + 90, + 908, + 287 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. Experimental Results", + "text_level": 1, + "bbox": [ + 511, + 300, + 714, + 316 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Instructional Video Editing over Diverse Skills. As shown in Tab. 2, we evaluate 7 different models on VEG-Bench across 8 distinct editing skills. Overall, VEGGIE demonstrates the best performance among instructional video editing models. Compared to VEGGIE, non-instructional models often struggle with concept removal and addition. This limitation arises because these models rely on attention control or additional conditions (e.g., depth maps) that impose strong priors, constraining the model and making object addition or removal challenging. We also observe that InsV2V achieves high scores in quality and smoothness metrics, but underperforms in alignment and MLLM judgment, which demand faithful semantic changes. Qualitative examples in Fig. 6 illustrate that InsV2V often makes minimal changes to the input video, resulting in high video quality but unfaithful outputs. In contrast, VEGGIE strikes a better balance, delivering both high-quality visuals and accurate semantic alignment with the intended edits.", + "bbox": [ + 511, + 323, + 908, + 595 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Can Multi-Task Help Each Other? 
To test the previous hypothesis, we train our model on the VPLM [75] dataset, which includes paired grounding and removal tasks (approximately 5.5K samples for each task). We focus on these tasks as representative examples due to their straightforward evaluation against ground truth. As shown in Table 3, multitask training yields a lower FVD score and a higher SSIM score, demonstrating that learning to locate and remove a video concept can mutually reinforce performance. We show an example in Fig. 7. However, this conclusion only holds with a balanced data combination. We also observe that an excessive amount of grounding data can introduce more artifacts and negatively impact visual editing skills.", + "bbox": [ + 511, + 597, + 908, + 792 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Emergent Zero-shot Multimodal Instruction Following. We also highlight the emergent behavior of VEGGIE on multi-modal instruction following, even without dedicated training data for this specific editing instruction. Notably, VEGGIE demonstrates the ability to perform zero-shot multimodal instructional video editing. As illustrated in Fig. 2, VEGGIE can transfer styles or add objects from a reference", + "bbox": [ + 511, + 795, + 908, + 900 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/769f5b258ad2a40f74015e9fe4b7b6b606d38b4bc700fb57fbc9df38e8c93b20.jpg", + "image_caption": [ + "[Addition] Please add a ball in the given video frames." + ], + "image_footnote": [], + "bbox": [ + 114, + 99, + 367, + 339 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/00f32512d007bed96281648f160b4774880b37f5f9ff4dfdf7bdfebeb8280ea6.jpg", + "image_caption": [ + "[Removal] Please remove the man in black in given video frames." 
+ ], + "image_footnote": [], + "bbox": [ + 370, + 99, + 625, + 339 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/d5a4bed054201a04c482dc9e52feec17fb85da977718e3c6b1ff3aedd5e32725.jpg", + "image_caption": [ + "[Swap] Replace golden building with a white mountain." + ], + "image_footnote": [], + "bbox": [ + 627, + 99, + 882, + 339 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/fc7eb7072f3ac10455a05af86de057f09bf8de77d3070a1df4c5cdecfb45e595.jpg", + "image_caption": [ + "[Environment] Make it on the beach." + ], + "image_footnote": [], + "bbox": [ + 114, + 351, + 367, + 589 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/34b49ae75a067c46555833a6dcf6b163d985c2eb0b4af622d4cafc6f1c4f5a16.jpg", + "image_caption": [ + "[Color] Make the swan white." + ], + "image_footnote": [], + "bbox": [ + 370, + 351, + 625, + 589 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/cedd9c5e501fb9c67b83bffd4cdbec3face648d9021835024e06f5277e66ca55.jpg", + "image_caption": [ + "[Texture] Make the rhinocero furry." + ], + "image_footnote": [], + "bbox": [ + 627, + 351, + 882, + 589 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/d119609c69d9e8f8e016ca37c53edbeabac37f9d31df9ffab230fed4f1bd87fa.jpg", + "image_caption": [ + "[Style] Make it chinese ink style.", + "Figure 6. Qualitative comparison of editing results across 8 different abilities (splitting visual features into color and texture). We provide zoom-in details for a more detailed comparison. Best viewed in color. More in Appendix." 
+ ], + "image_footnote": [], + "bbox": [ + 114, + 606, + 367, + 845 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/ece2036c5a6eb87cd9ef520c69ff7ac773f46d71db767926851aa27e027f4caa.jpg", + "image_caption": [ + "[Grounding] Could you locate the knife" + ], + "image_footnote": [], + "bbox": [ + 370, + 599, + 625, + 845 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/7fa0623cc60d54f9ad0f0fcf8df5957362b4f09b3621f95288c1e4201f7170bf.jpg", + "image_caption": [ + "[Reasoning] What can be used for heating food?" + ], + "image_footnote": [], + "bbox": [ + 625, + 599, + 882, + 845 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/fd2127f64af65331f9ef175acf2c164e15803575e2f78951103783d194d03106.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Methods\nGeneratorNon-Instructional Editing ModelInstructional Editing Model
VidToMe [41] (SD1.5)TokenFlow [20] (SD2.1)Flatten [10] (SD2.1)InstructDiff [19] (SD1.5)LGVI [66] (SD1.5)InsV2V [9] (SD1.5)VEGGIE (Ours) (SD1.5)
Concept Addition
MLLM-Judge (↑)5.005.806.627.262.735.697.44
Alignment (↑)27.8029.3028.2228.1025.0628.2729.27
Smoothness (↑)96.7997.2695.7493.6696.0996.9494.93
Quality (↑)62.8165.6255.4554.0841.5956.2461.31
Detection (↑)47.9849.5349.7455.3614.4248.0157.96
Concept Removal
MLLM-Judge (↑)2.603.734.466.126.592.785.07
Alignment (↑)75.0175.9978.4075.5175.6774.4175.63
Smoothness (↑)96.1396.4795.8291.8397.0396.9995.04
Quality (↑)66.3271.5250.7755.0842.3158.7950.99
Detection (↑)34.3155.1670.9164.8178.4025.6470.22
Object Changing
MLLM-Judge (↑)5.006.537.377.002.066.606.63
Alignment (↑)25.6928.7627.0627.3622.1726.6027.77
Smoothness (↑)96.2397.2196.1392.0795.6696.7495.44
Quality (↑)64.0669.9759.3755.0138.2060.9058.15
Environment & Background Changing
MLLM-Judge (↑)5.817.357.376.052.376.607.18
Alignment (↑)28.1730.0030.0428.0321.9428.2729.15
Smoothness (↑)95.7696.9695.9089.8595.6696.0394.58
Quality (↑)61.9567.0654.5853.0638.9754.9454.25
Visual Feature Changing (Color & Texture)
MLLM-Judge (↑)5.866.856.606.432.147.537.33
Alignment (↑)27.9929.2529.4627.5423.1828.8828.69
Smoothness (↑)95.9397.1095.8391.7194.7596.6694.52
Quality (↑)65.8069.3153.3258.2936.2759.3657.91
Stylization
MLLM-Judge (↑)7.237.628.317.413.718.078.26
Alignment (↑)29.8430.2529.0027.7422.8029.1429.38
Smoothness (↑)96.3197.2396.7188.9795.6296.5095.69
Quality (↑)64.0568.2253.1854.1535.7662.5957.00
Object Grounding
SSIM (↑)40.4750.4647.2137.9866.8449.6570.90
Jaccard Index J (↑)13.8519.2925.6219.881.5213.8937.74
F-measure F (↑)15.5016.8617.6012.813.0717.3721.83
Reasoning Segmentation
SSIM (↑)---32.3944.4759.8668.41
Jaccard Index J (↑)---14.0210.1216.8922.53
F-measure F (↑)---8.079.0610.4515.97
Avg. Ranking2.611.411.963.003.212.001.78
", + "bbox": [ + 148, + 88, + 841, + 512 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/72a21b6c9815d103f96dd8cd3baa1af72c116e890688bd264c8b890c2b98b54f.jpg", + "table_caption": [ + "Table 2. Comparison of video editing task with instructional / non-instructional models on VEG-Bench. -: the task is not capable of non-instructional models. We gray out numbers of non-instructional models that are in different categories." + ], + "table_footnote": [], + "table_body": "
SettingsRemoval (FVD ↓)Grounding (SSIM ↑)
Grd.-only-52.34
Rmv.-only1098.52-
Mixed987.8055.21
", + "bbox": [ + 117, + 556, + 464, + 621 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 3. An ablation study on whether multi-task learning provides transferable benefits that enhance performance across tasks. We focus on removal and grounding tasks as representative examples.", + "bbox": [ + 89, + 632, + 482, + 674 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/26fbd8a917fd53e373c29389d3c2a59e6baaf3dc3521674392c00f0a449b214f.jpg", + "image_caption": [ + "Figure 7. Comparison between single- and multi-skill models with different data training. We find tasks can help each other." + ], + "image_footnote": [], + "bbox": [ + 114, + 686, + 460, + 828 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "image into the input video based on instructions.", + "bbox": [ + 511, + 559, + 834, + 573 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Emergent Few-shot In-Context Editing. As shown in Fig. 3, VEGGIE can effectively utilize a few example image pairs to transfer the intended editing changes seamlessly to the input video. We observe that VEGGIE exhibits in-context learning for image editing without the need for language instructions. Instead, it uses image pairs as examples to infer and apply the desired editing intention directly.", + "bbox": [ + 511, + 574, + 906, + 680 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 511, + 694, + 633, + 709 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We present VEGGIE, a unified end-to-end model for instructional video editing that handles diverse pixel-level tasks. VEGGIE leverages MLLM for robust instruction understanding and employs a video diffusion model to execute pixel-level edits. Our framework uses a single diffusion loss for end-to-end optimization across varied tasks/skills. 
We also introduce a novel synthetic data generation pipeline and VEG-Bench, a benchmark that assesses a broad range of editing skills. Our VEGGIE outperforms previous methods as a versatile, all-in-one solution. We hope our model, data, and benchmark to advance research on instructional generative video models.", + "bbox": [ + 511, + 719, + 908, + 887 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 91, + 89, + 187, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Zechen Bai, Tong He, Haiyang Mei, Pichao Wang, Ziteng Gao, Joya Chen, Lei Liu, Zheng Zhang, and Mike Zheng Shou. One token to seg them all: Language instructed reasoning segmentation in videos. arXiv preprint arXiv:2409.19603, 2024. 2", + "[2] Andreas Blattmann, Tim Dockhorn, Sumith Kulal, Daniel Mendelevitch, Maciej Kilian, Dominik Lorenz, Yam Levi, Zion English, Vikram Voleti, Adam Letts, et al. Stable video diffusion: Scaling latent video diffusion models to large datasets. arXiv preprint arXiv:2311.15127, 2023. 2, 3", + "[3] Tim Brooks, Aleksander Holynski, and Alexei A Efros. Instructpix2pix: Learning to follow image editing instructions. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 18392-18402, 2023. 5", + "[4] Tim Brooks, Bill Peebles, Connor Holmes, Will DePue, Yufei Guo, Li Jing, David Schnurr, Joe Taylor, Troy Luhman, Eric Luhman, et al. Video generation models as world simulators. OpenAI, https://openai.com/research/video-generation-models-as-world-simulators, 2024.2.3", + "[5] Alper Canberk, Maksym Bondarenko, Ege Ozguroglu, Ruoshi Liu, and Carl Vondrick. Erasedraw: Learning to insert objects by erasing them from images. arXiv preprint arXiv:2409.00522, 2024. 5", + "[6] Duygu Ceylan, Chun-Hao P Huang, and Niloy J Mitra. Pix2video: Video editing using image diffusion. 
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 23206-23217, 2023. 3", + "[7] Wenhao Chai, Xun Guo, Gaoang Wang, and Yan Lu. Stablevideo: Text-driven consistency-aware diffusion video editing. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 23040-23050, 2023. 3", + "[8] Keqin Chen, Zhao Zhang, Weili Zeng, Richong Zhang, Feng Zhu, and Rui Zhao. Shikra: Unleashing multimodal llm's referential dialogue magic. arXiv preprint arXiv:2306.15195, 2023. 3", + "[9] Jiaxin Cheng, Tianjun Xiao, and Tong He. Consistent video-to-video transfer using synthetic dataset. In The Twelfth International Conference on Learning Representations, 2024. 5, 8, 1, 2", + "[10] Yuren Cong, Mengmeng Xu, christian simon, Shoufa Chen, Jiawei Ren, Yanping Xie, Juan-Manuel Perez-Rua, Bodo Rosenhahn, Tao Xiang, and Sen He. FLATTEN: optical FLOW-guided ATTENtion for consistent text-to-video editing. In The Twelfth International Conference on Learning Representations, 2024. 8, 1", + "[11] Andong Deng, Tongjia Chen, Shoubin Yu, Taojiannan Yang, Lincoln Spencer, Yapeng Tian, Ajmal Saeed Mian, Mohit Bansal, and Chen Chen. Motion-grounded video reasoning: Understanding and perceiving motion at pixel level. arXiv preprint arXiv:2411.09921, 2024. 3", + "[12] Andong Deng, Tongjia Chen, Shoubin Yu, Taojiannan Yang, Lincoln Spencer, Yapeng Tian, Ajmal Saeed Mian, Bansal Mohit, and Chen. Chen. Motion-grounded video reasoning: Understanding and perceiving motion at pixel level. 2024. 5, 6, 2" + ], + "bbox": [ + 93, + 114, + 485, + 898 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[13] Lee R Dice. Measures of the amount of ecologic association between species. Ecology, 26(3):297-302, 1945. 6", + "[14] Henghui Ding, Chang Liu, Shuting He, Xudong Jiang, and Chen Change Loy. Mevis: A large-scale benchmark for video segmentation with motion expressions. In ICCV, pages 2694-2703, 2023. 
5, 6", + "[15] Patrick Esser, Johnathan Chiu, Parmida Atighehchian, Jonathan Granskog, and Anastasis Germanidis. Structure and content-guided video synthesis with diffusion models. In CVPR, pages 7346-7356, 2023. 3", + "[16] Hao Fei, Shengqiong Wu, Hanwang Zhang, Tat-Seng Chua, and Shuicheng Yan. Vitron: A unified pixel-level vision llm for understanding, generating, segmenting, editing, 2024. 2", + "[17] Tsu-Jui Fu, Wenze Hu, Xianzhi Du, William Yang Wang, Yinfei Yang, and Zhe Gan. Guiding instruction-based image editing via multimodal large language models. arXiv preprint arXiv:2309.17102, 2023. 2, 3, 5", + "[18] Yuying Ge, Sijie Zhao, Chen Li, Yixiao Ge, and Ying Shan. Seed-data-edit technical report: A hybrid dataset for instructional image editing. arXiv preprint arXiv:2405.04007, 2024. 5", + "[19] Zigang Geng, Binxin Yang, Tiankai Hang, Chen Li, Shuyang Gu, Ting Zhang, Jianmin Bao, Zheng Zhang, Houqiang Li, Han Hu, et al. Instructdiffusion: A generalist modeling interface for vision tasks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12709-12720, 2024. 8, 1, 2", + "[20] Michal Geyer, Omer Bar-Tal, Shai Bagon, and Tali Dekel. Tokenflow: Consistent diffusion features for consistent video editing. arXiv preprint arXiv:2307.10373, 2023. 2, 3, 8, 1", + "[21] gpt 4o. https://openai.com/index/hello-gpt-4o/.2024.6", + "[22] Yuwei Guo, Ceyuan Yang, Anyi Rao, Zhengyang Liang, Yaohui Wang, Yu Qiao, Maneesh Agrawala, Dahua Lin, and Bo Dai. Animatediff: Animate your personalized text-to-image diffusion models without specific tuning. arXiv preprint arXiv:2307.04725, 2023. 5, 1", + "[23] Amir Hertz, Ron Mokady, Jay Tenenbaum, Kfir Aberman, Yael Pritch, and Daniel Cohen-Or. Prompt-to-prompt image editing with cross attention control. 
arXiv preprint arXiv:2208.01626, 2022.5", + "[24] Jonathan Ho, William Chan, Chitwan Saharia, Jay Whang, Ruiqi Gao, Alexey Gritsanko, Diederik P Kingma, Ben Poole, Mohammad Norouzi, David J Fleet, et al. Imagen video: High definition video generation with diffusion models. arXiv preprint arXiv:2210.02303, 2022. 2", + "[25] Jonathan Ho, Tim Salimans, Alexey Gritsenko, William Chan, Mohammad Norouzi, and David J Fleet. Video diffusion models. Advances in Neural Information Processing Systems, 2022. 2", + "[26] Wenyi Hong, Ming Ding, Wendi Zheng, Xinghan Liu, and Jie Tang. Cogvideo: Large-scale pretraining for text-to-video generation via transformers. In The Eleventh International Conference on Learning Representations, 2023. 3", + "[27] Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yanzhi Li, Shean Wang, Lu Wang, Weizhu Chen, et al. Lora: Low-rank adaptation of large language models. ICLR, 1(2):3, 2022. 1" + ], + "bbox": [ + 516, + 92, + 906, + 898 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[28] Jiahao Hu, Tianxiong Zhong, Xuebo Wang, Boyuan Jiang, Xingye Tian, Fei Yang, Pengfei Wan, and Di Zhang. Vivid-10m: A dataset and baseline for versatile and interactive video local editing. arXiv preprint arXiv:2411.15260, 2024. 2", + "[29] Yuzhou Huang, Liangbin Xie, Xintao Wang, Ziyang Yuan, Xiaodong Cun, Yixiao Ge, Jiantao Zhou, Chao Dong, Rui Huang, Ruimao Zhang, et al. Smartedit: Exploring complex instruction-based image editing with multimodal large language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8362-8371, 2024. 2, 5", + "[30] Ziqi Huang, Yinan He, Jiashuo Yu, Fan Zhang, Chenyang Si, Yuming Jiang, Yuanhan Zhang, Tianxing Wu, Qingyang Jin, Nattapol Chanpaisit, et al. Vbench: Comprehensive benchmark suite for video generative models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 21807-21818, 2024. 
6, 2", + "[31] Paul Jaccard. The distribution of the flora in the alpine zone. 1. New phytologist, 11(2):37-50, 1912. 6", + "[32] Junjie Ke, Qifei Wang, Yilin Wang, Peyman Milanfar, and Feng Yang. Musiq: Multi-scale image quality transformer. In Proceedings of the IEEE/CVF international conference on computer vision, pages 5148-5157, 2021. 6", + "[33] Anna Khoreva, Anna Rohrbach, and Bernt Schiele. Video object segmentation with language referring expressions. In Computer Vision-ACCV 2018: 14th Asian Conference on Computer Vision, Perth, Australia, December 2–6, 2018, Revised Selected Papers, Part IV 14, pages 123–141. Springer, 2019. 6", + "[34] Weijie Kong, Qi Tian, Zijian Zhang, Rox Min, Zuozhuo Dai, Jin Zhou, Jiangfeng Xiong, Xin Li, Bo Wu, Jianwei Zhang, et al. Hunyuanvideo: A systematic framework for large video generative models. arXiv preprint arXiv:2412.03603, 2024. 2", + "[35] Max Ku, Cong Wei, Weiming Ren, Harry Yang, and Wenhu Chen. Anyv2v: A tuning-free framework for any video-to-video editing tasks. arXiv preprint arXiv:2403.14468, 2024. 2, 6", + "[36] Xin Lai, Zhuotao Tian, Yukang Chen, Yanwei Li, Yuhui Yuan, Shu Liu, and Jiaya Jia. Lisa: Reasoning segmentation via large language model. arXiv preprint arXiv:2308.00692, 2023. 3", + "[37] Xin Lai, Zhuotao Tian, Yukang Chen, Yanwei Li, Yuhui Yuan, Shu Liu, and Jiaya Jia. Lisa: Reasoning segmentation via large language model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9579-9589, 2024. 5", + "[38] Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Yanwei Li, Ziwei Liu, and Chunyuan Li. Llava-onevision: Easy visual task transfer. arXiv preprint arXiv:2408.03326, 2024. 1", + "[39] Jialu Li, Shoubin Yu, Han Lin, Jaemin Cho, Jaehong Yoon, and Mohit Bansal. Training-free guidance in text-to-video generation via multimodal planning and structured noise initialization. arXiv preprint arXiv:2504.08641, 2025. 
2", + "[40] Liunian Harold Li, Pengchuan Zhang, Haotian Zhang, Jianwei Yang, Chunyuan Li, Yiwu Zhong, Lijuan Wang, Lu Yuan, Lei Zhang, Jenq-Neng Hwang, et al. Grounded language" + ], + "bbox": [ + 91, + 90, + 485, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "image pre-training. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10965-10975, 2022. 3", + "[41] Xirui Li, Chao Ma, Xiaokang Yang, and Ming-Hsuan Yang. Vidthome: Video token merging for zero-shot video editing. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7486-7495, 2024. 2, 8, 1", + "[42] Long Lian, Baifeng Shi, Adam Yala, Trevor Darrell, and Boyi Li. Llm-grounded video diffusion models. arXiv preprint arXiv:2309.17444, 2023. 2", + "[43] Han Lin, Abhay Zala, Jaemin Cho, and Mohit Bansal. Videodirectorgpt: Consistent multi-scene video generation via llm-guided planning. arXiv preprint arXiv:2309.15091, 2023. 2", + "[44] Chang Liu, Henghui Ding, and Xudong Jiang. Gres: Generalized referring expression segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 23592-23601, 2023. 3, 5", + "[45] Shilong Liu, Zhaoyang Zeng, Tianhe Ren, Feng Li, Hao Zhang, Jie Yang, Chunyuan Li, Jianwei Yang, Hang Su, Jun Zhu, et al. Grounding dino: Marrying dino with grounded pre-training for open-set object detection. arXiv preprint arXiv:2303.05499, 2023. 6", + "[46] Shaoteng Liu, Tianyu Wang, Jui-Hsien Wang, Qing Liu, Zhifei Zhang, Joon-Young Lee, Yijun Li, Bei Yu, Zhe Lin, Soo Ye Kim, et al. Generative video propagation. arXiv preprint arXiv:2412.19761, 2024. 2", + "[47] Ziqiao Ma, Jiayi Pan, and Joyce Chai. World-to-words: Grounded open vocabulary acquisition through fast mapping in vision-language models. 
In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 524–544, 2023. 3", + "[48] Junhua Mao, Jonathan Huang, Alexander Toshev, Oana Camburu, Alan L Yuille, and Kevin Murphy. Generation and comprehension of unambiguous object descriptions. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 11-20, 2016. 3", + "[49] Kangfu Mei and Vishal Patel. Vidm: Video implicit diffusion models. In Proceedings of the AAAI conference on artificial intelligence, pages 9117-9125, 2023. 3", + "[50] Bo Miao, Mohammed Bennamoun, Yongsheng Gao, Mubarak Shah, and Ajmal Mian. Towards temporally consistent referring video object segmentation. https://arxiv.org/abs/2403.19407, 2024. 2", + "[51] Zhiliang Peng, Wenhui Wang, Li Dong, Yaru Hao, Shaohan Huang, Shuming Ma, Qixiang Ye, and Furu Wei. Grounding multimodal large language models to the world. In The Twelfth International Conference on Learning Representations, 2024. 3", + "[52] Renjie Pi, Jiahui Gao, Shizhe Diao, Rui Pan, Hanze Dong, Jipeng Zhang, Lewei Yao, Jianhua Han, Hang Xu, Lingpeng Kong, et al. Detgpt: Detect what you need via reasoning. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 14172-14189, 2023. 3", + "[53] Chenyang Qi, Xiaodong Cun, Yong Zhang, Chenyang Lei, Xintao Wang, Ying Shan, and Qifeng Chen. Fatezero: Fusing" + ], + "bbox": [ + 516, + 92, + 906, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + " attentions for zero-shot text-based video editing. In ICCV, pages 15932-15942, 2023. 2, 3", + "[54] Bosheng Qin, Juncheng Li, Siliang Tang, Tat-Seng Chua, and Yueting Zhuang. Instructvid2vid: Controllable video editing with natural language instructions. In 2024 IEEE International Conference on Multimedia and Expo (ICME), pages 1-6. IEEE, 2024. 
2, 3, 5", + "[55] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PmLR, 2021. 6", + "[56] Hanoona Rasheed, Muhammad Maaz, Sahal Shaji, Abdelrahman Shaker, Salman Khan, Hisham Cholakkal, Rao M Anwer, Erix Xing, Ming-Hsuan Yang, and Fahad S Khan. Glamm: Pixel grounding large multimodal model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024. 3", + "[57] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10684-10695, 2022. 1", + "[58] Seonguk Seo, Joon-Young Lee, and Bohyung Han. Urvos: Unified referring video object segmentation network with a large-scale benchmark. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23–28, 2020, Proceedings, Part XV 16, pages 208–223. Springer, 2020. 6", + "[59] Shangkun Sun, Xiaoyu Liang, Songlin Fan, Wenxu Gao, and Wei Gao. Ve-bench: Subjective-aligned benchmark suite for text-driven video editing quality assessment. In Proceedings of the AAAI Conference on Artificial Intelligence, 2025. 2", + "[60] Carles Ventura, Miriam Bellver, Andreu Girbau, Amaia Salvador, Ferran Marques, and Xavier Giro-i Nieto. Rvos: End-to-end recurrent network for video object segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5277-5286, 2019. 5", + "[61] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Yang Fan, Kai Dang, Mengfei Du, Xuancheng Ren, Rui Men, Dayiheng Liu, Chang Zhou, Jingren Zhou, and Junyang Lin. 
Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024. 5, 2", + "[62] Xiang Wang, Hangjie Yuan, Shiwei Zhang, Dayou Chen, Jiuniu Wang, Yingya Zhang, Yujun Shen, Deli Zhao, and Jingren Zhou. Videocomposer: Compositional video synthesis with motion controllability. Advances in Neural Information Processing Systems, 36, 2024. 2, 3", + "[63] Zhenyu Wang, Aoxue Li, Zhenguo Li, and Xihui Liu. Genartist: Multimodal IIm as an agent for unified image generation and editing. arXiv preprint arXiv:2407.05600, 2024. 2", + "[64] Cong Wei, Zheyang Xiong, Weiming Ren, Xinrun Du, Ge Zhang, and Wenhu Chen. Omniedit: Building image edit" + ], + "bbox": [ + 91, + 90, + 483, + 900 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "ing generalist models through specialist supervision. arXiv preprint arXiv:2411.07199, 2024. 5, 2", + "[65] Chenyun Wu, Zhe Lin, Scott Cohen, Trung Bui, and Subhransu Maji. Phrasecut: Language-based image segmentation in the wild. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10216-10225, 2020. 5", + "[66] Jianzong Wu, Xiangtai Li, Chenyang Si, Shangchen Zhou, Jingkang Yang, Jiangning Zhang, Yining Li, Kai Chen, Yunhai Tong, Ziwei Liu, et al. Towards language-driven video inpainting via multimodal large language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12501-12511, 2024. 2, 3, 5, 8, 1", + "[67] Jay Zhangjie Wu, Yixiao Ge, Xintao Wang, Stan Weixian Lei, Yuchao Gu, Yufei Shi, Wynne Hsu, Ying Shan, Xiaohu Qie, and Mike Zheng Shou. Tune-a-video: One-shot tuning of image diffusion models for text-to-video generation. In ICCV, pages 7623-7633, 2023. 3, 5", + "[68] Jay Zhangjie Wu, Xiuyu Li, Difei Gao, Zhen Dong, Jinbin Bai, Aishani Singh, Xiaoyu Xiang, Youzeng Li, Zuwei Huang, Yuanxi Sun, et al. Cvpr 2023 text guided video editing competition. 
arXiv preprint arXiv:2310.16003, 2023. 2", + "[69] Zhuofan Xia, Dongchen Han, Yizeng Han, Xuran Pan, Shiji Song, and Gao Huang. Gsva: Generalized segmentation via multimodal large language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024. 3", + "[70] Zhen Xing, Qi Dai, Zihao Zhang, Hui Zhang, Han Hu, Zuxuan Wu, and Yu-Gang Jiang. Vidiff: Translating videos via multi-modal instructions with diffusion models. arXiv preprint arXiv:2311.18837, 2023. 3", + "[71] Wilson Yan, Yunzhi Zhang, Pieter Abbeel, and Aravind Srinivas. Videogpt: Video generation using vq-vae and transformers. arXiv preprint arXiv:2104.10157, 2021. 3", + "[72] An Yang, Baosong Yang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Zhou, Chengpeng Li, Chengyuan Li, Dayiheng Liu, Fei Huang, Guanting Dong, Haoran Wei, Huan Lin, Jialong Tang, Jialin Wang, Jian Yang, Jianhong Tu, Jianwei Zhang, Jianxin Ma, Jin Xu, Jingren Zhou, Jinze Bai, Jinzheng He, Junyang Lin, Kai Dang, Keming Lu, Keqin Chen, Kexin Yang, Mei Li, Mingfeng Xue, Na Ni, Pei Zhang, Peng Wang, Ru Peng, Rui Men, Ruize Gao, Runji Lin, Shijie Wang, Shuai Bai, Sinan Tan, Tianhang Zhu, Tianhao Li, Tianyu Liu, Wenbin Ge, Xiaodong Deng, Xiaohuan Zhou, Xingzhang Ren, Xinyu Zhang, Xipin Wei, Xuancheng Ren, Yang Fan, Yang Yao, Yichang Zhang, Yu Wan, Yunfei Chu, Yuqiong Liu, Zeyu Cui, Zhenru Zhang, and Zhihao Fan. Qwen2 technical report. arXiv preprint arXiv:2407.10671, 2024. 5", + "[73] An Yang, Baosong Yang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Zhou, Chengpeng Li, Chengyuan Li, Dayiheng Liu, Fei Huang, et al. Qwen2 technical report. arXiv preprint arXiv:2407.10671, 2024. 1", + "[74] Zhuoyi Yang, Jiayan Teng, Wendi Zheng, Ming Ding, Shiyu Huang, Jiazheng Xu, Yuanming Yang, Wenyi Hong, Xiaohan Zhang, Guanyu Feng, et al. Cogvideox: Text-to-video diffusion models with an expert transformer. arXiv preprint arXiv:2408.06072, 2024. 
2" + ], + "bbox": [ + 516, + 90, + 906, + 898 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[75] Jaehong Yoon, Shoubin Yu, and Mohit Bansal. Raccoon: Remove, add, and change video content with auto-generated narratives. arXiv preprint arXiv:2405.18406, 2024. 2, 5, 6", + "[76] Haoxuan You, Haotian Zhang, Zhe Gan, Xianzhi Du, Bowen Zhang, Zirui Wang, Liangliang Cao, Shih-Fu Chang, and Yinfei Yang. Ferret: Refer and ground anything anywhere at any granularity. In The Twelfth International Conference on Learning Representations, 2023. 3", + "[77] Licheng Yu, Patrick Poirson, Shan Yang, Alexander C Berg, and Tamara L Berg. Modeling context in referring expressions. In Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part II 14, pages 69-85. Springer, 2016. 3", + "[78] Shoubin Yu, Jacob Zhiyuan Fang, Jian Zheng, Gunnar Sigurdsson, Vicente Ordonez, Robinson Piramuthu, and Mohit Bansal. Zero-shot controllable image-to-video animation via motion decomposition. In Proceedings of the 32nd ACM International Conference on Multimedia, pages 3332-3341, 2024. 2", + "[79] Tao Yu, Runseng Feng, Ruoyu Feng, Jinming Liu, Xin Jin, Wenjun Zeng, and Zhibo Chen. Inpaint anything: Segment anything meets image inpainting. arXiv preprint arXiv:2304.06790, 2023. 2, 3", + "[80] Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid loss for language image pre-training. In ICCV, pages 11975-11986, 2023. 1", + "[81] Hao Zhang, Hongyang Li, Feng Li, Tianhe Ren, Xueyan Zou, Shilong Liu, Shijia Huang, Jianfeng Gao, Lei Zhang, Chunyuan Li, et al. Llava-grounding: Grounded visual chat with large multimodal models. arXiv preprint arXiv:2312.02949, 2023. 3", + "[82] Haotian Zhang, Haoxuan You, Philipp Dufter, Bowen Zhang, Chen Chen, Hong-You Chen, Tsu-Jui Fu, William Yang Wang, Shih-Fu Chang, Zhe Gan, et al. 
Ferret-v2: An improved baseline for referring and grounding with large language models. arXiv preprint arXiv:2404.07973, 2024. 3", + "[83] Kai Zhang, Lingbo Mo, Wenhu Chen, Huan Sun, and Yu Su. Magicbrush: A manually annotated dataset for instruction-guided image editing. Advances in Neural Information Processing Systems, 36, 2024. 4, 5, 1", + "[84] Yichi Zhang, Ziqiao Ma, Xiaofeng Gao, Suhaila Shakiah, Qiaozi Gao, and Joyce Chai. Groundhog: Grounding large language models to holistic segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024. 3", + "[85] Zhixing Zhang, Bichen Wu, Xiaoyan Wang, Yaqiao Luo, Luxin Zhang, Yinan Zhao, Peter Vajda, Dimitris Metaxas, and Licheng Yu. Avid: Any-length video inpainting with diffusion model. arXiv preprint arXiv:2312.03816, 2023. 2, 3", + "[86] Zhenghao Zhang, Zuozhuo Dai, Long Qin, and Weizhi Wang. Effived: Efficient video editing via text-instruction diffusion models. arXiv preprint arXiv:2403.11568, 2024. 2, 3", + "[87] Yang Zhao, Zhijie Lin, Daquan Zhou, Zilong Huang, Jiashi Feng, and Bingyi Kang. Bubogpt: Enabling visual grounding in multi-modal llms. arXiv preprint arXiv:2307.08581, 2023. 3" + ], + "bbox": [ + 91, + 92, + 483, + 897 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "VEGGIE: Instructional Editing and Reasoning Video Concepts with Grounded Generation", + "text_level": 1, + "bbox": [ + 250, + 88, + 746, + 136 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Supplementary Material", + "bbox": [ + 380, + 145, + 614, + 165 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "6. 
+ Appendix", + "text_level": 1, + "bbox": [ + 89, + 181, + 197, + 200 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "In this Appendix, we provide extra details on", + "bbox": [ + 89, + 209, + 382, + 224 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Implementation details of VEGGIE training, and evaluation and baseline evaluations.", + "- Extra details on our data generation pipeline, including each module's details, prompts for each promptable module, data filtering, and visualization.", + "- Extra visualizations for each task and the comparison with the other 6 strong baseline models.", + "- Limitation and future work discussion." + ], + "bbox": [ + 89, + 228, + 482, + 345 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "6.1. Implementation Details", + "text_level": 1, + "bbox": [ + 89, + 361, + 307, + 378 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Model Architecture. Our MLLM is initialized with LLaVA-OneVision-7B (LLaVA-OV) [38]. It is a strong MLLM consisting of Qwen2 [73] LLM with 32K context window, SigLIP [80] visual encoder, and a 2-layer-MLP projector. LLaVA-OV can handle diverse visual-language tasks (including interleaved-frame, video). It provides a good starting point for our VEGGIE to understand complex user instructions and can respond with multiple frame-wise implicit planning thanks to its long context window. Our video diffusion model is initialized from the instructional image editing model, MagicBrush [83]. We further inflated 2D convolution layers to 3D form and inserted temporal attention layers following AnimateDiff [22] to adapt videos. Our alignment network is a single-layer MLP. We set 32 grounded task tokens for each frame.", + "bbox": [ + 89, + 383, + 483, + 609 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Training Details. Our MLLM is initialized with LLaVA-OneVision-7B (LLaVA-OV) [38]. 
+ Our VidDM is initialized from the instructional image editing model, MagicBrush [83] with Stable Diffusion v1.5 backbone [57]. We further inflated 2D convolution layers with temporal attention layers, following AnimateDiff [22] to adapt videos. Our VEGGIE adopts a 2-stage curriculum training strategy (Sec. 3.2). In the first stage, we fully fine-tune the 2D convolution layers in the UNet, the alignment network, and the task query tokens in the MLLM on image data, with 862M trainable parameters. In the second stage, we train all 3 dimensions in the UNet, the alignment network, the task query tokens, and a LoRA in the MLLM, leading to 1.3B trainable parameters. Both stages are trained end-to-end with only a diffusion loss. More details are in the Appendix.", + "bbox": [ + 89, + 612, + 483, + 838 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We keep the VAE encoder and decoder frozen during the entire training process. In the first stage, we keep the MLLM (including visual encoder, MLP projector, and LLM) frozen, and fully fine-tune learnable grounded task queries,", + "bbox": [ + 89, + 839, + 483, + 901 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "alignment network, and diffusion model, leading to around 800M training parameters. We set $1e^{-4}$ learning rate, and 96 batch size on each GPU. We use 16 A100 GPUs for the first stage of fine-tuning with 25K steps. In the second stage, we insert LoRA [27] modules into the LLM backbone, and inflate diffusion models by inserting extra temporal layers as in AnimateDiff [22]. We fine-tune LoRA, alignment network, learnable grounded task query tokens, and the diffusion model, leading to around 1.3B trainable parameters. We set $5e^{-4}$ learning rate, and 1 batch size with 8 gradient accumulation steps on 32 A100 GPUs. For LoRA, we set lora rank 64, lora alpha 16, and lora dropout 0.05. 
We train the second stage video model 2.5K step with 8 uniformly sampled frames.", + "bbox": [ + 511, + 183, + 906, + 393 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Evaluation and Baseline Details. We primarily compare our model with strong instructional editing models [9, 19, 66]. Additionally, we include non-instructional editing models [10, 20, 41] for completeness, although these are not fair baselines since they are not end-to-end and rely on additional conditions, such as depth maps or intermediate captions.", + "bbox": [ + 511, + 398, + 908, + 503 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We randomly sample 3 seeds for both our method and baseline methods. In our experiments, we use different classifier-free guidance scores ( $g_{T}$ and $g_{V}$ in Sec. 3.2) for different skills. Specifically, we set $g_{T} = 14.5$ and $g_{V} = 1.5$ for grounding and reasoning segmentation, while for other editing skills, we use $g_{T} = 10.5$ and $g_{V} = 2.0$ .", + "bbox": [ + 511, + 508, + 906, + 599 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "For baseline methods, we adopt their default settings (e.g., diffusion steps, guidance scores, frame numbers) as provided in their GitHub repositories. To ensure fair evaluation, we sample the same eight frames from each method's video editing results.", + "bbox": [ + 511, + 602, + 906, + 678 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "For alignment and smoothness metrics, we use CLIP-B/32 to measure text-image and image-image similarity, averaging across all frames to obtain video-level scores. 
For detection metrics, we use GroundingDINO (Swin-T OGC) to detect target objects frame by frame, averaging confidence scores across all frames for the final video-level metric.", + "bbox": [ + 511, + 681, + 908, + 772 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "For the removal task, where fewer detected objects and lower alignment with the original text prompt are desired, we compute alignment and detection metrics as $1 -$ value.", + "bbox": [ + 511, + 775, + 908, + 821 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We compare the model judged best for each video sample. The agreement between human and MLLM judgments is 0.74, whereas the agreement between human and CLIP is only 0.45. We conducted 5 times of the MLLM evaluation and took an average.", + "bbox": [ + 511, + 825, + 908, + 901 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/efd4caf16a889c87b28d59462f54107cedcabbbf03aacab091df80dc1de2ac82.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodsGroundingReasoning
JFJ&FJFJ&F
Segmentation Models
HTR [50]47.1147.6047.3520.0128.0224.01
VideoLISA [1]53.2354.3753.8038.4839.2038.84
MoRA [12]57.7353.6355.6838.9237.4840.36
Generative Editing Models
InstructDiff [19]19.8812.8116.3514.028.0711.05
InsV2V [9]13.8917.3715.6316.8910.4513.67
VEGGIE (Ours)37.7421.8329.7922.5315.9719.25
", + "bbox": [ + 91, + 88, + 480, + 232 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Table 4. Comparison of video concept grounding and reasoning segmentation tasks with other instructional generative models and expert segmentation models.", + "bbox": [ + 89, + 242, + 483, + 285 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "6.2. Data Collection Details", + "text_level": 1, + "bbox": [ + 89, + 316, + 305, + 333 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "As mentioned in the earlier Sec. 3.3, beyond collecting existing data, we proposed a novel data synthesis pipeline to generate instructional video data by animating images in the instructional image dataset.", + "bbox": [ + 89, + 343, + 483, + 404 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Specifically, we first select images from Omni-Edit [64], an instructional image editing dataset with carefully designed tasks/skills.", + "bbox": [ + 89, + 407, + 483, + 453 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "We first use QWen2-VL [61] to caption the original image and give an animation prompt to animate the image via CogVideX1.5-I2V [74]. Please refer Tab. 5 and Tab. 6 to our prompt for caption and animation. After getting the animated video, we utilize AnyV2V [35] to edit the video based on the reference image (edited image from image dataset). The reference image gives a strong prior to maintaining the image dataset's high-quality edit and thus transfer it to the video via the video editing model.", + "bbox": [ + 88, + 455, + 483, + 593 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Next, we filter out videos by evaluating VBenchmark metrics [30], including aesthetic quality, motion smoothness, image quality, subject consistency, and background consistency. We set thresholds at 0.6 for aesthetic quality, 65 for imaging quality, 0.9 for motion smoothness, subject consistency, and background consistency. 
We provide our generated data visualization in Fig. 9.", + "bbox": [ + 89, + 597, + 485, + 704 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "6.3. More Quantative Results & Discussion", + "text_level": 1, + "bbox": [ + 89, + 724, + 423, + 739 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Video Concept Grounding & Reasoning Segmentation We include additional results on video concept grounding and reasoning segmentation in Tab. 4. VEGGIE outperforms the diffusion-based baseline by a significant margin, showcasing its superior ability to accurately locate fine-grained object references and handle complex reasoning tasks. We hypothesize that through grounded generation, VEGGIE demonstrates remarkable precision in concept editing. For example, as shown in Fig. 11 in the Appendix, VEGGIE can remove the woman without altering the nearby girl.", + "bbox": [ + 89, + 750, + 483, + 902 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/f3c07174590d1a47445555b05acfd26b07ef6ca5174fecceef5bc55c8bdd1138.jpg", + "image_caption": [ + "Figure 8. t-SNE Visualization of different task query distribution. Different colors represent different tasks/skills. Best view in color." + ], + "image_footnote": [], + "bbox": [ + 547, + 90, + 874, + 277 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "6.4. Limitation and Future Works", + "text_level": 1, + "bbox": [ + 511, + 345, + 777, + 361 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Our current method, VEGGIE, is built upon Stable-Diffusion 1.5, which inevitably constrains its editing quality compared to cutting-edge video generation models that rely on DiT or flow-based architectures. In addition, the video outputs we produce are relatively short, lagging behind some recent state-of-the-art methods in terms of length and temporal consistency. 
Furthermore, we observe increased editing artifacts when incorporating large amounts of grounding data, suggesting that multi-task data mixture strategies play a key role in maintaining high-quality edits.", + "bbox": [ + 511, + 368, + 906, + 520 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Despite these limitations, our results demonstrate promising directions for improvement in terms of model design, data curation, and evaluation. Future work could explore integrating more advanced base architectures (e.g., DiT [34, 74] or flow-based models), extending the maximum video duration, developing more systematic data [28] with more advanced method [46] and carefully designed mixture strategies to balance fidelity and flexibility, and conducting scalable training. We hope our findings will inspire further research into these directions, pushing the boundaries of instructional video editing performance.", + "bbox": [ + 511, + 521, + 908, + 686 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Task Query Visualization & Analysis via t-SNE. To analyze task/skill correlations, we project their grounded queries into lower-dimensional spaces using PCA and t-SNE. As shown in Fig. 8, distinct clusters form for each category (e.g., Addition), indicating effective differentiation by the model. Reasoning and Grounding appear together on the right. It may be because they both require cognitive/semantic understanding or logical reference. Color, Env, and Change clusters are closer to each other, indicating that the model views them as similar operations focusing on changing different visual attributes. Style lies in the lower-left region but remains relatively close to Color, Env, and Change. 
This proximity may reflect that \"stylization\" is conceptually similar to these visual attribute tasks, although it targets different", + "bbox": [ + 511, + 688, + 910, + 900 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/06c86a484814fdc576446edcb73b36614248174c9f2efbcb21db237e6b677342.jpg", + "table_caption": [ + "Table 5. Qwen2-VL prompt for Image caption." + ], + "table_footnote": [], + "table_body": "
Please describe this image shortly, try to capture main details in the image.\nHere are some examples of image caption styles:\n1. A Couple In A Public Display Of Affection\n2. A kitten turning its head on a wooden floor\n3. An Old Man Doing Exercises For The Body And Mind\n4. Man Walking\nNow, please describe the given image briefly in one sentence, please do not say something like 'The image shows...' or 'The image depicts...'
", + "bbox": [ + 93, + 194, + 903, + 369 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "transformations. Removal stands apart on the top, especially distant from Addition, indicating the model perceives them as distinct rather than inverse operations. In contrast, Addition lies closer to tasks like Reasoning and Grounding. It suggests that the act of adding elements may rely on similar semantic or referential processes (e.g., deciding what to add and how to reference the newly added element).", + "bbox": [ + 89, + 401, + 485, + 508 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "6.5. Extra Visualization", + "text_level": 1, + "bbox": [ + 89, + 517, + 277, + 532 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "We provide extra visualization in Figs. 10 to 16", + "bbox": [ + 89, + 539, + 406, + 554 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/8e960bad5a16bce703b5fcb70a569c7d16770762185ba1588063f1036611943d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 210, + 117, + 785, + 252 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/735c86bcdb4d0957b6364d6294b5509271fe312247cae910fd452e2351e8cbed.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 210, + 266, + 785, + 401 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/df0f11d18bdfdc3b9c21b6fa529d5ccddccb0ca06469997d62359d8a0bfa75fb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 210, + 407, + 785, + 547 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/b679e36fe777360ebc2be8336cb67c250d84b550eb22f8ebeb7bb187463a7703.jpg", + "image_caption": [ + "Instruction: transform the setting to a snowy scene" + ], + "image_footnote": [], + "bbox": [ + 210, + 555, + 785, + 676 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/f3bc012cf2e6a9d62134a6b3ee005b89853723b1c73534b08c36befb6f9ba667.jpg", + "image_caption": [], + 
"image_footnote": [], + "bbox": [ + 210, + 703, + 785, + 762 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/96751fe61944764a81c1e8306ccadab6aa99b31163bed4354804d679da1bd518.jpg", + "image_caption": [ + "Figure 9. Examples of our generated instructional video editing data." + ], + "image_footnote": [], + "bbox": [ + 210, + 768, + 785, + 842 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Table 6. Qwen2-VL prompt for generating animation prompt.", + "bbox": [ + 313, + 184, + 679, + 199 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "I want to animate this image using an Image-Text-to-Video model. Your task is to generate a detailed and reasonable text prompt that describes how the image should be animated.", + "bbox": [ + 114, + 215, + 870, + 247 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Guidelines:", + "bbox": [ + 114, + 262, + 194, + 275 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Clarity & Realism - The animation description should be logical based on the given image, ensuring the movement makes sense for the scene.", + "2. Short & Vivid Description - Use expressive language to guide the animation model effectively, ensuring high-quality and visually engaging results." 
+ ], + "bbox": [ + 114, + 292, + 870, + 368 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Ensure that your animation prompt aligns with the content of the provided image and describes a visually compelling motion sequence.", + "bbox": [ + 114, + 383, + 870, + 412 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Do not output animation prompts that contain objects/scenes not included in the given image.", + "bbox": [ + 114, + 428, + 728, + 443 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Make sure the prompt is short in 1-2 sentences.", + "bbox": [ + 114, + 458, + 428, + 473 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/cff8fe39e7c04b9e27b42c5b598f301ac5e411f00152bdff7bf3bb1b67d1da5a.jpg", + "image_caption": [ + "Figure 10. More Examples of Concept Addition." + ], + "image_footnote": [], + "bbox": [ + 94, + 525, + 901, + 859 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Table 7. GPT-4o prompt for MLLM-as-a-Judge for automatic instructional video editing evaluation.", + "bbox": [ + 202, + 275, + 792, + 289 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "User", + "text_level": 1, + "bbox": [ + 116, + 306, + 153, + 320 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "You are an evaluator for instructional video editing tasks. Your job is to assess how well the edited video fulfills the user's specific instructions.", + "bbox": [ + 116, + 323, + 870, + 352 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "I will provide:", + "bbox": [ + 117, + 354, + 212, + 366 + ], + "page_idx": 17 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. The original video (first GIF)", + "2. The edited video (second GIF)", + "3. 
The user's instruction: [user instruction]" + ], + "bbox": [ + 117, + 369, + 400, + 412 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Please evaluate the editing result using the following format:", + "bbox": [ + 117, + 415, + 514, + 428 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "INSTRUCTION: [Repeat the user's instruction]", + "bbox": [ + 117, + 429, + 431, + 443 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "EVALUATION:", + "bbox": [ + 117, + 444, + 225, + 455 + ], + "page_idx": 17 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Accuracy score (1-10): [Your score]", + "- Quality score (1-10): [Your score]", + "- Appropriateness score (1-10): [Your score]", + "- Overall score (1-10): [Your final score]" + ], + "bbox": [ + 116, + 459, + 406, + 518 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "EXPLANATION: [Provide a brief justification for your scores, highlighting specific strengths and weaknesses of the edit]", + "bbox": [ + 116, + 534, + 874, + 563 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "RECOMMENDATION: [Optional suggestions for improvement]", + "bbox": [ + 117, + 565, + 544, + 579 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "When scoring, consider:", + "bbox": [ + 117, + 595, + 277, + 608 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "- Accuracy: Does the edit precisely follow the given instruction? - Quality: Is the edit visually seamless and natural-looking? 
- Appropriateness: Does the edit maintain coherence with the original video context?", + "bbox": [ + 116, + 609, + 870, + 640 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "The overall scale is:", + "bbox": [ + 117, + 655, + 250, + 667 + ], + "page_idx": 17 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1-3: Poor - Major issues with the edit", + "4-6: Acceptable - Follows instruction but with noticeable flaws", + "7-8: Good - Clear, effective edit with minor issues", + "9-10: Excellent - Flawless execution of the instruction" + ], + "bbox": [ + 117, + 670, + 529, + 729 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Assistant", + "text_level": 1, + "bbox": [ + 117, + 746, + 184, + 758 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Scores, Explanation, Recommendation", + "bbox": [ + 117, + 761, + 374, + 775 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/898b5c247cabf3586adb7f29b21e553779aa2d29ee0be6fa5155a19233834d09.jpg", + "image_caption": [ + "Figure 11. More Examples of Concept Removal." + ], + "image_footnote": [], + "bbox": [ + 96, + 108, + 379, + 444 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/a2d9cf4b40f420ebc7ee5761705d95f8a45f39fffee54758e9f4300b770235fa.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 382, + 109, + 640, + 443 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/dff3a5c584a585ae6935604dbf379d8dda7efa447ac5855c81317330be2c93ed.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 640, + 108, + 903, + 443 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/3f4b79d95de3df3c6f13c06609009e729bf430bbafcfbde1a4c1fa43b302c53d.jpg", + "image_caption": [ + "Figure 12. More Examples of Object Changes." 
+ ], + "image_footnote": [], + "bbox": [ + 94, + 516, + 379, + 851 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/c8db04c3c4081c16c71effca6b95f22414b3b755bb96289cd405d314e86121e6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 382, + 516, + 640, + 851 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/f34bb0162cf39f27cfe5a820676923452b4f9c7ec312b17e8153f0754ef01982.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 640, + 516, + 903, + 851 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/65f4482a764a319b6cd11cf875fc5fe37281b580a14bcf9f6fbdb6669e30fcb8.jpg", + "image_caption": [ + "Figure 13. More Examples of Stylization." + ], + "image_footnote": [], + "bbox": [ + 94, + 108, + 379, + 441 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/859b2b54f7e4ea3e5f151b40abda2a47c11a5e041a6aaf089e7c57ae94b7af8c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 382, + 108, + 640, + 443 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/eb5bbecd6f86f818c07a1fe8931a93f16ea43d8ea49eec4634c2689b86050898.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 640, + 108, + 903, + 443 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/b124f1339966a72d4c67b2ab4b76301ff8a7af2fc60b52d2755cbe999b6fb7fa.jpg", + "image_caption": [ + "Figure 14. More Examples of Environment and Background Editing." 
+ ], + "image_footnote": [], + "bbox": [ + 94, + 515, + 379, + 851 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/b8d1ff15c2cfbed82a207580b53d1b76e1ed13920065e6313097883e59d67805.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 382, + 516, + 640, + 851 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/eddea6f13444e5092d3c40d6739d4fdaba2cf2dc9765b24e0c382a4a6bc599e4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 640, + 516, + 903, + 851 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/42d74fb3a6be2224e7ee95810b48350e42a1b56d9e77e28004e90ee22ded0ed0.jpg", + "image_caption": [ + "Figure 15. More Examples of Visual Features Editing." + ], + "image_footnote": [], + "bbox": [ + 96, + 104, + 903, + 441 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/4328af1b9cf4afc2d45f60b901b77275b097f605ff43bd121e485851d5fd828d.jpg", + "image_caption": [ + "Figure 16. More Examples of Object Grounding." + ], + "image_footnote": [], + "bbox": [ + 94, + 508, + 901, + 851 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/41ce929097f8588a945d953564c371ef38f463eb2eb718e38767cc9dce0eee6b.jpg", + "image_caption": [ + "Figure 17. More Examples of Object Reasoning Segmentation." 
+ ], + "image_footnote": [], + "bbox": [ + 96, + 368, + 906, + 590 + ], + "page_idx": 21 + } +] \ No newline at end of file diff --git a/data/2025/2503_14xxx/2503.14350/f51fef62-e3ca-4f33-b47c-8b3a779fe535_model.json b/data/2025/2503_14xxx/2503.14350/f51fef62-e3ca-4f33-b47c-8b3a779fe535_model.json new file mode 100644 index 0000000000000000000000000000000000000000..ea20001473243060c2f201f71676605820628a7e --- /dev/null +++ b/data/2025/2503_14xxx/2503.14350/f51fef62-e3ca-4f33-b47c-8b3a779fe535_model.json @@ -0,0 +1,3489 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.251, + 0.13, + 0.748, + 0.176 + ], + "angle": 0, + "content": "VEGGIE: Instructional Editing and Reasoning Video Concepts with Grounded Generation" + }, + { + "type": "text", + "bbox": [ + 0.204, + 0.203, + 0.795, + 0.242 + ], + "angle": 0, + "content": "Shoubin Yu\\(^{1,3*}\\) Difan Liu\\(^{1*}\\) Ziqiao Ma\\(^{1,2*}\\) Yicong Hong\\(^{1}\\) Yang Zhou\\(^{1}\\) Hao Tan\\(^{1}\\) Joyce Chai\\(^{2}\\) Mohit Bansal\\(^{3}\\)" + }, + { + "type": "text", + "bbox": [ + 0.236, + 0.256, + 0.761, + 0.275 + ], + "angle": 0, + "content": "\\(^{1}\\)Adobe Research \\(^{2}\\)University of Michigan \\(^{3}\\)UNC Chapel Hill" + }, + { + "type": "text", + "bbox": [ + 0.352, + 0.277, + 0.641, + 0.293 + ], + "angle": 0, + "content": "https://veggie-gen.github.io/" + }, + { + "type": "image", + "bbox": [ + 0.133, + 0.309, + 0.859, + 0.557 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.089, + 0.565, + 0.908, + 0.622 + ], + "angle": 0, + "content": "Figure 1. We propose VEGGIE, a unified and versatile video generative model that handles various tasks for both video concept grounding and editing according to user instructions. With VEGGIE, users can locate, add, delete, and change concepts in a given video through diverse instruction formats (direct referring instruction or reasoning-demanding questions). 
Users can also edit videos with multimodal instruction empowered by MLLM, enabling applications like video editing from a reference image." + }, + { + "type": "title", + "bbox": [ + 0.248, + 0.636, + 0.327, + 0.652 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.668, + 0.486, + 0.866 + ], + "angle": 0, + "content": "While recent video diffusion models enable video editing, unifying diverse instructional editing tasks (e.g., add, remove, modify) under a single framework remains a significant challenge. In this paper, we introduce VEGGIE, a Video Editor with Grounded Generation from Instructions, a simple end-to-end framework that unifies video concept editing, grounding, and reasoning based on diverse user instructions. Specifically, given a video and text query, VEGGIE first utilizes an MLLM to interpret user intentions in instructions and ground them to the video contexts, generating framespecific grounded task queries for pixel-space responses. A diffusion model then renders these plans and generates edited videos that align with user intent. To support diverse" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.638, + 0.909, + 0.896 + ], + "angle": 0, + "content": "tasks and complex instructions, we employ a curriculum learning strategy: first aligning the MLLM and video diffusion model with large-scale instructional image editing data, followed by end-to-end fine-tuning on high-quality multitask video data. Additionally, we introduce a novel data synthesis pipeline to generate paired instructional video editing data for model training. It transforms static image data into diverse, high-quality video editing samples by leveraging Image-to-Video models to inject dynamics. VEGGIE shows strong performance in instructional video editing with different editing skills, outperforming the best instructional baseline as a versatile model, while other models struggle with multi-tasking. 
VEGGIE also excels in video object grounding and reasoning segmentation, where other baselines fail. We further reveal how the multiple tasks help each other and highlight promising applications like zero-shot multimodal instructional and in-context video editing." + }, + { + "type": "page_footnote", + "bbox": [ + 0.109, + 0.875, + 0.221, + 0.888 + ], + "angle": 0, + "content": "*Equal contribution." + }, + { + "type": "page_footnote", + "bbox": [ + 0.11, + 0.888, + 0.374, + 0.9 + ], + "angle": 0, + "content": "\\(\\dagger\\) Work done during internship at Adobe Research." + }, + { + "type": "list", + "bbox": [ + 0.109, + 0.875, + 0.374, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.279, + 0.059, + 0.72 + ], + "angle": 270, + "content": "arXiv:2503.14350v3 [cs.CV] 25 Oct 2025" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.093, + 0.09, + 0.223, + 0.107 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.115, + 0.486, + 0.297 + ], + "angle": 0, + "content": "Building on the advances in Video Diffusion Models (VIDDMs) [2, 4, 24, 25, 74], video editing methods have emerged as video design tools, allowing users to manipulate video concepts such as adding, removing, altering objects and style translation [20, 53, 62, 79, 85]. To enhance user experiences, instructional video editing methods [54, 86] have been developed, using triples of text prompts, source videos, and target videos for training. Due to their limited performance in understanding user intent and multimodal semantics [17], several methods have incorporated multimodal large language models (MLLMs) to handle complex instructions/reasoning [17, 29, 75]." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.297, + 0.486, + 0.584 + ], + "angle": 0, + "content": "However, existing methods fall short of the goal of a simple, versatile video concept editor, facing three primary challenges. 
First, most methods are not end-to-end, requiring intermediate layout/mask/human or model caption guidance [35, 53, 63, 75], which adds workload on users and disrupts a seamless editing experience. Second, existing pipelines connecting MLLMs to VidDMs require multiple training objectives beyond simple pixel-space diffusion loss, such as language loss [75] or mask losses [66]. This increases optimization difficulty and often requires additional hyperparameter tuning or annotations. Third, existing video editing models, both instructional and non-instructional, struggle with handling other diverse editing tasks, ranging from addition, and deletion to stylization. For example, LGVI [66] fails in global edits such as stylization and color change, while VidToMe [41] struggles with local edits such as adding or removing objects. These methods also struggle with input videos that contain multiple objects or when user instructions require complex reasoning." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.585, + 0.486, + 0.765 + ], + "angle": 0, + "content": "These challenges result from two limitations: First, there is a lack of multitasking fine-tuning on well-curated instructional video editing datasets that span a broad range of skills. Second, models often lack two critical capabilities needed to interpret user intentions and accurately locate concepts: multimodal reasoning to infer the intended modification from the user's instruction; and grounding language to the input video to precisely identify the region or object to be edited. For example, in Figure 1, one can effortlessly locate the girl given \"identify the little girl.\" When asked to \"add a hat to the little girl,\" we intuitively imagine the hat placed on her head from commonsense, even without seeing an actual hat." 
+ }, + { + "type": "text", + "bbox": [ + 0.09, + 0.766, + 0.486, + 0.902 + ], + "angle": 0, + "content": "To address these challenges, we introduce VEGGIE, a Video Editor with Grounded Generation from Instructions. VEGGIE unifies video concept grounding and editing without relying on additional layout, mask guidance, or intermediate caption [35, 39, 42, 75, 78, 79]. Instead, we formulate the problem as end-to-end grounded generation in pixel space, using only a diffusion loss. Specifically, given a video and a text query, VEGGIE first leverages an MLLM to interpret complex instructions, generating frame-wise con" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.908, + 0.379 + ], + "angle": 0, + "content": "ditions. Unlike prior methods [43, 75] that use discrete text tokens as conditions, which disconnect the pipeline and block gradient propagation, VEGGIE employs continuous, learnable task query embeddings per frame. This enables end-to-end training and effectively captures grounded task representations for diffusion model conditioning. To handle diverse tasks and accurately interpret complex queries, we employ a curriculum learning strategy that begins by aligning MLLMs with diffusion models using massive paired instructional image editing data and then fine-tuning the model end-to-end on high-quality multitask video data to adapt video. Unlike tool-use methods [16, 35, 75], VEGGIE formulates both video grounding and instructional editing in the same video-to-video task formulation, enabling efficient handling through a unified single model. To further support end-to-end training, we introduce a novel automatic instructional video data generation pipeline that lifts high-quality instructional image editing data into the video domain using image-to-video and video evaluation tools." 
+ }, + { + "type": "text", + "bbox": [ + 0.512, + 0.382, + 0.909, + 0.669 + ], + "angle": 0, + "content": "Existing video editing benchmarks do not provide wide and uniform coverage of diverse editing skills [59, 68]. To address this gap, we contribute VEG-Bench, an instructional video editing benchmark that spans 8 editing skills: concept addition, removal, object changing, environment background changing, visual feature changing, stylization, object grounding, and reasoning segmentation. Each skill is evaluated using a dedicated suite of metrics. We assess the proposed VEGGIE alongside 6 baselines on VEG-Bench. VEGGIE demonstrates strong performance across diverse editing skills, outperforming the best instructional baseline as a versatile, all-in-one model, while other models struggle with multi-tasking. Additionally, VEGGIE excels in video object grounding and reasoning segmentation tasks, where other baselines fall short. We show further analysis of how multi-task learning enhances our framework and highlight applications such as zero-shot multimodal instructional following (Fig. 2) and few-shot in-context editing (Fig. 3). Our contributions are summarized as follows:" + }, + { + "type": "text", + "bbox": [ + 0.517, + 0.675, + 0.909, + 0.78 + ], + "angle": 0, + "content": "- We propose VEGGIE, an end-to-end model that integrates an MLLM and a VidDM. VEGGIE is a versatile framework that handles diverse instructional requests for editing and grounding various video concepts. Unlike existing work that achieves multitasking via tool use, VEGGIE unifies diverse tasks in a single model, thus simplifying the training with only diffusion loss." + }, + { + "type": "text", + "bbox": [ + 0.517, + 0.781, + 0.905, + 0.81 + ], + "angle": 0, + "content": "- We propose a data synthesis pipeline, scaling high-quality instructional video editing data for future work." 
+ }, + { + "type": "text", + "bbox": [ + 0.517, + 0.811, + 0.908, + 0.855 + ], + "angle": 0, + "content": "- We propose VEG-Bench, an instructional video editing benchmark that spans 8 editing skills with dedicated metrics for each skill." + }, + { + "type": "text", + "bbox": [ + 0.517, + 0.857, + 0.909, + 0.901 + ], + "angle": 0, + "content": "- VEGGIE achieves strong performance across diverse editing skills compared with SoTA methods, and shows potentials for multimodal instruction and in-context following." + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.675, + 0.909, + 0.901 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "image", + "bbox": [ + 0.162, + 0.09, + 0.245, + 0.143 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.169, + 0.145, + 0.239, + 0.155 + ], + "angle": 0, + "content": "Reference" + }, + { + "type": "image_caption", + "bbox": [ + 0.167, + 0.162, + 0.242, + 0.211 + ], + "angle": 0, + "content": "Transfer the style in the reference image." + }, + { + "type": "image", + "bbox": [ + 0.249, + 0.091, + 0.495, + 0.219 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.5, + 0.101, + 0.584, + 0.143 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.5, + 0.143, + 0.581, + 0.153 + ], + "angle": 0, + "content": "Reference" + }, + { + "type": "image", + "bbox": [ + 0.587, + 0.091, + 0.832, + 0.218 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.5, + 0.155, + 0.578, + 0.216 + ], + "angle": 0, + "content": "Add the object in the reference image on the women." + }, + { + "type": "image_caption", + "bbox": [ + 0.104, + 0.231, + 0.892, + 0.246 + ], + "angle": 0, + "content": "Figure 2. Multimodal instruction following emerges in VEGGIE, allowing for style transfer or object addition from reference images." 
+ }, + { + "type": "image", + "bbox": [ + 0.162, + 0.263, + 0.832, + 0.395 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.109, + 0.41, + 0.885, + 0.424 + ], + "angle": 0, + "content": "Figure 3. In-context editing emerges in VEGGIE, allowing for few-shot learning of editing tasks with paired image demonstrations." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.449, + 0.232, + 0.464 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.476, + 0.484, + 0.779 + ], + "angle": 0, + "content": "Instructional Video Editing Video Diffusion Models (VidDMs) [2, 4, 6, 7, 15, 26, 49, 67, 71], enable high-quality video generation across a wide range of video concepts. Building on these advances, video editing methods have emerged as tools for video design, allowing users to manipulate video concepts such as adding, removing, altering objects and style translation [20, 53, 62, 79, 85]. To enhance user experiences, instructional video editing methods [54, 70, 86] have been developed, using triples of video instructions, source videos, and target videos for training. These methods demonstrate limited performance when complex multimodal reasoning is required, as noted by previous research on instructional image editing [17]. Moreover, they struggle with diverse editing tasks, from addition and deletion to stylization. For example, LGVI [66] is primarily designed for removal tasks, while TokenFlow [20] struggles with local edits such as adding, removing, or changing objects. We address this limitation with pixel-level multitasking fine-tuning on well-curated instructional video editing datasets covering various grounding and editing skills." 
+ }, + { + "type": "text", + "bbox": [ + 0.09, + 0.78, + 0.484, + 0.902 + ], + "angle": 0, + "content": "Video Grounding and Segmentation Visual grounding requires models to connect language to its corresponding visual concept in the visual context [40, 47]. This is commonly evaluated via the language-guided semantic localization tasks, ranging from simple referring expressions in RefCOCO series [48, 77] and their generalized variant [44] that takes no-target and multi-target into account. Recently, grounded multimodal large lan" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.451, + 0.908, + 0.587 + ], + "angle": 0, + "content": "guage models (MLLMs) are trained for object grounding to bounding boxes [8, 51, 52, 76, 82, 87] and segmentation masks [56, 69, 81, 84] using text-image pairs with fine-grained annotations linking phrases to entities. These models unlock the potential of reasoning segmentation [11, 36], bringing language-informed reasoning into semantic segmentation. Instead of using dedicated object detection or segmentation modules, we achieve video grounding through end-to-end training with only diffusion loss." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.601, + 0.727, + 0.616 + ], + "angle": 0, + "content": "3. Our Method: VEGGIE" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.627, + 0.909, + 0.838 + ], + "angle": 0, + "content": "In this paper, we introduce a Video Editor with Grounded Generation from Instructions (VEGGIE), a unified and versatile generative video model. It combines the complex instruction understanding and reasoning capabilities of MLLMs with the generative capacity of VidDMs. The model is trained end-to-end with diffusion loss only. VEGGIE efficiently handles diverse user inputs, including direct instructions, complex questions requiring in-depth reasoning, and multimodal conditioning. 
It performs various pixel-level manipulations, enabling tasks such as video concept addition, removal, changing, stylization, grounding, and reasoning segmentation based on user instructions. We elaborate on the model design (Sec. 3.1), training and inference process (Sec. 3.2), and data curation (Sec. 3.3)." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.849, + 0.702, + 0.863 + ], + "angle": 0, + "content": "3.1. Model Architecture" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.871, + 0.908, + 0.902 + ], + "angle": 0, + "content": "VEGGIE consists of four main components (see Fig. 4): (1) a multimodal large language model, (2) a set of learnable" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.202, + 0.093, + 0.226, + 0.11 + ], + "angle": 0, + "content": null + }, + { + "type": "header", + "bbox": [ + 0.226, + 0.093, + 0.763, + 0.109 + ], + "angle": 0, + "content": "VEGGIE: A Unified and Versatile Instructional Video Generative Model" + }, + { + "type": "image", + "bbox": [ + 0.114, + 0.119, + 0.885, + 0.356 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.089, + 0.362, + 0.908, + 0.418 + ], + "angle": 0, + "content": "Figure 4. Overview of our proposed end-to-end VEGGIE framework. Our Multimodal Large Language Model first understands input video frames and diverse user instructions, then it generates frame-wise reasoning queries that maintain per-frame editing conditions for the video diffusion model. The video diffusion model will render the MLLM-generated conditions to the pixel space for diverse tasks, including video editing, video grounding, and video reasoning segmentation with questions. We only apply diffusion loss for the whole pipeline training." 
+ }, + { + "type": "text", + "bbox": [ + 0.089, + 0.444, + 0.486, + 0.564 + ], + "angle": 0, + "content": "grounded task queries, (3) an alignment network (single-layer MLP) that projects the MLLM output into the condition space of the diffusion model, and (4) a video diffusion model initialized from an instructional image editing model [83]. Our model first generates latent conditions for target video frames by querying multimodal context using an MLLM, then renders these conditions at the pixel level through a video diffusion model, as detailed below." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.566, + 0.486, + 0.78 + ], + "angle": 0, + "content": "MLLM for Generating Grounded Task Guidance. As illustrated in the left of Fig. 4, given a video consisting of a sequence of frames \\( V = [f_{1}, \\dots, f_{n}] \\), where \\( n \\) is the frame number of the given video, a user instruction/question \\( I \\), our goal is to obtain the response \\( \\widehat{V} = [\\widehat{f}_{1}, \\dots, \\widehat{f}_{n}] \\) at pixel space that faithfully reflects user instruction about the given video. The MLLM module processes both the input video \\( V \\) and a user instruction \\( I \\) to generate a sequence of grounded task tokens per frame: \\( C = [c_{1}, \\dots, c_{n}] \\), which are input and output in parallel. These tokens serve as task guidance and implicitly encode the target manipulation, such as object attributes, spatial relationships, or style transfer parameters. The MLLM ensures the model captures both explicit user instructions and implicit reasoning needs." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.78, + 0.486, + 0.902 + ], + "angle": 0, + "content": "VidDM for Rendering MLLM Guidance at Pixel Space. As illustrated in the right of Fig. 4, the VidDM takes the original video \\( V \\) and the grounded task tokens \\( C \\) as conditions to synthesize the target video \\( \\widehat{V} \\). 
The original video is concatenated with the noise volume, and the task tokens are input to the cross-attention. With grounded task guidance in denoising steps, the generation process ensures that the output faithfully follows user instructions while preserving the" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.444, + 0.907, + 0.506 + ], + "angle": 0, + "content": "video's structure and motion dynamics. Through iterative denoising, it refines each frame while maintaining temporal consistency, applying pixel modifications coherently for a smooth and visually consistent output video \\(\\widehat{V}\\)." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.516, + 0.895, + 0.532 + ], + "angle": 0, + "content": "3.2. Curriculum Learning from Images to Videos" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.539, + 0.909, + 0.749 + ], + "angle": 0, + "content": "Training the model directly on video tasks presents two key challenges: (1) misalignment between MLLM and diffusion model representations, making it difficult for the diffusion model to interpret MLLM-generated task queries with limited fine-tuning data, and (2) the diffusion model's lack of multitasking capability, even for image tasks, due to insufficient training on diverse tasks. Our initial experiments also found the model collapsed when the whole pipeline was directly trained with all data. These challenges/observations underscore the need for pre-alignment between MLLM and the diffusion model to enable seamless adaptation from language-space task queries to pixel-space modifications. To this end, we adopt a two-stage curriculum learning strategy for the proposed VEGGIE framework." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.75, + 0.911, + 0.901 + ], + "angle": 0, + "content": "Stage 1: Aligning Diffusion and Language Spaces. In the first stage, we align the diffusion model with the MLLM using large-scale image-level instructional editing data. 
The MLLM remains frozen while we update the alignment network, grounded task queries, and diffusion UNet. This process fine-tunes the diffusion model weights to align with the language space, enabling the model to interpret MLLM-generated guidance and translate user instructions into pixel-level edits while preserving the MLLM's strong ability to understand instructions and user intentions." + } + ], + [ + { + "type": "table", + "bbox": [ + 0.094, + 0.089, + 0.48, + 0.312 + ], + "angle": 0, + "content": "
TypeSourceR.E.G.# Img./Vid.# Ins.
VideoROVI [66]4.3K27.4K
VPLM [75]4.3K5.5K
GroundMoRe [12]1.3K5.5K
RVoS [60]1.9K6.1K
MeViS [14]1.5K17.1K
InstructV2V [9]68.3K68.3K
VEG-Edit (Ours)4.0K6.2K
Total136.1K
ImageSeed-Data-Edit [18]3M3M
LISA [37]0.2K1.3K
gRefCoCo [44]13.6K73.4K
PhraseCut [65]310.8K310.8K
EraseDraw [5]64.9K42.4K
MagicBrush [83]9.3K9.3K
SmartEdit [29]0.5K0.9K
Total3438.1K
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.09, + 0.318, + 0.484, + 0.361 + ], + "angle": 0, + "content": "Table 1. Summary of our data for training. R.: Reasoning, E.: Editing, G.: Grounding. #Img/Vid: the number of images/videos, and #Ins.: the number of instruction-image/video pairs." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.384, + 0.486, + 0.611 + ], + "angle": 0, + "content": "Stage 2: Enhancing Temporal Consistency and Dynamics. With the MLLM and diffusion model aligned, fine-tuning diverse instructional video editing data becomes more effective for improved instruction following at pixel-space including temporal consistency, dynamic coherence, and editing faithfulness. In this stage, we fine-tune the framework with the MLLM, including the alignment network, grounded task queries, and all 3 dimensions in diffusion UNet, end-to-end with carefully curated multitasking instructional video editing data. Following prior work [22, 67], we inflated the 2D UNet from Stage 1 with temporal attention layers for video adaptation. For both stages 1 and 2, we optimize the framework with a single diffusion loss, enabling unified learning for improved instructional video editing performance while maintaining simplicity and efficiency." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.615, + 0.487, + 0.736 + ], + "angle": 0, + "content": "Classifier-Free Guidance during Testing. We employ classifier-free guidance to balance quality and diversity in diffusion-generated samples. Following prior work [3, 17], we apply classifier-free guidance to instructional visual editing considering two conditions: the grounded task tokens and the original video. To obtain unconditional guidance, we set null values \\((\\varnothing)\\) for both task tokens and input video. 
In this case, our score estimate is:" + }, + { + "type": "equation", + "bbox": [ + 0.1, + 0.751, + 0.473, + 0.806 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\tilde {e _ {\\theta}} (z _ {t}, c _ {T}, c _ {V}) = e _ {\\theta} (z _ {t}, \\varnothing , \\varnothing) \\\\ + g _ {T} \\cdot \\left(e _ {\\theta} \\left(z _ {t}, c _ {V}, c _ {T}\\right) - e _ {\\theta} \\left(z _ {t}, c _ {V}, \\varnothing\\right)\\right) \\\\ + g _ {V} \\cdot \\left(e _ {\\theta} \\left(z _ {t}, c _ {V}, \\varnothing\\right) - e _ {\\theta} \\left(z _ {t}, \\varnothing , \\varnothing\\right)\\right), \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.825, + 0.484, + 0.903 + ], + "angle": 0, + "content": "where \\(\\theta\\) represents the model parameters, \\(C_T\\) and \\(C_V\\) denote the task tokens and video conditions, \\(\\varnothing\\) is the null value, \\(z_t\\) is the noised latent at timestamp \\(t\\), and \\(g_T\\) and \\(g_V\\) are the task guidance and video guidance scales, respectively. More training details are included later in Appendix." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.091, + 0.729, + 0.108 + ], + "angle": 0, + "content": "3.3. Data Curation Pipeline" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.113, + 0.907, + 0.295 + ], + "angle": 0, + "content": "Existing video editing models, both instructional and non-instructional, struggle with diverse editing skills due to the lack of high-quality multitasking fine-tuning data. In this section, we introduce our data curation strategy to support VEGGIE in achieving versatile video editing skills. As listed in Tab. 1, we collect 3.4M image and 133.9K video data from diverse sources to support our VEGGIE curriculum learning as discussed in Sec. 3.2. 
We create our training dataset from two sources: (1) collecting existing image and video data and converting it into an instructional editing format, and (2) synthesizing new instructional video editing samples using existing datasets and generative models." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.296, + 0.909, + 0.614 + ], + "angle": 0, + "content": "Collecting Diverse Multitask Image and Video Data. We bring together instructional editing data from both image (Seed-Data-Edit [18], MagicBrush [83], EraseDraw [5]) and video (InstructV2V [9], VPLM [75]) sources. These datasets provide pairs of original and edited visual contents with user instructions. The tasks include adding, removing, and changing objects, stylizing, and performing global/local edits. Beyond editing datasets, we incorporate segmentation data at both the image level (gRefCoCo [44] and Phrase-Cut [65]) and the video level (RVoS and MeViS). These segmentation tasks are reformulated as color-filling challenges, which guide the model in learning referring grounding (i.e., understanding which object or region to edit) and strengthen its conceptual learning. To further unlock complex instruction understanding via MLLM, we include data that requires more advanced reasoning and implicit referencing. Specifically, we include: reasoning segmentation (LISA [37]), reasoning editing (SmartEdit [29]), interactive video inpainting (LGVI [66]), and motion-grounded video reasoning (GroundMoRe [12]). These tasks help VEGGIE learn implicit references and reasoning." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.614, + 0.909, + 0.902 + ], + "angle": 0, + "content": "Synthesizing Instructional Video Editing Data via Image-to-Video Animation. Recent methods [3, 54] generate synthetic instructional video-editing data by first creating text instructions with LLM, then getting edited videos via T2V models and prompt-to-prompt editing [23]. 
While these methods adapt image-based editing pipelines [9] to videos, the generated data suffer from temporal-consistency issues. To address this gap, we propose a novel image-to-video animation strategy that leverages the abundance of high-quality image-level instructional editing datasets [64, 83], which provide well-annotated instructions, paired edited images, and well-organized editing skill categories. As illustrated in Fig. 5, given an original image \\(I\\), an edited image \\(\\bar{I}\\), and an instruction from an instructional image editing dataset [64], our approach involves three key steps. First, we use an offline MLLM [61, 72] to generate an image caption and an animation prompt that describes plausible motion within the image. Next, an image-to-video (I2V) model animates the image into a video \\(V\\). Finally," + } + ], + [ + { + "type": "image", + "bbox": [ + 0.12, + 0.088, + 0.451, + 0.358 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.362, + 0.486, + 0.418 + ], + "angle": 0, + "content": "Figure 5. Our data generation pipeline for synthetic instructional video editing data. It injects dynamics into well-constructed instructional image editing datasets via the Image-to-Video (I2V) Model, and generates paired video data for instruction editing." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.431, + 0.485, + 0.658 + ], + "angle": 0, + "content": "we generate the corresponding edited video \\(\\bar{V}\\) using a first-frame-conditioned video editing model [35], leveraging \\(\\bar{I}\\) as a strong prior to ensure consistent edits across frames. Finally, to ensure data quality, we evaluate each original-edited video pair with automatic video quality evaluation metrics [30], which assess the generated videos from diverse dimensions, e.g., motion smoothness, image quality, and background consistency. 
This pipeline transforms carefully curated image-based datasets into instructional video-editing resources while preserving the precision of the original edits. As a result, our data method expands the availability of high-quality synthetic video-editing data, supporting a wider range of editing tasks in our end-to-end unified framework. More details on data generation, prompting, examples, and pre/post-processing are in the Appendix." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.674, + 0.224, + 0.691 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.7, + 0.486, + 0.762 + ], + "angle": 0, + "content": "We first introduce the VEG-Bench Benchmark and then demonstrate the superiority of VEGGIE across diverse video instructional editing skills. More experiments, visualization, and implementation details are in the Appendix." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.772, + 0.314, + 0.787 + ], + "angle": 0, + "content": "4.1. VEG-Bench and Metrics" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.795, + 0.486, + 0.903 + ], + "angle": 0, + "content": "As no existing benchmark is designed for fine-grained instructional video editing skills, we manually collect and annotate VEG-Bench, containing 132 video-instruction pairs that balanced cover 8 different video generative skills (15-20 for each). Beyond standard metrics, including text-to-video alignment (CLIP-Text [55]), video smoothness (CLIP-F [55]), and image quality (MUSIQ [32]), we also" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.909, + 0.289 + ], + "angle": 0, + "content": "first introduce MLLM-as-a-Judge to give a holistic evaluation score according to the given original video, edited video, and user instruction. It is achieved by prompting GPT4o [21] to evaluate whether the requested semantic change has been fulfilled, using a scale from 1 to 10. 
For addition and removal, we also introduce an object detector (GroundingDiNo [45]) to detect if the object is added/removed faithfully. For grounding and reasoning segmentation, we follow video grounding tasks [12, 14, 33, 58] and adopt the Jaccard index \\((\\mathcal{J})\\) [31], F-measure \\((\\mathcal{F})\\) [13], and their mean \\((\\mathcal{J} \\& \\mathcal{F})\\). We also compute SSIM between the generated video and the original video masked with GT masks. More evaluation/metrics details are included in Appendix." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.301, + 0.715, + 0.317 + ], + "angle": 0, + "content": "4.2. Experimental Results" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.324, + 0.909, + 0.596 + ], + "angle": 0, + "content": "Instructional Video Editing over Diverse Skills. As shown in Tab. 2, we evaluate 7 different models on VEG-Bench across 8 distinct editing skills. Overall, VEGGIE demonstrates the best performance among instructional video editing models. Compared to VEGGIE, non-instructional models often struggle with concept removal and addition. This limitation arises because these models rely on attention control or additional conditions (e.g., depth maps) that impose strong priors, constraining the model and making object addition or removal challenging. We also observe that InsV2V achieves high scores in quality and smoothness metrics, but underperforms in alignment and MLLM judgment, which demand faithful semantic changes. Qualitative examples in Fig. 6 illustrate that InsV2V often makes minimal changes to the input video, resulting in high video quality but unfaithful outputs. In contrast, VEGGIE strikes a better balance, delivering both high-quality visuals and accurate semantic alignment with the intended edits." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.598, + 0.909, + 0.794 + ], + "angle": 0, + "content": "Can Multi-Task Help Each Other? 
To test the previous hypothesis, we train our model on the VPLM [75] dataset, which includes paired grounding and removal tasks (approximately 5.5K samples for each task). We focus on these tasks as representative examples due to their straightforward evaluation against ground truth. As shown in Table 3, multitask training yields a lower FVD score and a higher SSIM score, demonstrating that learning to locate and remove a video concept can mutually reinforce performance. We show an example in Fig. 7. However, this conclusion only holds with a balanced data combination. We also observe that an excessive amount of grounding data can introduce more artifacts and negatively impact visual editing skills." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.796, + 0.909, + 0.901 + ], + "angle": 0, + "content": "Emergent Zero-shot Multimodal Instruction Following. We also highlight the emergent behavior of VEGGIE on multi-modal instruction following, even without dedicated training data for this specific editing instruction. Notably, VEGGIE demonstrates the ability to perform zero-shot multimodal instructional video editing. As illustrated in Fig. 2, VEGGIE can transfer styles or add objects from a reference" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.134, + 0.091, + 0.35, + 0.1 + ], + "angle": 0, + "content": "[Addition] Please add a ball in the given video frames." + }, + { + "type": "image", + "bbox": [ + 0.115, + 0.1, + 0.368, + 0.34 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.371, + 0.092, + 0.627, + 0.1 + ], + "angle": 0, + "content": "[Removal] Please remove the man in black in given video frames." + }, + { + "type": "image", + "bbox": [ + 0.372, + 0.101, + 0.626, + 0.34 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.644, + 0.092, + 0.865, + 0.1 + ], + "angle": 0, + "content": "[Swap] Replace golden building with a white mountain." 
+ }, + { + "type": "image", + "bbox": [ + 0.628, + 0.101, + 0.883, + 0.34 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.163, + 0.343, + 0.321, + 0.352 + ], + "angle": 0, + "content": "[Environment] Make it on the beach." + }, + { + "type": "image", + "bbox": [ + 0.116, + 0.352, + 0.369, + 0.59 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.435, + 0.343, + 0.562, + 0.352 + ], + "angle": 0, + "content": "[Color] Make the swan white." + }, + { + "type": "image", + "bbox": [ + 0.371, + 0.352, + 0.626, + 0.59 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.681, + 0.343, + 0.831, + 0.352 + ], + "angle": 0, + "content": "[Texture] Make the rhinocero furry." + }, + { + "type": "image", + "bbox": [ + 0.628, + 0.352, + 0.883, + 0.59 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.173, + 0.598, + 0.312, + 0.607 + ], + "angle": 0, + "content": "[Style] Make it chinese ink style." + }, + { + "type": "image", + "bbox": [ + 0.115, + 0.607, + 0.369, + 0.846 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.413, + 0.592, + 0.589, + 0.601 + ], + "angle": 0, + "content": "[Grounding] Could you locate the knife" + }, + { + "type": "image", + "bbox": [ + 0.371, + 0.601, + 0.626, + 0.846 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.651, + 0.592, + 0.863, + 0.601 + ], + "angle": 0, + "content": "[Reasoning] What can be used for heating food?" + }, + { + "type": "image", + "bbox": [ + 0.627, + 0.601, + 0.883, + 0.846 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.858, + 0.905, + 0.886 + ], + "angle": 0, + "content": "Figure 6. Qualitative comparison of editing results across 8 different abilities (splitting visual features into color and texture). 
We provide zoom-in details for a more detailed comparison. Best viewed in color. More in Appendix." + } + ], + [ + { + "type": "table", + "bbox": [ + 0.149, + 0.089, + 0.842, + 0.513 + ], + "angle": 0, + "content": "
Methods\nGeneratorNon-Instructional Editing ModelInstructional Editing Model
VidToMe [41] (SD1.5)TokenFlow [20] (SD2.1)Flatten [10] (SD2.1)InstructDiff [19] (SD1.5)LGVI [66] (SD1.5)InsV2V [9] (SD1.5)VEGGIE (Ours) (SD1.5)
Concept Addition
MLLM-Judge (↑)5.005.806.627.262.735.697.44
Alignment (↑)27.8029.3028.2228.1025.0628.2729.27
Smoothness (↑)96.7997.2695.7493.6696.0996.9494.93
Quality (↑)62.8165.6255.4554.0841.5956.2461.31
Detection (↑)47.9849.5349.7455.3614.4248.0157.96
Concept Removal
MLLM-Judge (↑)2.603.734.466.126.592.785.07
Alignment (↑)75.0175.9978.4075.5175.6774.4175.63
Smoothness (↑)96.1396.4795.8291.8397.0396.9995.04
Quality (↑)66.3271.5250.7755.0842.3158.7950.99
Detection (↑)34.3155.1670.9164.8178.4025.6470.22
Object Changing
MLLM-Judge (↑)5.006.537.377.002.066.606.63
Alignment (↑)25.6928.7627.0627.3622.1726.6027.77
Smoothness (↑)96.2397.2196.1392.0795.6696.7495.44
Quality (↑)64.0669.9759.3755.0138.2060.9058.15
Environment & Background Changing
MLLM-Judge (↑)5.817.357.376.052.376.607.18
Alignment (↑)28.1730.0030.0428.0321.9428.2729.15
Smoothness (↑)95.7696.9695.9089.8595.6696.0394.58
Quality (↑)61.9567.0654.5853.0638.9754.9454.25
Visual Feature Changing (Color & Texture)
MLLM-Judge (↑)5.866.856.606.432.147.537.33
Alignment (↑)27.9929.2529.4627.5423.1828.8828.69
Smoothness (↑)95.9397.1095.8391.7194.7596.6694.52
Quality (↑)65.8069.3153.3258.2936.2759.3657.91
Stylization
MLLM-Judge (↑)7.237.628.317.413.718.078.26
Alignment (↑)29.8430.2529.0027.7422.8029.1429.38
Smoothness (↑)96.3197.2396.7188.9795.6296.5095.69
Quality (↑)64.0568.2253.1854.1535.7662.5957.00
Object Grounding
SSIM (↑)40.4750.4647.2137.9866.8449.6570.90
Jaccard Index J (↑)13.8519.2925.6219.881.5213.8937.74
F-measure F (↑)15.5016.8617.6012.813.0717.3721.83
Reasoning Segmentation
SSIM (↑)---32.3944.4759.8668.41
Jaccard Index J (↑)---14.0210.1216.8922.53
F-measure F (↑)---8.079.0610.4515.97
Avg. Ranking2.611.411.963.003.212.001.78
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.517, + 0.907, + 0.547 + ], + "angle": 0, + "content": "Table 2. Comparison of video editing task with instructional / non-instructional models on VEG-Bench. -: the task is not capable of non-instructional models. We gray out numbers of non-instructional models that are in different categories." + }, + { + "type": "table", + "bbox": [ + 0.118, + 0.557, + 0.465, + 0.622 + ], + "angle": 0, + "content": "
SettingsRemoval (FVD ↓)Grounding (SSIM ↑)
Grd.-only-52.34
Rmv.-only1098.52-
Mixed987.8055.21
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.633, + 0.483, + 0.675 + ], + "angle": 0, + "content": "Table 3. An ablation study on whether multi-task learning provides transferable benefits that enhance performance across tasks. We focus on removal and grounding tasks as representative examples." + }, + { + "type": "image", + "bbox": [ + 0.116, + 0.688, + 0.462, + 0.829 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.84, + 0.483, + 0.87 + ], + "angle": 0, + "content": "Figure 7. Comparison between single- and multi-skill models with different data training. We find tasks can help each other." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.56, + 0.835, + 0.574 + ], + "angle": 0, + "content": "image into the input video based on instructions." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.575, + 0.907, + 0.681 + ], + "angle": 0, + "content": "Emergent Few-shot In-Context Editing. As shown in Fig. 3, VEGGIE can effectively utilize a few example image pairs to transfer the intended editing changes seamlessly to the input video. We observe that VEGGIE exhibits in-context learning for image editing without the need for language instructions. Instead, it uses image pairs as examples to infer and apply the desired editing intention directly." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.695, + 0.634, + 0.71 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.72, + 0.909, + 0.888 + ], + "angle": 0, + "content": "We present VEGGIE, a unified end-to-end model for instructional video editing that handles diverse pixel-level tasks. VEGGIE leverages MLLM for robust instruction understanding and employs a video diffusion model to execute pixel-level edits. Our framework uses a single diffusion loss for end-to-end optimization across varied tasks/skills. 
We also introduce a novel synthetic data generation pipeline and VEG-Bench, a benchmark that assesses a broad range of editing skills. Our VEGGIE outperforms previous methods as a versatile, all-in-one solution. We hope our model, data, and benchmark to advance research on instructional generative video models." + } + ], + [ + { + "type": "title", + "bbox": [ + 0.093, + 0.09, + 0.188, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.115, + 0.486, + 0.184 + ], + "angle": 0, + "content": "[1] Zechen Bai, Tong He, Haiyang Mei, Pichao Wang, Ziteng Gao, Joya Chen, Lei Liu, Zheng Zhang, and Mike Zheng Shou. One token to seg them all: Language instructed reasoning segmentation in videos. arXiv preprint arXiv:2409.19603, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.185, + 0.484, + 0.255 + ], + "angle": 0, + "content": "[2] Andreas Blattmann, Tim Dockhorn, Sumith Kulal, Daniel Mendelevitch, Maciej Kilian, Dominik Lorenz, Yam Levi, Zion English, Vikram Voleti, Adam Letts, et al. Stable video diffusion: Scaling latent video diffusion models to large datasets. arXiv preprint arXiv:2311.15127, 2023. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.256, + 0.484, + 0.311 + ], + "angle": 0, + "content": "[3] Tim Brooks, Aleksander Holynski, and Alexei A Efros. Instructpix2pix: Learning to follow image editing instructions. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 18392-18402, 2023. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.312, + 0.484, + 0.395 + ], + "angle": 0, + "content": "[4] Tim Brooks, Bill Peebles, Connor Holmes, Will DePue, Yufei Guo, Li Jing, David Schnurr, Joe Taylor, Troy Luhman, Eric Luhman, et al. Video generation models as world simulators. 
OpenAI, https://openai.com/research/video-generation-models-as-world-simulators, 2024. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.396, + 0.484, + 0.45 + ], + "angle": 0, + "content": "[5] Alper Canberk, Maksym Bondarenko, Ege Ozguroglu, Ruoshi Liu, and Carl Vondrick. Erasedraw: Learning to insert objects by erasing them from images. arXiv preprint arXiv:2409.00522, 2024. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.452, + 0.484, + 0.508 + ], + "angle": 0, + "content": "[6] Duygu Ceylan, Chun-Hao P Huang, and Niloy J Mitra. Pix2video: Video editing using image diffusion. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 23206-23217, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.509, + 0.484, + 0.564 + ], + "angle": 0, + "content": "[7] Wenhao Chai, Xun Guo, Gaoang Wang, and Yan Lu. Stablevideo: Text-driven consistency-aware diffusion video editing. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 23040-23050, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.565, + 0.484, + 0.619 + ], + "angle": 0, + "content": "[8] Keqin Chen, Zhao Zhang, Weili Zeng, Richong Zhang, Feng Zhu, and Rui Zhao. Shikra: Unleashing multimodal llm's referential dialogue magic. arXiv preprint arXiv:2306.15195, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.621, + 0.484, + 0.676 + ], + "angle": 0, + "content": "[9] Jiaxin Cheng, Tianjun Xiao, and Tong He. Consistent video-to-video transfer using synthetic dataset. In The Twelfth International Conference on Learning Representations, 2024. 5, 8, 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.678, + 0.484, + 0.761 + ], + "angle": 0, + "content": "[10] Yuren Cong, Mengmeng Xu, christian simon, Shoufa Chen, Jiawei Ren, Yanping Xie, Juan-Manuel Perez-Rua, Bodo Rosenhahn, Tao Xiang, and Sen He. FLATTEN: optical FLOW-guided ATTENtion for consistent text-to-video editing. 
In The Twelfth International Conference on Learning Representations, 2024. 8, 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.762, + 0.484, + 0.831 + ], + "angle": 0, + "content": "[11] Andong Deng, Tongjia Chen, Shoubin Yu, Taojiannan Yang, Lincoln Spencer, Yapeng Tian, Ajmal Saeed Mian, Mohit Bansal, and Chen Chen. Motion-grounded video reasoning: Understanding and perceiving motion at pixel level. arXiv preprint arXiv:2411.09921, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.832, + 0.484, + 0.9 + ], + "angle": 0, + "content": "[12] Andong Deng, Tongjia Chen, Shoubin Yu, Taojiannan Yang, Lincoln Spencer, Yapeng Tian, Ajmal Saeed Mian, Bansal Mohit, and Chen. Chen. Motion-grounded video reasoning: Understanding and perceiving motion at pixel level. 2024. 5, 6, 2" + }, + { + "type": "list", + "bbox": [ + 0.094, + 0.115, + 0.486, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.093, + 0.906, + 0.12 + ], + "angle": 0, + "content": "[13] Lee R Dice. Measures of the amount of ecologic association between species. Ecology, 26(3):297-302, 1945. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.121, + 0.908, + 0.176 + ], + "angle": 0, + "content": "[14] Henghui Ding, Chang Liu, Shuting He, Xudong Jiang, and Chen Change Loy. Mevis: A large-scale benchmark for video segmentation with motion expressions. In ICCV, pages 2694-2703, 2023. 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.177, + 0.907, + 0.232 + ], + "angle": 0, + "content": "[15] Patrick Esser, Johnathan Chiu, Parmida Atighehchian, Jonathan Granskog, and Anastasis Germanidis. Structure and content-guided video synthesis with diffusion models. In CVPR, pages 7346-7356, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.233, + 0.906, + 0.275 + ], + "angle": 0, + "content": "[16] Hao Fei, Shengqiong Wu, Hanwang Zhang, Tat-Seng Chua, and Shuicheng Yan. 
Vitron: A unified pixel-level vision llm for understanding, generating, segmenting, editing, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.275, + 0.906, + 0.328 + ], + "angle": 0, + "content": "[17] Tsu-Jui Fu, Wenze Hu, Xianzhi Du, William Yang Wang, Yinfei Yang, and Zhe Gan. Guiding instruction-based image editing via multimodal large language models. arXiv preprint arXiv:2309.17102, 2023. 2, 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.33, + 0.908, + 0.383 + ], + "angle": 0, + "content": "[18] Yuying Ge, Sijie Zhao, Chen Li, Yixiao Ge, and Ying Shan. Seed-data-edit technical report: A hybrid dataset for instructional image editing. arXiv preprint arXiv:2405.04007, 2024. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.385, + 0.907, + 0.469 + ], + "angle": 0, + "content": "[19] Zigang Geng, Binxin Yang, Tiankai Hang, Chen Li, Shuyang Gu, Ting Zhang, Jianmin Bao, Zheng Zhang, Houqiang Li, Han Hu, et al. Instructdiffusion: A generalist modeling interface for vision tasks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12709-12720, 2024. 8, 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.47, + 0.906, + 0.511 + ], + "angle": 0, + "content": "[20] Michal Geyer, Omer Bar-Tal, Shai Bagon, and Tali Dekel. Tokenflow: Consistent diffusion features for consistent video editing. arXiv preprint arXiv:2307.10373, 2023. 2, 3, 8, 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.512, + 0.907, + 0.537 + ], + "angle": 0, + "content": "[21] gpt 4o. https://openai.com/index/hello-gpt-4o/.2024.6" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.539, + 0.907, + 0.607 + ], + "angle": 0, + "content": "[22] Yuwei Guo, Ceyuan Yang, Anyi Rao, Zhengyang Liang, Yaohui Wang, Yu Qiao, Maneesh Agrawala, Dahua Lin, and Bo Dai. Animatediff: Animate your personalized text-to-image diffusion models without specific tuning. arXiv preprint arXiv:2307.04725, 2023. 
5, 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.609, + 0.907, + 0.663 + ], + "angle": 0, + "content": "[23] Amir Hertz, Ron Mokady, Jay Tenenbaum, Kfir Aberman, Yael Pritch, and Daniel Cohen-Or. Prompt-to-prompt image editing with cross attention control. arXiv preprint arXiv:2208.01626, 2022.5" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.665, + 0.906, + 0.734 + ], + "angle": 0, + "content": "[24] Jonathan Ho, William Chan, Chitwan Saharia, Jay Whang, Ruiqi Gao, Alexey Gritsanko, Diederik P Kingma, Ben Poole, Mohammad Norouzi, David J Fleet, et al. Imagen video: High definition video generation with diffusion models. arXiv preprint arXiv:2210.02303, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.735, + 0.906, + 0.788 + ], + "angle": 0, + "content": "[25] Jonathan Ho, Tim Salimans, Alexey Gritsenko, William Chan, Mohammad Norouzi, and David J Fleet. Video diffusion models. Advances in Neural Information Processing Systems, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.79, + 0.906, + 0.845 + ], + "angle": 0, + "content": "[26] Wenyi Hong, Ming Ding, Wendi Zheng, Xinghan Liu, and Jie Tang. Cogvideo: Large-scale pretraining for text-to-video generation via transformers. In The Eleventh International Conference on Learning Representations, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.846, + 0.906, + 0.9 + ], + "angle": 0, + "content": "[27] Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yanzhi Li, Shean Wang, Lu Wang, Weizhu Chen, et al. Lora: Low-rank adaptation of large language models. ICLR, 1(2):3, 2022. 1" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.093, + 0.908, + 0.9 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.092, + 0.486, + 0.148 + ], + "angle": 0, + "content": "[28] Jiahao Hu, Tianxiong Zhong, Xuebo Wang, Boyuan Jiang, Xingye Tian, Fei Yang, Pengfei Wan, and Di Zhang. 
Vivid-10m: A dataset and baseline for versatile and interactive video local editing. arXiv preprint arXiv:2411.15260, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.149, + 0.486, + 0.246 + ], + "angle": 0, + "content": "[29] Yuzhou Huang, Liangbin Xie, Xintao Wang, Ziyang Yuan, Xiaodong Cun, Yixiao Ge, Jiantao Zhou, Chao Dong, Rui Huang, Ruimao Zhang, et al. Smartedit: Exploring complex instruction-based image editing with multimodal large language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8362-8371, 2024. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.248, + 0.485, + 0.331 + ], + "angle": 0, + "content": "[30] Ziqi Huang, Yinan He, Jiashuo Yu, Fan Zhang, Chenyang Si, Yuming Jiang, Yuanhan Zhang, Tianxing Wu, Qingyang Jin, Nattapol Chanpaisit, et al. Vbench: Comprehensive benchmark suite for video generative models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 21807-21818, 2024. 6, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.333, + 0.484, + 0.36 + ], + "angle": 0, + "content": "[31] Paul Jaccard. The distribution of the flora in the alpine zone. 1. New phytologist, 11(2):37-50, 1912. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.362, + 0.484, + 0.417 + ], + "angle": 0, + "content": "[32] Junjie Ke, Qifei Wang, Yilin Wang, Peyman Milanfar, and Feng Yang. Musiq: Multi-scale image quality transformer. In Proceedings of the IEEE/CVF international conference on computer vision, pages 5148-5157, 2021. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.419, + 0.485, + 0.5 + ], + "angle": 0, + "content": "[33] Anna Khoreva, Anna Rohrbach, and Bernt Schiele. Video object segmentation with language referring expressions. In Computer Vision-ACCV 2018: 14th Asian Conference on Computer Vision, Perth, Australia, December 2–6, 2018, Revised Selected Papers, Part IV 14, pages 123–141. Springer, 2019. 
6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.504, + 0.484, + 0.559 + ], + "angle": 0, + "content": "[34] Weijie Kong, Qi Tian, Zijian Zhang, Rox Min, Zuozhuo Dai, Jin Zhou, Jiangfeng Xiong, Xin Li, Bo Wu, Jianwei Zhang, et al. Hunyuanvideo: A systematic framework for large video generative models. arXiv preprint arXiv:2412.03603, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.561, + 0.484, + 0.614 + ], + "angle": 0, + "content": "[35] Max Ku, Cong Wei, Weiming Ren, Harry Yang, and Wenhu Chen. Anyv2v: A tuning-free framework for any video-to-video editing tasks. arXiv preprint arXiv:2403.14468, 2024. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.617, + 0.484, + 0.671 + ], + "angle": 0, + "content": "[36] Xin Lai, Zhuotao Tian, Yukang Chen, Yanwei Li, Yuhui Yuan, Shu Liu, and Jiaya Jia. Lisa: Reasoning segmentation via large language model. arXiv preprint arXiv:2308.00692, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.675, + 0.484, + 0.743 + ], + "angle": 0, + "content": "[37] Xin Lai, Zhuotao Tian, Yukang Chen, Yanwei Li, Yuhui Yuan, Shu Liu, and Jiaya Jia. Lisa: Reasoning segmentation via large language model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9579-9589, 2024. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.746, + 0.484, + 0.801 + ], + "angle": 0, + "content": "[38] Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Yanwei Li, Ziwei Liu, and Chunyuan Li. Llava-onevision: Easy visual task transfer. arXiv preprint arXiv:2408.03326, 2024. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.803, + 0.484, + 0.857 + ], + "angle": 0, + "content": "[39] Jialu Li, Shoubin Yu, Han Lin, Jaemin Cho, Jaehong Yoon, and Mohit Bansal. Training-free guidance in text-to-video generation via multimodal planning and structured noise initialization. arXiv preprint arXiv:2504.08641, 2025. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.859, + 0.484, + 0.901 + ], + "angle": 0, + "content": "[40] Liunian Harold Li, Pengchuan Zhang, Haotian Zhang, Jianwei Yang, Chunyuan Li, Yiwu Zhong, Lijuan Wang, Lu Yuan, Lei Zhang, Jenq-Neng Hwang, et al. Grounded language" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.092, + 0.486, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.545, + 0.093, + 0.908, + 0.133 + ], + "angle": 0, + "content": "image pre-training. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10965-10975, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.136, + 0.908, + 0.205 + ], + "angle": 0, + "content": "[41] Xirui Li, Chao Ma, Xiaokang Yang, and Ming-Hsuan Yang. Vidthome: Video token merging for zero-shot video editing. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7486-7495, 2024. 2, 8, 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.207, + 0.907, + 0.247 + ], + "angle": 0, + "content": "[42] Long Lian, Baifeng Shi, Adam Yala, Trevor Darrell, and Boyi Li. Llm-grounded video diffusion models. arXiv preprint arXiv:2309.17444, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.249, + 0.908, + 0.303 + ], + "angle": 0, + "content": "[43] Han Lin, Abhay Zala, Jaemin Cho, and Mohit Bansal. Videodirectorgpt: Consistent multi-scene video generation via llm-guided planning. arXiv preprint arXiv:2309.15091, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.305, + 0.908, + 0.361 + ], + "angle": 0, + "content": "[44] Chang Liu, Henghui Ding, and Xudong Jiang. Gres: Generalized referring expression segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 23592-23601, 2023. 
3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.363, + 0.906, + 0.431 + ], + "angle": 0, + "content": "[45] Shilong Liu, Zhaoyang Zeng, Tianhe Ren, Feng Li, Hao Zhang, Jie Yang, Chunyuan Li, Jianwei Yang, Hang Su, Jun Zhu, et al. Grounding dino: Marrying dino with grounded pre-training for open-set object detection. arXiv preprint arXiv:2303.05499, 2023. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.434, + 0.906, + 0.489 + ], + "angle": 0, + "content": "[46] Shaoteng Liu, Tianyu Wang, Jui-Hsien Wang, Qing Liu, Zhifei Zhang, Joon-Young Lee, Yijun Li, Bei Yu, Zhe Lin, Soo Ye Kim, et al. Generative video propagation. arXiv preprint arXiv:2412.19761, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.491, + 0.906, + 0.56 + ], + "angle": 0, + "content": "[47] Ziqiao Ma, Jiayi Pan, and Joyce Chai. World-to-words: Grounded open vocabulary acquisition through fast mapping in vision-language models. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 524–544, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.562, + 0.908, + 0.63 + ], + "angle": 0, + "content": "[48] Junhua Mao, Jonathan Huang, Alexander Toshev, Oana Camburu, Alan L Yuille, and Kevin Murphy. Generation and comprehension of unambiguous object descriptions. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 11-20, 2016. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.632, + 0.906, + 0.673 + ], + "angle": 0, + "content": "[49] Kangfu Mei and Vishal Patel. Vidm: Video implicit diffusion models. In Proceedings of the AAAI conference on artificial intelligence, pages 9117-9125, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.676, + 0.908, + 0.731 + ], + "angle": 0, + "content": "[50] Bo Miao, Mohammed Bennamoun, Yongsheng Gao, Mubarak Shah, and Ajmal Mian. Towards temporally consistent referring video object segmentation. 
https://arxiv.org/abs/2403.19407, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.732, + 0.908, + 0.8 + ], + "angle": 0, + "content": "[51] Zhiliang Peng, Wenhui Wang, Li Dong, Yaru Hao, Shaohan Huang, Shuming Ma, Qixiang Ye, and Furu Wei. Grounding multimodal large language models to the world. In The Twelfth International Conference on Learning Representations, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.802, + 0.908, + 0.871 + ], + "angle": 0, + "content": "[52] Renjie Pi, Jiahui Gao, Shizhe Diao, Rui Pan, Hanze Dong, Jipeng Zhang, Lewei Yao, Jianhua Han, Hang Xu, Lingpeng Kong, et al. Detgpt: Detect what you need via reasoning. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 14172-14189, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.873, + 0.906, + 0.901 + ], + "angle": 0, + "content": "[53] Chenyang Qi, Xiaodong Cun, Yong Zhang, Chenyang Lei, Xintao Wang, Ying Shan, and Qifeng Chen. Fatezero: Fusing" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.093, + 0.908, + 0.901 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.126, + 0.092, + 0.484, + 0.12 + ], + "angle": 0, + "content": " attentions for zero-shot text-based video editing. In ICCV, pages 15932-15942, 2023. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.122, + 0.484, + 0.191 + ], + "angle": 0, + "content": "[54] Bosheng Qin, Juncheng Li, Siliang Tang, Tat-Seng Chua, and Yueting Zhuang. Instructvid2vid: Controllable video editing with natural language instructions. In 2024 IEEE International Conference on Multimedia and Expo (ICME), pages 1-6. IEEE, 2024. 2, 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.193, + 0.484, + 0.275 + ], + "angle": 0, + "content": "[55] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. 
Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PmLR, 2021. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.278, + 0.484, + 0.36 + ], + "angle": 0, + "content": "[56] Hanoona Rasheed, Muhammad Maaz, Sahal Shaji, Abdelrahman Shaker, Salman Khan, Hisham Cholakkal, Rao M Anwer, Erix Xing, Ming-Hsuan Yang, and Fahad S Khan. Glamm: Pixel grounding large multimodal model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.362, + 0.484, + 0.432 + ], + "angle": 0, + "content": "[57] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10684-10695, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.434, + 0.484, + 0.515 + ], + "angle": 0, + "content": "[58] Seonguk Seo, Joon-Young Lee, and Bohyung Han. Urvos: Unified referring video object segmentation network with a large-scale benchmark. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23–28, 2020, Proceedings, Part XV 16, pages 208–223. Springer, 2020. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.518, + 0.484, + 0.573 + ], + "angle": 0, + "content": "[59] Shangkun Sun, Xiaoyu Liang, Songlin Fan, Wenxu Gao, and Wei Gao. Ve-bench: Subjective-aligned benchmark suite for text-driven video editing quality assessment. In Proceedings of the AAAI Conference on Artificial Intelligence, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.576, + 0.484, + 0.645 + ], + "angle": 0, + "content": "[60] Carles Ventura, Miriam Bellver, Andreu Girbau, Amaia Salvador, Ferran Marques, and Xavier Giro-i Nieto. Rvos: End-to-end recurrent network for video object segmentation. 
In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5277-5286, 2019. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.647, + 0.484, + 0.741 + ], + "angle": 0, + "content": "[61] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Yang Fan, Kai Dang, Mengfei Du, Xuancheng Ren, Rui Men, Dayiheng Liu, Chang Zhou, Jingren Zhou, and Junyang Lin. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024. 5, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.745, + 0.484, + 0.814 + ], + "angle": 0, + "content": "[62] Xiang Wang, Hangjie Yuan, Shiwei Zhang, Dayou Chen, Jiuniu Wang, Yingya Zhang, Yujun Shen, Deli Zhao, and Jingren Zhou. Videocomposer: Compositional video synthesis with motion controllability. Advances in Neural Information Processing Systems, 36, 2024. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.816, + 0.484, + 0.869 + ], + "angle": 0, + "content": "[63] Zhenyu Wang, Aoxue Li, Zhenguo Li, and Xihui Liu. Genartist: Multimodal IIm as an agent for unified image generation and editing. arXiv preprint arXiv:2407.05600, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.873, + 0.484, + 0.901 + ], + "angle": 0, + "content": "[64] Cong Wei, Zheyang Xiong, Weiming Ren, Xinrun Du, Ge Zhang, and Wenhu Chen. Omniedit: Building image edit" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.092, + 0.484, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.545, + 0.092, + 0.906, + 0.12 + ], + "angle": 0, + "content": "ing generalist models through specialist supervision. arXiv preprint arXiv:2411.07199, 2024. 5, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.122, + 0.908, + 0.19 + ], + "angle": 0, + "content": "[65] Chenyun Wu, Zhe Lin, Scott Cohen, Trung Bui, and Subhransu Maji. 
Phrasecut: Language-based image segmentation in the wild. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10216-10225, 2020. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.193, + 0.908, + 0.276 + ], + "angle": 0, + "content": "[66] Jianzong Wu, Xiangtai Li, Chenyang Si, Shangchen Zhou, Jingkang Yang, Jiangning Zhang, Yining Li, Kai Chen, Yunhai Tong, Ziwei Liu, et al. Towards language-driven video inpainting via multimodal large language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12501-12511, 2024. 2, 3, 5, 8, 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.278, + 0.908, + 0.347 + ], + "angle": 0, + "content": "[67] Jay Zhangjie Wu, Yixiao Ge, Xintao Wang, Stan Weixian Lei, Yuchao Gu, Yufei Shi, Wynne Hsu, Ying Shan, Xiaohu Qie, and Mike Zheng Shou. Tune-a-video: One-shot tuning of image diffusion models for text-to-video generation. In ICCV, pages 7623-7633, 2023. 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.349, + 0.908, + 0.405 + ], + "angle": 0, + "content": "[68] Jay Zhangjie Wu, Xiuyu Li, Difei Gao, Zhen Dong, Jinbin Bai, Aishani Singh, Xiaoyu Xiang, Youzeng Li, Zuwei Huang, Yuanxi Sun, et al. Cvpr 2023 text guided video editing competition. arXiv preprint arXiv:2310.16003, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.407, + 0.908, + 0.475 + ], + "angle": 0, + "content": "[69] Zhuofan Xia, Dongchen Han, Yizeng Han, Xuran Pan, Shiji Song, and Gao Huang. Gsva: Generalized segmentation via multimodal large language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.478, + 0.908, + 0.533 + ], + "angle": 0, + "content": "[70] Zhen Xing, Qi Dai, Zihao Zhang, Hui Zhang, Han Hu, Zuxuan Wu, and Yu-Gang Jiang. Vidiff: Translating videos via multi-modal instructions with diffusion models. 
arXiv preprint arXiv:2311.18837, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.535, + 0.908, + 0.576 + ], + "angle": 0, + "content": "[71] Wilson Yan, Yunzhi Zhang, Pieter Abbeel, and Aravind Srinivas. Videogpt: Video generation using vq-vae and transformers. arXiv preprint arXiv:2104.10157, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.579, + 0.908, + 0.772 + ], + "angle": 0, + "content": "[72] An Yang, Baosong Yang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Zhou, Chengpeng Li, Chengyuan Li, Dayiheng Liu, Fei Huang, Guanting Dong, Haoran Wei, Huan Lin, Jialong Tang, Jialin Wang, Jian Yang, Jianhong Tu, Jianwei Zhang, Jianxin Ma, Jin Xu, Jingren Zhou, Jinze Bai, Jinzheng He, Junyang Lin, Kai Dang, Keming Lu, Keqin Chen, Kexin Yang, Mei Li, Mingfeng Xue, Na Ni, Pei Zhang, Peng Wang, Ru Peng, Rui Men, Ruize Gao, Runji Lin, Shijie Wang, Shuai Bai, Sinan Tan, Tianhang Zhu, Tianhao Li, Tianyu Liu, Wenbin Ge, Xiaodong Deng, Xiaohuan Zhou, Xingzhang Ren, Xinyu Zhang, Xipin Wei, Xuancheng Ren, Yang Fan, Yang Yao, Yichang Zhang, Yu Wan, Yunfei Chu, Yuqiong Liu, Zeyu Cui, Zhenru Zhang, and Zhihao Fan. Qwen2 technical report. arXiv preprint arXiv:2407.10671, 2024. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.774, + 0.908, + 0.829 + ], + "angle": 0, + "content": "[73] An Yang, Baosong Yang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Zhou, Chengpeng Li, Chengyuan Li, Dayiheng Liu, Fei Huang, et al. Qwen2 technical report. arXiv preprint arXiv:2407.10671, 2024. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.831, + 0.908, + 0.9 + ], + "angle": 0, + "content": "[74] Zhuoyi Yang, Jiayan Teng, Wendi Zheng, Ming Ding, Shiyu Huang, Jiazheng Xu, Yuanming Yang, Wenyi Hong, Xiaohan Zhang, Guanyu Feng, et al. Cogvideox: Text-to-video diffusion models with an expert transformer. arXiv preprint arXiv:2408.06072, 2024. 
2" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.092, + 0.908, + 0.9 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.093, + 0.484, + 0.134 + ], + "angle": 0, + "content": "[75] Jaehong Yoon, Shoubin Yu, and Mohit Bansal. Raccoon: Remove, add, and change video content with auto-generated narratives. arXiv preprint arXiv:2405.18406, 2024. 2, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.136, + 0.484, + 0.204 + ], + "angle": 0, + "content": "[76] Haoxuan You, Haotian Zhang, Zhe Gan, Xianzhi Du, Bowen Zhang, Zirui Wang, Liangliang Cao, Shih-Fu Chang, and Yinfei Yang. Ferret: Refer and ground anything anywhere at any granularity. In The Twelfth International Conference on Learning Representations, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.205, + 0.484, + 0.285 + ], + "angle": 0, + "content": "[77] Licheng Yu, Patrick Poirson, Shan Yang, Alexander C Berg, and Tamara L Berg. Modeling context in referring expressions. In Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part II 14, pages 69-85. Springer, 2016. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.288, + 0.484, + 0.369 + ], + "angle": 0, + "content": "[78] Shoubin Yu, Jacob Zhiyuan Fang, Jian Zheng, Gunnar Sigurdsson, Vicente Ordonez, Robinson Piramuthu, and Mohit Bansal. Zero-shot controllable image-to-video animation via motion decomposition. In Proceedings of the 32nd ACM International Conference on Multimedia, pages 3332-3341, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.371, + 0.484, + 0.426 + ], + "angle": 0, + "content": "[79] Tao Yu, Runseng Feng, Ruoyu Feng, Jinming Liu, Xin Jin, Wenjun Zeng, and Zhibo Chen. Inpaint anything: Segment anything meets image inpainting. arXiv preprint arXiv:2304.06790, 2023. 
2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.428, + 0.484, + 0.468 + ], + "angle": 0, + "content": "[80] Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid loss for language image pre-training. In ICCV, pages 11975-11986, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.47, + 0.484, + 0.536 + ], + "angle": 0, + "content": "[81] Hao Zhang, Hongyang Li, Feng Li, Tianhe Ren, Xueyan Zou, Shilong Liu, Shijia Huang, Jianfeng Gao, Lei Zhang, Chunyuan Li, et al. Llava-grounding: Grounded visual chat with large multimodal models. arXiv preprint arXiv:2312.02949, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.539, + 0.484, + 0.607 + ], + "angle": 0, + "content": "[82] Haotian Zhang, Haoxuan You, Philipp Dufter, Bowen Zhang, Chen Chen, Hong-You Chen, Tsu-Jui Fu, William Yang Wang, Shih-Fu Chang, Zhe Gan, et al. Ferret-v2: An improved baseline for referring and grounding with large language models. arXiv preprint arXiv:2404.07973, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.609, + 0.484, + 0.663 + ], + "angle": 0, + "content": "[83] Kai Zhang, Lingbo Mo, Wenhu Chen, Huan Sun, and Yu Su. Magicbrush: A manually annotated dataset for instruction-guided image editing. Advances in Neural Information Processing Systems, 36, 2024. 4, 5, 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.665, + 0.484, + 0.733 + ], + "angle": 0, + "content": "[84] Yichi Zhang, Ziqiao Ma, Xiaofeng Gao, Suhaila Shakiah, Qiaozi Gao, and Joyce Chai. Groundhog: Grounding large language models to holistic segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.735, + 0.484, + 0.802 + ], + "angle": 0, + "content": "[85] Zhixing Zhang, Bichen Wu, Xiaoyan Wang, Yaqiao Luo, Luxin Zhang, Yinan Zhao, Peter Vajda, Dimitris Metaxas, and Licheng Yu. Avid: Any-length video inpainting with diffusion model. 
arXiv preprint arXiv:2312.03816, 2023. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.804, + 0.484, + 0.845 + ], + "angle": 0, + "content": "[86] Zhenghao Zhang, Zuozhuo Dai, Long Qin, and Weizhi Wang. Effived: Efficient video editing via text-instruction diffusion models. arXiv preprint arXiv:2403.11568, 2024. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.846, + 0.484, + 0.898 + ], + "angle": 0, + "content": "[87] Yang Zhao, Zhijie Lin, Daquan Zhou, Zilong Huang, Jiashi Feng, and Bingyi Kang. Bubogpt: Enabling visual grounding in multi-modal llms. arXiv preprint arXiv:2307.08581, 2023. 3" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.093, + 0.484, + 0.898 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "title", + "bbox": [ + 0.25, + 0.089, + 0.747, + 0.137 + ], + "angle": 0, + "content": "VEGGIE: Instructional Editing and Reasoning Video Concepts with Grounded Generation" + }, + { + "type": "text", + "bbox": [ + 0.382, + 0.146, + 0.615, + 0.166 + ], + "angle": 0, + "content": "Supplementary Material" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.183, + 0.198, + 0.202 + ], + "angle": 0, + "content": "6. Appendix" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.21, + 0.383, + 0.225 + ], + "angle": 0, + "content": "In this Appendix, we provide extra details on" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.229, + 0.483, + 0.256 + ], + "angle": 0, + "content": "- Implementation details of VEGGIE training, and evaluation and baseline evaluations." + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.259, + 0.483, + 0.303 + ], + "angle": 0, + "content": "- Extra details on our data generation pipeline, including each module's details, prompts for each promptable module, data filtering, and visualization." 
+ }, + { + "type": "text", + "bbox": [ + 0.092, + 0.305, + 0.482, + 0.333 + ], + "angle": 0, + "content": "- Extra visualizations for each task and the comparison with the other 6 strong baseline models." + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.335, + 0.362, + 0.347 + ], + "angle": 0, + "content": "- Limitation and future work discussion." + }, + { + "type": "list", + "bbox": [ + 0.091, + 0.229, + 0.483, + 0.347 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.362, + 0.308, + 0.379 + ], + "angle": 0, + "content": "6.1. Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.385, + 0.485, + 0.61 + ], + "angle": 0, + "content": "Model Architecture. Our MLLM is initialized with LLaVA-OneVision-7B (LLaVA-OV) [38]. It is a strong MLLM consisting of Qwen2 [73] LLM with 32K context window, SigLIP [80] visual encoder, and a 2-layer-MLP projector. LLaVA-OV can handle diverse visual-language tasks (including interleaved-frame, video). It provides a good starting point for our VEGGIE to understand complex user instructions and can respond with multiple frame-wise implicit planning thanks to its long context window. Our video diffusion model is initialized from the instructional image editing model, MagicBrush [83]. We further inflated 2D convolution layers to 3D form and inserted temporal attention layers followingAnimateDiff [22] to adapt videos. Our alignment network is a single-layer MLP. We set 32 grounded task tokens for each frame." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.613, + 0.484, + 0.839 + ], + "angle": 0, + "content": "Training Details. Our MLLM is initialized with LLaVA-OneVision-7B (LLaVA-OV) [38]. Our VidDM is initialized from the instructional image editing model, MagicBrush [83] with Stable Diffusion v1.5 backbone [57]. We further inflated 2D convolution layers with temporal attention layers, followingAnimateDiff [22] to adapt videos. 
Our VEGGIE adopts a 2-stage curriculum training strategy (Sec. 3.2). In the first stage, we fully fine-tune the 2D convolution layers in the UNet, the alignment network, and the task query tokens in the MLLM on image data, with 862M trainable parameters. In the second stage, we train all 3 dimensions in the UNet, the alignment network, the task query tokens, and a LoRA in the MLLM, leading to 1.3B trainable parameters. Both stages are trained end-to-end with only a diffusion loss. More details are in the Appendix." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.84, + 0.484, + 0.902 + ], + "angle": 0, + "content": "We keep the VAE encoder and decoder frozen during the entire training process. In the first stage, we keep the MLLM (including visual encoder, MLP projector, and LLM) frozen, and fully fine-tune learnable grounded task queries," + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.184, + 0.908, + 0.395 + ], + "angle": 0, + "content": "alignment network, and diffusion model, leading to around 800M training parameters. We set \\(1e^{-4}\\) learning rate, and 96 batch size on each GPU. We use 16 A100 GPUs for the first stage of fine-tuning with 25K steps. In the second stage, we insert LoRA [27] modules into the LLM backbone, and inflate diffusion models by inserting extra temporal layers as inAnimateDiff [22]. We fine-tune LoRA, alignment network, learnable grounded task query tokens, and the diffusion model, leading to around 1.3B trainable parameters. We set \\(5e^{-4}\\) learning rate, and 1 batch size with 8 gradient accumulation steps on 32 A100 GPUs. For LoRA, we set lora rank 64, lora alpha 16, and lora dropout 0.05. We train the second stage video model 2.5K step with 8 uniformly sampled frames." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.399, + 0.909, + 0.505 + ], + "angle": 0, + "content": "Evaluation and Baseline Details. We primarily compare our model with strong instructional editing models [9, 19, 66]. 
Additionally, we include non-instructional editing models [10, 20, 41] for completeness, although these are not fair baselines since they are not end-to-end and rely on additional conditions, such as depth maps or intermediate captions." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.509, + 0.907, + 0.6 + ], + "angle": 0, + "content": "We randomly sample 3 seeds for both our method and baseline methods. In our experiments, we use different classifier-free guidance scores (\\( g_{T} \\) and \\( g_{V} \\) in Sec. 3.2) for different skills. Specifically, we set \\( g_{T} = 14.5 \\) and \\( g_{V} = 1.5 \\) for grounding and reasoning segmentation, while for other editing skills, we use \\( g_{T} = 10.5 \\) and \\( g_{V} = 2.0 \\)." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.603, + 0.907, + 0.679 + ], + "angle": 0, + "content": "For baseline methods, we adopt their default settings (e.g., diffusion steps, guidance scores, frame numbers) as provided in their GitHub repositories. To ensure fair evaluation, we sample the same eight frames from each method's video editing results." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.682, + 0.909, + 0.773 + ], + "angle": 0, + "content": "For alignment and smoothness metrics, we use CLIP-B/32 to measure text-image and image-image similarity, averaging across all frames to obtain video-level scores. For detection metrics, we use GroundingDINO (Swin-T OGC) to detect target objects frame by frame, averaging confidence scores across all frames for the final video-level metric." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.776, + 0.909, + 0.823 + ], + "angle": 0, + "content": "For the removal task, where fewer detected objects and lower alignment with the original text prompt are desired, we compute alignment and detection metrics as \\( 1 - \\) value." 
+ }, + { + "type": "text", + "bbox": [ + 0.512, + 0.826, + 0.909, + 0.902 + ], + "angle": 0, + "content": "We compare the model judged best for each video sample. The agreement between human and MLLM judgments is 0.74, whereas the agreement between human and CLIP is only 0.45. We conducted 5 times of the MLLM evaluation and took an average." + } + ], + [ + { + "type": "table", + "bbox": [ + 0.093, + 0.089, + 0.482, + 0.233 + ], + "angle": 0, + "content": "
MethodsGroundingReasoning
JFJ&FJFJ&F
Segmentation Models
HTR [50]47.1147.6047.3520.0128.0224.01
VideoLISA [1]53.2354.3753.8038.4839.2038.84
MoRA [12]57.7353.6355.6838.9237.4840.36
Generative Editing Models
InstructDiff [19]19.8812.8116.3514.028.0711.05
InsV2V [9]13.8917.3715.6316.8910.4513.67
VEGGIE (Ours)37.7421.8329.7922.5315.9719.25
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.243, + 0.485, + 0.286 + ], + "angle": 0, + "content": "Table 4. Comparison of video concept grounding and reasoning segmentation tasks with other instructional generative models and expert segmentation models." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.318, + 0.307, + 0.334 + ], + "angle": 0, + "content": "6.2. Data Collection Details" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.344, + 0.484, + 0.405 + ], + "angle": 0, + "content": "As mentioned in the earlier Sec. 3.3, beyond collecting existing data, we proposed a novel data synthesis pipeline to generate instructional video data by animating images in the instructional image dataset." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.409, + 0.485, + 0.454 + ], + "angle": 0, + "content": "Specifically, we first select images from Omni-Edit [64], an instructional image editing dataset with carefully designed tasks/skills." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.457, + 0.484, + 0.594 + ], + "angle": 0, + "content": "We first use QWen2-VL [61] to caption the original image and give an animation prompt to animate the image via CogVideX1.5-I2V [74]. Please refer Tab. 5 and Tab. 6 to our prompt for caption and animation. After getting the animated video, we utilize AnyV2V [35] to edit the video based on the reference image (edited image from image dataset). The reference image gives a strong prior to maintaining the image dataset's high-quality edit and thus transfer it to the video via the video editing model." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.598, + 0.486, + 0.705 + ], + "angle": 0, + "content": "Next, we filter out videos by evaluating VBenchmark metrics [30], including aesthetic quality, motion smoothness, image quality, subject consistency, and background consistency. 
We set thresholds at 0.6 for aesthetic quality, 65 for imaging quality, 0.9 for motion smoothness, subject consistency, and background consistency. We provide our generated data visualization in Fig. 9." + }, + { + "type": "title", + "bbox": [ + 0.09, + 0.725, + 0.424, + 0.74 + ], + "angle": 0, + "content": "6.3. More Quantitative Results & Discussion" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.75, + 0.485, + 0.903 + ], + "angle": 0, + "content": "Video Concept Grounding & Reasoning Segmentation. We include additional results on video concept grounding and reasoning segmentation in Tab. 4. VEGGIE outperforms the diffusion-based baseline by a significant margin, showcasing its superior ability to accurately locate fine-grained object references and handle complex reasoning tasks. We hypothesize that through grounded generation, VEGGIE demonstrates remarkable precision in concept editing. For example, as shown in Fig. 11 in the Appendix, VEGGIE can remove the woman without altering the nearby girl." + }, + { + "type": "image", + "bbox": [ + 0.548, + 0.092, + 0.875, + 0.278 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.289, + 0.908, + 0.318 + ], + "angle": 0, + "content": "Figure 8. t-SNE Visualization of different task query distribution. Different colors represent different tasks/skills. Best viewed in color." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.346, + 0.779, + 0.362 + ], + "angle": 0, + "content": "6.4. Limitation and Future Works" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.369, + 0.907, + 0.521 + ], + "angle": 0, + "content": "Our current method, VEGGIE, is built upon Stable Diffusion 1.5, which inevitably constrains its editing quality compared to cutting-edge video generation models that rely on DiT or flow-based architectures. 
In addition, the video outputs we produce are relatively short, lagging behind some recent state-of-the-art methods in terms of length and temporal consistency. Furthermore, we observe increased editing artifacts when incorporating large amounts of grounding data, suggesting that multi-task data mixture strategies play a key role in maintaining high-quality edits." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.522, + 0.909, + 0.688 + ], + "angle": 0, + "content": "Despite these limitations, our results demonstrate promising directions for improvement in terms of model design, data curation, and evaluation. Future work could explore integrating more advanced base architectures (e.g., DiT [34, 74] or flow-based models), extending the maximum video duration, developing more systematic data [28] with more advanced method [46] and carefully designed mixture strategies to balance fidelity and flexibility, and conducting scalable training. We hope our findings will inspire further research into these directions, pushing the boundaries of instructional video editing performance." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.689, + 0.911, + 0.901 + ], + "angle": 0, + "content": "Task Query Visualization & Analysis via t-SNE. To analyze task/skill correlations, we project their grounded queries into lower-dimensional spaces using PCA and t-SNE. As shown in Fig. 8, distinct clusters form for each category (e.g., Addition), indicating effective differentiation by the model. Reasoning and Grounding appear together on the right. It may be because they both require cognitive/semantic understanding or logical reference. Color, Env, and Change clusters are closer to each other, indicating that the model views them as similar operations focusing on changing different visual attributes. Style lies in the lower-left region but remains relatively close to Color, Env, and Change. 
This proximity may reflect that \"stylization\" is conceptually similar to these visual attribute tasks, although it targets different" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.358, + 0.174, + 0.64, + 0.188 + ], + "angle": 0, + "content": "Table 5. Qwen2-VL prompt for Image caption." + }, + { + "type": "table", + "bbox": [ + 0.094, + 0.195, + 0.905, + 0.37 + ], + "angle": 0, + "content": "
Please describe this image shortly, try to capture main details in the image.\nHere are some examples of image caption styles:\n1. A Couple In A Public Display Of Affection\n2. A kitten turning its head on a wooden floor\n3. An Old Man Doing Exercises For The Body And Mind\n4. Man Walking\nNow, please describe the given image briefly in one sentence, please do not say something like 'The image shows...' or 'The image depicts...'
" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.402, + 0.486, + 0.509 + ], + "angle": 0, + "content": "transformations. Removal stands apart on the top, especially distant from Addition, indicating the model perceives them as distinct rather than inverse operations. In contrast, Addition lies closer to tasks like Reasoning and Grounding. It suggests that the act of adding elements may rely on similar semantic or referential processes (e.g., deciding what to add and how to reference the newly added element)." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.518, + 0.279, + 0.533 + ], + "angle": 0, + "content": "6.5. Extra Visualization" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.54, + 0.407, + 0.555 + ], + "angle": 0, + "content": "We provide extra visualization in Figs. 10 to 16" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.212, + 0.118, + 0.787, + 0.253 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.212, + 0.267, + 0.787, + 0.402 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.212, + 0.409, + 0.787, + 0.548 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.212, + 0.556, + 0.787, + 0.678 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.354, + 0.681, + 0.669, + 0.695 + ], + "angle": 0, + "content": "Instruction: transform the setting to a snowy scene" + }, + { + "type": "image", + "bbox": [ + 0.212, + 0.704, + 0.787, + 0.763 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.212, + 0.769, + 0.787, + 0.843 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.293, + 0.855, + 0.703, + 0.87 + ], + "angle": 0, + "content": "Figure 9. Examples of our generated instructional video editing data." + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.315, + 0.185, + 0.681, + 0.2 + ], + "angle": 0, + "content": "Table 6. 
Qwen2-VL prompt for generating animation prompt." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.217, + 0.871, + 0.248 + ], + "angle": 0, + "content": "I want to animate this image using an Image-Text-to-Video model. Your task is to generate a detailed and reasonable text prompt that describes how the image should be animated." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.263, + 0.195, + 0.276 + ], + "angle": 0, + "content": "Guidelines:" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.293, + 0.871, + 0.322 + ], + "angle": 0, + "content": "1. Clarity & Realism - The animation description should be logical based on the given image, ensuring the movement makes sense for the scene." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.338, + 0.871, + 0.369 + ], + "angle": 0, + "content": "2. Short & Vivid Description - Use expressive language to guide the animation model effectively, ensuring high-quality and visually engaging results." + }, + { + "type": "list", + "bbox": [ + 0.116, + 0.293, + 0.871, + 0.369 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.384, + 0.871, + 0.414 + ], + "angle": 0, + "content": "Ensure that your animation prompt aligns with the content of the provided image and describes a visually compelling motion sequence." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.429, + 0.729, + 0.444 + ], + "angle": 0, + "content": "Do not output animation prompts that contain objects/scenes not included in the given image." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.459, + 0.429, + 0.474 + ], + "angle": 0, + "content": "Make sure the prompt is short in 1-2 sentences." + }, + { + "type": "image", + "bbox": [ + 0.095, + 0.526, + 0.902, + 0.86 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.352, + 0.872, + 0.643, + 0.886 + ], + "angle": 0, + "content": "Figure 10. More Examples of Concept Addition." 
+ } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.203, + 0.276, + 0.794, + 0.29 + ], + "angle": 0, + "content": "Table 7. GPT-4o prompt for MLLM-as-a-Judge for automatic instructional video editing evaluation." + }, + { + "type": "title", + "bbox": [ + 0.117, + 0.308, + 0.154, + 0.321 + ], + "angle": 0, + "content": "User" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.324, + 0.872, + 0.353 + ], + "angle": 0, + "content": "You are an evaluator for instructional video editing tasks. Your job is to assess how well the edited video fulfills the user's specific instructions." + }, + { + "type": "text", + "bbox": [ + 0.118, + 0.355, + 0.214, + 0.367 + ], + "angle": 0, + "content": "I will provide:" + }, + { + "type": "text", + "bbox": [ + 0.118, + 0.37, + 0.329, + 0.383 + ], + "angle": 0, + "content": "1. The original video (first GIF)" + }, + { + "type": "text", + "bbox": [ + 0.118, + 0.385, + 0.339, + 0.398 + ], + "angle": 0, + "content": "2. The edited video (second GIF)" + }, + { + "type": "text", + "bbox": [ + 0.118, + 0.4, + 0.401, + 0.414 + ], + "angle": 0, + "content": "3. 
The user's instruction: [user instruction]" + }, + { + "type": "list", + "bbox": [ + 0.118, + 0.37, + 0.401, + 0.414 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.118, + 0.416, + 0.516, + 0.429 + ], + "angle": 0, + "content": "Please evaluate the editing result using the following format:" + }, + { + "type": "text", + "bbox": [ + 0.118, + 0.43, + 0.433, + 0.444 + ], + "angle": 0, + "content": "INSTRUCTION: [Repeat the user's instruction]" + }, + { + "type": "text", + "bbox": [ + 0.118, + 0.445, + 0.226, + 0.457 + ], + "angle": 0, + "content": "EVALUATION:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.46, + 0.364, + 0.474 + ], + "angle": 0, + "content": "- Accuracy score (1-10): [Your score]" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.476, + 0.351, + 0.489 + ], + "angle": 0, + "content": "- Quality score (1-10): [Your score]" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.491, + 0.408, + 0.504 + ], + "angle": 0, + "content": "- Appropriateness score (1-10): [Your score]" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.506, + 0.383, + 0.52 + ], + "angle": 0, + "content": "- Overall score (1-10): [Your final score]" + }, + { + "type": "list", + "bbox": [ + 0.117, + 0.46, + 0.408, + 0.52 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.535, + 0.875, + 0.564 + ], + "angle": 0, + "content": "EXPLANATION: [Provide a brief justification for your scores, highlighting specific strengths and weaknesses of the edit]" + }, + { + "type": "text", + "bbox": [ + 0.118, + 0.566, + 0.545, + 0.58 + ], + "angle": 0, + "content": "RECOMMENDATION: [Optional suggestions for improvement]" + }, + { + "type": "text", + "bbox": [ + 0.118, + 0.596, + 0.279, + 0.609 + ], + "angle": 0, + "content": "When scoring, consider:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.611, + 0.872, + 0.641 + ], + "angle": 0, + "content": "- Accuracy: Does the edit precisely follow the given instruction? 
- Quality: Is the edit visually seamless and natural-looking? - Appropriateness: Does the edit maintain coherence with the original video context?" + }, + { + "type": "text", + "bbox": [ + 0.118, + 0.656, + 0.251, + 0.669 + ], + "angle": 0, + "content": "The overall scale is:" + }, + { + "type": "text", + "bbox": [ + 0.118, + 0.671, + 0.364, + 0.685 + ], + "angle": 0, + "content": "1-3: Poor - Major issues with the edit" + }, + { + "type": "text", + "bbox": [ + 0.118, + 0.687, + 0.531, + 0.7 + ], + "angle": 0, + "content": "4-6: Acceptable - Follows instruction but with noticeable flaws" + }, + { + "type": "text", + "bbox": [ + 0.118, + 0.702, + 0.449, + 0.715 + ], + "angle": 0, + "content": "7-8: Good - Clear, effective edit with minor issues" + }, + { + "type": "text", + "bbox": [ + 0.118, + 0.717, + 0.475, + 0.73 + ], + "angle": 0, + "content": "9-10: Excellent - Flawless execution of the instruction" + }, + { + "type": "list", + "bbox": [ + 0.118, + 0.671, + 0.531, + 0.73 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.118, + 0.747, + 0.185, + 0.759 + ], + "angle": 0, + "content": "Assistant" + }, + { + "type": "text", + "bbox": [ + 0.118, + 0.762, + 0.375, + 0.776 + ], + "angle": 0, + "content": "Scores, Explanation, Recommendation" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.097, + 0.109, + 0.38, + 0.445 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.383, + 0.11, + 0.641, + 0.444 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.642, + 0.109, + 0.905, + 0.444 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.353, + 0.455, + 0.645, + 0.47 + ], + "angle": 0, + "content": "Figure 11. More Examples of Concept Removal." 
+ }, + { + "type": "image", + "bbox": [ + 0.095, + 0.517, + 0.38, + 0.852 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.383, + 0.517, + 0.641, + 0.852 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.642, + 0.517, + 0.904, + 0.852 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.358, + 0.863, + 0.639, + 0.879 + ], + "angle": 0, + "content": "Figure 12. More Examples of Object Changes." + } + ], + [ + { + "type": "image", + "bbox": [ + 0.096, + 0.109, + 0.38, + 0.443 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.383, + 0.109, + 0.641, + 0.444 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.642, + 0.109, + 0.905, + 0.444 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.374, + 0.455, + 0.625, + 0.47 + ], + "angle": 0, + "content": "Figure 13. More Examples of Stylization." + }, + { + "type": "image", + "bbox": [ + 0.095, + 0.516, + 0.38, + 0.852 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.383, + 0.517, + 0.641, + 0.852 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.642, + 0.517, + 0.904, + 0.852 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.293, + 0.863, + 0.703, + 0.878 + ], + "angle": 0, + "content": "Figure 14. More Examples of Environment and Background Editing." + } + ], + [ + { + "type": "image", + "bbox": [ + 0.097, + 0.106, + 0.905, + 0.442 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.337, + 0.453, + 0.66, + 0.467 + ], + "angle": 0, + "content": "Figure 15. More Examples of Visual Features Editing." 
+ }, + { + "type": "image", + "bbox": [ + 0.095, + 0.51, + 0.903, + 0.852 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.352, + 0.866, + 0.645, + 0.88 + ], + "angle": 0, + "content": "Figure 16. More Examples of Object Grounding." + } + ], + [ + { + "type": "image", + "bbox": [ + 0.097, + 0.369, + 0.907, + 0.591 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.31, + 0.602, + 0.687, + 0.617 + ], + "angle": 0, + "content": "Figure 17. More Examples of Object Reasoning Segmentation." + } + ] +] \ No newline at end of file diff --git a/data/2025/2503_14xxx/2503.14350/f51fef62-e3ca-4f33-b47c-8b3a779fe535_origin.pdf b/data/2025/2503_14xxx/2503.14350/f51fef62-e3ca-4f33-b47c-8b3a779fe535_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..f7d001f1fa5885ee39086b30ac1303be59b0ff74 --- /dev/null +++ b/data/2025/2503_14xxx/2503.14350/f51fef62-e3ca-4f33-b47c-8b3a779fe535_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:63ffe7dfa0008858dfcf9054632396f6bb1da1827c1d8199c334b67bfa5706d9 +size 50956282 diff --git a/data/2025/2503_14xxx/2503.14350/full.md b/data/2025/2503_14xxx/2503.14350/full.md new file mode 100644 index 0000000000000000000000000000000000000000..732517ccdde45acee277ced6f2fdc1a02bd8afc3 --- /dev/null +++ b/data/2025/2503_14xxx/2503.14350/full.md @@ -0,0 +1,469 @@ +# VEGGIE: Instructional Editing and Reasoning Video Concepts with Grounded Generation + +Shoubin Yu $^{1,3*}$ Difan Liu $^{1*}$ Ziqiao Ma $^{1,2*}$ Yicong Hong $^{1}$ Yang Zhou $^{1}$ Hao Tan $^{1}$ Joyce Chai $^{2}$ Mohit Bansal $^{3}$ + +$^{1}$ Adobe Research $^{2}$ University of Michigan $^{3}$ UNC Chapel Hill + +https://veggie-gen.github.io/ + +![](images/248f43d2a8a5d0f6aaf3651344b22c259b8cc08d13d17fa22f292d515bdb478e.jpg) +Figure 1. 
We propose VEGGIE, a unified and versatile video generative model that handles various tasks for both video concept grounding and editing according to user instructions. With VEGGIE, users can locate, add, delete, and change concepts in a given video through diverse instruction formats (direct referring instruction or reasoning-demanding questions). Users can also edit videos with multimodal instruction empowered by MLLM, enabling applications like video editing from a reference image. + +# Abstract + +While recent video diffusion models enable video editing, unifying diverse instructional editing tasks (e.g., add, remove, modify) under a single framework remains a significant challenge. In this paper, we introduce VEGGIE, a Video Editor with Grounded Generation from Instructions, a simple end-to-end framework that unifies video concept editing, grounding, and reasoning based on diverse user instructions. Specifically, given a video and text query, VEGGIE first utilizes an MLLM to interpret user intentions in instructions and ground them to the video contexts, generating framespecific grounded task queries for pixel-space responses. A diffusion model then renders these plans and generates edited videos that align with user intent. To support diverse + +tasks and complex instructions, we employ a curriculum learning strategy: first aligning the MLLM and video diffusion model with large-scale instructional image editing data, followed by end-to-end fine-tuning on high-quality multitask video data. Additionally, we introduce a novel data synthesis pipeline to generate paired instructional video editing data for model training. It transforms static image data into diverse, high-quality video editing samples by leveraging Image-to-Video models to inject dynamics. VEGGIE shows strong performance in instructional video editing with different editing skills, outperforming the best instructional baseline as a versatile model, while other models struggle with multi-tasking. 
VEGGIE also excels in video object grounding and reasoning segmentation, where other baselines fail. We further reveal how the multiple tasks help each other and highlight promising applications like zero-shot multimodal instructional and in-context video editing. + +# 1. Introduction + +Building on the advances in Video Diffusion Models (VIDDMs) [2, 4, 24, 25, 74], video editing methods have emerged as video design tools, allowing users to manipulate video concepts such as adding, removing, altering objects and style translation [20, 53, 62, 79, 85]. To enhance user experiences, instructional video editing methods [54, 86] have been developed, using triples of text prompts, source videos, and target videos for training. Due to their limited performance in understanding user intent and multimodal semantics [17], several methods have incorporated multimodal large language models (MLLMs) to handle complex instructions/reasoning [17, 29, 75]. + +However, existing methods fall short of the goal of a simple, versatile video concept editor, facing three primary challenges. First, most methods are not end-to-end, requiring intermediate layout/mask/human or model caption guidance [35, 53, 63, 75], which adds workload on users and disrupts a seamless editing experience. Second, existing pipelines connecting MLLMs to VidDMs require multiple training objectives beyond simple pixel-space diffusion loss, such as language loss [75] or mask losses [66]. This increases optimization difficulty and often requires additional hyperparameter tuning or annotations. Third, existing video editing models, both instructional and non-instructional, struggle with handling other diverse editing tasks, ranging from addition, and deletion to stylization. For example, LGVI [66] fails in global edits such as stylization and color change, while VidToMe [41] struggles with local edits such as adding or removing objects. 
These methods also struggle with input videos that contain multiple objects or when user instructions require complex reasoning. + +These challenges result from two limitations: First, there is a lack of multitasking fine-tuning on well-curated instructional video editing datasets that span a broad range of skills. Second, models often lack two critical capabilities needed to interpret user intentions and accurately locate concepts: multimodal reasoning to infer the intended modification from the user's instruction; and grounding language to the input video to precisely identify the region or object to be edited. For example, in Figure 1, one can effortlessly locate the girl given "identify the little girl." When asked to "add a hat to the little girl," we intuitively imagine the hat placed on her head from commonsense, even without seeing an actual hat. + +To address these challenges, we introduce VEGGIE, a Video Editor with Grounded Generation from Instructions. VEGGIE unifies video concept grounding and editing without relying on additional layout, mask guidance, or intermediate caption [35, 39, 42, 75, 78, 79]. Instead, we formulate the problem as end-to-end grounded generation in pixel space, using only a diffusion loss. Specifically, given a video and a text query, VEGGIE first leverages an MLLM to interpret complex instructions, generating frame-wise con + +ditions. Unlike prior methods [43, 75] that use discrete text tokens as conditions, which disconnect the pipeline and block gradient propagation, VEGGIE employs continuous, learnable task query embeddings per frame. This enables end-to-end training and effectively captures grounded task representations for diffusion model conditioning. 
To handle diverse tasks and accurately interpret complex queries, we employ a curriculum learning strategy that begins by aligning MLLMs with diffusion models using massive paired instructional image editing data and then fine-tuning the model end-to-end on high-quality multitask video data to adapt video. Unlike tool-use methods [16, 35, 75], VEGGIE formulates both video grounding and instructional editing in the same video-to-video task formulation, enabling efficient handling through a unified single model. To further support end-to-end training, we introduce a novel automatic instructional video data generation pipeline that lifts high-quality instructional image editing data into the video domain using image-to-video and video evaluation tools. + +Existing video editing benchmarks do not provide wide and uniform coverage of diverse editing skills [59, 68]. To address this gap, we contribute VEG-Bench, an instructional video editing benchmark that spans 8 editing skills: concept addition, removal, object changing, environment background changing, visual feature changing, stylization, object grounding, and reasoning segmentation. Each skill is evaluated using a dedicated suite of metrics. We assess the proposed VEGGIE alongside 6 baselines on VEG-Bench. VEGGIE demonstrates strong performance across diverse editing skills, outperforming the best instructional baseline as a versatile, all-in-one model, while other models struggle with multi-tasking. Additionally, VEGGIE excels in video object grounding and reasoning segmentation tasks, where other baselines fall short. We show further analysis of how multi-task learning enhances our framework and highlight applications such as zero-shot multimodal instructional following (Fig. 2) and few-shot in-context editing (Fig. 3). Our contributions are summarized as follows: + +- We propose VEGGIE, an end-to-end model that integrates an MLLM and a VidDM. 
VEGGIE is a versatile framework that handles diverse instructional requests for editing and grounding various video concepts. Unlike existing work that achieves multitasking via tool use, VEGGIE unifies diverse tasks in a single model, thus simplifying the training with only diffusion loss. +- We propose a data synthesis pipeline, scaling high-quality instructional video editing data for future work. +- We propose VEG-Bench, an instructional video editing benchmark that spans 8 editing skills with dedicated metrics for each skill. +- VEGGIE achieves strong performance across diverse editing skills compared with SoTA methods, and shows potentials for multimodal instruction and in-context following. + +![](images/63aee7531a8352ad2237d744edc2c4244b764cdceaf3ac7d28b1d1aaff353c32.jpg) +Reference + +![](images/8ebc41e6884b7f069ed5ee7ac23e1d302d303b3d20ae0c2837436d84bb8719e2.jpg) +Transfer the style in the reference image. +Add the object in the reference image on the women. + +![](images/82fc676d2e8301dd302c138b4fb2e8579575fc1c2873220621aa9287a9876ed6.jpg) +Reference + +![](images/a52aa43228df85914871541a07c98e72868322f9fb8f14dbe66c5c1a9614cc60.jpg) +Figure 2. Multimodal instruction following emerges in VEGGIE, allowing for style transfer or object addition from reference images. + +![](images/15a599d498c84b1cb1fee7c11dacd6d52aee7bacf1257730ce3353e7016fc924.jpg) +Figure 3. In-context editing emerges in VEGGIE, allowing for few-shot learning of editing tasks with paired image demonstrations. + +# 2. Related Work + +Instructional Video Editing Video Diffusion Models (VidDMs) [2, 4, 6, 7, 15, 26, 49, 67, 71], enable high-quality video generation across a wide range of video concepts. Building on these advances, video editing methods have emerged as tools for video design, allowing users to manipulate video concepts such as adding, removing, altering objects and style translation [20, 53, 62, 79, 85]. 
To enhance user experiences, instructional video editing methods [54, 70, 86] have been developed, using triples of video instructions, source videos, and target videos for training. These methods demonstrate limited performance when complex multimodal reasoning is required, as noted by previous research on instructional image editing [17]. Moreover, they struggle with diverse editing tasks, from addition and deletion to stylization. For example, LGVI [66] is primarily designed for removal tasks, while TokenFlow [20] struggles with local edits such as adding, removing, or changing objects. We address this limitation with pixel-level multitasking fine-tuning on well-curated instructional video editing datasets covering various grounding and editing skills. + +Video Grounding and Segmentation Visual grounding requires models to connect language to its corresponding visual concept in the visual context [40, 47]. This is commonly evaluated via the language-guided semantic localization tasks, ranging from simple referring expressions in RefCOCO series [48, 77] and their generalized variant [44] that takes no-target and multi-target into account. Recently, grounded multimodal large lan + +guage models (MLLMs) are trained for object grounding to bounding boxes [8, 51, 52, 76, 82, 87] and segmentation masks [56, 69, 81, 84] using text-image pairs with fine-grained annotations linking phrases to entities. These models unlock the potential of reasoning segmentation [11, 36], bringing language-informed reasoning into semantic segmentation. Instead of using dedicated object detection or segmentation modules, we achieve video grounding through end-to-end training with only diffusion loss. + +# 3. Our Method: VEGGIE + +In this paper, we introduce a Video Editor with Grounded Generation from Instructions (VEGGIE), a unified and versatile generative video model. 
It combines the complex instruction understanding and reasoning capabilities of MLLMs with the generative capacity of VidDMs. The model is trained end-to-end with diffusion loss only. VEGGIE efficiently handles diverse user inputs, including direct instructions, complex questions requiring in-depth reasoning, and multimodal conditioning. It performs various pixel-level manipulations, enabling tasks such as video concept addition, removal, changing, stylization, grounding, and reasoning segmentation based on user instructions. We elaborate on the model design (Sec. 3.1), training and inference process (Sec. 3.2), and data curation (Sec. 3.3). + +# 3.1. Model Architecture + +VEGGIE consists of four main components (see Fig. 4): (1) a multimodal large language model, (2) a set of learnable + +![](images/a5d3f638a01dbbb52a6cc9b53f3d35262c056bd0b75f46a28b987aa706886c70.jpg) + +![](images/b55dc2a0ef4463e2084d920193d930a31ab28d573d9f0d5ccfef321fdc867476.jpg) +Figure 4. Overview of our proposed end-to-end VEGGIE framework. Our Multimodal Large Language Model first understands input video frames and diverse user instructions, then it generates frame-wise reasoning queries that maintain per-frame editing conditions for the video diffusion model. The video diffusion model will render the MLLM-generated conditions to the pixel space for diverse tasks, including video editing, video grounding, and video reasoning segmentation with questions. We only apply diffusion loss for the whole pipeline training. + +grounded task queries, (3) an alignment network (single-layer MLP) that projects the MLLM output into the condition space of the diffusion model, and (4) a video diffusion model initialized from an instructional image editing model [83]. Our model first generates latent conditions for target video frames by querying multimodal context using an MLLM, then renders these conditions at the pixel level through a video diffusion model, as detailed below. 


MLLM for Generating Grounded Task Guidance. As illustrated in the left of Fig. 4, given a video consisting of a sequence of frames $V = [f_{1}, \dots, f_{n}]$ , where $n$ is the frame number of the given video, a user instruction/question $I$ , our goal is to obtain the response $\widehat{V} = [\widehat{f}_{1}, \dots, \widehat{f}_{n}]$ at pixel space that faithfully reflects user instruction about the given video. The MLLM module processes both the input video $V$ and a user instruction $I$ to generate a sequence of grounded task tokens per frame: $C = [c_{1}, \dots, c_{n}]$ , which are input and output in parallel. These tokens serve as task guidance and implicitly encode the target manipulation, such as object attributes, spatial relationships, or style transfer parameters. The MLLM ensures the model captures both explicit user instructions and implicit reasoning needs.

VidDM for Rendering MLLM Guidance at Pixel Space. As illustrated in the right of Fig. 4, the VidDM takes the original video $V$ and the grounded task tokens $C$ as conditions to synthesize the target video $\widehat{V}$ . The original video is concatenated with the noise volume, and the task tokens are input to the cross-attention. With grounded task guidance in denoising steps, the generation process ensures that the output faithfully follows user instructions while preserving the

video's structure and motion dynamics. Through iterative denoising, it refines each frame while maintaining temporal consistency, applying pixel modifications coherently for a smooth and visually consistent output video $\widehat{V}$ .

# 3.2. 
Curriculum Learning from Images to Videos + +Training the model directly on video tasks presents two key challenges: (1) misalignment between MLLM and diffusion model representations, making it difficult for the diffusion model to interpret MLLM-generated task queries with limited fine-tuning data, and (2) the diffusion model's lack of multitasking capability, even for image tasks, due to insufficient training on diverse tasks. Our initial experiments also found the model collapsed when the whole pipeline was directly trained with all data. These challenges/observations underscore the need for pre-alignment between MLLM and the diffusion model to enable seamless adaptation from language-space task queries to pixel-space modifications. To this end, we adopt a two-stage curriculum learning strategy for the proposed VEGGIE framework. + +Stage 1: Aligning Diffusion and Language Spaces. In the first stage, we align the diffusion model with the MLLM using large-scale image-level instructional editing data. The MLLM remains frozen while we update the alignment network, grounded task queries, and diffusion UNet. This process fine-tunes the diffusion model weights to align with the language space, enabling the model to interpret MLLM-generated guidance and translate user instructions into pixel-level edits while preserving the MLLM's strong ability to understand instructions and user intentions. + +
TypeSourceR.E.G.# Img./Vid.# Ins.
VideoROVI [66]4.3K27.4K
VPLM [75]4.3K5.5K
GroundMoRe [12]1.3K5.5K
RVoS [60]1.9K6.1K
MeViS [14]1.5K17.1K
InstructV2V [9]68.3K68.3K
VEG-Edit (Ours)4.0K6.2K
Total136.1K
ImageSeed-Data-Edit [18]3M3M
LISA [37]0.2K1.3K
gRefCoCo [44]13.6K73.4K
PhraseCut [65]310.8K310.8K
EraseDraw [5]64.9K42.4K
MagicBrush [83]9.3K9.3K
SmartEdit [29]0.5K0.9K
Total3438.1K
+ +Table 1. Summary of our data for training. R.: Reasoning, E.: Editing, G.: Grounding. #Img/Vid: the number of images/videos, and #Ins.: the number of instruction-image/video pairs. + +Stage 2: Enhancing Temporal Consistency and Dynamics. With the MLLM and diffusion model aligned, fine-tuning diverse instructional video editing data becomes more effective for improved instruction following at pixel-space including temporal consistency, dynamic coherence, and editing faithfulness. In this stage, we fine-tune the framework with the MLLM, including the alignment network, grounded task queries, and all 3 dimensions in diffusion UNet, end-to-end with carefully curated multitasking instructional video editing data. Following prior work [22, 67], we inflated the 2D UNet from Stage 1 with temporal attention layers for video adaptation. For both stages 1 and 2, we optimize the framework with a single diffusion loss, enabling unified learning for improved instructional video editing performance while maintaining simplicity and efficiency. + +Classifier-Free Guidance during Testing. We employ classifier-free guidance to balance quality and diversity in diffusion-generated samples. Following prior work [3, 17], we apply classifier-free guidance to instructional visual editing considering two conditions: the grounded task tokens and the original video. To obtain unconditional guidance, we set null values $(\varnothing)$ for both task tokens and input video. 
In this case, our score estimate is: + +$$ +\begin{array}{l} \tilde {e _ {\theta}} (z _ {t}, c _ {T}, c _ {V}) = e _ {\theta} (z _ {t}, \varnothing , \varnothing) \\ + g _ {T} \cdot \left(e _ {\theta} \left(z _ {t}, c _ {V}, c _ {T}\right) - e _ {\theta} \left(z _ {t}, c _ {V}, \varnothing\right)\right) \\ + g _ {V} \cdot \left(e _ {\theta} \left(z _ {t}, c _ {V}, \varnothing\right) - e _ {\theta} \left(z _ {t}, \varnothing , \varnothing\right)\right), \\ \end{array} +$$ + +where $\theta$ represents the model parameters, $C_T$ and $C_V$ denote the task tokens and video conditions, $\varnothing$ is the null value, $z_t$ is the noised latent at timestamp $t$ , and $g_T$ and $g_V$ are the task guidance and video guidance scales, respectively. More training details are included later in Appendix. + +# 3.3. Data Curation Pipeline + +Existing video editing models, both instructional and non-instructional, struggle with diverse editing skills due to the lack of high-quality multitasking fine-tuning data. In this section, we introduce our data curation strategy to support VEGGIE in achieving versatile video editing skills. As listed in Tab. 1, we collect 3.4M image and 133.9K video data from diverse sources to support our VEGGIE curriculum learning as discussed in Sec. 3.2. We create our training dataset from two sources: (1) collecting existing image and video data and converting it into an instructional editing format, and (2) synthesizing new instructional video editing samples using existing datasets and generative models. + +Collecting Diverse Multitask Image and Video Data. We bring together instructional editing data from both image (Seed-Data-Edit [18], MagicBrush [83], EraseDraw [5]) and video (InstructV2V [9], VPLM [75]) sources. These datasets provide pairs of original and edited visual contents with user instructions. The tasks include adding, removing, and changing objects, stylizing, and performing global/local edits. 
Beyond editing datasets, we incorporate segmentation data at both the image level (gRefCoCo [44] and Phrase-Cut [65]) and the video level (RVoS and MeViS). These segmentation tasks are reformulated as color-filling challenges, which guide the model in learning referring grounding (i.e., understanding which object or region to edit) and strengthen its conceptual learning. To further unlock complex instruction understanding via MLLM, we include data that requires more advanced reasoning and implicit referencing. Specifically, we include: reasoning segmentation (LISA [37]), reasoning editing (SmartEdit [29]), interactive video inpainting (LGVI [66]), and motion-grounded video reasoning (GroundMoRe [12]). These tasks help VEGGIE learn implicit references and reasoning. + +Synthesizing Instructional Video Editing Data via Image-to-Video Animation. Recent methods [3, 54] generate synthetic instructional video-editing data by first creating text instructions with LLM, then getting edited videos via T2V models and prompt-to-prompt editing [23]. While these methods adapt image-based editing pipelines [9] to videos, the generated data suffer from temporal-consistency issues. To address this gap, we propose a novel image-to-video animation strategy that leverages the abundance of high-quality image-level instructional editing datasets [64, 83], which provide well-annotated instructions, paired edited images, and well-organized editing skill categories. As illustrated in Fig. 5, given an original image $I$ , an edited image $\bar{I}$ , and an instruction from an instructional image editing dataset [64], our approach involves three key steps. First, we use an offline MLLM [61, 72] to generate an image caption and an animation prompt that describes plausible motion within the image. Next, an image-to-video (I2V) model animates the image into a video $V$ . Finally, + +![](images/e788f4f4b9713f7aa04e3b0723c0bd508449a3c41f0753126f94891fa627fb93.jpg) +Figure 5. 
Our data generation pipeline for synthetic instructional video editing data. It injects dynamics into well-constructed instructional image editing datasets via the Image-to-Video (I2V) Model, and generates paired video data for instruction editing. + +we generate the corresponding edited video $\bar{V}$ using a first-frame-conditioned video editing model [35], leveraging $\bar{I}$ as a strong prior to ensure consistent edits across frames. Finally, to ensure data quality, we evaluate each original-edited video pair with automatic video quality evaluation metrics [30], which assess the generated videos from diverse dimensions, e.g., motion smoothness, image quality, and background consistency. This pipeline transforms carefully curated image-based datasets into instructional video-editing resources while preserving the precision of the original edits. As a result, our data method expands the availability of high-quality synthetic video-editing data, supporting a wider range of editing tasks in our end-to-end unified framework. More details on data generation, prompting, examples, and pre/post-processing are in the Appendix. + +# 4. Experiments + +We first introduce the VEG-Bench Benchmark and then demonstrate the superiority of VEGGIE across diverse video instructional editing skills. More experiments, visualization, and implementation details are in the Appendix. + +# 4.1. VEG-Bench and Metrics + +As no existing benchmark is designed for fine-grained instructional video editing skills, we manually collect and annotate VEG-Bench, containing 132 video-instruction pairs that balanced cover 8 different video generative skills (15-20 for each). Beyond standard metrics, including text-to-video alignment (CLIP-Text [55]), video smoothness (CLIP-F [55]), and image quality (MUSIQ [32]), we also + +first introduce MLLM-as-a-Judge to give a holistic evaluation score according to the given original video, edited video, and user instruction. 
It is achieved by prompting GPT4o [21] to evaluate whether the requested semantic change has been fulfilled, using a scale from 1 to 10. For addition and removal, we also introduce an object detector (GroundingDiNo [45]) to detect if the object is added/removed faithfully. For grounding and reasoning segmentation, we follow video grounding tasks [12, 14, 33, 58] and adopt the Jaccard index $(\mathcal{J})$ [31], F-measure $(\mathcal{F})$ [13], and their mean $(\mathcal{J} \& \mathcal{F})$ . We also compute SSIM between the generated video and the original video masked with GT masks. More evaluation/metrics details are included in Appendix.

# 4.2. Experimental Results

Instructional Video Editing over Diverse Skills. As shown in Tab. 2, we evaluate 7 different models on VEG-Bench across 8 distinct editing skills. Overall, VEGGIE demonstrates the best performance among instructional video editing models. Compared to VEGGIE, non-instructional models often struggle with concept removal and addition. This limitation arises because these models rely on attention control or additional conditions (e.g., depth maps) that impose strong priors, constraining the model and making object addition or removal challenging. We also observe that InsV2V achieves high scores in quality and smoothness metrics, but underperforms in alignment and MLLM judgment, which demand faithful semantic changes. Qualitative examples in Fig. 6 illustrate that InsV2V often makes minimal changes to the input video, resulting in high video quality but unfaithful outputs. In contrast, VEGGIE strikes a better balance, delivering both high-quality visuals and accurate semantic alignment with the intended edits.

Can Multi-Task Help Each Other? To test the previous hypothesis, we train our model on the VPLM [75] dataset, which includes paired grounding and removal tasks (approximately 5.5K samples for each task). 
We focus on these tasks as representative examples due to their straightforward evaluation against ground truth. As shown in Table 3, multitask training yields a lower FVD score and a higher SSIM score, demonstrating that learning to locate and remove a video concept can mutually reinforce performance. We show an example in Fig. 7. However, this conclusion only holds with a balanced data combination. We also observe that an excessive amount of grounding data can introduce more artifacts and negatively impact visual editing skills. + +Emergent Zero-shot Multimodal Instruction Following. We also highlight the emergent behavior of VEGGIE on multi-modal instruction following, even without dedicated training data for this specific editing instruction. Notably, VEGGIE demonstrates the ability to perform zero-shot multimodal instructional video editing. As illustrated in Fig. 2, VEGGIE can transfer styles or add objects from a reference + +![](images/769f5b258ad2a40f74015e9fe4b7b6b606d38b4bc700fb57fbc9df38e8c93b20.jpg) +[Addition] Please add a ball in the given video frames. + +![](images/00f32512d007bed96281648f160b4774880b37f5f9ff4dfdf7bdfebeb8280ea6.jpg) +[Removal] Please remove the man in black in given video frames. + +![](images/d5a4bed054201a04c482dc9e52feec17fb85da977718e3c6b1ff3aedd5e32725.jpg) +[Swap] Replace golden building with a white mountain. + +![](images/fc7eb7072f3ac10455a05af86de057f09bf8de77d3070a1df4c5cdecfb45e595.jpg) +[Environment] Make it on the beach. + +![](images/34b49ae75a067c46555833a6dcf6b163d985c2eb0b4af622d4cafc6f1c4f5a16.jpg) +[Color] Make the swan white. + +![](images/cedd9c5e501fb9c67b83bffd4cdbec3face648d9021835024e06f5277e66ca55.jpg) +[Texture] Make the rhinocero furry. + +![](images/d119609c69d9e8f8e016ca37c53edbeabac37f9d31df9ffab230fed4f1bd87fa.jpg) +[Style] Make it chinese ink style. +Figure 6. Qualitative comparison of editing results across 8 different abilities (splitting visual features into color and texture). 
We provide zoom-in details for a more detailed comparison. Best viewed in color. More in Appendix. + +![](images/ece2036c5a6eb87cd9ef520c69ff7ac773f46d71db767926851aa27e027f4caa.jpg) +[Grounding] Could you locate the knife + +![](images/7fa0623cc60d54f9ad0f0fcf8df5957362b4f09b3621f95288c1e4201f7170bf.jpg) +[Reasoning] What can be used for heating food? + +
Methods +GeneratorNon-Instructional Editing ModelInstructional Editing Model
VidToMe [41] (SD1.5)TokenFlow [20] (SD2.1)Flatten [10] (SD2.1)InstructDiff [19] (SD1.5)LGVI [66] (SD1.5)InsV2V [9] (SD1.5)VEGGIE (Ours) (SD1.5)
Concept Addition
MLLM-Judge (↑)5.005.806.627.262.735.697.44
Alignment (↑)27.8029.3028.2228.1025.0628.2729.27
Smoothness (↑)96.7997.2695.7493.6696.0996.9494.93
Quality (↑)62.8165.6255.4554.0841.5956.2461.31
Detection (↑)47.9849.5349.7455.3614.4248.0157.96
Concept Removal
MLLM-Judge (↑)2.603.734.466.126.592.785.07
Alignment (↑)75.0175.9978.4075.5175.6774.4175.63
Smoothness (↑)96.1396.4795.8291.8397.0396.9995.04
Quality (↑)66.3271.5250.7755.0842.3158.7950.99
Detection (↑)34.3155.1670.9164.8178.4025.6470.22
Object Changing
MLLM-Judge (↑)5.006.537.377.002.066.606.63
Alignment (↑)25.6928.7627.0627.3622.1726.6027.77
Smoothness (↑)96.2397.2196.1392.0795.6696.7495.44
Quality (↑)64.0669.9759.3755.0138.2060.9058.15
Environment & Background Changing
MLLM-Judge (↑)5.817.357.376.052.376.607.18
Alignment (↑)28.1730.0030.0428.0321.9428.2729.15
Smoothness (↑)95.7696.9695.9089.8595.6696.0394.58
Quality (↑)61.9567.0654.5853.0638.9754.9454.25
Visual Feature Changing (Color & Texture)
MLLM-Judge (↑)5.866.856.606.432.147.537.33
Alignment (↑)27.9929.2529.4627.5423.1828.8828.69
Smoothness (↑)95.9397.1095.8391.7194.7596.6694.52
Quality (↑)65.8069.3153.3258.2936.2759.3657.91
Stylization
MLLM-Judge (↑)7.237.628.317.413.718.078.26
Alignment (↑)29.8430.2529.0027.7422.8029.1429.38
Smoothness (↑)96.3197.2396.7188.9795.6296.5095.69
Quality (↑)64.0568.2253.1854.1535.7662.5957.00
Object Grounding
SSIM (↑)40.4750.4647.2137.9866.8449.6570.90
Jaccard Index J (↑)13.8519.2925.6219.881.5213.8937.74
F-measure F (↑)15.5016.8617.6012.813.0717.3721.83
Reasoning Segmentation
SSIM (↑)---32.3944.4759.8668.41
Jaccard Index J (↑)---14.0210.1216.8922.53
F-measure F (↑)---8.079.0610.4515.97
Avg. Ranking2.611.411.963.003.212.001.78
+ +Table 2. Comparison of video editing task with instructional / non-instructional models on VEG-Bench. -: the task is not capable of non-instructional models. We gray out numbers of non-instructional models that are in different categories. + +
SettingsRemoval (FVD ↓)Grounding (SSIM ↑)
Grd.-only-52.34
Rmv.-only1098.52-
Mixed987.8055.21
+ +Table 3. An ablation study on whether multi-task learning provides transferable benefits that enhance performance across tasks. We focus on removal and grounding tasks as representative examples. + +![](images/26fbd8a917fd53e373c29389d3c2a59e6baaf3dc3521674392c00f0a449b214f.jpg) +Figure 7. Comparison between single- and multi-skill models with different data training. We find tasks can help each other. + +image into the input video based on instructions. + +Emergent Few-shot In-Context Editing. As shown in Fig. 3, VEGGIE can effectively utilize a few example image pairs to transfer the intended editing changes seamlessly to the input video. We observe that VEGGIE exhibits in-context learning for image editing without the need for language instructions. Instead, it uses image pairs as examples to infer and apply the desired editing intention directly. + +# 5. Conclusion + +We present VEGGIE, a unified end-to-end model for instructional video editing that handles diverse pixel-level tasks. VEGGIE leverages MLLM for robust instruction understanding and employs a video diffusion model to execute pixel-level edits. Our framework uses a single diffusion loss for end-to-end optimization across varied tasks/skills. We also introduce a novel synthetic data generation pipeline and VEG-Bench, a benchmark that assesses a broad range of editing skills. Our VEGGIE outperforms previous methods as a versatile, all-in-one solution. We hope our model, data, and benchmark to advance research on instructional generative video models. + +# References + +[1] Zechen Bai, Tong He, Haiyang Mei, Pichao Wang, Ziteng Gao, Joya Chen, Lei Liu, Zheng Zhang, and Mike Zheng Shou. One token to seg them all: Language instructed reasoning segmentation in videos. arXiv preprint arXiv:2409.19603, 2024. 2 +[2] Andreas Blattmann, Tim Dockhorn, Sumith Kulal, Daniel Mendelevitch, Maciej Kilian, Dominik Lorenz, Yam Levi, Zion English, Vikram Voleti, Adam Letts, et al. 
Stable video diffusion: Scaling latent video diffusion models to large datasets. arXiv preprint arXiv:2311.15127, 2023. 2, 3 +[3] Tim Brooks, Aleksander Holynski, and Alexei A Efros. Instructpix2pix: Learning to follow image editing instructions. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 18392-18402, 2023. 5 +[4] Tim Brooks, Bill Peebles, Connor Holmes, Will DePue, Yufei Guo, Li Jing, David Schnurr, Joe Taylor, Troy Luhman, Eric Luhman, et al. Video generation models as world simulators. OpenAI, https://openai.com/research/video-generation-models-as-world-simulators, 2024.2.3 +[5] Alper Canberk, Maksym Bondarenko, Ege Ozguroglu, Ruoshi Liu, and Carl Vondrick. Erasedraw: Learning to insert objects by erasing them from images. arXiv preprint arXiv:2409.00522, 2024. 5 +[6] Duygu Ceylan, Chun-Hao P Huang, and Niloy J Mitra. Pix2video: Video editing using image diffusion. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 23206-23217, 2023. 3 +[7] Wenhao Chai, Xun Guo, Gaoang Wang, and Yan Lu. Stablevideo: Text-driven consistency-aware diffusion video editing. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 23040-23050, 2023. 3 +[8] Keqin Chen, Zhao Zhang, Weili Zeng, Richong Zhang, Feng Zhu, and Rui Zhao. Shikra: Unleashing multimodal llm's referential dialogue magic. arXiv preprint arXiv:2306.15195, 2023. 3 +[9] Jiaxin Cheng, Tianjun Xiao, and Tong He. Consistent video-to-video transfer using synthetic dataset. In The Twelfth International Conference on Learning Representations, 2024. 5, 8, 1, 2 +[10] Yuren Cong, Mengmeng Xu, christian simon, Shoufa Chen, Jiawei Ren, Yanping Xie, Juan-Manuel Perez-Rua, Bodo Rosenhahn, Tao Xiang, and Sen He. FLATTEN: optical FLOW-guided ATTENtion for consistent text-to-video editing. In The Twelfth International Conference on Learning Representations, 2024. 
8, 1 +[11] Andong Deng, Tongjia Chen, Shoubin Yu, Taojiannan Yang, Lincoln Spencer, Yapeng Tian, Ajmal Saeed Mian, Mohit Bansal, and Chen Chen. Motion-grounded video reasoning: Understanding and perceiving motion at pixel level. arXiv preprint arXiv:2411.09921, 2024. 3 +[12] Andong Deng, Tongjia Chen, Shoubin Yu, Taojiannan Yang, Lincoln Spencer, Yapeng Tian, Ajmal Saeed Mian, Bansal Mohit, and Chen. Chen. Motion-grounded video reasoning: Understanding and perceiving motion at pixel level. 2024. 5, 6, 2 + +[13] Lee R Dice. Measures of the amount of ecologic association between species. Ecology, 26(3):297-302, 1945. 6 +[14] Henghui Ding, Chang Liu, Shuting He, Xudong Jiang, and Chen Change Loy. Mevis: A large-scale benchmark for video segmentation with motion expressions. In ICCV, pages 2694-2703, 2023. 5, 6 +[15] Patrick Esser, Johnathan Chiu, Parmida Atighehchian, Jonathan Granskog, and Anastasis Germanidis. Structure and content-guided video synthesis with diffusion models. In CVPR, pages 7346-7356, 2023. 3 +[16] Hao Fei, Shengqiong Wu, Hanwang Zhang, Tat-Seng Chua, and Shuicheng Yan. Vitron: A unified pixel-level vision llm for understanding, generating, segmenting, editing, 2024. 2 +[17] Tsu-Jui Fu, Wenze Hu, Xianzhi Du, William Yang Wang, Yinfei Yang, and Zhe Gan. Guiding instruction-based image editing via multimodal large language models. arXiv preprint arXiv:2309.17102, 2023. 2, 3, 5 +[18] Yuying Ge, Sijie Zhao, Chen Li, Yixiao Ge, and Ying Shan. Seed-data-edit technical report: A hybrid dataset for instructional image editing. arXiv preprint arXiv:2405.04007, 2024. 5 +[19] Zigang Geng, Binxin Yang, Tiankai Hang, Chen Li, Shuyang Gu, Ting Zhang, Jianmin Bao, Zheng Zhang, Houqiang Li, Han Hu, et al. Instructdiffusion: A generalist modeling interface for vision tasks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12709-12720, 2024. 8, 1, 2 +[20] Michal Geyer, Omer Bar-Tal, Shai Bagon, and Tali Dekel. 
Tokenflow: Consistent diffusion features for consistent video editing. arXiv preprint arXiv:2307.10373, 2023. 2, 3, 8, 1 +[21] gpt 4o. https://openai.com/index/hello-gpt-4o/.2024.6 +[22] Yuwei Guo, Ceyuan Yang, Anyi Rao, Zhengyang Liang, Yaohui Wang, Yu Qiao, Maneesh Agrawala, Dahua Lin, and Bo Dai. Animatediff: Animate your personalized text-to-image diffusion models without specific tuning. arXiv preprint arXiv:2307.04725, 2023. 5, 1 +[23] Amir Hertz, Ron Mokady, Jay Tenenbaum, Kfir Aberman, Yael Pritch, and Daniel Cohen-Or. Prompt-to-prompt image editing with cross attention control. arXiv preprint arXiv:2208.01626, 2022.5 +[24] Jonathan Ho, William Chan, Chitwan Saharia, Jay Whang, Ruiqi Gao, Alexey Gritsanko, Diederik P Kingma, Ben Poole, Mohammad Norouzi, David J Fleet, et al. Imagen video: High definition video generation with diffusion models. arXiv preprint arXiv:2210.02303, 2022. 2 +[25] Jonathan Ho, Tim Salimans, Alexey Gritsenko, William Chan, Mohammad Norouzi, and David J Fleet. Video diffusion models. Advances in Neural Information Processing Systems, 2022. 2 +[26] Wenyi Hong, Ming Ding, Wendi Zheng, Xinghan Liu, and Jie Tang. Cogvideo: Large-scale pretraining for text-to-video generation via transformers. In The Eleventh International Conference on Learning Representations, 2023. 3 +[27] Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yanzhi Li, Shean Wang, Lu Wang, Weizhu Chen, et al. Lora: Low-rank adaptation of large language models. ICLR, 1(2):3, 2022. 1 + +[28] Jiahao Hu, Tianxiong Zhong, Xuebo Wang, Boyuan Jiang, Xingye Tian, Fei Yang, Pengfei Wan, and Di Zhang. Vivid-10m: A dataset and baseline for versatile and interactive video local editing. arXiv preprint arXiv:2411.15260, 2024. 2 +[29] Yuzhou Huang, Liangbin Xie, Xintao Wang, Ziyang Yuan, Xiaodong Cun, Yixiao Ge, Jiantao Zhou, Chao Dong, Rui Huang, Ruimao Zhang, et al. Smartedit: Exploring complex instruction-based image editing with multimodal large language models. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8362-8371, 2024. 2, 5 +[30] Ziqi Huang, Yinan He, Jiashuo Yu, Fan Zhang, Chenyang Si, Yuming Jiang, Yuanhan Zhang, Tianxing Wu, Qingyang Jin, Nattapol Chanpaisit, et al. Vbench: Comprehensive benchmark suite for video generative models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 21807-21818, 2024. 6, 2 +[31] Paul Jaccard. The distribution of the flora in the alpine zone. 1. New phytologist, 11(2):37-50, 1912. 6 +[32] Junjie Ke, Qifei Wang, Yilin Wang, Peyman Milanfar, and Feng Yang. Musiq: Multi-scale image quality transformer. In Proceedings of the IEEE/CVF international conference on computer vision, pages 5148-5157, 2021. 6 +[33] Anna Khoreva, Anna Rohrbach, and Bernt Schiele. Video object segmentation with language referring expressions. In Computer Vision-ACCV 2018: 14th Asian Conference on Computer Vision, Perth, Australia, December 2–6, 2018, Revised Selected Papers, Part IV 14, pages 123–141. Springer, 2019. 6 +[34] Weijie Kong, Qi Tian, Zijian Zhang, Rox Min, Zuozhuo Dai, Jin Zhou, Jiangfeng Xiong, Xin Li, Bo Wu, Jianwei Zhang, et al. Hunyuanvideo: A systematic framework for large video generative models. arXiv preprint arXiv:2412.03603, 2024. 2 +[35] Max Ku, Cong Wei, Weiming Ren, Harry Yang, and Wenhu Chen. Anyv2v: A tuning-free framework for any video-to-video editing tasks. arXiv preprint arXiv:2403.14468, 2024. 2, 6 +[36] Xin Lai, Zhuotao Tian, Yukang Chen, Yanwei Li, Yuhui Yuan, Shu Liu, and Jiaya Jia. Lisa: Reasoning segmentation via large language model. arXiv preprint arXiv:2308.00692, 2023. 3 +[37] Xin Lai, Zhuotao Tian, Yukang Chen, Yanwei Li, Yuhui Yuan, Shu Liu, and Jiaya Jia. Lisa: Reasoning segmentation via large language model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9579-9589, 2024. 
5 +[38] Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Yanwei Li, Ziwei Liu, and Chunyuan Li. Llava-onevision: Easy visual task transfer. arXiv preprint arXiv:2408.03326, 2024. 1 +[39] Jialu Li, Shoubin Yu, Han Lin, Jaemin Cho, Jaehong Yoon, and Mohit Bansal. Training-free guidance in text-to-video generation via multimodal planning and structured noise initialization. arXiv preprint arXiv:2504.08641, 2025. 2 +[40] Liunian Harold Li, Pengchuan Zhang, Haotian Zhang, Jianwei Yang, Chunyuan Li, Yiwu Zhong, Lijuan Wang, Lu Yuan, Lei Zhang, Jenq-Neng Hwang, et al. Grounded language + +image pre-training. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10965-10975, 2022. 3 +[41] Xirui Li, Chao Ma, Xiaokang Yang, and Ming-Hsuan Yang. Vidthome: Video token merging for zero-shot video editing. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7486-7495, 2024. 2, 8, 1 +[42] Long Lian, Baifeng Shi, Adam Yala, Trevor Darrell, and Boyi Li. Llm-grounded video diffusion models. arXiv preprint arXiv:2309.17444, 2023. 2 +[43] Han Lin, Abhay Zala, Jaemin Cho, and Mohit Bansal. Videodirectorgpt: Consistent multi-scene video generation via llm-guided planning. arXiv preprint arXiv:2309.15091, 2023. 2 +[44] Chang Liu, Henghui Ding, and Xudong Jiang. Gres: Generalized referring expression segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 23592-23601, 2023. 3, 5 +[45] Shilong Liu, Zhaoyang Zeng, Tianhe Ren, Feng Li, Hao Zhang, Jie Yang, Chunyuan Li, Jianwei Yang, Hang Su, Jun Zhu, et al. Grounding dino: Marrying dino with grounded pre-training for open-set object detection. arXiv preprint arXiv:2303.05499, 2023. 6 +[46] Shaoteng Liu, Tianyu Wang, Jui-Hsien Wang, Qing Liu, Zhifei Zhang, Joon-Young Lee, Yijun Li, Bei Yu, Zhe Lin, Soo Ye Kim, et al. Generative video propagation. arXiv preprint arXiv:2412.19761, 2024. 
2 +[47] Ziqiao Ma, Jiayi Pan, and Joyce Chai. World-to-words: Grounded open vocabulary acquisition through fast mapping in vision-language models. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 524–544, 2023. 3 +[48] Junhua Mao, Jonathan Huang, Alexander Toshev, Oana Camburu, Alan L Yuille, and Kevin Murphy. Generation and comprehension of unambiguous object descriptions. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 11-20, 2016. 3 +[49] Kangfu Mei and Vishal Patel. Vidm: Video implicit diffusion models. In Proceedings of the AAAI conference on artificial intelligence, pages 9117-9125, 2023. 3 +[50] Bo Miao, Mohammed Bennamoun, Yongsheng Gao, Mubarak Shah, and Ajmal Mian. Towards temporally consistent referring video object segmentation. https://arxiv.org/abs/2403.19407, 2024. 2 +[51] Zhiliang Peng, Wenhui Wang, Li Dong, Yaru Hao, Shaohan Huang, Shuming Ma, Qixiang Ye, and Furu Wei. Grounding multimodal large language models to the world. In The Twelfth International Conference on Learning Representations, 2024. 3 +[52] Renjie Pi, Jiahui Gao, Shizhe Diao, Rui Pan, Hanze Dong, Jipeng Zhang, Lewei Yao, Jianhua Han, Hang Xu, Lingpeng Kong, et al. Detgpt: Detect what you need via reasoning. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 14172-14189, 2023. 3 +[53] Chenyang Qi, Xiaodong Cun, Yong Zhang, Chenyang Lei, Xintao Wang, Ying Shan, and Qifeng Chen. Fatezero: Fusing + +attentions for zero-shot text-based video editing. In ICCV, pages 15932-15942, 2023. 2, 3 +[54] Bosheng Qin, Juncheng Li, Siliang Tang, Tat-Seng Chua, and Yueting Zhuang. Instructvid2vid: Controllable video editing with natural language instructions. In 2024 IEEE International Conference on Multimedia and Expo (ICME), pages 1-6. IEEE, 2024. 
2, 3, 5 +[55] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021. 6 +[56] Hanoona Rasheed, Muhammad Maaz, Sahal Shaji, Abdelrahman Shaker, Salman Khan, Hisham Cholakkal, Rao M Anwer, Eric Xing, Ming-Hsuan Yang, and Fahad S Khan. Glamm: Pixel grounding large multimodal model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024. 3 +[57] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10684-10695, 2022. 1 +[58] Seonguk Seo, Joon-Young Lee, and Bohyung Han. Urvos: Unified referring video object segmentation network with a large-scale benchmark. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23–28, 2020, Proceedings, Part XV 16, pages 208–223. Springer, 2020. 6 +[59] Shangkun Sun, Xiaoyu Liang, Songlin Fan, Wenxu Gao, and Wei Gao. Ve-bench: Subjective-aligned benchmark suite for text-driven video editing quality assessment. In Proceedings of the AAAI Conference on Artificial Intelligence, 2025. 2 +[60] Carles Ventura, Miriam Bellver, Andreu Girbau, Amaia Salvador, Ferran Marques, and Xavier Giro-i Nieto. Rvos: End-to-end recurrent network for video object segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5277-5286, 2019. 5 +[61] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Yang Fan, Kai Dang, Mengfei Du, Xuancheng Ren, Rui Men, Dayiheng Liu, Chang Zhou, Jingren Zhou, and Junyang Lin. 
Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024. 5, 2 +[62] Xiang Wang, Hangjie Yuan, Shiwei Zhang, Dayou Chen, Jiuniu Wang, Yingya Zhang, Yujun Shen, Deli Zhao, and Jingren Zhou. Videocomposer: Compositional video synthesis with motion controllability. Advances in Neural Information Processing Systems, 36, 2024. 2, 3 +[63] Zhenyu Wang, Aoxue Li, Zhenguo Li, and Xihui Liu. Genartist: Multimodal IIm as an agent for unified image generation and editing. arXiv preprint arXiv:2407.05600, 2024. 2 +[64] Cong Wei, Zheyang Xiong, Weiming Ren, Xinrun Du, Ge Zhang, and Wenhu Chen. Omniedit: Building image edit + +ing generalist models through specialist supervision. arXiv preprint arXiv:2411.07199, 2024. 5, 2 +[65] Chenyun Wu, Zhe Lin, Scott Cohen, Trung Bui, and Subhransu Maji. Phrasecut: Language-based image segmentation in the wild. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10216-10225, 2020. 5 +[66] Jianzong Wu, Xiangtai Li, Chenyang Si, Shangchen Zhou, Jingkang Yang, Jiangning Zhang, Yining Li, Kai Chen, Yunhai Tong, Ziwei Liu, et al. Towards language-driven video inpainting via multimodal large language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12501-12511, 2024. 2, 3, 5, 8, 1 +[67] Jay Zhangjie Wu, Yixiao Ge, Xintao Wang, Stan Weixian Lei, Yuchao Gu, Yufei Shi, Wynne Hsu, Ying Shan, Xiaohu Qie, and Mike Zheng Shou. Tune-a-video: One-shot tuning of image diffusion models for text-to-video generation. In ICCV, pages 7623-7633, 2023. 3, 5 +[68] Jay Zhangjie Wu, Xiuyu Li, Difei Gao, Zhen Dong, Jinbin Bai, Aishani Singh, Xiaoyu Xiang, Youzeng Li, Zuwei Huang, Yuanxi Sun, et al. Cvpr 2023 text guided video editing competition. arXiv preprint arXiv:2310.16003, 2023. 2 +[69] Zhuofan Xia, Dongchen Han, Yizeng Han, Xuran Pan, Shiji Song, and Gao Huang. 
Gsva: Generalized segmentation via multimodal large language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024. 3 +[70] Zhen Xing, Qi Dai, Zihao Zhang, Hui Zhang, Han Hu, Zuxuan Wu, and Yu-Gang Jiang. Vidiff: Translating videos via multi-modal instructions with diffusion models. arXiv preprint arXiv:2311.18837, 2023. 3 +[71] Wilson Yan, Yunzhi Zhang, Pieter Abbeel, and Aravind Srinivas. Videogpt: Video generation using vq-vae and transformers. arXiv preprint arXiv:2104.10157, 2021. 3 +[72] An Yang, Baosong Yang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Zhou, Chengpeng Li, Chengyuan Li, Dayiheng Liu, Fei Huang, Guanting Dong, Haoran Wei, Huan Lin, Jialong Tang, Jialin Wang, Jian Yang, Jianhong Tu, Jianwei Zhang, Jianxin Ma, Jin Xu, Jingren Zhou, Jinze Bai, Jinzheng He, Junyang Lin, Kai Dang, Keming Lu, Keqin Chen, Kexin Yang, Mei Li, Mingfeng Xue, Na Ni, Pei Zhang, Peng Wang, Ru Peng, Rui Men, Ruize Gao, Runji Lin, Shijie Wang, Shuai Bai, Sinan Tan, Tianhang Zhu, Tianhao Li, Tianyu Liu, Wenbin Ge, Xiaodong Deng, Xiaohuan Zhou, Xingzhang Ren, Xinyu Zhang, Xipin Wei, Xuancheng Ren, Yang Fan, Yang Yao, Yichang Zhang, Yu Wan, Yunfei Chu, Yuqiong Liu, Zeyu Cui, Zhenru Zhang, and Zhihao Fan. Qwen2 technical report. arXiv preprint arXiv:2407.10671, 2024. 5 +[73] An Yang, Baosong Yang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Zhou, Chengpeng Li, Chengyuan Li, Dayiheng Liu, Fei Huang, et al. Qwen2 technical report. arXiv preprint arXiv:2407.10671, 2024. 1 +[74] Zhuoyi Yang, Jiayan Teng, Wendi Zheng, Ming Ding, Shiyu Huang, Jiazheng Xu, Yuanming Yang, Wenyi Hong, Xiaohan Zhang, Guanyu Feng, et al. Cogvideox: Text-to-video diffusion models with an expert transformer. arXiv preprint arXiv:2408.06072, 2024. 2 + +[75] Jaehong Yoon, Shoubin Yu, and Mohit Bansal. Raccoon: Remove, add, and change video content with auto-generated narratives. arXiv preprint arXiv:2405.18406, 2024. 
2, 5, 6 +[76] Haoxuan You, Haotian Zhang, Zhe Gan, Xianzhi Du, Bowen Zhang, Zirui Wang, Liangliang Cao, Shih-Fu Chang, and Yinfei Yang. Ferret: Refer and ground anything anywhere at any granularity. In The Twelfth International Conference on Learning Representations, 2023. 3 +[77] Licheng Yu, Patrick Poirson, Shan Yang, Alexander C Berg, and Tamara L Berg. Modeling context in referring expressions. In Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part II 14, pages 69-85. Springer, 2016. 3 +[78] Shoubin Yu, Jacob Zhiyuan Fang, Jian Zheng, Gunnar Sigurdsson, Vicente Ordonez, Robinson Piramuthu, and Mohit Bansal. Zero-shot controllable image-to-video animation via motion decomposition. In Proceedings of the 32nd ACM International Conference on Multimedia, pages 3332-3341, 2024. 2 +[79] Tao Yu, Runseng Feng, Ruoyu Feng, Jinming Liu, Xin Jin, Wenjun Zeng, and Zhibo Chen. Inpaint anything: Segment anything meets image inpainting. arXiv preprint arXiv:2304.06790, 2023. 2, 3 +[80] Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid loss for language image pre-training. In ICCV, pages 11975-11986, 2023. 1 +[81] Hao Zhang, Hongyang Li, Feng Li, Tianhe Ren, Xueyan Zou, Shilong Liu, Shijia Huang, Jianfeng Gao, Lei Zhang, Chunyuan Li, et al. Llava-grounding: Grounded visual chat with large multimodal models. arXiv preprint arXiv:2312.02949, 2023. 3 +[82] Haotian Zhang, Haoxuan You, Philipp Dufter, Bowen Zhang, Chen Chen, Hong-You Chen, Tsu-Jui Fu, William Yang Wang, Shih-Fu Chang, Zhe Gan, et al. Ferret-v2: An improved baseline for referring and grounding with large language models. arXiv preprint arXiv:2404.07973, 2024. 3 +[83] Kai Zhang, Lingbo Mo, Wenhu Chen, Huan Sun, and Yu Su. Magicbrush: A manually annotated dataset for instruction-guided image editing. Advances in Neural Information Processing Systems, 36, 2024. 
4, 5, 1 +[84] Yichi Zhang, Ziqiao Ma, Xiaofeng Gao, Suhaila Shakiah, Qiaozi Gao, and Joyce Chai. Groundhog: Grounding large language models to holistic segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024. 3 +[85] Zhixing Zhang, Bichen Wu, Xiaoyan Wang, Yaqiao Luo, Luxin Zhang, Yinan Zhao, Peter Vajda, Dimitris Metaxas, and Licheng Yu. Avid: Any-length video inpainting with diffusion model. arXiv preprint arXiv:2312.03816, 2023. 2, 3 +[86] Zhenghao Zhang, Zuozhuo Dai, Long Qin, and Weizhi Wang. Effived: Efficient video editing via text-instruction diffusion models. arXiv preprint arXiv:2403.11568, 2024. 2, 3 +[87] Yang Zhao, Zhijie Lin, Daquan Zhou, Zilong Huang, Jiashi Feng, and Bingyi Kang. Bubogpt: Enabling visual grounding in multi-modal llms. arXiv preprint arXiv:2307.08581, 2023. 3 + +# VEGGIE: Instructional Editing and Reasoning Video Concepts with Grounded Generation + +Supplementary Material + +# 6. Appendix + +In this Appendix, we provide extra details on + +- Implementation details of VEGGIE training, and evaluation and baseline evaluations. +- Extra details on our data generation pipeline, including each module's details, prompts for each promptable module, data filtering, and visualization. +- Extra visualizations for each task and the comparison with the other 6 strong baseline models. +- Limitation and future work discussion. + +# 6.1. Implementation Details + +Model Architecture. Our MLLM is initialized with LLaVA-OneVision-7B (LLaVA-OV) [38]. It is a strong MLLM consisting of Qwen2 [73] LLM with 32K context window, SigLIP [80] visual encoder, and a 2-layer-MLP projector. LLaVA-OV can handle diverse visual-language tasks (including interleaved-frame, video). It provides a good starting point for our VEGGIE to understand complex user instructions and can respond with multiple frame-wise implicit planning thanks to its long context window. 
Our video diffusion model is initialized from the instructional image editing model, MagicBrush [83]. We further inflated 2D convolution layers to 3D form and inserted temporal attention layers following AnimateDiff [22] to adapt videos. Our alignment network is a single-layer MLP. We set 32 grounded task tokens for each frame. + +Training Details. Our MLLM is initialized with LLaVA-OneVision-7B (LLaVA-OV) [38]. Our VidDM is initialized from the instructional image editing model, MagicBrush [83] with Stable Diffusion v1.5 backbone [57]. We further inflated 2D convolution layers with temporal attention layers, following AnimateDiff [22] to adapt videos. Our VEGGIE adopts a 2-stage curriculum training strategy (Sec. 3.2). In the first stage, we fully fine-tune the 2D convolution layers in the UNet, the alignment network, and the task query tokens in the MLLM on image data, with 862M trainable parameters. In the second stage, we train all 3 dimensions in the UNet, the alignment network, the task query tokens, and a LoRA in the MLLM, leading to 1.3B trainable parameters. Both stages are trained end-to-end with only a diffusion loss. More details are in the Appendix. + +We keep the VAE encoder and decoder frozen during the entire training process. In the first stage, we keep the MLLM (including visual encoder, MLP projector, and LLM) frozen, and fully fine-tune learnable grounded task queries, + +alignment network, and diffusion model, leading to around 800M training parameters. We set $1e^{-4}$ learning rate, and 96 batch size on each GPU. We use 16 A100 GPUs for the first stage of fine-tuning with 25K steps. In the second stage, we insert LoRA [27] modules into the LLM backbone, and inflate diffusion models by inserting extra temporal layers as in AnimateDiff [22]. We fine-tune LoRA, alignment network, learnable grounded task query tokens, and the diffusion model, leading to around 1.3B trainable parameters. 
We set $5e^{-4}$ learning rate, and 1 batch size with 8 gradient accumulation steps on 32 A100 GPUs. For LoRA, we set lora rank 64, lora alpha 16, and lora dropout 0.05. We train the second stage video model 2.5K step with 8 uniformly sampled frames. + +Evaluation and Baseline Details. We primarily compare our model with strong instructional editing models [9, 19, 66]. Additionally, we include non-instructional editing models [10, 20, 41] for completeness, although these are not fair baselines since they are not end-to-end and rely on additional conditions, such as depth maps or intermediate captions. + +We randomly sample 3 seeds for both our method and baseline methods. In our experiments, we use different classifier-free guidance scores ( $g_{T}$ and $g_{V}$ in Sec. 3.2) for different skills. Specifically, we set $g_{T} = 14.5$ and $g_{V} = 1.5$ for grounding and reasoning segmentation, while for other editing skills, we use $g_{T} = 10.5$ and $g_{V} = 2.0$ . + +For baseline methods, we adopt their default settings (e.g., diffusion steps, guidance scores, frame numbers) as provided in their GitHub repositories. To ensure fair evaluation, we sample the same eight frames from each method's video editing results. + +For alignment and smoothness metrics, we use CLIP-B/32 to measure text-image and image-image similarity, averaging across all frames to obtain video-level scores. For detection metrics, we use GroundingDINO (Swin-T OGC) to detect target objects frame by frame, averaging confidence scores across all frames for the final video-level metric. + +For the removal task, where fewer detected objects and lower alignment with the original text prompt are desired, we compute alignment and detection metrics as $1 -$ value. + +We compare the model judged best for each video sample. The agreement between human and MLLM judgments is 0.74, whereas the agreement between human and CLIP is only 0.45. We conducted 5 times of the MLLM evaluation and took an average. + +
MethodsGroundingReasoning
JFJ&FJFJ&F
Segmentation Models
HTR [50]47.1147.6047.3520.0128.0224.01
VideoLISA [1]53.2354.3753.8038.4839.2038.84
MoRA [12]57.7353.6355.6838.9237.4840.36
Generative Editing Models
InstructDiff [19]19.8812.8116.3514.028.0711.05
InsV2V [9]13.8917.3715.6316.8910.4513.67
VEGGIE (Ours)37.7421.8329.7922.5315.9719.25
 + +Table 4. Comparison of video concept grounding and reasoning segmentation tasks with other instructional generative models and expert segmentation models. + +# 6.2. Data Collection Details + +As mentioned in the earlier Sec. 3.3, beyond collecting existing data, we proposed a novel data synthesis pipeline to generate instructional video data by animating images in the instructional image dataset. + +Specifically, we first select images from Omni-Edit [64], an instructional image editing dataset with carefully designed tasks/skills. + +We first use QWen2-VL [61] to caption the original image and give an animation prompt to animate the image via CogVideoX1.5-I2V [74]. Please refer to Tab. 5 and Tab. 6 for our prompts for caption and animation. After getting the animated video, we utilize AnyV2V [35] to edit the video based on the reference image (edited image from image dataset). The reference image gives a strong prior to maintaining the image dataset's high-quality edit and thus transfers it to the video via the video editing model. + +Next, we filter out videos by evaluating VBenchmark metrics [30], including aesthetic quality, motion smoothness, image quality, subject consistency, and background consistency. We set thresholds at 0.6 for aesthetic quality, 65 for imaging quality, 0.9 for motion smoothness, subject consistency, and background consistency. We provide our generated data visualization in Fig. 9. + +# 6.3. More Quantitative Results & Discussion + +Video Concept Grounding & Reasoning Segmentation We include additional results on video concept grounding and reasoning segmentation in Tab. 4. VEGGIE outperforms the diffusion-based baseline by a significant margin, showcasing its superior ability to accurately locate fine-grained object references and handle complex reasoning tasks. We hypothesize that through grounded generation, VEGGIE demonstrates remarkable precision in concept editing. For example, as shown in Fig. 
11 in the Appendix, VEGGIE can remove the woman without altering the nearby girl. + +![](images/f3c07174590d1a47445555b05acfd26b07ef6ca5174fecceef5bc55c8bdd1138.jpg) +Figure 8. t-SNE Visualization of different task query distribution. Different colors represent different tasks/skills. Best view in color. + +# 6.4. Limitation and Future Works + +Our current method, VEGGIE, is built upon Stable-Diffusion 1.5, which inevitably constrains its editing quality compared to cutting-edge video generation models that rely on DiT or flow-based architectures. In addition, the video outputs we produce are relatively short, lagging behind some recent state-of-the-art methods in terms of length and temporal consistency. Furthermore, we observe increased editing artifacts when incorporating large amounts of grounding data, suggesting that multi-task data mixture strategies play a key role in maintaining high-quality edits. + +Despite these limitations, our results demonstrate promising directions for improvement in terms of model design, data curation, and evaluation. Future work could explore integrating more advanced base architectures (e.g., DiT [34, 74] or flow-based models), extending the maximum video duration, developing more systematic data [28] with more advanced method [46] and carefully designed mixture strategies to balance fidelity and flexibility, and conducting scalable training. We hope our findings will inspire further research into these directions, pushing the boundaries of instructional video editing performance. + +Task Query Visualization & Analysis via t-SNE. To analyze task/skill correlations, we project their grounded queries into lower-dimensional spaces using PCA and t-SNE. As shown in Fig. 8, distinct clusters form for each category (e.g., Addition), indicating effective differentiation by the model. Reasoning and Grounding appear together on the right. It may be because they both require cognitive/semantic understanding or logical reference. 
Color, Env, and Change clusters are closer to each other, indicating that the model views them as similar operations focusing on changing different visual attributes. Style lies in the lower-left region but remains relatively close to Color, Env, and Change. This proximity may reflect that "stylization" is conceptually similar to these visual attribute tasks, although it targets different + +Table 5. Qwen2-VL prompt for Image caption. + +
Please describe this image shortly, try to capture main details in the image. +Here are some examples of image caption styles: +1. A Couple In A Public Display Of Affection +2. A kitten turning its head on a wooden floor +3. An Old Man Doing Exercises For The Body And Mind +4. Man Walking +Now, please describe the given image briefly in one sentence, please do not say something like 'The image shows...' or 'The image depicts...'
+ +transformations. Removal stands apart on the top, especially distant from Addition, indicating the model perceives them as distinct rather than inverse operations. In contrast, Addition lies closer to tasks like Reasoning and Grounding. It suggests that the act of adding elements may rely on similar semantic or referential processes (e.g., deciding what to add and how to reference the newly added element). + +# 6.5. Extra Visualization + +We provide extra visualization in Figs. 10 to 16 + +![](images/8e960bad5a16bce703b5fcb70a569c7d16770762185ba1588063f1036611943d.jpg) + +![](images/735c86bcdb4d0957b6364d6294b5509271fe312247cae910fd452e2351e8cbed.jpg) + +![](images/df0f11d18bdfdc3b9c21b6fa529d5ccddccb0ca06469997d62359d8a0bfa75fb.jpg) + +![](images/b679e36fe777360ebc2be8336cb67c250d84b550eb22f8ebeb7bb187463a7703.jpg) +Instruction: transform the setting to a snowy scene + +![](images/f3bc012cf2e6a9d62134a6b3ee005b89853723b1c73534b08c36befb6f9ba667.jpg) + +![](images/96751fe61944764a81c1e8306ccadab6aa99b31163bed4354804d679da1bd518.jpg) +Figure 9. Examples of our generated instructional video editing data. + +Table 6. Qwen2-VL prompt for generating animation prompt. + +I want to animate this image using an Image-Text-to-Video model. Your task is to generate a detailed and reasonable text prompt that describes how the image should be animated. + +Guidelines: + +1. Clarity & Realism - The animation description should be logical based on the given image, ensuring the movement makes sense for the scene. +2. Short & Vivid Description - Use expressive language to guide the animation model effectively, ensuring high-quality and visually engaging results. + +Ensure that your animation prompt aligns with the content of the provided image and describes a visually compelling motion sequence. + +Do not output animation prompts that contain objects/scenes not included in the given image. + +Make sure the prompt is short in 1-2 sentences. 
+ +![](images/cff8fe39e7c04b9e27b42c5b598f301ac5e411f00152bdff7bf3bb1b67d1da5a.jpg) +Figure 10. More Examples of Concept Addition. + +Table 7. GPT-4o prompt for MLLM-as-a-Judge for automatic instructional video editing evaluation. + +# User + +You are an evaluator for instructional video editing tasks. Your job is to assess how well the edited video fulfills the user's specific instructions. + +I will provide: + +1. The original video (first GIF) +2. The edited video (second GIF) +3. The user's instruction: [user instruction] + +Please evaluate the editing result using the following format: + +INSTRUCTION: [Repeat the user's instruction] + +EVALUATION: + +- Accuracy score (1-10): [Your score] +- Quality score (1-10): [Your score] +- Appropriateness score (1-10): [Your score] +- Overall score (1-10): [Your final score] + +EXPLANATION: [Provide a brief justification for your scores, highlighting specific strengths and weaknesses of the edit] + +RECOMMENDATION: [Optional suggestions for improvement] + +When scoring, consider: + +- Accuracy: Does the edit precisely follow the given instruction? - Quality: Is the edit visually seamless and natural-looking? - Appropriateness: Does the edit maintain coherence with the original video context? + +The overall scale is: + +1-3: Poor - Major issues with the edit +4-6: Acceptable - Follows instruction but with noticeable flaws +7-8: Good - Clear, effective edit with minor issues +9-10: Excellent - Flawless execution of the instruction + +# Assistant + +Scores, Explanation, Recommendation + +![](images/898b5c247cabf3586adb7f29b21e553779aa2d29ee0be6fa5155a19233834d09.jpg) +Figure 11. More Examples of Concept Removal. + +![](images/a2d9cf4b40f420ebc7ee5761705d95f8a45f39fffee54758e9f4300b770235fa.jpg) + +![](images/dff3a5c584a585ae6935604dbf379d8dda7efa447ac5855c81317330be2c93ed.jpg) + +![](images/3f4b79d95de3df3c6f13c06609009e729bf430bbafcfbde1a4c1fa43b302c53d.jpg) +Figure 12. More Examples of Object Changes. 
+ +![](images/c8db04c3c4081c16c71effca6b95f22414b3b755bb96289cd405d314e86121e6.jpg) + +![](images/f34bb0162cf39f27cfe5a820676923452b4f9c7ec312b17e8153f0754ef01982.jpg) + +![](images/65f4482a764a319b6cd11cf875fc5fe37281b580a14bcf9f6fbdb6669e30fcb8.jpg) +Figure 13. More Examples of Stylization. + +![](images/859b2b54f7e4ea3e5f151b40abda2a47c11a5e041a6aaf089e7c57ae94b7af8c.jpg) + +![](images/eb5bbecd6f86f818c07a1fe8931a93f16ea43d8ea49eec4634c2689b86050898.jpg) + +![](images/b124f1339966a72d4c67b2ab4b76301ff8a7af2fc60b52d2755cbe999b6fb7fa.jpg) +Figure 14. More Examples of Environment and Background Editing. + +![](images/b8d1ff15c2cfbed82a207580b53d1b76e1ed13920065e6313097883e59d67805.jpg) + +![](images/eddea6f13444e5092d3c40d6739d4fdaba2cf2dc9765b24e0c382a4a6bc599e4.jpg) + +![](images/42d74fb3a6be2224e7ee95810b48350e42a1b56d9e77e28004e90ee22ded0ed0.jpg) +Figure 15. More Examples of Visual Features Editing. + +![](images/4328af1b9cf4afc2d45f60b901b77275b097f605ff43bd121e485851d5fd828d.jpg) +Figure 16. More Examples of Object Grounding. + +![](images/41ce929097f8588a945d953564c371ef38f463eb2eb718e38767cc9dce0eee6b.jpg) +Figure 17. More Examples of Object Reasoning Segmentation. 
\ No newline at end of file diff --git a/data/2025/2503_14xxx/2503.14350/images/00f32512d007bed96281648f160b4774880b37f5f9ff4dfdf7bdfebeb8280ea6.jpg b/data/2025/2503_14xxx/2503.14350/images/00f32512d007bed96281648f160b4774880b37f5f9ff4dfdf7bdfebeb8280ea6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9987112c19c71d30a4588b876dce5a7439ea9367 --- /dev/null +++ b/data/2025/2503_14xxx/2503.14350/images/00f32512d007bed96281648f160b4774880b37f5f9ff4dfdf7bdfebeb8280ea6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e901ae4909fb72ab24be18983e191f721330fad3a00eb4bde9208614dd012efe +size 60895 diff --git a/data/2025/2503_14xxx/2503.14350/images/06c86a484814fdc576446edcb73b36614248174c9f2efbcb21db237e6b677342.jpg b/data/2025/2503_14xxx/2503.14350/images/06c86a484814fdc576446edcb73b36614248174c9f2efbcb21db237e6b677342.jpg new file mode 100644 index 0000000000000000000000000000000000000000..304c56c8bfcae742293dd571b2fd021489f29c72 --- /dev/null +++ b/data/2025/2503_14xxx/2503.14350/images/06c86a484814fdc576446edcb73b36614248174c9f2efbcb21db237e6b677342.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:80fd471fe7a50d6d643638bea0d118f11272f8959e37436adb390e885f7fcee3 +size 65292 diff --git a/data/2025/2503_14xxx/2503.14350/images/08127c4d973e01c341e464b900f712f62c230d2e6ec89cf2da94db90ac35387d.jpg b/data/2025/2503_14xxx/2503.14350/images/08127c4d973e01c341e464b900f712f62c230d2e6ec89cf2da94db90ac35387d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9d190671bcc420f7f4e845f5cab9be1f0ceb0cd7 --- /dev/null +++ b/data/2025/2503_14xxx/2503.14350/images/08127c4d973e01c341e464b900f712f62c230d2e6ec89cf2da94db90ac35387d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:085ac8b0e6021ed43b28b506dad22291f5fad890fa8bd7f90ea75f7230cfba84 +size 11783 diff --git 
a/data/2025/2503_14xxx/2503.14350/images/15a599d498c84b1cb1fee7c11dacd6d52aee7bacf1257730ce3353e7016fc924.jpg b/data/2025/2503_14xxx/2503.14350/images/15a599d498c84b1cb1fee7c11dacd6d52aee7bacf1257730ce3353e7016fc924.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9580d54da9d88a0ecfae3fffd9e94dedfb0caac6 --- /dev/null +++ b/data/2025/2503_14xxx/2503.14350/images/15a599d498c84b1cb1fee7c11dacd6d52aee7bacf1257730ce3353e7016fc924.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f58c954ef221020b6de263145fecd7f4c98baae1869fd3e102302da58851d046 +size 102152 diff --git a/data/2025/2503_14xxx/2503.14350/images/248f43d2a8a5d0f6aaf3651344b22c259b8cc08d13d17fa22f292d515bdb478e.jpg b/data/2025/2503_14xxx/2503.14350/images/248f43d2a8a5d0f6aaf3651344b22c259b8cc08d13d17fa22f292d515bdb478e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6d997ba231a734bd36c44375d0079a9834cea9b7 --- /dev/null +++ b/data/2025/2503_14xxx/2503.14350/images/248f43d2a8a5d0f6aaf3651344b22c259b8cc08d13d17fa22f292d515bdb478e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:28f2880d936d1009d51947ebd20916cbf2a692ad44f7db458c6d99feb9fc0fbb +size 154982 diff --git a/data/2025/2503_14xxx/2503.14350/images/26fbd8a917fd53e373c29389d3c2a59e6baaf3dc3521674392c00f0a449b214f.jpg b/data/2025/2503_14xxx/2503.14350/images/26fbd8a917fd53e373c29389d3c2a59e6baaf3dc3521674392c00f0a449b214f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b6ab3ec907ef9722d977bc816fdfacd4b88510c3 --- /dev/null +++ b/data/2025/2503_14xxx/2503.14350/images/26fbd8a917fd53e373c29389d3c2a59e6baaf3dc3521674392c00f0a449b214f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:92a489ffaf94302dcc533677bd790c61567193469562eeb7f5cd6f348a2b75a9 +size 39959 diff --git a/data/2025/2503_14xxx/2503.14350/images/34b49ae75a067c46555833a6dcf6b163d985c2eb0b4af622d4cafc6f1c4f5a16.jpg 
b/data/2025/2503_14xxx/2503.14350/images/34b49ae75a067c46555833a6dcf6b163d985c2eb0b4af622d4cafc6f1c4f5a16.jpg new file mode 100644 index 0000000000000000000000000000000000000000..168df65406f1daa9ca97afb199fd48a69a590c7f --- /dev/null +++ b/data/2025/2503_14xxx/2503.14350/images/34b49ae75a067c46555833a6dcf6b163d985c2eb0b4af622d4cafc6f1c4f5a16.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:01fccc4da1e01b900e7a6b1d6d0142e906480d115d4b31ec8fa85b85eb2d44ce +size 62381 diff --git a/data/2025/2503_14xxx/2503.14350/images/3f4b79d95de3df3c6f13c06609009e729bf430bbafcfbde1a4c1fa43b302c53d.jpg b/data/2025/2503_14xxx/2503.14350/images/3f4b79d95de3df3c6f13c06609009e729bf430bbafcfbde1a4c1fa43b302c53d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..93732b6f977ba8f89e6e3d82384e32781d68c1b0 --- /dev/null +++ b/data/2025/2503_14xxx/2503.14350/images/3f4b79d95de3df3c6f13c06609009e729bf430bbafcfbde1a4c1fa43b302c53d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b27639f88700e82ab4a142a13798d59bd7c6c8542479dad2d71ed92adbf665c2 +size 98599 diff --git a/data/2025/2503_14xxx/2503.14350/images/41ce929097f8588a945d953564c371ef38f463eb2eb718e38767cc9dce0eee6b.jpg b/data/2025/2503_14xxx/2503.14350/images/41ce929097f8588a945d953564c371ef38f463eb2eb718e38767cc9dce0eee6b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ee37495a4ba5f63aac1964bbf8a0953e887450a9 --- /dev/null +++ b/data/2025/2503_14xxx/2503.14350/images/41ce929097f8588a945d953564c371ef38f463eb2eb718e38767cc9dce0eee6b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c55d9a8e83c6aaec17015d32abf26c804e3f25e96245327d63b87d6c4fd85081 +size 160950 diff --git a/data/2025/2503_14xxx/2503.14350/images/42d74fb3a6be2224e7ee95810b48350e42a1b56d9e77e28004e90ee22ded0ed0.jpg b/data/2025/2503_14xxx/2503.14350/images/42d74fb3a6be2224e7ee95810b48350e42a1b56d9e77e28004e90ee22ded0ed0.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..7dbb3d7af1c942dc4f80eeb37f2db9c8d74bfa92 --- /dev/null +++ b/data/2025/2503_14xxx/2503.14350/images/42d74fb3a6be2224e7ee95810b48350e42a1b56d9e77e28004e90ee22ded0ed0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0ce7c9d86d0323bf8c834d983996fb276e822d7e3ad07753307cbc0283bf9c07 +size 267116 diff --git a/data/2025/2503_14xxx/2503.14350/images/4328af1b9cf4afc2d45f60b901b77275b097f605ff43bd121e485851d5fd828d.jpg b/data/2025/2503_14xxx/2503.14350/images/4328af1b9cf4afc2d45f60b901b77275b097f605ff43bd121e485851d5fd828d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..82389125ec8b64a66fb96d5f05c0cc92ab8bb514 --- /dev/null +++ b/data/2025/2503_14xxx/2503.14350/images/4328af1b9cf4afc2d45f60b901b77275b097f605ff43bd121e485851d5fd828d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1c379316d89d59b3ee660fe2b4718ed66b493e42a861d6ef6bfdbe1959a4ee48 +size 261553 diff --git a/data/2025/2503_14xxx/2503.14350/images/63aee7531a8352ad2237d744edc2c4244b764cdceaf3ac7d28b1d1aaff353c32.jpg b/data/2025/2503_14xxx/2503.14350/images/63aee7531a8352ad2237d744edc2c4244b764cdceaf3ac7d28b1d1aaff353c32.jpg new file mode 100644 index 0000000000000000000000000000000000000000..096f21e66ac9624c45b5ba9469feecf42ac57e9d --- /dev/null +++ b/data/2025/2503_14xxx/2503.14350/images/63aee7531a8352ad2237d744edc2c4244b764cdceaf3ac7d28b1d1aaff353c32.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f5eeec130b8def5a66554481b414cb2896b538eea09781db91c900437101b24d +size 2954 diff --git a/data/2025/2503_14xxx/2503.14350/images/65f4482a764a319b6cd11cf875fc5fe37281b580a14bcf9f6fbdb6669e30fcb8.jpg b/data/2025/2503_14xxx/2503.14350/images/65f4482a764a319b6cd11cf875fc5fe37281b580a14bcf9f6fbdb6669e30fcb8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5d21c7c7b2558699b6994789e2e1518d940ba5a4 --- /dev/null +++ 
b/data/2025/2503_14xxx/2503.14350/images/65f4482a764a319b6cd11cf875fc5fe37281b580a14bcf9f6fbdb6669e30fcb8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fb38fec97882bfc50a29bd5bc37b37e2e18112cceaf3ec4dbcbd380fcccde957 +size 77797 diff --git a/data/2025/2503_14xxx/2503.14350/images/72a21b6c9815d103f96dd8cd3baa1af72c116e890688bd264c8b890c2b98b54f.jpg b/data/2025/2503_14xxx/2503.14350/images/72a21b6c9815d103f96dd8cd3baa1af72c116e890688bd264c8b890c2b98b54f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a04255d197ac6a17a72216d203d8bc859809ed07 --- /dev/null +++ b/data/2025/2503_14xxx/2503.14350/images/72a21b6c9815d103f96dd8cd3baa1af72c116e890688bd264c8b890c2b98b54f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d437178b2bc40a41512ee7129a3bed98e5d1e86f149ee947263afd9d25545ccf +size 17118 diff --git a/data/2025/2503_14xxx/2503.14350/images/735c86bcdb4d0957b6364d6294b5509271fe312247cae910fd452e2351e8cbed.jpg b/data/2025/2503_14xxx/2503.14350/images/735c86bcdb4d0957b6364d6294b5509271fe312247cae910fd452e2351e8cbed.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ce272b43cecaf465540d17d16cd9c9640a488520 --- /dev/null +++ b/data/2025/2503_14xxx/2503.14350/images/735c86bcdb4d0957b6364d6294b5509271fe312247cae910fd452e2351e8cbed.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3730600e54ebc82ddd88f5737099648b308f43bf21ac638891443b6164c166ac +size 75344 diff --git a/data/2025/2503_14xxx/2503.14350/images/769f5b258ad2a40f74015e9fe4b7b6b606d38b4bc700fb57fbc9df38e8c93b20.jpg b/data/2025/2503_14xxx/2503.14350/images/769f5b258ad2a40f74015e9fe4b7b6b606d38b4bc700fb57fbc9df38e8c93b20.jpg new file mode 100644 index 0000000000000000000000000000000000000000..301ad683087c28343d904de7b31b683cc5794839 --- /dev/null +++ b/data/2025/2503_14xxx/2503.14350/images/769f5b258ad2a40f74015e9fe4b7b6b606d38b4bc700fb57fbc9df38e8c93b20.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:5f7a9d71bff710703ccf1495e3c94f6b0805d5d791669a8c238ff1e3f35c3623 +size 55964 diff --git a/data/2025/2503_14xxx/2503.14350/images/7fa0623cc60d54f9ad0f0fcf8df5957362b4f09b3621f95288c1e4201f7170bf.jpg b/data/2025/2503_14xxx/2503.14350/images/7fa0623cc60d54f9ad0f0fcf8df5957362b4f09b3621f95288c1e4201f7170bf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e39fcbe93169fe4ef8b20d9311d59c4075f76a18 --- /dev/null +++ b/data/2025/2503_14xxx/2503.14350/images/7fa0623cc60d54f9ad0f0fcf8df5957362b4f09b3621f95288c1e4201f7170bf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6bfe48423ab67e1bc7b71996afc5e78862baa8c30afb82988cd120b088cb531f +size 72816 diff --git a/data/2025/2503_14xxx/2503.14350/images/82fc676d2e8301dd302c138b4fb2e8579575fc1c2873220621aa9287a9876ed6.jpg b/data/2025/2503_14xxx/2503.14350/images/82fc676d2e8301dd302c138b4fb2e8579575fc1c2873220621aa9287a9876ed6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..88b4d9643e5c8ef8189cd657a03a255401bb7c69 --- /dev/null +++ b/data/2025/2503_14xxx/2503.14350/images/82fc676d2e8301dd302c138b4fb2e8579575fc1c2873220621aa9287a9876ed6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:311a3d08de076aab40f37d6ae60941ac734bd9d51edf6d201d43624d9ed51a7f +size 2758 diff --git a/data/2025/2503_14xxx/2503.14350/images/859b2b54f7e4ea3e5f151b40abda2a47c11a5e041a6aaf089e7c57ae94b7af8c.jpg b/data/2025/2503_14xxx/2503.14350/images/859b2b54f7e4ea3e5f151b40abda2a47c11a5e041a6aaf089e7c57ae94b7af8c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9d908ac202a31989ee5f785cf322fe0a5dbce1b9 --- /dev/null +++ b/data/2025/2503_14xxx/2503.14350/images/859b2b54f7e4ea3e5f151b40abda2a47c11a5e041a6aaf089e7c57ae94b7af8c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:371521a973b57f2209941023ed2791d09a80845d54f55e4eaacdcfd5d5267d12 +size 90733 diff --git 
a/data/2025/2503_14xxx/2503.14350/images/898b5c247cabf3586adb7f29b21e553779aa2d29ee0be6fa5155a19233834d09.jpg b/data/2025/2503_14xxx/2503.14350/images/898b5c247cabf3586adb7f29b21e553779aa2d29ee0be6fa5155a19233834d09.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ade179a660cb0b69d179e264810c99bcd1fe5450 --- /dev/null +++ b/data/2025/2503_14xxx/2503.14350/images/898b5c247cabf3586adb7f29b21e553779aa2d29ee0be6fa5155a19233834d09.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0127e4b300fdaaa8219358e3778052c0b8b69a08554bf95f539d35ee8bf17a2d +size 78932 diff --git a/data/2025/2503_14xxx/2503.14350/images/8e960bad5a16bce703b5fcb70a569c7d16770762185ba1588063f1036611943d.jpg b/data/2025/2503_14xxx/2503.14350/images/8e960bad5a16bce703b5fcb70a569c7d16770762185ba1588063f1036611943d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..01a995e9c5d8f08056878441b95f8002dcbb7435 --- /dev/null +++ b/data/2025/2503_14xxx/2503.14350/images/8e960bad5a16bce703b5fcb70a569c7d16770762185ba1588063f1036611943d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0e4800b8a8657439a89fa6da200cb5037fb1c39339025d211a770ce3036dacb6 +size 71787 diff --git a/data/2025/2503_14xxx/2503.14350/images/8ebc41e6884b7f069ed5ee7ac23e1d302d303b3d20ae0c2837436d84bb8719e2.jpg b/data/2025/2503_14xxx/2503.14350/images/8ebc41e6884b7f069ed5ee7ac23e1d302d303b3d20ae0c2837436d84bb8719e2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..45833174ba416d5f4c65554ffb34f74812c27b21 --- /dev/null +++ b/data/2025/2503_14xxx/2503.14350/images/8ebc41e6884b7f069ed5ee7ac23e1d302d303b3d20ae0c2837436d84bb8719e2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6a6a80019f920d89919a0af746c475bccc5dcb49e57e210964ba9e173efeea57 +size 25770 diff --git a/data/2025/2503_14xxx/2503.14350/images/96751fe61944764a81c1e8306ccadab6aa99b31163bed4354804d679da1bd518.jpg 
b/data/2025/2503_14xxx/2503.14350/images/96751fe61944764a81c1e8306ccadab6aa99b31163bed4354804d679da1bd518.jpg new file mode 100644 index 0000000000000000000000000000000000000000..164f423e84c3c8d120c2b1e2960420e2c31e3ab7 --- /dev/null +++ b/data/2025/2503_14xxx/2503.14350/images/96751fe61944764a81c1e8306ccadab6aa99b31163bed4354804d679da1bd518.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1ceee1d60e5f461060cb66916c73616225f7e491a80c785beaa06a287ad38112 +size 29582 diff --git a/data/2025/2503_14xxx/2503.14350/images/a2d9cf4b40f420ebc7ee5761705d95f8a45f39fffee54758e9f4300b770235fa.jpg b/data/2025/2503_14xxx/2503.14350/images/a2d9cf4b40f420ebc7ee5761705d95f8a45f39fffee54758e9f4300b770235fa.jpg new file mode 100644 index 0000000000000000000000000000000000000000..48e9e59b569e18d18961c5fb6e176230d6fbb5b2 --- /dev/null +++ b/data/2025/2503_14xxx/2503.14350/images/a2d9cf4b40f420ebc7ee5761705d95f8a45f39fffee54758e9f4300b770235fa.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:232cef1ff7a39905c091ac4b431621a30582376e5941e7f05a77f7cc59d37d0a +size 83584 diff --git a/data/2025/2503_14xxx/2503.14350/images/a52aa43228df85914871541a07c98e72868322f9fb8f14dbe66c5c1a9614cc60.jpg b/data/2025/2503_14xxx/2503.14350/images/a52aa43228df85914871541a07c98e72868322f9fb8f14dbe66c5c1a9614cc60.jpg new file mode 100644 index 0000000000000000000000000000000000000000..acad0e897fd92393afc323e96b86f00c390457f0 --- /dev/null +++ b/data/2025/2503_14xxx/2503.14350/images/a52aa43228df85914871541a07c98e72868322f9fb8f14dbe66c5c1a9614cc60.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6ff728c4a898e4d20f6bc71dc1ec1276553d3eb49e0b98c9818d3d6e80eddf2b +size 32583 diff --git a/data/2025/2503_14xxx/2503.14350/images/a5d3f638a01dbbb52a6cc9b53f3d35262c056bd0b75f46a28b987aa706886c70.jpg b/data/2025/2503_14xxx/2503.14350/images/a5d3f638a01dbbb52a6cc9b53f3d35262c056bd0b75f46a28b987aa706886c70.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..b5c42f385c513d0a011f80971e7cffc80670340f --- /dev/null +++ b/data/2025/2503_14xxx/2503.14350/images/a5d3f638a01dbbb52a6cc9b53f3d35262c056bd0b75f46a28b987aa706886c70.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:61dc45b66f7d2dd0ef45206ea5cacbbc3c23f5d997100aafbd1a04d20b70fc9a +size 1165 diff --git a/data/2025/2503_14xxx/2503.14350/images/b124f1339966a72d4c67b2ab4b76301ff8a7af2fc60b52d2755cbe999b6fb7fa.jpg b/data/2025/2503_14xxx/2503.14350/images/b124f1339966a72d4c67b2ab4b76301ff8a7af2fc60b52d2755cbe999b6fb7fa.jpg new file mode 100644 index 0000000000000000000000000000000000000000..aa9420b6724616b83077416ba51be310312f84dd --- /dev/null +++ b/data/2025/2503_14xxx/2503.14350/images/b124f1339966a72d4c67b2ab4b76301ff8a7af2fc60b52d2755cbe999b6fb7fa.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fa493cc5bac2cb81fa500dc08bd3a1041bc7fc10f848fbdfbc7cf74f9f1f450b +size 98443 diff --git a/data/2025/2503_14xxx/2503.14350/images/b55dc2a0ef4463e2084d920193d930a31ab28d573d9f0d5ccfef321fdc867476.jpg b/data/2025/2503_14xxx/2503.14350/images/b55dc2a0ef4463e2084d920193d930a31ab28d573d9f0d5ccfef321fdc867476.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bfa77ed0c4dd67b36534bdd543d40de844f19b6f --- /dev/null +++ b/data/2025/2503_14xxx/2503.14350/images/b55dc2a0ef4463e2084d920193d930a31ab28d573d9f0d5ccfef321fdc867476.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a91b7acd8eb89079fa59d86620099d02012856d4ceebeb1d4102e5a46b03aff2 +size 149691 diff --git a/data/2025/2503_14xxx/2503.14350/images/b679e36fe777360ebc2be8336cb67c250d84b550eb22f8ebeb7bb187463a7703.jpg b/data/2025/2503_14xxx/2503.14350/images/b679e36fe777360ebc2be8336cb67c250d84b550eb22f8ebeb7bb187463a7703.jpg new file mode 100644 index 0000000000000000000000000000000000000000..62377b6165b2bb4122a52c476e77b716f5993679 --- /dev/null +++ 
b/data/2025/2503_14xxx/2503.14350/images/b679e36fe777360ebc2be8336cb67c250d84b550eb22f8ebeb7bb187463a7703.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b2eab4ab480d0fff3ed2154373523846f06beffd7d6600060f128e0d7f9c24a1 +size 64759 diff --git a/data/2025/2503_14xxx/2503.14350/images/b8d1ff15c2cfbed82a207580b53d1b76e1ed13920065e6313097883e59d67805.jpg b/data/2025/2503_14xxx/2503.14350/images/b8d1ff15c2cfbed82a207580b53d1b76e1ed13920065e6313097883e59d67805.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c51f5cc6f1559860a905a0017e49260c604ea04e --- /dev/null +++ b/data/2025/2503_14xxx/2503.14350/images/b8d1ff15c2cfbed82a207580b53d1b76e1ed13920065e6313097883e59d67805.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2a8d6bb2eb89998d0dfd832a5c5407a46bbce37d07bd5120bb1e38cb0ed0acf5 +size 80666 diff --git a/data/2025/2503_14xxx/2503.14350/images/c6886a9d5898513643b38cb38f91e2f9594828a09e7f5582e736d37ec64105ca.jpg b/data/2025/2503_14xxx/2503.14350/images/c6886a9d5898513643b38cb38f91e2f9594828a09e7f5582e736d37ec64105ca.jpg new file mode 100644 index 0000000000000000000000000000000000000000..50b3084344a16e5c8462b5add7414767de741f2e --- /dev/null +++ b/data/2025/2503_14xxx/2503.14350/images/c6886a9d5898513643b38cb38f91e2f9594828a09e7f5582e736d37ec64105ca.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0cb2e3ad035ef888750b9cd6ce05d59bd3b2d297dbd4361ea50e2801b95242c2 +size 62195 diff --git a/data/2025/2503_14xxx/2503.14350/images/c8db04c3c4081c16c71effca6b95f22414b3b755bb96289cd405d314e86121e6.jpg b/data/2025/2503_14xxx/2503.14350/images/c8db04c3c4081c16c71effca6b95f22414b3b755bb96289cd405d314e86121e6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..65ba3548e90beeacf5814bded38b87b30a542bd2 --- /dev/null +++ b/data/2025/2503_14xxx/2503.14350/images/c8db04c3c4081c16c71effca6b95f22414b3b755bb96289cd405d314e86121e6.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:def6024e4fcb5cffe972e88e89e4fb6694c52ad2db5b87c4f9190c08e52be11e +size 96658 diff --git a/data/2025/2503_14xxx/2503.14350/images/cedd9c5e501fb9c67b83bffd4cdbec3face648d9021835024e06f5277e66ca55.jpg b/data/2025/2503_14xxx/2503.14350/images/cedd9c5e501fb9c67b83bffd4cdbec3face648d9021835024e06f5277e66ca55.jpg new file mode 100644 index 0000000000000000000000000000000000000000..10147e0c9c58ed6dcf19e53652f74bfc56c62590 --- /dev/null +++ b/data/2025/2503_14xxx/2503.14350/images/cedd9c5e501fb9c67b83bffd4cdbec3face648d9021835024e06f5277e66ca55.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6ffed8060113177df1a3482c30db1a2c07db7e1a030963afa42990608a4acb18 +size 75723 diff --git a/data/2025/2503_14xxx/2503.14350/images/cff8fe39e7c04b9e27b42c5b598f301ac5e411f00152bdff7bf3bb1b67d1da5a.jpg b/data/2025/2503_14xxx/2503.14350/images/cff8fe39e7c04b9e27b42c5b598f301ac5e411f00152bdff7bf3bb1b67d1da5a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f8831e127769aaac4d39a723ed7335c4fb9cfa3e --- /dev/null +++ b/data/2025/2503_14xxx/2503.14350/images/cff8fe39e7c04b9e27b42c5b598f301ac5e411f00152bdff7bf3bb1b67d1da5a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:731d3d5c060b66ab03b6f32b7e11493cd8dd7489af1ec0649b51723b7dd95437 +size 284763 diff --git a/data/2025/2503_14xxx/2503.14350/images/d119609c69d9e8f8e016ca37c53edbeabac37f9d31df9ffab230fed4f1bd87fa.jpg b/data/2025/2503_14xxx/2503.14350/images/d119609c69d9e8f8e016ca37c53edbeabac37f9d31df9ffab230fed4f1bd87fa.jpg new file mode 100644 index 0000000000000000000000000000000000000000..93a20d9305faea407793833a6a144c76f22e1600 --- /dev/null +++ b/data/2025/2503_14xxx/2503.14350/images/d119609c69d9e8f8e016ca37c53edbeabac37f9d31df9ffab230fed4f1bd87fa.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d4a83dd07b614c65975e56ecc32a44fb8f6f757ff6dbb97749a0c403c6651bae +size 58419 diff --git 
a/data/2025/2503_14xxx/2503.14350/images/d5a4bed054201a04c482dc9e52feec17fb85da977718e3c6b1ff3aedd5e32725.jpg b/data/2025/2503_14xxx/2503.14350/images/d5a4bed054201a04c482dc9e52feec17fb85da977718e3c6b1ff3aedd5e32725.jpg new file mode 100644 index 0000000000000000000000000000000000000000..608247b13557b12737bb44259924650d94b78e28 --- /dev/null +++ b/data/2025/2503_14xxx/2503.14350/images/d5a4bed054201a04c482dc9e52feec17fb85da977718e3c6b1ff3aedd5e32725.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8b1f0e969afb3ab4919dc56379ad509691ae648a60c96f2bd20997632d5551d5 +size 58009 diff --git a/data/2025/2503_14xxx/2503.14350/images/df0f11d18bdfdc3b9c21b6fa529d5ccddccb0ca06469997d62359d8a0bfa75fb.jpg b/data/2025/2503_14xxx/2503.14350/images/df0f11d18bdfdc3b9c21b6fa529d5ccddccb0ca06469997d62359d8a0bfa75fb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e4bdb8857ef2bde2fbc0f661d5651fbfc4eea2c5 --- /dev/null +++ b/data/2025/2503_14xxx/2503.14350/images/df0f11d18bdfdc3b9c21b6fa529d5ccddccb0ca06469997d62359d8a0bfa75fb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:983905ab65f0e1d5375342c801d231112916d3cf64c5f2aabf962f01b9af0795 +size 52660 diff --git a/data/2025/2503_14xxx/2503.14350/images/dff3a5c584a585ae6935604dbf379d8dda7efa447ac5855c81317330be2c93ed.jpg b/data/2025/2503_14xxx/2503.14350/images/dff3a5c584a585ae6935604dbf379d8dda7efa447ac5855c81317330be2c93ed.jpg new file mode 100644 index 0000000000000000000000000000000000000000..931d9fee6f8ade6732160ba000a303fff80be153 --- /dev/null +++ b/data/2025/2503_14xxx/2503.14350/images/dff3a5c584a585ae6935604dbf379d8dda7efa447ac5855c81317330be2c93ed.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5f830c84e1675d626261e465bc9614825c7fb1297434e6e12072edc111ab9fa0 +size 72642 diff --git a/data/2025/2503_14xxx/2503.14350/images/e788f4f4b9713f7aa04e3b0723c0bd508449a3c41f0753126f94891fa627fb93.jpg 
b/data/2025/2503_14xxx/2503.14350/images/e788f4f4b9713f7aa04e3b0723c0bd508449a3c41f0753126f94891fa627fb93.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9bfee642802545a0b5e58f2268259066a1adbcd7 --- /dev/null +++ b/data/2025/2503_14xxx/2503.14350/images/e788f4f4b9713f7aa04e3b0723c0bd508449a3c41f0753126f94891fa627fb93.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:14aef40c9a38a21f428755d3e1d0af87b04bdb63e9f75ff2b48fa90a88635f3a +size 60438 diff --git a/data/2025/2503_14xxx/2503.14350/images/eb5bbecd6f86f818c07a1fe8931a93f16ea43d8ea49eec4634c2689b86050898.jpg b/data/2025/2503_14xxx/2503.14350/images/eb5bbecd6f86f818c07a1fe8931a93f16ea43d8ea49eec4634c2689b86050898.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c47d2219d1771904d64539f9eb0d22d9a667e325 --- /dev/null +++ b/data/2025/2503_14xxx/2503.14350/images/eb5bbecd6f86f818c07a1fe8931a93f16ea43d8ea49eec4634c2689b86050898.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9ecef332cbcd9c52eb2f779756518a8a88565983bddc976e8b028bbe9bdebc2e +size 98729 diff --git a/data/2025/2503_14xxx/2503.14350/images/ece2036c5a6eb87cd9ef520c69ff7ac773f46d71db767926851aa27e027f4caa.jpg b/data/2025/2503_14xxx/2503.14350/images/ece2036c5a6eb87cd9ef520c69ff7ac773f46d71db767926851aa27e027f4caa.jpg new file mode 100644 index 0000000000000000000000000000000000000000..289ff04a3e5b0eb100235ba45f3d95ad37019031 --- /dev/null +++ b/data/2025/2503_14xxx/2503.14350/images/ece2036c5a6eb87cd9ef520c69ff7ac773f46d71db767926851aa27e027f4caa.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e16cef73d793e951a36c838f624a544056570563b689d71255632eaa8325ff55 +size 56780 diff --git a/data/2025/2503_14xxx/2503.14350/images/eddea6f13444e5092d3c40d6739d4fdaba2cf2dc9765b24e0c382a4a6bc599e4.jpg b/data/2025/2503_14xxx/2503.14350/images/eddea6f13444e5092d3c40d6739d4fdaba2cf2dc9765b24e0c382a4a6bc599e4.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..2b02f4c98a09930b43e8613e9d820320d6e16d87 --- /dev/null +++ b/data/2025/2503_14xxx/2503.14350/images/eddea6f13444e5092d3c40d6739d4fdaba2cf2dc9765b24e0c382a4a6bc599e4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5317674979eeb00a8d1e68dc4a7e4eea30b8f94ba22751f723f156acb4d8a462 +size 106890 diff --git a/data/2025/2503_14xxx/2503.14350/images/efd4caf16a889c87b28d59462f54107cedcabbbf03aacab091df80dc1de2ac82.jpg b/data/2025/2503_14xxx/2503.14350/images/efd4caf16a889c87b28d59462f54107cedcabbbf03aacab091df80dc1de2ac82.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f960045d06378fa891d230691639f5db68c0b6b5 --- /dev/null +++ b/data/2025/2503_14xxx/2503.14350/images/efd4caf16a889c87b28d59462f54107cedcabbbf03aacab091df80dc1de2ac82.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:56bb6773e0153b4a2d5f21d3b472d7ccdb08016ce38b0f4e5d0a5319ad290588 +size 46061 diff --git a/data/2025/2503_14xxx/2503.14350/images/f34bb0162cf39f27cfe5a820676923452b4f9c7ec312b17e8153f0754ef01982.jpg b/data/2025/2503_14xxx/2503.14350/images/f34bb0162cf39f27cfe5a820676923452b4f9c7ec312b17e8153f0754ef01982.jpg new file mode 100644 index 0000000000000000000000000000000000000000..29fe2bcd879755a5a0520b67d261d2afff2dcba7 --- /dev/null +++ b/data/2025/2503_14xxx/2503.14350/images/f34bb0162cf39f27cfe5a820676923452b4f9c7ec312b17e8153f0754ef01982.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:185b3c0e07ebf9fe495a4835e94df4ba00bfd5505250ae6c4c4283de963bb0f0 +size 100218 diff --git a/data/2025/2503_14xxx/2503.14350/images/f3bc012cf2e6a9d62134a6b3ee005b89853723b1c73534b08c36befb6f9ba667.jpg b/data/2025/2503_14xxx/2503.14350/images/f3bc012cf2e6a9d62134a6b3ee005b89853723b1c73534b08c36befb6f9ba667.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e72e3f177362274e841ce6005c2867ea6fa9af60 --- /dev/null +++ 
b/data/2025/2503_14xxx/2503.14350/images/f3bc012cf2e6a9d62134a6b3ee005b89853723b1c73534b08c36befb6f9ba667.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3fc3644b341bd36afbcb38d287e51ded06dd25c58eda4e1391c2b50bde2d68e1 +size 28778 diff --git a/data/2025/2503_14xxx/2503.14350/images/f3c07174590d1a47445555b05acfd26b07ef6ca5174fecceef5bc55c8bdd1138.jpg b/data/2025/2503_14xxx/2503.14350/images/f3c07174590d1a47445555b05acfd26b07ef6ca5174fecceef5bc55c8bdd1138.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7ba54d91e774464f4d2cf8617f8c2d234b7855f6 --- /dev/null +++ b/data/2025/2503_14xxx/2503.14350/images/f3c07174590d1a47445555b05acfd26b07ef6ca5174fecceef5bc55c8bdd1138.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b63c9fc383bfab6d5527fd588d3a4591a9195064824d5e64ef96da87fc363635 +size 27000 diff --git a/data/2025/2503_14xxx/2503.14350/images/fc7eb7072f3ac10455a05af86de057f09bf8de77d3070a1df4c5cdecfb45e595.jpg b/data/2025/2503_14xxx/2503.14350/images/fc7eb7072f3ac10455a05af86de057f09bf8de77d3070a1df4c5cdecfb45e595.jpg new file mode 100644 index 0000000000000000000000000000000000000000..92bc513d90c7b8f91dcb8ba6a9ef31e128be5a52 --- /dev/null +++ b/data/2025/2503_14xxx/2503.14350/images/fc7eb7072f3ac10455a05af86de057f09bf8de77d3070a1df4c5cdecfb45e595.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d81d7369e839aa8f9021a0c22a6184b360396554772c817a7f6d4fb0d9ea6c98 +size 69925 diff --git a/data/2025/2503_14xxx/2503.14350/images/fd2127f64af65331f9ef175acf2c164e15803575e2f78951103783d194d03106.jpg b/data/2025/2503_14xxx/2503.14350/images/fd2127f64af65331f9ef175acf2c164e15803575e2f78951103783d194d03106.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5fbafeef5cf8fa4cc470c6f8f648db2d5caaf383 --- /dev/null +++ b/data/2025/2503_14xxx/2503.14350/images/fd2127f64af65331f9ef175acf2c164e15803575e2f78951103783d194d03106.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:1c60c7a9e748a9bd605537d625f487e160b994ff7ba0e6a906a2f85ef16b6e48 +size 195443 diff --git a/data/2025/2503_14xxx/2503.14350/layout.json b/data/2025/2503_14xxx/2503.14350/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..d98539f020437ffc1a370a153f85c21ca1ae7fa0 --- /dev/null +++ b/data/2025/2503_14xxx/2503.14350/layout.json @@ -0,0 +1,11797 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 153, + 102, + 457, + 139 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 153, + 102, + 457, + 139 + ], + "spans": [ + { + "bbox": [ + 153, + 102, + 457, + 139 + ], + "type": "text", + "content": "VEGGIE: Instructional Editing and Reasoning Video Concepts with Grounded Generation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 124, + 160, + 486, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 124, + 160, + 486, + 191 + ], + "spans": [ + { + "bbox": [ + 124, + 160, + 486, + 191 + ], + "type": "text", + "content": "Shoubin Yu" + }, + { + "bbox": [ + 124, + 160, + 486, + 191 + ], + "type": "inline_equation", + "content": "^{1,3*}" + }, + { + "bbox": [ + 124, + 160, + 486, + 191 + ], + "type": "text", + "content": " Difan Liu" + }, + { + "bbox": [ + 124, + 160, + 486, + 191 + ], + "type": "inline_equation", + "content": "^{1*}" + }, + { + "bbox": [ + 124, + 160, + 486, + 191 + ], + "type": "text", + "content": " Ziqiao Ma" + }, + { + "bbox": [ + 124, + 160, + 486, + 191 + ], + "type": "inline_equation", + "content": "^{1,2*}" + }, + { + "bbox": [ + 124, + 160, + 486, + 191 + ], + "type": "text", + "content": " Yicong Hong" + }, + { + "bbox": [ + 124, + 160, + 486, + 191 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 124, + 160, + 486, + 191 + ], + "type": "text", + "content": " Yang Zhou" + }, + { + "bbox": [ + 124, + 160, + 486, + 191 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 124, 
+ 160, + 486, + 191 + ], + "type": "text", + "content": " Hao Tan" + }, + { + "bbox": [ + 124, + 160, + 486, + 191 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 124, + 160, + 486, + 191 + ], + "type": "text", + "content": " Joyce Chai" + }, + { + "bbox": [ + 124, + 160, + 486, + 191 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 124, + 160, + 486, + 191 + ], + "type": "text", + "content": " Mohit Bansal" + }, + { + "bbox": [ + 124, + 160, + 486, + 191 + ], + "type": "inline_equation", + "content": "^{3}" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 144, + 202, + 465, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 144, + 202, + 465, + 217 + ], + "spans": [ + { + "bbox": [ + 144, + 202, + 465, + 217 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 144, + 202, + 465, + 217 + ], + "type": "text", + "content": "Adobe Research " + }, + { + "bbox": [ + 144, + 202, + 465, + 217 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 144, + 202, + 465, + 217 + ], + "type": "text", + "content": "University of Michigan " + }, + { + "bbox": [ + 144, + 202, + 465, + 217 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 144, + 202, + 465, + 217 + ], + "type": "text", + "content": "UNC Chapel Hill" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 215, + 219, + 392, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 215, + 219, + 392, + 232 + ], + "spans": [ + { + "bbox": [ + 215, + 219, + 392, + 232 + ], + "type": "text", + "content": "https://veggie-gen.github.io/" + } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 81, + 244, + 525, + 441 + ], + "blocks": [ + { + "bbox": [ + 81, + 244, + 525, + 441 + ], + "lines": [ + { + "bbox": [ + 81, + 244, + 525, + 441 + ], + "spans": [ + { + "bbox": [ + 81, + 244, + 525, + 441 + ], + "type": "image", + "image_path": 
"248f43d2a8a5d0f6aaf3651344b22c259b8cc08d13d17fa22f292d515bdb478e.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 54, + 447, + 555, + 492 + ], + "lines": [ + { + "bbox": [ + 54, + 447, + 555, + 492 + ], + "spans": [ + { + "bbox": [ + 54, + 447, + 555, + 492 + ], + "type": "text", + "content": "Figure 1. We propose VEGGIE, a unified and versatile video generative model that handles various tasks for both video concept grounding and editing according to user instructions. With VEGGIE, users can locate, add, delete, and change concepts in a given video through diverse instruction formats (direct referring instruction or reasoning-demanding questions). Users can also edit videos with multimodal instruction empowered by MLLM, enabling applications like video editing from a reference image." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 151, + 503, + 200, + 516 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 503, + 200, + 516 + ], + "spans": [ + { + "bbox": [ + 151, + 503, + 200, + 516 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 54, + 529, + 297, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 529, + 297, + 685 + ], + "spans": [ + { + "bbox": [ + 54, + 529, + 297, + 685 + ], + "type": "text", + "content": "While recent video diffusion models enable video editing, unifying diverse instructional editing tasks (e.g., add, remove, modify) under a single framework remains a significant challenge. In this paper, we introduce VEGGIE, a Video Editor with Grounded Generation from Instructions, a simple end-to-end framework that unifies video concept editing, grounding, and reasoning based on diverse user instructions. 
Specifically, given a video and text query, VEGGIE first utilizes an MLLM to interpret user intentions in instructions and ground them to the video contexts, generating framespecific grounded task queries for pixel-space responses. A diffusion model then renders these plans and generates edited videos that align with user intent. To support diverse" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 505, + 556, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 505, + 556, + 709 + ], + "spans": [ + { + "bbox": [ + 313, + 505, + 556, + 709 + ], + "type": "text", + "content": "tasks and complex instructions, we employ a curriculum learning strategy: first aligning the MLLM and video diffusion model with large-scale instructional image editing data, followed by end-to-end fine-tuning on high-quality multitask video data. Additionally, we introduce a novel data synthesis pipeline to generate paired instructional video editing data for model training. It transforms static image data into diverse, high-quality video editing samples by leveraging Image-to-Video models to inject dynamics. VEGGIE shows strong performance in instructional video editing with different editing skills, outperforming the best instructional baseline as a versatile model, while other models struggle with multi-tasking. VEGGIE also excels in video object grounding and reasoning segmentation, where other baselines fail. We further reveal how the multiple tasks help each other and highlight promising applications like zero-shot multimodal instructional and in-context video editing." + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 66, + 693, + 135, + 703 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 693, + 135, + 703 + ], + "spans": [ + { + "bbox": [ + 66, + 693, + 135, + 703 + ], + "type": "text", + "content": "*Equal contribution." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 703, + 228, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 703, + 228, + 712 + ], + "spans": [ + { + "bbox": [ + 67, + 703, + 228, + 712 + ], + "type": "inline_equation", + "content": "\\dagger" + }, + { + "bbox": [ + 67, + 703, + 228, + 712 + ], + "type": "text", + "content": " Work done during internship at Adobe Research." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 14, + 220, + 36, + 570 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 220, + 36, + 570 + ], + "spans": [ + { + "bbox": [ + 14, + 220, + 36, + 570 + ], + "type": "text", + "content": "arXiv:2503.14350v3 [cs.CV] 25 Oct 2025" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 71, + 136, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 71, + 136, + 84 + ], + "spans": [ + { + "bbox": [ + 56, + 71, + 136, + 84 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 91, + 297, + 235 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 91, + 297, + 235 + ], + "spans": [ + { + "bbox": [ + 55, + 91, + 297, + 235 + ], + "type": "text", + "content": "Building on the advances in Video Diffusion Models (VIDDMs) [2, 4, 24, 25, 74], video editing methods have emerged as video design tools, allowing users to manipulate video concepts such as adding, removing, altering objects and style translation [20, 53, 62, 79, 85]. To enhance user experiences, instructional video editing methods [54, 86] have been developed, using triples of text prompts, source videos, and target videos for training. 
Due to their limited performance in understanding user intent and multimodal semantics [17], several methods have incorporated multimodal large language models (MLLMs) to handle complex instructions/reasoning [17, 29, 75]." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 235, + 297, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 235, + 297, + 462 + ], + "spans": [ + { + "bbox": [ + 55, + 235, + 297, + 462 + ], + "type": "text", + "content": "However, existing methods fall short of the goal of a simple, versatile video concept editor, facing three primary challenges. First, most methods are not end-to-end, requiring intermediate layout/mask/human or model caption guidance [35, 53, 63, 75], which adds workload on users and disrupts a seamless editing experience. Second, existing pipelines connecting MLLMs to VidDMs require multiple training objectives beyond simple pixel-space diffusion loss, such as language loss [75] or mask losses [66]. This increases optimization difficulty and often requires additional hyperparameter tuning or annotations. Third, existing video editing models, both instructional and non-instructional, struggle with handling other diverse editing tasks, ranging from addition, and deletion to stylization. For example, LGVI [66] fails in global edits such as stylization and color change, while VidToMe [41] struggles with local edits such as adding or removing objects. These methods also struggle with input videos that contain multiple objects or when user instructions require complex reasoning." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 463, + 297, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 463, + 297, + 605 + ], + "spans": [ + { + "bbox": [ + 55, + 463, + 297, + 605 + ], + "type": "text", + "content": "These challenges result from two limitations: First, there is a lack of multitasking fine-tuning on well-curated instructional video editing datasets that span a broad range of skills. Second, models often lack two critical capabilities needed to interpret user intentions and accurately locate concepts: multimodal reasoning to infer the intended modification from the user's instruction; and grounding language to the input video to precisely identify the region or object to be edited. For example, in Figure 1, one can effortlessly locate the girl given \"identify the little girl.\" When asked to \"add a hat to the little girl,\" we intuitively imagine the hat placed on her head from commonsense, even without seeing an actual hat." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 606, + 297, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 606, + 297, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 606, + 297, + 714 + ], + "type": "text", + "content": "To address these challenges, we introduce VEGGIE, a Video Editor with Grounded Generation from Instructions. VEGGIE unifies video concept grounding and editing without relying on additional layout, mask guidance, or intermediate caption [35, 39, 42, 75, 78, 79]. Instead, we formulate the problem as end-to-end grounded generation in pixel space, using only a diffusion loss. 
Specifically, given a video and a text query, VEGGIE first leverages an MLLM to interpret complex instructions, generating frame-wise con" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 313, + 72, + 555, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 555, + 300 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 555, + 300 + ], + "type": "text", + "content": "ditions. Unlike prior methods [43, 75] that use discrete text tokens as conditions, which disconnect the pipeline and block gradient propagation, VEGGIE employs continuous, learnable task query embeddings per frame. This enables end-to-end training and effectively captures grounded task representations for diffusion model conditioning. To handle diverse tasks and accurately interpret complex queries, we employ a curriculum learning strategy that begins by aligning MLLMs with diffusion models using massive paired instructional image editing data and then fine-tuning the model end-to-end on high-quality multitask video data to adapt video. Unlike tool-use methods [16, 35, 75], VEGGIE formulates both video grounding and instructional editing in the same video-to-video task formulation, enabling efficient handling through a unified single model. To further support end-to-end training, we introduce a novel automatic instructional video data generation pipeline that lifts high-quality instructional image editing data into the video domain using image-to-video and video evaluation tools." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 302, + 556, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 302, + 556, + 529 + ], + "spans": [ + { + "bbox": [ + 313, + 302, + 556, + 529 + ], + "type": "text", + "content": "Existing video editing benchmarks do not provide wide and uniform coverage of diverse editing skills [59, 68]. 
To address this gap, we contribute VEG-Bench, an instructional video editing benchmark that spans 8 editing skills: concept addition, removal, object changing, environment background changing, visual feature changing, stylization, object grounding, and reasoning segmentation. Each skill is evaluated using a dedicated suite of metrics. We assess the proposed VEGGIE alongside 6 baselines on VEG-Bench. VEGGIE demonstrates strong performance across diverse editing skills, outperforming the best instructional baseline as a versatile, all-in-one model, while other models struggle with multi-tasking. Additionally, VEGGIE excels in video object grounding and reasoning segmentation tasks, where other baselines fall short. We show further analysis of how multi-task learning enhances our framework and highlight applications such as zero-shot multimodal instructional following (Fig. 2) and few-shot in-context editing (Fig. 3). Our contributions are summarized as follows:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 316, + 534, + 556, + 713 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 316, + 534, + 556, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 534, + 556, + 617 + ], + "spans": [ + { + "bbox": [ + 316, + 534, + 556, + 617 + ], + "type": "text", + "content": "- We propose VEGGIE, an end-to-end model that integrates an MLLM and a VidDM. VEGGIE is a versatile framework that handles diverse instructional requests for editing and grounding various video concepts. Unlike existing work that achieves multitasking via tool use, VEGGIE unifies diverse tasks in a single model, thus simplifying the training with only diffusion loss." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 316, + 618, + 553, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 618, + 553, + 641 + ], + "spans": [ + { + "bbox": [ + 316, + 618, + 553, + 641 + ], + "type": "text", + "content": "- We propose a data synthesis pipeline, scaling high-quality instructional video editing data for future work." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 316, + 642, + 555, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 642, + 555, + 677 + ], + "spans": [ + { + "bbox": [ + 316, + 642, + 555, + 677 + ], + "type": "text", + "content": "- We propose VEG-Bench, an instructional video editing benchmark that spans 8 editing skills with dedicated metrics for each skill." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 316, + 678, + 556, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 678, + 556, + 713 + ], + "spans": [ + { + "bbox": [ + 316, + 678, + 556, + 713 + ], + "type": "text", + "content": "- VEGGIE achieves strong performance across diverse editing skills compared with SoTA methods, and shows potentials for multimodal instruction and in-context following." 
+ } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 99, + 71, + 149, + 113 + ], + "blocks": [ + { + "bbox": [ + 99, + 71, + 149, + 113 + ], + "lines": [ + { + "bbox": [ + 99, + 71, + 149, + 113 + ], + "spans": [ + { + "bbox": [ + 99, + 71, + 149, + 113 + ], + "type": "image", + "image_path": "63aee7531a8352ad2237d744edc2c4244b764cdceaf3ac7d28b1d1aaff353c32.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 103, + 114, + 146, + 122 + ], + "lines": [ + { + "bbox": [ + 103, + 114, + 146, + 122 + ], + "spans": [ + { + "bbox": [ + 103, + 114, + 146, + 122 + ], + "type": "text", + "content": "Reference" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 152, + 72, + 302, + 173 + ], + "blocks": [ + { + "bbox": [ + 102, + 128, + 148, + 167 + ], + "lines": [ + { + "bbox": [ + 102, + 128, + 148, + 167 + ], + "spans": [ + { + "bbox": [ + 102, + 128, + 148, + 167 + ], + "type": "text", + "content": "Transfer the style in the reference image." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 152, + 72, + 302, + 173 + ], + "lines": [ + { + "bbox": [ + 152, + 72, + 302, + 173 + ], + "spans": [ + { + "bbox": [ + 152, + 72, + 302, + 173 + ], + "type": "image", + "image_path": "8ebc41e6884b7f069ed5ee7ac23e1d302d303b3d20ae0c2837436d84bb8719e2.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 306, + 122, + 353, + 171 + ], + "lines": [ + { + "bbox": [ + 306, + 122, + 353, + 171 + ], + "spans": [ + { + "bbox": [ + 306, + 122, + 353, + 171 + ], + "type": "text", + "content": "Add the object in the reference image on the women." 
+ } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 306, + 79, + 357, + 113 + ], + "blocks": [ + { + "bbox": [ + 306, + 79, + 357, + 113 + ], + "lines": [ + { + "bbox": [ + 306, + 79, + 357, + 113 + ], + "spans": [ + { + "bbox": [ + 306, + 79, + 357, + 113 + ], + "type": "image", + "image_path": "82fc676d2e8301dd302c138b4fb2e8579575fc1c2873220621aa9287a9876ed6.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 306, + 113, + 355, + 121 + ], + "lines": [ + { + "bbox": [ + 306, + 113, + 355, + 121 + ], + "spans": [ + { + "bbox": [ + 306, + 113, + 355, + 121 + ], + "type": "text", + "content": "Reference" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 359, + 72, + 509, + 172 + ], + "blocks": [ + { + "bbox": [ + 359, + 72, + 509, + 172 + ], + "lines": [ + { + "bbox": [ + 359, + 72, + 509, + 172 + ], + "spans": [ + { + "bbox": [ + 359, + 72, + 509, + 172 + ], + "type": "image", + "image_path": "a52aa43228df85914871541a07c98e72868322f9fb8f14dbe66c5c1a9614cc60.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 63, + 182, + 545, + 194 + ], + "lines": [ + { + "bbox": [ + 63, + 182, + 545, + 194 + ], + "spans": [ + { + "bbox": [ + 63, + 182, + 545, + 194 + ], + "type": "text", + "content": "Figure 2. Multimodal instruction following emerges in VEGGIE, allowing for style transfer or object addition from reference images." 
+ } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 99, + 208, + 509, + 312 + ], + "blocks": [ + { + "bbox": [ + 99, + 208, + 509, + 312 + ], + "lines": [ + { + "bbox": [ + 99, + 208, + 509, + 312 + ], + "spans": [ + { + "bbox": [ + 99, + 208, + 509, + 312 + ], + "type": "image", + "image_path": "15a599d498c84b1cb1fee7c11dacd6d52aee7bacf1257730ce3353e7016fc924.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 66, + 324, + 541, + 335 + ], + "lines": [ + { + "bbox": [ + 66, + 324, + 541, + 335 + ], + "spans": [ + { + "bbox": [ + 66, + 324, + 541, + 335 + ], + "type": "text", + "content": "Figure 3. In-context editing emerges in VEGGIE, allowing for few-shot learning of editing tasks with paired image demonstrations." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 355, + 141, + 367 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 355, + 141, + 367 + ], + "spans": [ + { + "bbox": [ + 55, + 355, + 141, + 367 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 55, + 376, + 296, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 376, + 296, + 616 + ], + "spans": [ + { + "bbox": [ + 55, + 376, + 296, + 616 + ], + "type": "text", + "content": "Instructional Video Editing Video Diffusion Models (VidDMs) [2, 4, 6, 7, 15, 26, 49, 67, 71], enable high-quality video generation across a wide range of video concepts. Building on these advances, video editing methods have emerged as tools for video design, allowing users to manipulate video concepts such as adding, removing, altering objects and style translation [20, 53, 62, 79, 85]. 
To enhance user experiences, instructional video editing methods [54, 70, 86] have been developed, using triples of video instructions, source videos, and target videos for training. These methods demonstrate limited performance when complex multimodal reasoning is required, as noted by previous research on instructional image editing [17]. Moreover, they struggle with diverse editing tasks, from addition and deletion to stylization. For example, LGVI [66] is primarily designed for removal tasks, while TokenFlow [20] struggles with local edits such as adding, removing, or changing objects. We address this limitation with pixel-level multitasking fine-tuning on well-curated instructional video editing datasets covering various grounding and editing skills." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 55, + 617, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 617, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 617, + 296, + 714 + ], + "type": "text", + "content": "Video Grounding and Segmentation Visual grounding requires models to connect language to its corresponding visual concept in the visual context [40, 47]. This is commonly evaluated via the language-guided semantic localization tasks, ranging from simple referring expressions in RefCOCO series [48, 77] and their generalized variant [44] that takes no-target and multi-target into account. Recently, grounded multimodal large lan" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 357, + 555, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 357, + 555, + 464 + ], + "spans": [ + { + "bbox": [ + 313, + 357, + 555, + 464 + ], + "type": "text", + "content": "guage models (MLLMs) are trained for object grounding to bounding boxes [8, 51, 52, 76, 82, 87] and segmentation masks [56, 69, 81, 84] using text-image pairs with fine-grained annotations linking phrases to entities. 
These models unlock the potential of reasoning segmentation [11, 36], bringing language-informed reasoning into semantic segmentation. Instead of using dedicated object detection or segmentation modules, we achieve video grounding through end-to-end training with only diffusion loss." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 475, + 444, + 487 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 475, + 444, + 487 + ], + "spans": [ + { + "bbox": [ + 313, + 475, + 444, + 487 + ], + "type": "text", + "content": "3. Our Method: VEGGIE" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 496, + 556, + 663 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 496, + 556, + 663 + ], + "spans": [ + { + "bbox": [ + 313, + 496, + 556, + 663 + ], + "type": "text", + "content": "In this paper, we introduce a Video Editor with Grounded Generation from Instructions (VEGGIE), a unified and versatile generative video model. It combines the complex instruction understanding and reasoning capabilities of MLLMs with the generative capacity of VidDMs. The model is trained end-to-end with diffusion loss only. VEGGIE efficiently handles diverse user inputs, including direct instructions, complex questions requiring in-depth reasoning, and multimodal conditioning. It performs various pixel-level manipulations, enabling tasks such as video concept addition, removal, changing, stylization, grounding, and reasoning segmentation based on user instructions. We elaborate on the model design (Sec. 3.1), training and inference process (Sec. 3.2), and data curation (Sec. 3.3)." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 672, + 429, + 683 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 672, + 429, + 683 + ], + "spans": [ + { + "bbox": [ + 313, + 672, + 429, + 683 + ], + "type": "text", + "content": "3.1. 
Model Architecture" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 689, + 555, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 689, + 555, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 689, + 555, + 714 + ], + "type": "text", + "content": "VEGGIE consists of four main components (see Fig. 4): (1) a multimodal large language model, (2) a set of learnable" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 123, + 73, + 138, + 87 + ], + "blocks": [ + { + "bbox": [ + 123, + 73, + 138, + 87 + ], + "lines": [ + { + "bbox": [ + 123, + 73, + 138, + 87 + ], + "spans": [ + { + "bbox": [ + 123, + 73, + 138, + 87 + ], + "type": "image", + "image_path": "a5d3f638a01dbbb52a6cc9b53f3d35262c056bd0b75f46a28b987aa706886c70.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 69, + 94, + 541, + 281 + ], + "blocks": [ + { + "bbox": [ + 69, + 94, + 541, + 281 + ], + "lines": [ + { + "bbox": [ + 69, + 94, + 541, + 281 + ], + "spans": [ + { + "bbox": [ + 69, + 94, + 541, + 281 + ], + "type": "image", + "image_path": "b55dc2a0ef4463e2084d920193d930a31ab28d573d9f0d5ccfef321fdc867476.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 54, + 286, + 555, + 331 + ], + "lines": [ + { + "bbox": [ + 54, + 286, + 555, + 331 + ], + "spans": [ + { + "bbox": [ + 54, + 286, + 555, + 331 + ], + "type": "text", + "content": "Figure 4. Overview of our proposed end-to-end VEGGIE framework. Our Multimodal Large Language Model first understands input video frames and diverse user instructions, then it generates frame-wise reasoning queries that maintain per-frame editing conditions for the video diffusion model. 
The video diffusion model will render the MLLM-generated conditions to the pixel space for diverse tasks, including video editing, video grounding, and video reasoning segmentation with questions. We only apply diffusion loss for the whole pipeline training." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 54, + 351, + 297, + 446 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 351, + 297, + 446 + ], + "spans": [ + { + "bbox": [ + 54, + 351, + 297, + 446 + ], + "type": "text", + "content": "grounded task queries, (3) an alignment network (single-layer MLP) that projects the MLLM output into the condition space of the diffusion model, and (4) a video diffusion model initialized from an instructional image editing model [83]. Our model first generates latent conditions for target video frames by querying multimodal context using an MLLM, then renders these conditions at the pixel level through a video diffusion model, as detailed below." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 54, + 448, + 297, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 448, + 297, + 617 + ], + "spans": [ + { + "bbox": [ + 54, + 448, + 297, + 617 + ], + "type": "text", + "content": "MLLM for Generating Grounded Task Guidance. As illustrated in the left of Fig. 
4, given a video consisting of a sequence of frames " + }, + { + "bbox": [ + 54, + 448, + 297, + 617 + ], + "type": "inline_equation", + "content": "V = [f_{1}, \\dots, f_{n}]" + }, + { + "bbox": [ + 54, + 448, + 297, + 617 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 54, + 448, + 297, + 617 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 54, + 448, + 297, + 617 + ], + "type": "text", + "content": " is the frame number of the given video, a user instruction/question " + }, + { + "bbox": [ + 54, + 448, + 297, + 617 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 54, + 448, + 297, + 617 + ], + "type": "text", + "content": ", our goal is to obtain the response " + }, + { + "bbox": [ + 54, + 448, + 297, + 617 + ], + "type": "inline_equation", + "content": "\\widehat{V} = [\\widehat{f}_{1}, \\dots, \\widehat{f}_{n}]" + }, + { + "bbox": [ + 54, + 448, + 297, + 617 + ], + "type": "text", + "content": " at pixel space that faithfully reflects user instruction about the given video. The MLLM module processes both the input video " + }, + { + "bbox": [ + 54, + 448, + 297, + 617 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 54, + 448, + 297, + 617 + ], + "type": "text", + "content": " and a user instruction " + }, + { + "bbox": [ + 54, + 448, + 297, + 617 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 54, + 448, + 297, + 617 + ], + "type": "text", + "content": " to generate a sequence of grounded task tokens per frame: " + }, + { + "bbox": [ + 54, + 448, + 297, + 617 + ], + "type": "inline_equation", + "content": "C = [c_{1}, \\dots, c_{n}]" + }, + { + "bbox": [ + 54, + 448, + 297, + 617 + ], + "type": "text", + "content": ", which are input and output in parallel. These tokens serve as task guidance and implicitly encode the target manipulation, such as object attributes, spatial relationships, or style transfer parameters. 
The MLLM ensures the model captures both explicit user instructions and implicit reasoning needs." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 54, + 617, + 297, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 617, + 297, + 714 + ], + "spans": [ + { + "bbox": [ + 54, + 617, + 297, + 714 + ], + "type": "text", + "content": "VidDM for Rendering MLLM Guidance at Pixel Space. As illustrated in the right of Table 4, the VidDM takes the original video " + }, + { + "bbox": [ + 54, + 617, + 297, + 714 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 54, + 617, + 297, + 714 + ], + "type": "text", + "content": " and the grounded task tokens " + }, + { + "bbox": [ + 54, + 617, + 297, + 714 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 54, + 617, + 297, + 714 + ], + "type": "text", + "content": " as conditions to synthesize the target video " + }, + { + "bbox": [ + 54, + 617, + 297, + 714 + ], + "type": "inline_equation", + "content": "\\widehat{V}" + }, + { + "bbox": [ + 54, + 617, + 297, + 714 + ], + "type": "text", + "content": ". The original video is concatenated with the noise volume, and the task tokens are input to the cross-attention. With grounded task guidance in denoising steps, the generation process ensures that the output faithfully follows user instructions while preserving the" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 351, + 555, + 400 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 351, + 555, + 400 + ], + "spans": [ + { + "bbox": [ + 313, + 351, + 555, + 400 + ], + "type": "text", + "content": "video's structure and motion dynamics. 
Through iterative denoising, it refines each frame while maintaining temporal consistency, applying pixel modifications coherently for a smooth and visually consistent output video " + }, + { + "bbox": [ + 313, + 351, + 555, + 400 + ], + "type": "inline_equation", + "content": "\\widehat{V}" + }, + { + "bbox": [ + 313, + 351, + 555, + 400 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 408, + 547, + 421 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 408, + 547, + 421 + ], + "spans": [ + { + "bbox": [ + 313, + 408, + 547, + 421 + ], + "type": "text", + "content": "3.2. Curriculum Learning from Images to Videos" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 426, + 556, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 426, + 556, + 593 + ], + "spans": [ + { + "bbox": [ + 313, + 426, + 556, + 593 + ], + "type": "text", + "content": "Training the model directly on video tasks presents two key challenges: (1) misalignment between MLLM and diffusion model representations, making it difficult for the diffusion model to interpret MLLM-generated task queries with limited fine-tuning data, and (2) the diffusion model's lack of multitasking capability, even for image tasks, due to insufficient training on diverse tasks. Our initial experiments also found the model collapsed when the whole pipeline was directly trained with all data. These challenges/observations underscore the need for pre-alignment between MLLM and the diffusion model to enable seamless adaptation from language-space task queries to pixel-space modifications. To this end, we adopt a two-stage curriculum learning strategy for the proposed VEGGIE framework." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 594, + 557, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 594, + 557, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 594, + 557, + 713 + ], + "type": "text", + "content": "Stage 1: Aligning Diffusion and Language Spaces. In the first stage, we align the diffusion model with the MLLM using large-scale image-level instructional editing data. The MLLM remains frozen while we update the alignment network, grounded task queries, and diffusion UNet. This process fine-tunes the diffusion model weights to align with the language space, enabling the model to interpret MLLM-generated guidance and translate user instructions into pixel-level edits while preserving the MLLM's strong ability to understand instructions and user intentions." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 138, + 73, + 466, + 86 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 73, + 466, + 86 + ], + "spans": [ + { + "bbox": [ + 138, + 73, + 466, + 86 + ], + "type": "text", + "content": "VEGGIE: A Unified and Versatile Instructional Video Generative Model" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 57, + 70, + 293, + 247 + ], + "blocks": [ + { + "bbox": [ + 57, + 70, + 293, + 247 + ], + "lines": [ + { + "bbox": [ + 57, + 70, + 293, + 247 + ], + "spans": [ + { + "bbox": [ + 57, + 70, + 293, + 247 + ], + "type": "table", + "html": "
TypeSourceR.E.G.# Img./Vid.# Ins.
VideoROVI [66]4.3K27.4K
VPLM [75]4.3K5.5K
GroundMoRe [12]1.3K5.5K
RVoS [60]1.9K6.1K
MeViS [14]1.5K17.1K
InstructV2V [9]68.3K68.3K
VEG-Edit (Ours)4.0K6.2K
Total136.1K
ImageSeed-Data-Edit [18]3M3M
LISA [37]0.2K1.3K
gRefCoCo [44]13.6K73.4K
PhraseCut [65]310.8K310.8K
EraseDraw [5]64.9K42.4K
MagicBrush [83]9.3K9.3K
SmartEdit [29]0.5K0.9K
Total3438.1K
", + "image_path": "c6886a9d5898513643b38cb38f91e2f9594828a09e7f5582e736d37ec64105ca.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 55, + 251, + 296, + 285 + ], + "lines": [ + { + "bbox": [ + 55, + 251, + 296, + 285 + ], + "spans": [ + { + "bbox": [ + 55, + 251, + 296, + 285 + ], + "type": "text", + "content": "Table 1. Summary of our data for training. R.: Reasoning, E.: Editing, G.: Grounding. #Img/Vid: the number of images/videos, and #Ins.: the number of instruction-image/video pairs." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 0 + }, + { + "bbox": [ + 54, + 304, + 297, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 304, + 297, + 483 + ], + "spans": [ + { + "bbox": [ + 54, + 304, + 297, + 483 + ], + "type": "text", + "content": "Stage 2: Enhancing Temporal Consistency and Dynamics. With the MLLM and diffusion model aligned, fine-tuning diverse instructional video editing data becomes more effective for improved instruction following at pixel-space including temporal consistency, dynamic coherence, and editing faithfulness. In this stage, we fine-tune the framework with the MLLM, including the alignment network, grounded task queries, and all 3 dimensions in diffusion UNet, end-to-end with carefully curated multitasking instructional video editing data. Following prior work [22, 67], we inflated the 2D UNet from Stage 1 with temporal attention layers for video adaptation. For both stages 1 and 2, we optimize the framework with a single diffusion loss, enabling unified learning for improved instructional video editing performance while maintaining simplicity and efficiency." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 54, + 487, + 298, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 487, + 298, + 582 + ], + "spans": [ + { + "bbox": [ + 54, + 487, + 298, + 582 + ], + "type": "text", + "content": "Classifier-Free Guidance during Testing. We employ classifier-free guidance to balance quality and diversity in diffusion-generated samples. Following prior work [3, 17], we apply classifier-free guidance to instructional visual editing considering two conditions: the grounded task tokens and the original video. To obtain unconditional guidance, we set null values " + }, + { + "bbox": [ + 54, + 487, + 298, + 582 + ], + "type": "inline_equation", + "content": "(\\varnothing)" + }, + { + "bbox": [ + 54, + 487, + 298, + 582 + ], + "type": "text", + "content": " for both task tokens and input video. In this case, our score estimate is:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 61, + 594, + 289, + 638 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 594, + 289, + 638 + ], + "spans": [ + { + "bbox": [ + 61, + 594, + 289, + 638 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\tilde {e _ {\\theta}} (z _ {t}, c _ {T}, c _ {V}) = e _ {\\theta} (z _ {t}, \\varnothing , \\varnothing) \\\\ + g _ {T} \\cdot \\left(e _ {\\theta} \\left(z _ {t}, c _ {V}, c _ {T}\\right) - e _ {\\theta} \\left(z _ {t}, c _ {V}, \\varnothing\\right)\\right) \\\\ + g _ {V} \\cdot \\left(e _ {\\theta} \\left(z _ {t}, c _ {V}, \\varnothing\\right) - e _ {\\theta} \\left(z _ {t}, \\varnothing , \\varnothing\\right)\\right), \\\\ \\end{array}", + "image_path": "08127c4d973e01c341e464b900f712f62c230d2e6ec89cf2da94db90ac35387d.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 54, + 653, + 296, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 653, + 296, + 715 + ], + "spans": [ + { + "bbox": [ + 54, + 653, + 296, + 715 + ], + "type": 
"text", + "content": "where " + }, + { + "bbox": [ + 54, + 653, + 296, + 715 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 54, + 653, + 296, + 715 + ], + "type": "text", + "content": " represents the model parameters, " + }, + { + "bbox": [ + 54, + 653, + 296, + 715 + ], + "type": "inline_equation", + "content": "C_T" + }, + { + "bbox": [ + 54, + 653, + 296, + 715 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 54, + 653, + 296, + 715 + ], + "type": "inline_equation", + "content": "C_V" + }, + { + "bbox": [ + 54, + 653, + 296, + 715 + ], + "type": "text", + "content": " denote the task tokens and video conditions, " + }, + { + "bbox": [ + 54, + 653, + 296, + 715 + ], + "type": "inline_equation", + "content": "\\varnothing" + }, + { + "bbox": [ + 54, + 653, + 296, + 715 + ], + "type": "text", + "content": " is the null value, " + }, + { + "bbox": [ + 54, + 653, + 296, + 715 + ], + "type": "inline_equation", + "content": "z_t" + }, + { + "bbox": [ + 54, + 653, + 296, + 715 + ], + "type": "text", + "content": " is the noised latent at timestamp " + }, + { + "bbox": [ + 54, + 653, + 296, + 715 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 54, + 653, + 296, + 715 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 54, + 653, + 296, + 715 + ], + "type": "inline_equation", + "content": "g_T" + }, + { + "bbox": [ + 54, + 653, + 296, + 715 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 54, + 653, + 296, + 715 + ], + "type": "inline_equation", + "content": "g_V" + }, + { + "bbox": [ + 54, + 653, + 296, + 715 + ], + "type": "text", + "content": " are the task guidance and video guidance scales, respectively. More training details are included later in Appendix." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 314, + 72, + 446, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 72, + 446, + 85 + ], + "spans": [ + { + "bbox": [ + 314, + 72, + 446, + 85 + ], + "type": "text", + "content": "3.3. Data Curation Pipeline" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 89, + 555, + 233 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 89, + 555, + 233 + ], + "spans": [ + { + "bbox": [ + 313, + 89, + 555, + 233 + ], + "type": "text", + "content": "Existing video editing models, both instructional and non-instructional, struggle with diverse editing skills due to the lack of high-quality multitasking fine-tuning data. In this section, we introduce our data curation strategy to support VEGGIE in achieving versatile video editing skills. As listed in Tab. 1, we collect 3.4M image and 133.9K video data from diverse sources to support our VEGGIE curriculum learning as discussed in Sec. 3.2. We create our training dataset from two sources: (1) collecting existing image and video data and converting it into an instructional editing format, and (2) synthesizing new instructional video editing samples using existing datasets and generative models." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 234, + 556, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 234, + 556, + 486 + ], + "spans": [ + { + "bbox": [ + 313, + 234, + 556, + 486 + ], + "type": "text", + "content": "Collecting Diverse Multitask Image and Video Data. We bring together instructional editing data from both image (Seed-Data-Edit [18], MagicBrush [83], EraseDraw [5]) and video (InstructV2V [9], VPLM [75]) sources. These datasets provide pairs of original and edited visual contents with user instructions. The tasks include adding, removing, and changing objects, stylizing, and performing global/local edits. 
Beyond editing datasets, we incorporate segmentation data at both the image level (gRefCoCo [44] and Phrase-Cut [65]) and the video level (RVoS and MeViS). These segmentation tasks are reformulated as color-filling challenges, which guide the model in learning referring grounding (i.e., understanding which object or region to edit) and strengthen its conceptual learning. To further unlock complex instruction understanding via MLLM, we include data that requires more advanced reasoning and implicit referencing. Specifically, we include: reasoning segmentation (LISA [37]), reasoning editing (SmartEdit [29]), interactive video inpainting (LGVI [66]), and motion-grounded video reasoning (GroundMoRe [12]). These tasks help VEGGIE learn implicit references and reasoning." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 486, + 556, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 486, + 556, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 486, + 556, + 714 + ], + "type": "text", + "content": "Synthesizing Instructional Video Editing Data via Image-to-Video Animation. Recent methods [3, 54] generate synthetic instructional video-editing data by first creating text instructions with LLM, then getting edited videos via T2V models and prompt-to-prompt editing [23]. While these methods adapt image-based editing pipelines [9] to videos, the generated data suffer from temporal-consistency issues. To address this gap, we propose a novel image-to-video animation strategy that leverages the abundance of high-quality image-level instructional editing datasets [64, 83], which provide well-annotated instructions, paired edited images, and well-organized editing skill categories. As illustrated in Fig. 
5, given an original image " + }, + { + "bbox": [ + 313, + 486, + 556, + 714 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 313, + 486, + 556, + 714 + ], + "type": "text", + "content": ", an edited image " + }, + { + "bbox": [ + 313, + 486, + 556, + 714 + ], + "type": "inline_equation", + "content": "\\bar{I}" + }, + { + "bbox": [ + 313, + 486, + 556, + 714 + ], + "type": "text", + "content": ", and an instruction from an instructional image editing dataset [64], our approach involves three key steps. First, we use an offline MLLM [61, 72] to generate an image caption and an animation prompt that describes plausible motion within the image. Next, an image-to-video (I2V) model animates the image into a video " + }, + { + "bbox": [ + 313, + 486, + 556, + 714 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 313, + 486, + 556, + 714 + ], + "type": "text", + "content": ". Finally," + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 73, + 69, + 276, + 283 + ], + "blocks": [ + { + "bbox": [ + 73, + 69, + 276, + 283 + ], + "lines": [ + { + "bbox": [ + 73, + 69, + 276, + 283 + ], + "spans": [ + { + "bbox": [ + 73, + 69, + 276, + 283 + ], + "type": "image", + "image_path": "e788f4f4b9713f7aa04e3b0723c0bd508449a3c41f0753126f94891fa627fb93.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 286, + 297, + 331 + ], + "lines": [ + { + "bbox": [ + 55, + 286, + 297, + 331 + ], + "spans": [ + { + "bbox": [ + 55, + 286, + 297, + 331 + ], + "type": "text", + "content": "Figure 5. Our data generation pipeline for synthetic instructional video editing data. It injects dynamics into well-constructed instructional image editing datasets via the Image-to-Video (I2V) Model, and generates paired video data for instruction editing." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 54, + 341, + 296, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 341, + 296, + 521 + ], + "spans": [ + { + "bbox": [ + 54, + 341, + 296, + 521 + ], + "type": "text", + "content": "we generate the corresponding edited video " + }, + { + "bbox": [ + 54, + 341, + 296, + 521 + ], + "type": "inline_equation", + "content": "\\bar{V}" + }, + { + "bbox": [ + 54, + 341, + 296, + 521 + ], + "type": "text", + "content": " using a first-frame-conditioned video editing model [35], leveraging " + }, + { + "bbox": [ + 54, + 341, + 296, + 521 + ], + "type": "inline_equation", + "content": "\\bar{I}" + }, + { + "bbox": [ + 54, + 341, + 296, + 521 + ], + "type": "text", + "content": " as a strong prior to ensure consistent edits across frames. Finally, to ensure data quality, we evaluate each original-edited video pair with automatic video quality evaluation metrics [30], which assess the generated videos from diverse dimensions, e.g., motion smoothness, image quality, and background consistency. This pipeline transforms carefully curated image-based datasets into instructional video-editing resources while preserving the precision of the original edits. As a result, our data method expands the availability of high-quality synthetic video-editing data, supporting a wider range of editing tasks in our end-to-end unified framework. More details on data generation, prompting, examples, and pre/post-processing are in the Appendix." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 533, + 137, + 547 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 533, + 137, + 547 + ], + "spans": [ + { + "bbox": [ + 55, + 533, + 137, + 547 + ], + "type": "text", + "content": "4. 
Experiments" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 554, + 297, + 603 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 554, + 297, + 603 + ], + "spans": [ + { + "bbox": [ + 55, + 554, + 297, + 603 + ], + "type": "text", + "content": "We first introduce the VEG-Bench Benchmark and then demonstrate the superiority of VEGGIE across diverse video instructional editing skills. More experiments, visualization, and implementation details are in the Appendix." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 611, + 192, + 623 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 611, + 192, + 623 + ], + "spans": [ + { + "bbox": [ + 55, + 611, + 192, + 623 + ], + "type": "text", + "content": "4.1. VEG-Bench and Metrics" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 629, + 297, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 629, + 297, + 715 + ], + "spans": [ + { + "bbox": [ + 55, + 629, + 297, + 715 + ], + "type": "text", + "content": "As no existing benchmark is designed for fine-grained instructional video editing skills, we manually collect and annotate VEG-Bench, containing 132 video-instruction pairs that balanced cover 8 different video generative skills (15-20 for each). Beyond standard metrics, including text-to-video alignment (CLIP-Text [55]), video smoothness (CLIP-F [55]), and image quality (MUSIQ [32]), we also" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 72, + 556, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 556, + 228 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 556, + 228 + ], + "type": "text", + "content": "first introduce MLLM-as-a-Judge to give a holistic evaluation score according to the given original video, edited video, and user instruction. 
It is achieved by prompting GPT4o [21] to evaluate whether the requested semantic change has been fulfilled, using a scale from 1 to 10. For addition and removal, we also introduce an object detector (GroundingDiNo [45]) to detect if the object is added/removed faithfully. For grounding and reasoning segmentation, we following video grounding tasks [12, 14, 33, 58] and adopt the Jaccard index " + }, + { + "bbox": [ + 313, + 72, + 556, + 228 + ], + "type": "inline_equation", + "content": "(\\mathcal{I})" + }, + { + "bbox": [ + 313, + 72, + 556, + 228 + ], + "type": "text", + "content": " [31], F-measure " + }, + { + "bbox": [ + 313, + 72, + 556, + 228 + ], + "type": "inline_equation", + "content": "(\\mathcal{F})" + }, + { + "bbox": [ + 313, + 72, + 556, + 228 + ], + "type": "text", + "content": " [13], and their mean " + }, + { + "bbox": [ + 313, + 72, + 556, + 228 + ], + "type": "inline_equation", + "content": "(\\mathcal{J} \\& \\mathcal{F})" + }, + { + "bbox": [ + 313, + 72, + 556, + 228 + ], + "type": "text", + "content": ". We also compute SSIM between the generated video and the original video masked with GT masks. More evaluation/metrics details are included in Appendix." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 238, + 437, + 251 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 238, + 437, + 251 + ], + "spans": [ + { + "bbox": [ + 313, + 238, + 437, + 251 + ], + "type": "text", + "content": "4.2. Experimental Results" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 256, + 556, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 256, + 556, + 472 + ], + "spans": [ + { + "bbox": [ + 313, + 256, + 556, + 472 + ], + "type": "text", + "content": "Instructional Video Editing over Diverse Skills. As shown in Tab. 2, we evaluate 7 different models on VEG-Bench across 8 distinct editing skills. 
Overall, VEGGIE demonstrates the best performance among instructional video editing models. Compared to VEGGIE, non-instructional models often struggle with concept removal and addition. This limitation arises because these models rely on attention control or additional conditions (e.g., depth maps) that impose strong priors, constraining the model and making object addition or removal challenging. We also observe that InsV2V achieves high scores in quality and smoothness metrics, but underperforms in alignment and MLLM judgment, which demand faithful semantic changes. Qualitative examples in Fig. 6 illustrate that InsV2V often makes minimal changes to the input video, resulting in high video quality but unfaithful outputs. In contrast, VEGGIE strikes a better balance, delivering both high-quality visuals and accurate semantic alignment with the intended edits." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 473, + 556, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 473, + 556, + 628 + ], + "spans": [ + { + "bbox": [ + 313, + 473, + 556, + 628 + ], + "type": "text", + "content": "Can Multi-Task Help Each Other? To test the previous hypothesis, we train our model on the VPLM [75] dataset, which includes paired grounding and removal tasks (approximately 5.5K samples for each task). We focus on these tasks as representative examples due to their straightforward evaluation against ground truth. As shown in Table 3, multitask training yields a lower FVD score and a higher SSIM score, demonstrating that learning to locate and remove a video concept can mutually reinforce performance. We show an example in Fig. 7. However, this conclusion only holds with a balanced data combination. We also observe that an excessive amount of grounding data can introduce more artifacts and negatively impact visual editing skills." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 630, + 556, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 630, + 556, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 630, + 556, + 713 + ], + "type": "text", + "content": "Emergent Zero-shot Multimodal Instruction Following. We also highlight the emergent behavior of VEGGIE on multi-modal instruction following, even without dedicated training data for this specific editing instruction. Notably, VEGGIE demonstrates the ability to perform zero-shot multimodal instructional video editing. As illustrated in Fig. 2, VEGGIE can transfer styles or add objects from a reference" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 70, + 79, + 225, + 269 + ], + "blocks": [ + { + "bbox": [ + 82, + 72, + 214, + 79 + ], + "lines": [ + { + "bbox": [ + 82, + 72, + 214, + 79 + ], + "spans": [ + { + "bbox": [ + 82, + 72, + 214, + 79 + ], + "type": "text", + "content": "[Addition] Please add a ball in the given video frames." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 70, + 79, + 225, + 269 + ], + "lines": [ + { + "bbox": [ + 70, + 79, + 225, + 269 + ], + "spans": [ + { + "bbox": [ + 70, + 79, + 225, + 269 + ], + "type": "image", + "image_path": "769f5b258ad2a40f74015e9fe4b7b6b606d38b4bc700fb57fbc9df38e8c93b20.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 227, + 79, + 383, + 269 + ], + "blocks": [ + { + "bbox": [ + 227, + 72, + 383, + 79 + ], + "lines": [ + { + "bbox": [ + 227, + 72, + 383, + 79 + ], + "spans": [ + { + "bbox": [ + 227, + 72, + 383, + 79 + ], + "type": "text", + "content": "[Removal] Please remove the man in black in given video frames." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 227, + 79, + 383, + 269 + ], + "lines": [ + { + "bbox": [ + 227, + 79, + 383, + 269 + ], + "spans": [ + { + "bbox": [ + 227, + 79, + 383, + 269 + ], + "type": "image", + "image_path": "00f32512d007bed96281648f160b4774880b37f5f9ff4dfdf7bdfebeb8280ea6.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 384, + 79, + 540, + 269 + ], + "blocks": [ + { + "bbox": [ + 394, + 72, + 529, + 79 + ], + "lines": [ + { + "bbox": [ + 394, + 72, + 529, + 79 + ], + "spans": [ + { + "bbox": [ + 394, + 72, + 529, + 79 + ], + "type": "text", + "content": "[Swap] Replace golden building with a white mountain." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 384, + 79, + 540, + 269 + ], + "lines": [ + { + "bbox": [ + 384, + 79, + 540, + 269 + ], + "spans": [ + { + "bbox": [ + 384, + 79, + 540, + 269 + ], + "type": "image", + "image_path": "d5a4bed054201a04c482dc9e52feec17fb85da977718e3c6b1ff3aedd5e32725.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 70, + 278, + 225, + 467 + ], + "blocks": [ + { + "bbox": [ + 99, + 271, + 196, + 278 + ], + "lines": [ + { + "bbox": [ + 99, + 271, + 196, + 278 + ], + "spans": [ + { + "bbox": [ + 99, + 271, + 196, + 278 + ], + "type": "text", + "content": "[Environment] Make it on the beach." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 70, + 278, + 225, + 467 + ], + "lines": [ + { + "bbox": [ + 70, + 278, + 225, + 467 + ], + "spans": [ + { + "bbox": [ + 70, + 278, + 225, + 467 + ], + "type": "image", + "image_path": "fc7eb7072f3ac10455a05af86de057f09bf8de77d3070a1df4c5cdecfb45e595.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 227, + 278, + 383, + 467 + ], + "blocks": [ + { + "bbox": [ + 266, + 271, + 343, + 278 + ], + "lines": [ + { + "bbox": [ + 266, + 271, + 343, + 278 + ], + "spans": [ + { + "bbox": [ + 266, + 271, + 343, + 278 + ], + "type": "text", + "content": "[Color] Make the swan white." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 227, + 278, + 383, + 467 + ], + "lines": [ + { + "bbox": [ + 227, + 278, + 383, + 467 + ], + "spans": [ + { + "bbox": [ + 227, + 278, + 383, + 467 + ], + "type": "image", + "image_path": "34b49ae75a067c46555833a6dcf6b163d985c2eb0b4af622d4cafc6f1c4f5a16.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 384, + 278, + 540, + 467 + ], + "blocks": [ + { + "bbox": [ + 416, + 271, + 508, + 278 + ], + "lines": [ + { + "bbox": [ + 416, + 271, + 508, + 278 + ], + "spans": [ + { + "bbox": [ + 416, + 271, + 508, + 278 + ], + "type": "text", + "content": "[Texture] Make the rhinocero furry." 
+ } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 384, + 278, + 540, + 467 + ], + "lines": [ + { + "bbox": [ + 384, + 278, + 540, + 467 + ], + "spans": [ + { + "bbox": [ + 384, + 278, + 540, + 467 + ], + "type": "image", + "image_path": "cedd9c5e501fb9c67b83bffd4cdbec3face648d9021835024e06f5277e66ca55.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 70, + 480, + 225, + 670 + ], + "blocks": [ + { + "bbox": [ + 105, + 473, + 190, + 480 + ], + "lines": [ + { + "bbox": [ + 105, + 473, + 190, + 480 + ], + "spans": [ + { + "bbox": [ + 105, + 473, + 190, + 480 + ], + "type": "text", + "content": "[Style] Make it chinese ink style." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 70, + 480, + 225, + 670 + ], + "lines": [ + { + "bbox": [ + 70, + 480, + 225, + 670 + ], + "spans": [ + { + "bbox": [ + 70, + 480, + 225, + 670 + ], + "type": "image", + "image_path": "d119609c69d9e8f8e016ca37c53edbeabac37f9d31df9ffab230fed4f1bd87fa.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 679, + 553, + 701 + ], + "lines": [ + { + "bbox": [ + 55, + 679, + 553, + 701 + ], + "spans": [ + { + "bbox": [ + 55, + 679, + 553, + 701 + ], + "type": "text", + "content": "Figure 6. Qualitative comparison of editing results across 8 different abilities (splitting visual features into color and texture). We provide zoom-in details for a more detailed comparison. Best viewed in color. More in Appendix." 
+ } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 227, + 475, + 383, + 670 + ], + "blocks": [ + { + "bbox": [ + 252, + 468, + 360, + 475 + ], + "lines": [ + { + "bbox": [ + 252, + 468, + 360, + 475 + ], + "spans": [ + { + "bbox": [ + 252, + 468, + 360, + 475 + ], + "type": "text", + "content": "[Grounding] Could you locate the knife" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 227, + 475, + 383, + 670 + ], + "lines": [ + { + "bbox": [ + 227, + 475, + 383, + 670 + ], + "spans": [ + { + "bbox": [ + 227, + 475, + 383, + 670 + ], + "type": "image", + "image_path": "ece2036c5a6eb87cd9ef520c69ff7ac773f46d71db767926851aa27e027f4caa.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 383, + 475, + 540, + 670 + ], + "blocks": [ + { + "bbox": [ + 398, + 468, + 528, + 475 + ], + "lines": [ + { + "bbox": [ + 398, + 468, + 528, + 475 + ], + "spans": [ + { + "bbox": [ + 398, + 468, + 528, + 475 + ], + "type": "text", + "content": "[Reasoning] What can be used for heating food?" 
+ } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 383, + 475, + 540, + 670 + ], + "lines": [ + { + "bbox": [ + 383, + 475, + 540, + 670 + ], + "spans": [ + { + "bbox": [ + 383, + 475, + 540, + 670 + ], + "type": "image", + "image_path": "7fa0623cc60d54f9ad0f0fcf8df5957362b4f09b3621f95288c1e4201f7170bf.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 91, + 70, + 515, + 406 + ], + "blocks": [ + { + "bbox": [ + 91, + 70, + 515, + 406 + ], + "lines": [ + { + "bbox": [ + 91, + 70, + 515, + 406 + ], + "spans": [ + { + "bbox": [ + 91, + 70, + 515, + 406 + ], + "type": "table", + "html": "
Methods\nGeneratorNon-Instructional Editing ModelInstructional Editing Model
VidToMe [41] (SD1.5)TokenFlow [20] (SD2.1)Flatten [10] (SD2.1)InstructDiff [19] (SD1.5)LGVI [66] (SD1.5)InsV2V [9] (SD1.5)VEGGIE (Ours) (SD1.5)
Concept Addition
MLLM-Judge (↑)5.005.806.627.262.735.697.44
Alignment (↑)27.8029.3028.2228.1025.0628.2729.27
Smoothness (↑)96.7997.2695.7493.6696.0996.9494.93
Quality (↑)62.8165.6255.4554.0841.5956.2461.31
Detection (↑)47.9849.5349.7455.3614.4248.0157.96
Concept Removal
MLLM-Judge (↑)2.603.734.466.126.592.785.07
Alignment (↑)75.0175.9978.4075.5175.6774.4175.63
Smoothness (↑)96.1396.4795.8291.8397.0396.9995.04
Quality (↑)66.3271.5250.7755.0842.3158.7950.99
Detection (↑)34.3155.1670.9164.8178.4025.6470.22
Object Changing
MLLM-Judge (↑)5.006.537.377.002.066.606.63
Alignment (↑)25.6928.7627.0627.3622.1726.6027.77
Smoothness (↑)96.2397.2196.1392.0795.6696.7495.44
Quality (↑)64.0669.9759.3755.0138.2060.9058.15
Environment & Background Changing
MLLM-Judge (↑)5.817.357.376.052.376.607.18
Alignment (↑)28.1730.0030.0428.0321.9428.2729.15
Smoothness (↑)95.7696.9695.9089.8595.6696.0394.58
Quality (↑)61.9567.0654.5853.0638.9754.9454.25
Visual Feature Changing (Color & Texture)
MLLM-Judge (↑)5.866.856.606.432.147.537.33
Alignment (↑)27.9929.2529.4627.5423.1828.8828.69
Smoothness (↑)95.9397.1095.8391.7194.7596.6694.52
Quality (↑)65.8069.3153.3258.2936.2759.3657.91
Stylization
MLLM-Judge (↑)7.237.628.317.413.718.078.26
Alignment (↑)29.8430.2529.0027.7422.8029.1429.38
Smoothness (↑)96.3197.2396.7188.9795.6296.5095.69
Quality (↑)64.0568.2253.1854.1535.7662.5957.00
Object Grounding
SSIM (↑)40.4750.4647.2137.9866.8449.6570.90
Jaccard Index J (↑)13.8519.2925.6219.881.5213.8937.74
F-measure F (↑)15.5016.8617.6012.813.0717.3721.83
Reasoning Segmentation
SSIM (↑)---32.3944.4759.8668.41
Jaccard Index J (↑)---14.0210.1216.8922.53
F-measure F (↑)---8.079.0610.4515.97
Avg. Ranking2.611.411.963.003.212.001.78
", + "image_path": "fd2127f64af65331f9ef175acf2c164e15803575e2f78951103783d194d03106.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 72, + 441, + 284, + 492 + ], + "blocks": [ + { + "bbox": [ + 55, + 409, + 555, + 433 + ], + "lines": [ + { + "bbox": [ + 55, + 409, + 555, + 433 + ], + "spans": [ + { + "bbox": [ + 55, + 409, + 555, + 433 + ], + "type": "text", + "content": "Table 2. Comparison of video editing task with instructional / non-instructional models on VEG-Bench. -: the task is not capable of non-instructional models. We gray out numbers of non-instructional models that are in different categories." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 72, + 441, + 284, + 492 + ], + "lines": [ + { + "bbox": [ + 72, + 441, + 284, + 492 + ], + "spans": [ + { + "bbox": [ + 72, + 441, + 284, + 492 + ], + "type": "table", + "html": "
SettingsRemoval (FVD ↓)Grounding (SSIM ↑)
Grd.-only-52.34
Rmv.-only1098.52-
Mixed987.8055.21
", + "image_path": "72a21b6c9815d103f96dd8cd3baa1af72c116e890688bd264c8b890c2b98b54f.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 501, + 295, + 534 + ], + "lines": [ + { + "bbox": [ + 55, + 501, + 295, + 534 + ], + "spans": [ + { + "bbox": [ + 55, + 501, + 295, + 534 + ], + "type": "text", + "content": "Table 3. An ablation study on whether multi-task learning provides transferable benefits that enhance performance across tasks. We focus on removal and grounding tasks as representative examples." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 70, + 544, + 282, + 656 + ], + "blocks": [ + { + "bbox": [ + 70, + 544, + 282, + 656 + ], + "lines": [ + { + "bbox": [ + 70, + 544, + 282, + 656 + ], + "spans": [ + { + "bbox": [ + 70, + 544, + 282, + 656 + ], + "type": "image", + "image_path": "26fbd8a917fd53e373c29389d3c2a59e6baaf3dc3521674392c00f0a449b214f.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 665, + 295, + 689 + ], + "lines": [ + { + "bbox": [ + 55, + 665, + 295, + 689 + ], + "spans": [ + { + "bbox": [ + 55, + 665, + 295, + 689 + ], + "type": "text", + "content": "Figure 7. Comparison between single- and multi-skill models with different data training. We find tasks can help each other." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 313, + 443, + 511, + 454 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 443, + 511, + 454 + ], + "spans": [ + { + "bbox": [ + 313, + 443, + 511, + 454 + ], + "type": "text", + "content": "image into the input video based on instructions." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 455, + 555, + 539 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 455, + 555, + 539 + ], + "spans": [ + { + "bbox": [ + 313, + 455, + 555, + 539 + ], + "type": "text", + "content": "Emergent Few-shot In-Context Editing. As shown in Fig. 3, VEGGIE can effectively utilize a few example image pairs to transfer the intended editing changes seamlessly to the input video. We observe that VEGGIE exhibits in-context learning for image editing without the need for language instructions. Instead, it uses image pairs as examples to infer and apply the desired editing intention directly." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 550, + 388, + 562 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 550, + 388, + 562 + ], + "spans": [ + { + "bbox": [ + 313, + 550, + 388, + 562 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 570, + 556, + 703 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 570, + 556, + 703 + ], + "spans": [ + { + "bbox": [ + 313, + 570, + 556, + 703 + ], + "type": "text", + "content": "We present VEGGIE, a unified end-to-end model for instructional video editing that handles diverse pixel-level tasks. VEGGIE leverages MLLM for robust instruction understanding and employs a video diffusion model to execute pixel-level edits. Our framework uses a single diffusion loss for end-to-end optimization across varied tasks/skills. We also introduce a novel synthetic data generation pipeline and VEG-Bench, a benchmark that assesses a broad range of editing skills. Our VEGGIE outperforms previous methods as a versatile, all-in-one solution. We hope our model, data, and benchmark to advance research on instructional generative video models." 
+ } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 71, + 115, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 71, + 115, + 83 + ], + "spans": [ + { + "bbox": [ + 56, + 71, + 115, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 57, + 91, + 297, + 712 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 61, + 91, + 297, + 145 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 91, + 297, + 145 + ], + "spans": [ + { + "bbox": [ + 61, + 91, + 297, + 145 + ], + "type": "text", + "content": "[1] Zechen Bai, Tong He, Haiyang Mei, Pichao Wang, Ziteng Gao, Joya Chen, Lei Liu, Zheng Zhang, and Mike Zheng Shou. One token to seg them all: Language instructed reasoning segmentation in videos. arXiv preprint arXiv:2409.19603, 2024. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 61, + 146, + 296, + 201 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 146, + 296, + 201 + ], + "spans": [ + { + "bbox": [ + 61, + 146, + 296, + 201 + ], + "type": "text", + "content": "[2] Andreas Blattmann, Tim Dockhorn, Sumith Kulal, Daniel Mendelevitch, Maciej Kilian, Dominik Lorenz, Yam Levi, Zion English, Vikram Voleti, Adam Letts, et al. Stable video diffusion: Scaling latent video diffusion models to large datasets. arXiv preprint arXiv:2311.15127, 2023. 2, 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 62, + 202, + 296, + 246 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 202, + 296, + 246 + ], + "spans": [ + { + "bbox": [ + 62, + 202, + 296, + 246 + ], + "type": "text", + "content": "[3] Tim Brooks, Aleksander Holynski, and Alexei A Efros. Instructpix2pix: Learning to follow image editing instructions. 
In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 18392-18402, 2023. 5" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 62, + 247, + 296, + 312 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 247, + 296, + 312 + ], + "spans": [ + { + "bbox": [ + 62, + 247, + 296, + 312 + ], + "type": "text", + "content": "[4] Tim Brooks, Bill Peebles, Connor Holmes, Will DePue, Yufei Guo, Li Jing, David Schnurr, Joe Taylor, Troy Luhman, Eric Luhman, et al. Video generation models as world simulators. OpenAI, https://openai.com/research/video-generation-models-as-world-simulators, 2024.2.3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 62, + 313, + 296, + 356 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 313, + 296, + 356 + ], + "spans": [ + { + "bbox": [ + 62, + 313, + 296, + 356 + ], + "type": "text", + "content": "[5] Alper Canberk, Maksym Bondarenko, Ege Ozguroglu, Ruoshi Liu, and Carl Vondrick. Erasedraw: Learning to insert objects by erasing them from images. arXiv preprint arXiv:2409.00522, 2024. 5" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 62, + 357, + 296, + 402 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 357, + 296, + 402 + ], + "spans": [ + { + "bbox": [ + 62, + 357, + 296, + 402 + ], + "type": "text", + "content": "[6] Duygu Ceylan, Chun-Hao P Huang, and Niloy J Mitra. Pix2video: Video editing using image diffusion. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 23206-23217, 2023. 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 62, + 403, + 296, + 446 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 403, + 296, + 446 + ], + "spans": [ + { + "bbox": [ + 62, + 403, + 296, + 446 + ], + "type": "text", + "content": "[7] Wenhao Chai, Xun Guo, Gaoang Wang, and Yan Lu. Stablevideo: Text-driven consistency-aware diffusion video editing. 
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 23040-23050, 2023. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 62, + 447, + 296, + 490 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 447, + 296, + 490 + ], + "spans": [ + { + "bbox": [ + 62, + 447, + 296, + 490 + ], + "type": "text", + "content": "[8] Keqin Chen, Zhao Zhang, Weili Zeng, Richong Zhang, Feng Zhu, and Rui Zhao. Shikra: Unleashing multimodal llm's referential dialogue magic. arXiv preprint arXiv:2306.15195, 2023. 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 62, + 491, + 296, + 535 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 491, + 296, + 535 + ], + "spans": [ + { + "bbox": [ + 62, + 491, + 296, + 535 + ], + "type": "text", + "content": "[9] Jiaxin Cheng, Tianjun Xiao, and Tong He. Consistent video-to-video transfer using synthetic dataset. In The Twelfth International Conference on Learning Representations, 2024. 5, 8, 1, 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 57, + 536, + 296, + 602 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 536, + 296, + 602 + ], + "spans": [ + { + "bbox": [ + 57, + 536, + 296, + 602 + ], + "type": "text", + "content": "[10] Yuren Cong, Mengmeng Xu, christian simon, Shoufa Chen, Jiawei Ren, Yanping Xie, Juan-Manuel Perez-Rua, Bodo Rosenhahn, Tao Xiang, and Sen He. FLATTEN: optical FLOW-guided ATTENtion for consistent text-to-video editing. In The Twelfth International Conference on Learning Representations, 2024. 
8, 1" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 57, + 603, + 296, + 658 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 603, + 296, + 658 + ], + "spans": [ + { + "bbox": [ + 57, + 603, + 296, + 658 + ], + "type": "text", + "content": "[11] Andong Deng, Tongjia Chen, Shoubin Yu, Taojiannan Yang, Lincoln Spencer, Yapeng Tian, Ajmal Saeed Mian, Mohit Bansal, and Chen Chen. Motion-grounded video reasoning: Understanding and perceiving motion at pixel level. arXiv preprint arXiv:2411.09921, 2024. 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 57, + 658, + 296, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 658, + 296, + 712 + ], + "spans": [ + { + "bbox": [ + 57, + 658, + 296, + 712 + ], + "type": "text", + "content": "[12] Andong Deng, Tongjia Chen, Shoubin Yu, Taojiannan Yang, Lincoln Spencer, Yapeng Tian, Ajmal Saeed Mian, Bansal Mohit, and Chen. Chen. Motion-grounded video reasoning: Understanding and perceiving motion at pixel level. 2024. 5, 6, 2" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 73, + 555, + 712 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 316, + 73, + 554, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 73, + 554, + 95 + ], + "spans": [ + { + "bbox": [ + 316, + 73, + 554, + 95 + ], + "type": "text", + "content": "[13] Lee R Dice. Measures of the amount of ecologic association between species. Ecology, 26(3):297-302, 1945. 6" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 317, + 95, + 555, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 95, + 555, + 139 + ], + "spans": [ + { + "bbox": [ + 317, + 95, + 555, + 139 + ], + "type": "text", + "content": "[14] Henghui Ding, Chang Liu, Shuting He, Xudong Jiang, and Chen Change Loy. 
Mevis: A large-scale benchmark for video segmentation with motion expressions. In ICCV, pages 2694-2703, 2023. 5, 6" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 317, + 140, + 555, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 140, + 555, + 183 + ], + "spans": [ + { + "bbox": [ + 317, + 140, + 555, + 183 + ], + "type": "text", + "content": "[15] Patrick Esser, Johnathan Chiu, Parmida Atighehchian, Jonathan Granskog, and Anastasis Germanidis. Structure and content-guided video synthesis with diffusion models. In CVPR, pages 7346-7356, 2023. 3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 184, + 554, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 184, + 554, + 217 + ], + "spans": [ + { + "bbox": [ + 316, + 184, + 554, + 217 + ], + "type": "text", + "content": "[16] Hao Fei, Shengqiong Wu, Hanwang Zhang, Tat-Seng Chua, and Shuicheng Yan. Vitron: A unified pixel-level vision llm for understanding, generating, segmenting, editing, 2024. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 217, + 554, + 259 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 217, + 554, + 259 + ], + "spans": [ + { + "bbox": [ + 316, + 217, + 554, + 259 + ], + "type": "text", + "content": "[17] Tsu-Jui Fu, Wenze Hu, Xianzhi Du, William Yang Wang, Yinfei Yang, and Zhe Gan. Guiding instruction-based image editing via multimodal large language models. arXiv preprint arXiv:2309.17102, 2023. 2, 3, 5" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 261, + 555, + 303 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 261, + 555, + 303 + ], + "spans": [ + { + "bbox": [ + 316, + 261, + 555, + 303 + ], + "type": "text", + "content": "[18] Yuying Ge, Sijie Zhao, Chen Li, Yixiao Ge, and Ying Shan. Seed-data-edit technical report: A hybrid dataset for instructional image editing. arXiv preprint arXiv:2405.04007, 2024. 
5" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 304, + 555, + 371 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 304, + 555, + 371 + ], + "spans": [ + { + "bbox": [ + 316, + 304, + 555, + 371 + ], + "type": "text", + "content": "[19] Zigang Geng, Binxin Yang, Tiankai Hang, Chen Li, Shuyang Gu, Ting Zhang, Jianmin Bao, Zheng Zhang, Houqiang Li, Han Hu, et al. Instructdiffusion: A generalist modeling interface for vision tasks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12709-12720, 2024. 8, 1, 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 372, + 554, + 404 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 372, + 554, + 404 + ], + "spans": [ + { + "bbox": [ + 316, + 372, + 554, + 404 + ], + "type": "text", + "content": "[20] Michal Geyer, Omer Bar-Tal, Shai Bagon, and Tali Dekel. Tokenflow: Consistent diffusion features for consistent video editing. arXiv preprint arXiv:2307.10373, 2023. 2, 3, 8, 1" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 405, + 555, + 425 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 405, + 555, + 425 + ], + "spans": [ + { + "bbox": [ + 316, + 405, + 555, + 425 + ], + "type": "text", + "content": "[21] gpt 4o. https://openai.com/index/hello-gpt-4o/.2024.6" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 317, + 426, + 555, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 426, + 555, + 480 + ], + "spans": [ + { + "bbox": [ + 317, + 426, + 555, + 480 + ], + "type": "text", + "content": "[22] Yuwei Guo, Ceyuan Yang, Anyi Rao, Zhengyang Liang, Yaohui Wang, Yu Qiao, Maneesh Agrawala, Dahua Lin, and Bo Dai. Animatediff: Animate your personalized text-to-image diffusion models without specific tuning. arXiv preprint arXiv:2307.04725, 2023. 
5, 1" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 317, + 482, + 555, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 482, + 555, + 525 + ], + "spans": [ + { + "bbox": [ + 317, + 482, + 555, + 525 + ], + "type": "text", + "content": "[23] Amir Hertz, Ron Mokady, Jay Tenenbaum, Kfir Aberman, Yael Pritch, and Daniel Cohen-Or. Prompt-to-prompt image editing with cross attention control. arXiv preprint arXiv:2208.01626, 2022.5" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 317, + 526, + 554, + 581 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 526, + 554, + 581 + ], + "spans": [ + { + "bbox": [ + 317, + 526, + 554, + 581 + ], + "type": "text", + "content": "[24] Jonathan Ho, William Chan, Chitwan Saharia, Jay Whang, Ruiqi Gao, Alexey Gritsanko, Diederik P Kingma, Ben Poole, Mohammad Norouzi, David J Fleet, et al. Imagen video: High definition video generation with diffusion models. arXiv preprint arXiv:2210.02303, 2022. 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 317, + 582, + 554, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 582, + 554, + 624 + ], + "spans": [ + { + "bbox": [ + 317, + 582, + 554, + 624 + ], + "type": "text", + "content": "[25] Jonathan Ho, Tim Salimans, Alexey Gritsenko, William Chan, Mohammad Norouzi, and David J Fleet. Video diffusion models. Advances in Neural Information Processing Systems, 2022. 2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 316, + 625, + 554, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 625, + 554, + 669 + ], + "spans": [ + { + "bbox": [ + 316, + 625, + 554, + 669 + ], + "type": "text", + "content": "[26] Wenyi Hong, Ming Ding, Wendi Zheng, Xinghan Liu, and Jie Tang. Cogvideo: Large-scale pretraining for text-to-video generation via transformers. In The Eleventh International Conference on Learning Representations, 2023. 
3" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 316, + 670, + 554, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 670, + 554, + 712 + ], + "spans": [ + { + "bbox": [ + 316, + 670, + 554, + 712 + ], + "type": "text", + "content": "[27] Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yanzhi Li, Shean Wang, Lu Wang, Weizhu Chen, et al. Lora: Low-rank adaptation of large language models. ICLR, 1(2):3, 2022. 1" + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 297, + 713 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 56, + 72, + 297, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 72, + 297, + 117 + ], + "spans": [ + { + "bbox": [ + 56, + 72, + 297, + 117 + ], + "type": "text", + "content": "[28] Jiahao Hu, Tianxiong Zhong, Xuebo Wang, Boyuan Jiang, Xingye Tian, Fei Yang, Pengfei Wan, and Di Zhang. Vivid-10m: A dataset and baseline for versatile and interactive video local editing. arXiv preprint arXiv:2411.15260, 2024. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 118, + 297, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 118, + 297, + 194 + ], + "spans": [ + { + "bbox": [ + 56, + 118, + 297, + 194 + ], + "type": "text", + "content": "[29] Yuzhou Huang, Liangbin Xie, Xintao Wang, Ziyang Yuan, Xiaodong Cun, Yixiao Ge, Jiantao Zhou, Chao Dong, Rui Huang, Ruimao Zhang, et al. Smartedit: Exploring complex instruction-based image editing with multimodal large language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8362-8371, 2024. 
2, 5" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 196, + 296, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 196, + 296, + 262 + ], + "spans": [ + { + "bbox": [ + 56, + 196, + 296, + 262 + ], + "type": "text", + "content": "[30] Ziqi Huang, Yinan He, Jiashuo Yu, Fan Zhang, Chenyang Si, Yuming Jiang, Yuanhan Zhang, Tianxing Wu, Qingyang Jin, Nattapol Chanpaisit, et al. Vbench: Comprehensive benchmark suite for video generative models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 21807-21818, 2024. 6, 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 57, + 263, + 296, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 263, + 296, + 285 + ], + "spans": [ + { + "bbox": [ + 57, + 263, + 296, + 285 + ], + "type": "text", + "content": "[31] Paul Jaccard. The distribution of the flora in the alpine zone. 1. New phytologist, 11(2):37-50, 1912. 6" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 57, + 286, + 296, + 330 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 286, + 296, + 330 + ], + "spans": [ + { + "bbox": [ + 57, + 286, + 296, + 330 + ], + "type": "text", + "content": "[32] Junjie Ke, Qifei Wang, Yilin Wang, Peyman Milanfar, and Feng Yang. Musiq: Multi-scale image quality transformer. In Proceedings of the IEEE/CVF international conference on computer vision, pages 5148-5157, 2021. 6" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 331, + 296, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 331, + 296, + 396 + ], + "spans": [ + { + "bbox": [ + 56, + 331, + 296, + 396 + ], + "type": "text", + "content": "[33] Anna Khoreva, Anna Rohrbach, and Bernt Schiele. Video object segmentation with language referring expressions. 
In Computer Vision-ACCV 2018: 14th Asian Conference on Computer Vision, Perth, Australia, December 2–6, 2018, Revised Selected Papers, Part IV 14, pages 123–141. Springer, 2019. 6" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 399, + 296, + 442 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 399, + 296, + 442 + ], + "spans": [ + { + "bbox": [ + 56, + 399, + 296, + 442 + ], + "type": "text", + "content": "[34] Weijie Kong, Qi Tian, Zijian Zhang, Rox Min, Zuozhuo Dai, Jin Zhou, Jiangfeng Xiong, Xin Li, Bo Wu, Jianwei Zhang, et al. Hunyuanvideo: A systematic framework for large video generative models. arXiv preprint arXiv:2412.03603, 2024. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 57, + 444, + 296, + 486 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 444, + 296, + 486 + ], + "spans": [ + { + "bbox": [ + 57, + 444, + 296, + 486 + ], + "type": "text", + "content": "[35] Max Ku, Cong Wei, Weiming Ren, Harry Yang, and Wenhu Chen. Anyv2v: A tuning-free framework for any video-to-video editing tasks. arXiv preprint arXiv:2403.14468, 2024. 2, 6" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 488, + 296, + 531 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 488, + 296, + 531 + ], + "spans": [ + { + "bbox": [ + 56, + 488, + 296, + 531 + ], + "type": "text", + "content": "[36] Xin Lai, Zhuotao Tian, Yukang Chen, Yanwei Li, Yuhui Yuan, Shu Liu, and Jiaya Jia. Lisa: Reasoning segmentation via large language model. arXiv preprint arXiv:2308.00692, 2023. 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 534, + 296, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 534, + 296, + 588 + ], + "spans": [ + { + "bbox": [ + 56, + 534, + 296, + 588 + ], + "type": "text", + "content": "[37] Xin Lai, Zhuotao Tian, Yukang Chen, Yanwei Li, Yuhui Yuan, Shu Liu, and Jiaya Jia. 
Lisa: Reasoning segmentation via large language model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9579-9589, 2024. 5" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 590, + 296, + 634 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 590, + 296, + 634 + ], + "spans": [ + { + "bbox": [ + 56, + 590, + 296, + 634 + ], + "type": "text", + "content": "[38] Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Yanwei Li, Ziwei Liu, and Chunyuan Li. Llava-onevision: Easy visual task transfer. arXiv preprint arXiv:2408.03326, 2024. 1" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 635, + 296, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 635, + 296, + 678 + ], + "spans": [ + { + "bbox": [ + 56, + 635, + 296, + 678 + ], + "type": "text", + "content": "[39] Jialu Li, Shoubin Yu, Han Lin, Jaemin Cho, Jaehong Yoon, and Mohit Bansal. Training-free guidance in text-to-video generation via multimodal planning and structured noise initialization. arXiv preprint arXiv:2504.08641, 2025. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 57, + 680, + 296, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 680, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 57, + 680, + 296, + 713 + ], + "type": "text", + "content": "[40] Liunian Harold Li, Pengchuan Zhang, Haotian Zhang, Jianwei Yang, Chunyuan Li, Yiwu Zhong, Lijuan Wang, Lu Yuan, Lei Zhang, Jenq-Neng Hwang, et al. 
Grounded language" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 73, + 555, + 713 + ], + "type": "list", + "angle": 0, + "index": 28, + "blocks": [ + { + "bbox": [ + 333, + 73, + 555, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 333, + 73, + 555, + 105 + ], + "spans": [ + { + "bbox": [ + 333, + 73, + 555, + 105 + ], + "type": "text", + "content": "image pre-training. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10965-10975, 2022. 3" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 316, + 107, + 555, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 107, + 555, + 162 + ], + "spans": [ + { + "bbox": [ + 316, + 107, + 555, + 162 + ], + "type": "text", + "content": "[41] Xirui Li, Chao Ma, Xiaokang Yang, and Ming-Hsuan Yang. Vidthome: Video token merging for zero-shot video editing. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7486-7495, 2024. 2, 8, 1" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 163, + 555, + 195 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 163, + 555, + 195 + ], + "spans": [ + { + "bbox": [ + 316, + 163, + 555, + 195 + ], + "type": "text", + "content": "[42] Long Lian, Baifeng Shi, Adam Yala, Trevor Darrell, and Boyi Li. Llm-grounded video diffusion models. arXiv preprint arXiv:2309.17444, 2023. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 197, + 555, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 197, + 555, + 239 + ], + "spans": [ + { + "bbox": [ + 316, + 197, + 555, + 239 + ], + "type": "text", + "content": "[43] Han Lin, Abhay Zala, Jaemin Cho, and Mohit Bansal. Videodirectorgpt: Consistent multi-scene video generation via llm-guided planning. arXiv preprint arXiv:2309.15091, 2023. 
2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 241, + 555, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 241, + 555, + 285 + ], + "spans": [ + { + "bbox": [ + 316, + 241, + 555, + 285 + ], + "type": "text", + "content": "[44] Chang Liu, Henghui Ding, and Xudong Jiang. Gres: Generalized referring expression segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 23592-23601, 2023. 3, 5" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 287, + 554, + 341 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 287, + 554, + 341 + ], + "spans": [ + { + "bbox": [ + 316, + 287, + 554, + 341 + ], + "type": "text", + "content": "[45] Shilong Liu, Zhaoyang Zeng, Tianhe Ren, Feng Li, Hao Zhang, Jie Yang, Chunyuan Li, Jianwei Yang, Hang Su, Jun Zhu, et al. Grounding dino: Marrying dino with grounded pre-training for open-set object detection. arXiv preprint arXiv:2303.05499, 2023. 6" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 343, + 554, + 387 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 343, + 554, + 387 + ], + "spans": [ + { + "bbox": [ + 316, + 343, + 554, + 387 + ], + "type": "text", + "content": "[46] Shaoteng Liu, Tianyu Wang, Jui-Hsien Wang, Qing Liu, Zhifei Zhang, Joon-Young Lee, Yijun Li, Bei Yu, Zhe Lin, Soo Ye Kim, et al. Generative video propagation. arXiv preprint arXiv:2412.19761, 2024. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 388, + 554, + 443 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 388, + 554, + 443 + ], + "spans": [ + { + "bbox": [ + 316, + 388, + 554, + 443 + ], + "type": "text", + "content": "[47] Ziqiao Ma, Jiayi Pan, and Joyce Chai. World-to-words: Grounded open vocabulary acquisition through fast mapping in vision-language models. 
In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 524–544, 2023. 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 445, + 555, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 445, + 555, + 498 + ], + "spans": [ + { + "bbox": [ + 316, + 445, + 555, + 498 + ], + "type": "text", + "content": "[48] Junhua Mao, Jonathan Huang, Alexander Toshev, Oana Camburu, Alan L Yuille, and Kevin Murphy. Generation and comprehension of unambiguous object descriptions. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 11-20, 2016. 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 500, + 554, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 500, + 554, + 533 + ], + "spans": [ + { + "bbox": [ + 316, + 500, + 554, + 533 + ], + "type": "text", + "content": "[49] Kangfu Mei and Vishal Patel. Vidm: Video implicit diffusion models. In Proceedings of the AAAI conference on artificial intelligence, pages 9117-9125, 2023. 3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 317, + 535, + 555, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 535, + 555, + 578 + ], + "spans": [ + { + "bbox": [ + 317, + 535, + 555, + 578 + ], + "type": "text", + "content": "[50] Bo Miao, Mohammed Bennamoun, Yongsheng Gao, Mubarak Shah, and Ajmal Mian. Towards temporally consistent referring video object segmentation. https://arxiv.org/abs/2403.19407, 2024. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 316, + 579, + 555, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 579, + 555, + 633 + ], + "spans": [ + { + "bbox": [ + 316, + 579, + 555, + 633 + ], + "type": "text", + "content": "[51] Zhiliang Peng, Wenhui Wang, Li Dong, Yaru Hao, Shaohan Huang, Shuming Ma, Qixiang Ye, and Furu Wei. 
Grounding multimodal large language models to the world. In The Twelfth International Conference on Learning Representations, 2024. 3" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 316, + 635, + 555, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 635, + 555, + 689 + ], + "spans": [ + { + "bbox": [ + 316, + 635, + 555, + 689 + ], + "type": "text", + "content": "[52] Renjie Pi, Jiahui Gao, Shizhe Diao, Rui Pan, Hanze Dong, Jipeng Zhang, Lewei Yao, Jianhua Han, Hang Xu, Lingpeng Kong, et al. Detgpt: Detect what you need via reasoning. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 14172-14189, 2023. 3" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 316, + 691, + 554, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 691, + 554, + 713 + ], + "spans": [ + { + "bbox": [ + 316, + 691, + 554, + 713 + ], + "type": "text", + "content": "[53] Chenyang Qi, Xiaodong Cun, Yong Zhang, Chenyang Lei, Xintao Wang, Ying Shan, and Qifeng Chen. Fatezero: Fusing" + } + ] + } + ], + "index": 27 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 296, + 713 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 77, + 72, + 296, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 72, + 296, + 95 + ], + "spans": [ + { + "bbox": [ + 77, + 72, + 296, + 95 + ], + "type": "text", + "content": " attentions for zero-shot text-based video editing. In ICCV, pages 15932-15942, 2023. 
2, 3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 96, + 296, + 151 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 96, + 296, + 151 + ], + "spans": [ + { + "bbox": [ + 56, + 96, + 296, + 151 + ], + "type": "text", + "content": "[54] Bosheng Qin, Juncheng Li, Siliang Tang, Tat-Seng Chua, and Yueting Zhuang. Instructvid2vid: Controllable video editing with natural language instructions. In 2024 IEEE International Conference on Multimedia and Expo (ICME), pages 1-6. IEEE, 2024. 2, 3, 5" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 152, + 296, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 152, + 296, + 217 + ], + "spans": [ + { + "bbox": [ + 56, + 152, + 296, + 217 + ], + "type": "text", + "content": "[55] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PmLR, 2021. 6" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 220, + 296, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 220, + 296, + 285 + ], + "spans": [ + { + "bbox": [ + 56, + 220, + 296, + 285 + ], + "type": "text", + "content": "[56] Hanoona Rasheed, Muhammad Maaz, Sahal Shaji, Abdelrahman Shaker, Salman Khan, Hisham Cholakkal, Rao M Anwer, Erix Xing, Ming-Hsuan Yang, and Fahad S Khan. Glamm: Pixel grounding large multimodal model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024. 
3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 286, + 296, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 286, + 296, + 342 + ], + "spans": [ + { + "bbox": [ + 56, + 286, + 296, + 342 + ], + "type": "text", + "content": "[57] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10684-10695, 2022. 1" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 343, + 296, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 343, + 296, + 407 + ], + "spans": [ + { + "bbox": [ + 56, + 343, + 296, + 407 + ], + "type": "text", + "content": "[58] Seonguk Seo, Joon-Young Lee, and Bohyung Han. Urvos: Unified referring video object segmentation network with a large-scale benchmark. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23–28, 2020, Proceedings, Part XV 16, pages 208–223. Springer, 2020. 6" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 410, + 296, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 410, + 296, + 453 + ], + "spans": [ + { + "bbox": [ + 56, + 410, + 296, + 453 + ], + "type": "text", + "content": "[59] Shangkun Sun, Xiaoyu Liang, Songlin Fan, Wenxu Gao, and Wei Gao. Ve-bench: Subjective-aligned benchmark suite for text-driven video editing quality assessment. In Proceedings of the AAAI Conference on Artificial Intelligence, 2025. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 456, + 296, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 456, + 296, + 510 + ], + "spans": [ + { + "bbox": [ + 56, + 456, + 296, + 510 + ], + "type": "text", + "content": "[60] Carles Ventura, Miriam Bellver, Andreu Girbau, Amaia Salvador, Ferran Marques, and Xavier Giro-i Nieto. 
Rvos: End-to-end recurrent network for video object segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5277-5286, 2019. 5" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 512, + 296, + 586 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 512, + 296, + 586 + ], + "spans": [ + { + "bbox": [ + 56, + 512, + 296, + 586 + ], + "type": "text", + "content": "[61] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Yang Fan, Kai Dang, Mengfei Du, Xuancheng Ren, Rui Men, Dayiheng Liu, Chang Zhou, Jingren Zhou, and Junyang Lin. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024. 5, 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 590, + 296, + 644 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 590, + 296, + 644 + ], + "spans": [ + { + "bbox": [ + 56, + 590, + 296, + 644 + ], + "type": "text", + "content": "[62] Xiang Wang, Hangjie Yuan, Shiwei Zhang, Dayou Chen, Jiuniu Wang, Yingya Zhang, Yujun Shen, Deli Zhao, and Jingren Zhou. Videocomposer: Compositional video synthesis with motion controllability. Advances in Neural Information Processing Systems, 36, 2024. 2, 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 646, + 296, + 688 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 646, + 296, + 688 + ], + "spans": [ + { + "bbox": [ + 56, + 646, + 296, + 688 + ], + "type": "text", + "content": "[63] Zhenyu Wang, Aoxue Li, Zhenguo Li, and Xihui Liu. Genartist: Multimodal IIm as an agent for unified image generation and editing. arXiv preprint arXiv:2407.05600, 2024. 
2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 691, + 296, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 691, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 56, + 691, + 296, + 713 + ], + "type": "text", + "content": "[64] Cong Wei, Zheyang Xiong, Weiming Ren, Xinrun Du, Ge Zhang, and Wenhu Chen. Omniedit: Building image edit" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 72, + 555, + 712 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 333, + 72, + 554, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 333, + 72, + 554, + 95 + ], + "spans": [ + { + "bbox": [ + 333, + 72, + 554, + 95 + ], + "type": "text", + "content": "ing generalist models through specialist supervision. arXiv preprint arXiv:2411.07199, 2024. 5, 2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 316, + 96, + 555, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 96, + 555, + 150 + ], + "spans": [ + { + "bbox": [ + 316, + 96, + 555, + 150 + ], + "type": "text", + "content": "[65] Chenyun Wu, Zhe Lin, Scott Cohen, Trung Bui, and Subhransu Maji. Phrasecut: Language-based image segmentation in the wild. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10216-10225, 2020. 5" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 316, + 152, + 555, + 218 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 152, + 555, + 218 + ], + "spans": [ + { + "bbox": [ + 316, + 152, + 555, + 218 + ], + "type": "text", + "content": "[66] Jianzong Wu, Xiangtai Li, Chenyang Si, Shangchen Zhou, Jingkang Yang, Jiangning Zhang, Yining Li, Kai Chen, Yunhai Tong, Ziwei Liu, et al. Towards language-driven video inpainting via multimodal large language models. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12501-12511, 2024. 2, 3, 5, 8, 1" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 220, + 555, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 220, + 555, + 274 + ], + "spans": [ + { + "bbox": [ + 316, + 220, + 555, + 274 + ], + "type": "text", + "content": "[67] Jay Zhangjie Wu, Yixiao Ge, Xintao Wang, Stan Weixian Lei, Yuchao Gu, Yufei Shi, Wynne Hsu, Ying Shan, Xiaohu Qie, and Mike Zheng Shou. Tune-a-video: One-shot tuning of image diffusion models for text-to-video generation. In ICCV, pages 7623-7633, 2023. 3, 5" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 276, + 555, + 320 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 276, + 555, + 320 + ], + "spans": [ + { + "bbox": [ + 316, + 276, + 555, + 320 + ], + "type": "text", + "content": "[68] Jay Zhangjie Wu, Xiuyu Li, Difei Gao, Zhen Dong, Jinbin Bai, Aishani Singh, Xiaoyu Xiang, Youzeng Li, Zuwei Huang, Yuanxi Sun, et al. Cvpr 2023 text guided video editing competition. arXiv preprint arXiv:2310.16003, 2023. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 322, + 555, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 322, + 555, + 376 + ], + "spans": [ + { + "bbox": [ + 316, + 322, + 555, + 376 + ], + "type": "text", + "content": "[69] Zhuofan Xia, Dongchen Han, Yizeng Han, Xuran Pan, Shiji Song, and Gao Huang. Gsva: Generalized segmentation via multimodal large language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024. 
3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 378, + 555, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 378, + 555, + 422 + ], + "spans": [ + { + "bbox": [ + 316, + 378, + 555, + 422 + ], + "type": "text", + "content": "[70] Zhen Xing, Qi Dai, Zihao Zhang, Hui Zhang, Han Hu, Zuxuan Wu, and Yu-Gang Jiang. Vidiff: Translating videos via multi-modal instructions with diffusion models. arXiv preprint arXiv:2311.18837, 2023. 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 423, + 555, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 423, + 555, + 456 + ], + "spans": [ + { + "bbox": [ + 316, + 423, + 555, + 456 + ], + "type": "text", + "content": "[71] Wilson Yan, Yunzhi Zhang, Pieter Abbeel, and Aravind Srinivas. Videogpt: Video generation using vq-vae and transformers. arXiv preprint arXiv:2104.10157, 2021. 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 458, + 555, + 611 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 458, + 555, + 611 + ], + "spans": [ + { + "bbox": [ + 316, + 458, + 555, + 611 + ], + "type": "text", + "content": "[72] An Yang, Baosong Yang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Zhou, Chengpeng Li, Chengyuan Li, Dayiheng Liu, Fei Huang, Guanting Dong, Haoran Wei, Huan Lin, Jialong Tang, Jialin Wang, Jian Yang, Jianhong Tu, Jianwei Zhang, Jianxin Ma, Jin Xu, Jingren Zhou, Jinze Bai, Jinzheng He, Junyang Lin, Kai Dang, Keming Lu, Keqin Chen, Kexin Yang, Mei Li, Mingfeng Xue, Na Ni, Pei Zhang, Peng Wang, Ru Peng, Rui Men, Ruize Gao, Runji Lin, Shijie Wang, Shuai Bai, Sinan Tan, Tianhang Zhu, Tianhao Li, Tianyu Liu, Wenbin Ge, Xiaodong Deng, Xiaohuan Zhou, Xingzhang Ren, Xinyu Zhang, Xipin Wei, Xuancheng Ren, Yang Fan, Yang Yao, Yichang Zhang, Yu Wan, Yunfei Chu, Yuqiong Liu, Zeyu Cui, Zhenru Zhang, and Zhihao Fan. Qwen2 technical report. arXiv preprint arXiv:2407.10671, 2024. 
5" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 613, + 555, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 613, + 555, + 656 + ], + "spans": [ + { + "bbox": [ + 316, + 613, + 555, + 656 + ], + "type": "text", + "content": "[73] An Yang, Baosong Yang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Zhou, Chengpeng Li, Chengyuan Li, Dayiheng Liu, Fei Huang, et al. Qwen2 technical report. arXiv preprint arXiv:2407.10671, 2024. 1" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 658, + 555, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 658, + 555, + 712 + ], + "spans": [ + { + "bbox": [ + 316, + 658, + 555, + 712 + ], + "type": "text", + "content": "[74] Zhuoyi Yang, Jiayan Teng, Wendi Zheng, Ming Ding, Shiyu Huang, Jiazheng Xu, Yuanming Yang, Wenyi Hong, Xiaohan Zhang, Guanyu Feng, et al. Cogvideox: Text-to-video diffusion models with an expert transformer. arXiv preprint arXiv:2408.06072, 2024. 2" + } + ] + } + ], + "index": 23 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 73, + 296, + 711 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 56, + 73, + 296, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 73, + 296, + 106 + ], + "spans": [ + { + "bbox": [ + 56, + 73, + 296, + 106 + ], + "type": "text", + "content": "[75] Jaehong Yoon, Shoubin Yu, and Mohit Bansal. Raccoon: Remove, add, and change video content with auto-generated narratives. arXiv preprint arXiv:2405.18406, 2024. 
2, 5, 6" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 107, + 296, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 107, + 296, + 161 + ], + "spans": [ + { + "bbox": [ + 56, + 107, + 296, + 161 + ], + "type": "text", + "content": "[76] Haoxuan You, Haotian Zhang, Zhe Gan, Xianzhi Du, Bowen Zhang, Zirui Wang, Liangliang Cao, Shih-Fu Chang, and Yinfei Yang. Ferret: Refer and ground anything anywhere at any granularity. In The Twelfth International Conference on Learning Representations, 2023. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 162, + 296, + 225 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 162, + 296, + 225 + ], + "spans": [ + { + "bbox": [ + 56, + 162, + 296, + 225 + ], + "type": "text", + "content": "[77] Licheng Yu, Patrick Poirson, Shan Yang, Alexander C Berg, and Tamara L Berg. Modeling context in referring expressions. In Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part II 14, pages 69-85. Springer, 2016. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 228, + 296, + 292 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 228, + 296, + 292 + ], + "spans": [ + { + "bbox": [ + 56, + 228, + 296, + 292 + ], + "type": "text", + "content": "[78] Shoubin Yu, Jacob Zhiyuan Fang, Jian Zheng, Gunnar Sigurdsson, Vicente Ordonez, Robinson Piramuthu, and Mohit Bansal. Zero-shot controllable image-to-video animation via motion decomposition. In Proceedings of the 32nd ACM International Conference on Multimedia, pages 3332-3341, 2024. 
2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 293, + 296, + 337 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 293, + 296, + 337 + ], + "spans": [ + { + "bbox": [ + 56, + 293, + 296, + 337 + ], + "type": "text", + "content": "[79] Tao Yu, Runseng Feng, Ruoyu Feng, Jinming Liu, Xin Jin, Wenjun Zeng, and Zhibo Chen. Inpaint anything: Segment anything meets image inpainting. arXiv preprint arXiv:2304.06790, 2023. 2, 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 338, + 296, + 370 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 338, + 296, + 370 + ], + "spans": [ + { + "bbox": [ + 56, + 338, + 296, + 370 + ], + "type": "text", + "content": "[80] Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid loss for language image pre-training. In ICCV, pages 11975-11986, 2023. 1" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 372, + 296, + 424 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 372, + 296, + 424 + ], + "spans": [ + { + "bbox": [ + 56, + 372, + 296, + 424 + ], + "type": "text", + "content": "[81] Hao Zhang, Hongyang Li, Feng Li, Tianhe Ren, Xueyan Zou, Shilong Liu, Shijia Huang, Jianfeng Gao, Lei Zhang, Chunyuan Li, et al. Llava-grounding: Grounded visual chat with large multimodal models. arXiv preprint arXiv:2312.02949, 2023. 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 426, + 296, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 426, + 296, + 480 + ], + "spans": [ + { + "bbox": [ + 56, + 426, + 296, + 480 + ], + "type": "text", + "content": "[82] Haotian Zhang, Haoxuan You, Philipp Dufter, Bowen Zhang, Chen Chen, Hong-You Chen, Tsu-Jui Fu, William Yang Wang, Shih-Fu Chang, Zhe Gan, et al. Ferret-v2: An improved baseline for referring and grounding with large language models. arXiv preprint arXiv:2404.07973, 2024. 
3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 482, + 296, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 482, + 296, + 525 + ], + "spans": [ + { + "bbox": [ + 56, + 482, + 296, + 525 + ], + "type": "text", + "content": "[83] Kai Zhang, Lingbo Mo, Wenhu Chen, Huan Sun, and Yu Su. Magicbrush: A manually annotated dataset for instruction-guided image editing. Advances in Neural Information Processing Systems, 36, 2024. 4, 5, 1" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 526, + 296, + 580 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 526, + 296, + 580 + ], + "spans": [ + { + "bbox": [ + 56, + 526, + 296, + 580 + ], + "type": "text", + "content": "[84] Yichi Zhang, Ziqiao Ma, Xiaofeng Gao, Suhaila Shakiah, Qiaozi Gao, and Joyce Chai. Groundhog: Grounding large language models to holistic segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 582, + 296, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 582, + 296, + 635 + ], + "spans": [ + { + "bbox": [ + 56, + 582, + 296, + 635 + ], + "type": "text", + "content": "[85] Zhixing Zhang, Bichen Wu, Xiaoyan Wang, Yaqiao Luo, Luxin Zhang, Yinan Zhao, Peter Vajda, Dimitris Metaxas, and Licheng Yu. Avid: Any-length video inpainting with diffusion model. arXiv preprint arXiv:2312.03816, 2023. 2, 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 636, + 296, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 636, + 296, + 669 + ], + "spans": [ + { + "bbox": [ + 56, + 636, + 296, + 669 + ], + "type": "text", + "content": "[86] Zhenghao Zhang, Zuozhuo Dai, Long Qin, and Weizhi Wang. Effived: Efficient video editing via text-instruction diffusion models. arXiv preprint arXiv:2403.11568, 2024. 
2, 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 670, + 296, + 711 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 670, + 296, + 711 + ], + "spans": [ + { + "bbox": [ + 56, + 670, + 296, + 711 + ], + "type": "text", + "content": "[87] Yang Zhao, Zhijie Lin, Daquan Zhou, Zilong Huang, Jiashi Feng, and Bingyi Kang. Bubogpt: Enabling visual grounding in multi-modal llms. arXiv preprint arXiv:2307.08581, 2023. 3" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 153, + 70, + 457, + 108 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 153, + 70, + 457, + 108 + ], + "spans": [ + { + "bbox": [ + 153, + 70, + 457, + 108 + ], + "type": "text", + "content": "VEGGIE: Instructional Editing and Reasoning Video Concepts with Grounded Generation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 233, + 115, + 376, + 131 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 115, + 376, + 131 + ], + "spans": [ + { + "bbox": [ + 233, + 115, + 376, + 131 + ], + "type": "text", + "content": "Supplementary Material" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 144, + 121, + 159 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 144, + 121, + 159 + ], + "spans": [ + { + "bbox": [ + 55, + 144, + 121, + 159 + ], + "type": "text", + "content": "6. 
Appendix" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 166, + 234, + 178 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 166, + 234, + 178 + ], + "spans": [ + { + "bbox": [ + 55, + 166, + 234, + 178 + ], + "type": "text", + "content": "In this Appendix, we provide extra details on" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 181, + 295, + 274 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 55, + 181, + 295, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 181, + 295, + 202 + ], + "spans": [ + { + "bbox": [ + 55, + 181, + 295, + 202 + ], + "type": "text", + "content": "- Implementation details of VEGGIE training, and evaluation and baseline evaluations." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 205, + 295, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 205, + 295, + 239 + ], + "spans": [ + { + "bbox": [ + 56, + 205, + 295, + 239 + ], + "type": "text", + "content": "- Extra details on our data generation pipeline, including each module's details, prompts for each promptable module, data filtering, and visualization." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 241, + 294, + 263 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 241, + 294, + 263 + ], + "spans": [ + { + "bbox": [ + 56, + 241, + 294, + 263 + ], + "type": "text", + "content": "- Extra visualizations for each task and the comparison with the other 6 strong baseline models." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 265, + 221, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 265, + 221, + 274 + ], + "spans": [ + { + "bbox": [ + 56, + 265, + 221, + 274 + ], + "type": "text", + "content": "- Limitation and future work discussion." 
+ } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 55, + 286, + 188, + 300 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 286, + 188, + 300 + ], + "spans": [ + { + "bbox": [ + 55, + 286, + 188, + 300 + ], + "type": "text", + "content": "6.1. Implementation Details" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 304, + 296, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 304, + 296, + 483 + ], + "spans": [ + { + "bbox": [ + 55, + 304, + 296, + 483 + ], + "type": "text", + "content": "Model Architecture. Our MLLM is initialized with LLaVA-OneVision-7B (LLaVA-OV) [38]. It is a strong MLLM consisting of Qwen2 [73] LLM with 32K context window, SigLIP [80] visual encoder, and a 2-layer-MLP projector. LLaVA-OV can handle diverse visual-language tasks (including interleaved-frame, video). It provides a good starting point for our VEGGIE to understand complex user instructions and can respond with multiple frame-wise implicit planning thanks to its long context window. Our video diffusion model is initialized from the instructional image editing model, MagicBrush [83]. We further inflated 2D convolution layers to 3D form and inserted temporal attention layers followingAnimateDiff [22] to adapt videos. Our alignment network is a single-layer MLP. We set 32 grounded task tokens for each frame." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 55, + 485, + 296, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 485, + 296, + 664 + ], + "spans": [ + { + "bbox": [ + 55, + 485, + 296, + 664 + ], + "type": "text", + "content": "Training Details. Our MLLM is initialized with LLaVA-OneVision-7B (LLaVA-OV) [38]. Our VidDM is initialized from the instructional image editing model, MagicBrush [83] with Stable Diffusion v1.5 backbone [57]. 
We further inflated 2D convolution layers with temporal attention layers, followingAnimateDiff [22] to adapt videos. Our VEGGIE adopts a 2-stage curriculum training strategy (Sec. 3.2). In the first stage, we fully fine-tune the 2D convolution layers in the UNet, the alignment network, and the task query tokens in the MLLM on image data, with 862M trainable parameters. In the second stage, we train all 3 dimensions in the UNet, the alignment network, the task query tokens, and a LoRA in the MLLM, leading to 1.3B trainable parameters. Both stages are trained end-to-end with only a diffusion loss. More details are in the Appendix." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 55, + 665, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 665, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 665, + 296, + 714 + ], + "type": "text", + "content": "We keep the VAE encoder and decoder frozen during the entire training process. In the first stage, we keep the MLLM (including visual encoder, MLP projector, and LLM) frozen, and fully fine-tune learnable grounded task queries," + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 145, + 555, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 145, + 555, + 312 + ], + "spans": [ + { + "bbox": [ + 313, + 145, + 555, + 312 + ], + "type": "text", + "content": "alignment network, and diffusion model, leading to around 800M training parameters. We set " + }, + { + "bbox": [ + 313, + 145, + 555, + 312 + ], + "type": "inline_equation", + "content": "1e^{-4}" + }, + { + "bbox": [ + 313, + 145, + 555, + 312 + ], + "type": "text", + "content": " learning rate, and 96 batch size on each GPU. We use 16 A100 GPUs for the first stage of fine-tuning with 25K steps. In the second stage, we insert LoRA [27] modules into the LLM backbone, and inflate diffusion models by inserting extra temporal layers as inAnimateDiff [22]. 
We fine-tune LoRA, alignment network, learnable grounded task query tokens, and the diffusion model, leading to around 1.3B trainable parameters. We set " + }, + { + "bbox": [ + 313, + 145, + 555, + 312 + ], + "type": "inline_equation", + "content": "5e^{-4}" + }, + { + "bbox": [ + 313, + 145, + 555, + 312 + ], + "type": "text", + "content": " learning rate, and 1 batch size with 8 gradient accumulation steps on 32 A100 GPUs. For LoRA, we set lora rank 64, lora alpha 16, and lora dropout 0.05. We train the second stage video model 2.5K step with 8 uniformly sampled frames." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 316, + 556, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 316, + 556, + 399 + ], + "spans": [ + { + "bbox": [ + 313, + 316, + 556, + 399 + ], + "type": "text", + "content": "Evaluation and Baseline Details. We primarily compare our model with strong instructional editing models [9, 19, 66]. Additionally, we include non-instructional editing models [10, 20, 41] for completeness, although these are not fair baselines since they are not end-to-end and rely on additional conditions, such as depth maps or intermediate captions." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 403, + 555, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 403, + 555, + 475 + ], + "spans": [ + { + "bbox": [ + 313, + 403, + 555, + 475 + ], + "type": "text", + "content": "We randomly sample 3 seeds for both our method and baseline methods. 
In our experiments, we use different classifier-free guidance scores (" + }, + { + "bbox": [ + 313, + 403, + 555, + 475 + ], + "type": "inline_equation", + "content": "g_{T}" + }, + { + "bbox": [ + 313, + 403, + 555, + 475 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 403, + 555, + 475 + ], + "type": "inline_equation", + "content": "g_{V}" + }, + { + "bbox": [ + 313, + 403, + 555, + 475 + ], + "type": "text", + "content": " in Sec. 3.2) for different skills. Specifically, we set " + }, + { + "bbox": [ + 313, + 403, + 555, + 475 + ], + "type": "inline_equation", + "content": "g_{T} = 14.5" + }, + { + "bbox": [ + 313, + 403, + 555, + 475 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 403, + 555, + 475 + ], + "type": "inline_equation", + "content": "g_{V} = 1.5" + }, + { + "bbox": [ + 313, + 403, + 555, + 475 + ], + "type": "text", + "content": " for grounding and reasoning segmentation, while for other editing skills, we use " + }, + { + "bbox": [ + 313, + 403, + 555, + 475 + ], + "type": "inline_equation", + "content": "g_{T} = 10.5" + }, + { + "bbox": [ + 313, + 403, + 555, + 475 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 403, + 555, + 475 + ], + "type": "inline_equation", + "content": "g_{V} = 2.0" + }, + { + "bbox": [ + 313, + 403, + 555, + 475 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 477, + 555, + 537 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 477, + 555, + 537 + ], + "spans": [ + { + "bbox": [ + 313, + 477, + 555, + 537 + ], + "type": "text", + "content": "For baseline methods, we adopt their default settings (e.g., diffusion steps, guidance scores, frame numbers) as provided in their GitHub repositories. To ensure fair evaluation, we sample the same eight frames from each method's video editing results." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 540, + 556, + 612 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 540, + 556, + 612 + ], + "spans": [ + { + "bbox": [ + 313, + 540, + 556, + 612 + ], + "type": "text", + "content": "For alignment and smoothness metrics, we use CLIP-B/32 to measure text-image and image-image similarity, averaging across all frames to obtain video-level scores. For detection metrics, we use GroundingDINO (Swin-T OGC) to detect target objects frame by frame, averaging confidence scores across all frames for the final video-level metric." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 614, + 556, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 614, + 556, + 651 + ], + "spans": [ + { + "bbox": [ + 313, + 614, + 556, + 651 + ], + "type": "text", + "content": "For the removal task, where fewer detected objects and lower alignment with the original text prompt are desired, we compute alignment and detection metrics as " + }, + { + "bbox": [ + 313, + 614, + 556, + 651 + ], + "type": "inline_equation", + "content": "1 -" + }, + { + "bbox": [ + 313, + 614, + 556, + 651 + ], + "type": "text", + "content": " value." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 654, + 556, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 654, + 556, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 654, + 556, + 714 + ], + "type": "text", + "content": "We compare the model judged best for each video sample. The agreement between human and MLLM judgments is 0.74, whereas the agreement between human and CLIP is only 0.45. We conducted 5 times of the MLLM evaluation and took an average." 
+ } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 56, + 70, + 294, + 184 + ], + "blocks": [ + { + "bbox": [ + 56, + 70, + 294, + 184 + ], + "lines": [ + { + "bbox": [ + 56, + 70, + 294, + 184 + ], + "spans": [ + { + "bbox": [ + 56, + 70, + 294, + 184 + ], + "type": "table", + "html": "
MethodsGroundingReasoning
JFJ&FJFJ&F
Segmentation Models
HTR [50]47.1147.6047.3520.0128.0224.01
VideoLISA [1]53.2354.3753.8038.4839.2038.84
MoRA [12]57.7353.6355.6838.9237.4840.36
Generative Editing Models
InstructDiff [19]19.8812.8116.3514.028.0711.05
InsV2V [9]13.8917.3715.6316.8910.4513.67
VEGGIE (Ours)37.7421.8329.7922.5315.9719.25
", + "image_path": "efd4caf16a889c87b28d59462f54107cedcabbbf03aacab091df80dc1de2ac82.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 192, + 296, + 226 + ], + "lines": [ + { + "bbox": [ + 55, + 192, + 296, + 226 + ], + "spans": [ + { + "bbox": [ + 55, + 192, + 296, + 226 + ], + "type": "text", + "content": "Table 4. Comparison of video concept grounding and reasoning segmentation tasks with other instructional generative models and expert segmentation models." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 55, + 251, + 187, + 264 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 251, + 187, + 264 + ], + "spans": [ + { + "bbox": [ + 55, + 251, + 187, + 264 + ], + "type": "text", + "content": "6.2. Data Collection Details" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 272, + 296, + 320 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 272, + 296, + 320 + ], + "spans": [ + { + "bbox": [ + 55, + 272, + 296, + 320 + ], + "type": "text", + "content": "As mentioned in the earlier Sec. 3.3, beyond collecting existing data, we proposed a novel data synthesis pipeline to generate instructional video data by animating images in the instructional image dataset." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 323, + 296, + 359 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 323, + 296, + 359 + ], + "spans": [ + { + "bbox": [ + 55, + 323, + 296, + 359 + ], + "type": "text", + "content": "Specifically, we first select images from Omni-Edit [64], an instructional image editing dataset with carefully designed tasks/skills." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 54, + 361, + 296, + 470 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 361, + 296, + 470 + ], + "spans": [ + { + "bbox": [ + 54, + 361, + 296, + 470 + ], + "type": "text", + "content": "We first use QWen2-VL [61] to caption the original image and give an animation prompt to animate the image via CogVideX1.5-I2V [74]. Please refer Tab. 5 and Tab. 6 to our prompt for caption and animation. After getting the animated video, we utilize AnyV2V [35] to edit the video based on the reference image (edited image from image dataset). The reference image gives a strong prior to maintaining the image dataset's high-quality edit and thus transfer it to the video via the video editing model." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 473, + 297, + 558 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 473, + 297, + 558 + ], + "spans": [ + { + "bbox": [ + 55, + 473, + 297, + 558 + ], + "type": "text", + "content": "Next, we filter out videos by evaluating VBenchmark metrics [30], including aesthetic quality, motion smoothness, image quality, subject consistency, and background consistency. We set thresholds at 0.6 for aesthetic quality, 65 for imaging quality, 0.9 for motion smoothness, subject consistency, and background consistency. We provide our generated data visualization in Fig. 9." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 574, + 259, + 586 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 574, + 259, + 586 + ], + "spans": [ + { + "bbox": [ + 55, + 574, + 259, + 586 + ], + "type": "text", + "content": "6.3. 
More Quantative Results & Discussion" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 594, + 296, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 594, + 296, + 715 + ], + "spans": [ + { + "bbox": [ + 55, + 594, + 296, + 715 + ], + "type": "text", + "content": "Video Concept Grounding & Reasoning Segmentation We include additional results on video concept grounding and reasoning segmentation in Tab. 4. VEGGIE outperforms the diffusion-based baseline by a significant margin, showcasing its superior ability to accurately locate fine-grained object references and handle complex reasoning tasks. We hypothesize that through grounded generation, VEGGIE demonstrates remarkable precision in concept editing. For example, as shown in Fig. 11 in the Appendix, VEGGIE can remove the woman without altering the nearby girl." + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 335, + 72, + 535, + 220 + ], + "blocks": [ + { + "bbox": [ + 335, + 72, + 535, + 220 + ], + "lines": [ + { + "bbox": [ + 335, + 72, + 535, + 220 + ], + "spans": [ + { + "bbox": [ + 335, + 72, + 535, + 220 + ], + "type": "image", + "image_path": "f3c07174590d1a47445555b05acfd26b07ef6ca5174fecceef5bc55c8bdd1138.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 228, + 555, + 251 + ], + "lines": [ + { + "bbox": [ + 313, + 228, + 555, + 251 + ], + "spans": [ + { + "bbox": [ + 313, + 228, + 555, + 251 + ], + "type": "text", + "content": "Figure 8. t-SNE Visualization of different task query distribution. Different colors represent different tasks/skills. Best view in color." 
+ } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 274, + 476, + 286 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 274, + 476, + 286 + ], + "spans": [ + { + "bbox": [ + 313, + 274, + 476, + 286 + ], + "type": "text", + "content": "6.4. Limitation and Future Works" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 292, + 555, + 412 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 292, + 555, + 412 + ], + "spans": [ + { + "bbox": [ + 313, + 292, + 555, + 412 + ], + "type": "text", + "content": "Our current method, VEGGIE, is built upon Stable-Diffusion 1.5, which inevitably constrains its editing quality compared to cutting-edge video generation models that rely on DiT or flow-based architectures. In addition, the video outputs we produce are relatively short, lagging behind some recent state-of-the-art methods in terms of length and temporal consistency. Furthermore, we observe increased editing artifacts when incorporating large amounts of grounding data, suggesting that multi-task data mixture strategies play a key role in maintaining high-quality edits." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 413, + 556, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 413, + 556, + 544 + ], + "spans": [ + { + "bbox": [ + 313, + 413, + 556, + 544 + ], + "type": "text", + "content": "Despite these limitations, our results demonstrate promising directions for improvement in terms of model design, data curation, and evaluation. Future work could explore integrating more advanced base architectures (e.g., DiT [34, 74] or flow-based models), extending the maximum video duration, developing more systematic data [28] with more advanced method [46] and carefully designed mixture strategies to balance fidelity and flexibility, and conducting scalable training. 
We hope our findings will inspire further research into these directions, pushing the boundaries of instructional video editing performance." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 545, + 557, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 545, + 557, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 545, + 557, + 713 + ], + "type": "text", + "content": "Task Query Visualization & Analysis via t-SNE. To analyze task/skill correlations, we project their grounded queries into lower-dimensional spaces using PCA and t-SNE. As shown in Fig. 8, distinct clusters form for each category (e.g., Addition), indicating effective differentiation by the model. Reasoning and Grounding appear together on the right. It may be because they both require cognitive/semantic understanding or logical reference. Color, Env, and Change clusters are closer to each other, indicating that the model views them as similar operations focusing on changing different visual attributes. Style lies in the lower-left region but remains relatively close to Color, Env, and Change. This proximity may reflect that \"stylization\" is conceptually similar to these visual attribute tasks, although it targets different" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 57, + 154, + 553, + 293 + ], + "blocks": [ + { + "bbox": [ + 219, + 137, + 391, + 148 + ], + "lines": [ + { + "bbox": [ + 219, + 137, + 391, + 148 + ], + "spans": [ + { + "bbox": [ + 219, + 137, + 391, + 148 + ], + "type": "text", + "content": "Table 5. Qwen2-VL prompt for Image caption." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 57, + 154, + 553, + 293 + ], + "lines": [ + { + "bbox": [ + 57, + 154, + 553, + 293 + ], + "spans": [ + { + "bbox": [ + 57, + 154, + 553, + 293 + ], + "type": "table", + "html": "
Please describe this image shortly, try to capture main details in the image.\nHere are some examples of image caption styles:\n1. A Couple In A Public Display Of Affection\n2. A kitten turning its head on a wooden floor\n3. An Old Man Doing Exercises For The Body And Mind\n4. Man Walking\nNow, please describe the given image briefly in one sentence, please do not say something like 'The image shows...' or 'The image depicts...'
", + "image_path": "06c86a484814fdc576446edcb73b36614248174c9f2efbcb21db237e6b677342.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 318, + 297, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 318, + 297, + 403 + ], + "spans": [ + { + "bbox": [ + 55, + 318, + 297, + 403 + ], + "type": "text", + "content": "transformations. Removal stands apart on the top, especially distant from Addition, indicating the model perceives them as distinct rather than inverse operations. In contrast, Addition lies closer to tasks like Reasoning and Grounding. It suggests that the act of adding elements may rely on similar semantic or referential processes (e.g., deciding what to add and how to reference the newly added element)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 410, + 170, + 422 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 410, + 170, + 422 + ], + "spans": [ + { + "bbox": [ + 55, + 410, + 170, + 422 + ], + "type": "text", + "content": "6.5. Extra Visualization" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 427, + 249, + 439 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 427, + 249, + 439 + ], + "spans": [ + { + "bbox": [ + 55, + 427, + 249, + 439 + ], + "type": "text", + "content": "We provide extra visualization in Figs. 
10 to 16" + } + ] + } + ], + "index": 4 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 129, + 93, + 481, + 200 + ], + "blocks": [ + { + "bbox": [ + 129, + 93, + 481, + 200 + ], + "lines": [ + { + "bbox": [ + 129, + 93, + 481, + 200 + ], + "spans": [ + { + "bbox": [ + 129, + 93, + 481, + 200 + ], + "type": "image", + "image_path": "8e960bad5a16bce703b5fcb70a569c7d16770762185ba1588063f1036611943d.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 129, + 211, + 481, + 318 + ], + "blocks": [ + { + "bbox": [ + 129, + 211, + 481, + 318 + ], + "lines": [ + { + "bbox": [ + 129, + 211, + 481, + 318 + ], + "spans": [ + { + "bbox": [ + 129, + 211, + 481, + 318 + ], + "type": "image", + "image_path": "735c86bcdb4d0957b6364d6294b5509271fe312247cae910fd452e2351e8cbed.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 129, + 323, + 481, + 434 + ], + "blocks": [ + { + "bbox": [ + 129, + 323, + 481, + 434 + ], + "lines": [ + { + "bbox": [ + 129, + 323, + 481, + 434 + ], + "spans": [ + { + "bbox": [ + 129, + 323, + 481, + 434 + ], + "type": "image", + "image_path": "df0f11d18bdfdc3b9c21b6fa529d5ccddccb0ca06469997d62359d8a0bfa75fb.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 129, + 440, + 481, + 536 + ], + "blocks": [ + { + "bbox": [ + 129, + 440, + 481, + 536 + ], + "lines": [ + { + "bbox": [ + 129, + 440, + 481, + 536 + ], + "spans": [ + { + "bbox": [ + 129, + 440, + 481, + 536 + ], + "type": "image", + "image_path": "b679e36fe777360ebc2be8336cb67c250d84b550eb22f8ebeb7bb187463a7703.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 216, + 539, + 409, + 550 + ], + "lines": [ + 
{ + "bbox": [ + 216, + 539, + 409, + 550 + ], + "spans": [ + { + "bbox": [ + 216, + 539, + 409, + 550 + ], + "type": "text", + "content": "Instruction: transform the setting to a snowy scene" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 129, + 557, + 481, + 604 + ], + "blocks": [ + { + "bbox": [ + 129, + 557, + 481, + 604 + ], + "lines": [ + { + "bbox": [ + 129, + 557, + 481, + 604 + ], + "spans": [ + { + "bbox": [ + 129, + 557, + 481, + 604 + ], + "type": "image", + "image_path": "f3bc012cf2e6a9d62134a6b3ee005b89853723b1c73534b08c36befb6f9ba667.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 129, + 609, + 481, + 667 + ], + "blocks": [ + { + "bbox": [ + 129, + 609, + 481, + 667 + ], + "lines": [ + { + "bbox": [ + 129, + 609, + 481, + 667 + ], + "spans": [ + { + "bbox": [ + 129, + 609, + 481, + 667 + ], + "type": "image", + "image_path": "96751fe61944764a81c1e8306ccadab6aa99b31163bed4354804d679da1bd518.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 179, + 677, + 430, + 689 + ], + "lines": [ + { + "bbox": [ + 179, + 677, + 430, + 689 + ], + "spans": [ + { + "bbox": [ + 179, + 677, + 430, + 689 + ], + "type": "text", + "content": "Figure 9. Examples of our generated instructional video editing data." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 192, + 146, + 416, + 158 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 192, + 146, + 416, + 158 + ], + "spans": [ + { + "bbox": [ + 192, + 146, + 416, + 158 + ], + "type": "text", + "content": "Table 6. Qwen2-VL prompt for generating animation prompt." 
+ } + ] + } + ], + "index": 0, + "type": "text" + }, + { + "bbox": [ + 70, + 171, + 533, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 171, + 533, + 196 + ], + "spans": [ + { + "bbox": [ + 70, + 171, + 533, + 196 + ], + "type": "text", + "content": "I want to animate this image using an Image-Text-to-Video model. Your task is to generate a detailed and reasonable text prompt that describes how the image should be animated." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 208, + 119, + 218 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 208, + 119, + 218 + ], + "spans": [ + { + "bbox": [ + 70, + 208, + 119, + 218 + ], + "type": "text", + "content": "Guidelines:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 232, + 533, + 292 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 70, + 232, + 533, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 232, + 533, + 255 + ], + "spans": [ + { + "bbox": [ + 70, + 232, + 533, + 255 + ], + "type": "text", + "content": "1. Clarity & Realism - The animation description should be logical based on the given image, ensuring the movement makes sense for the scene." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 267, + 533, + 292 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 267, + 533, + 292 + ], + "spans": [ + { + "bbox": [ + 70, + 267, + 533, + 292 + ], + "type": "text", + "content": "2. Short & Vivid Description - Use expressive language to guide the animation model effectively, ensuring high-quality and visually engaging results." 
+ } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 70, + 304, + 533, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 304, + 533, + 327 + ], + "spans": [ + { + "bbox": [ + 70, + 304, + 533, + 327 + ], + "type": "text", + "content": "Ensure that your animation prompt aligns with the content of the provided image and describes a visually compelling motion sequence." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 339, + 446, + 351 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 339, + 446, + 351 + ], + "spans": [ + { + "bbox": [ + 70, + 339, + 446, + 351 + ], + "type": "text", + "content": "Do not output animation prompts that contain objects/scenes not included in the given image." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 363, + 262, + 375 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 363, + 262, + 375 + ], + "spans": [ + { + "bbox": [ + 70, + 363, + 262, + 375 + ], + "type": "text", + "content": "Make sure the prompt is short in 1-2 sentences." + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 58, + 416, + 552, + 681 + ], + "blocks": [ + { + "bbox": [ + 58, + 416, + 552, + 681 + ], + "lines": [ + { + "bbox": [ + 58, + 416, + 552, + 681 + ], + "spans": [ + { + "bbox": [ + 58, + 416, + 552, + 681 + ], + "type": "image", + "image_path": "cff8fe39e7c04b9e27b42c5b598f301ac5e411f00152bdff7bf3bb1b67d1da5a.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 215, + 690, + 393, + 701 + ], + "lines": [ + { + "bbox": [ + 215, + 690, + 393, + 701 + ], + "spans": [ + { + "bbox": [ + 215, + 690, + 393, + 701 + ], + "type": "text", + "content": "Figure 10. More Examples of Concept Addition." 
+ } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 124, + 218, + 485, + 229 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 124, + 218, + 485, + 229 + ], + "spans": [ + { + "bbox": [ + 124, + 218, + 485, + 229 + ], + "type": "text", + "content": "Table 7. GPT-4o prompt for MLLM-as-a-Judge for automatic instructional video editing evaluation." + } + ] + } + ], + "index": 0, + "type": "text" + }, + { + "bbox": [ + 71, + 243, + 94, + 254 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 243, + 94, + 254 + ], + "spans": [ + { + "bbox": [ + 71, + 243, + 94, + 254 + ], + "type": "text", + "content": "User" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 71, + 256, + 533, + 279 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 256, + 533, + 279 + ], + "spans": [ + { + "bbox": [ + 71, + 256, + 533, + 279 + ], + "type": "text", + "content": "You are an evaluator for instructional video editing tasks. Your job is to assess how well the edited video fulfills the user's specific instructions." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 72, + 281, + 130, + 290 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 281, + 130, + 290 + ], + "spans": [ + { + "bbox": [ + 72, + 281, + 130, + 290 + ], + "type": "text", + "content": "I will provide:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 72, + 293, + 245, + 327 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 72, + 293, + 201, + 303 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 293, + 201, + 303 + ], + "spans": [ + { + "bbox": [ + 72, + 293, + 201, + 303 + ], + "type": "text", + "content": "1. 
The original video (first GIF)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 72, + 304, + 207, + 315 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 304, + 207, + 315 + ], + "spans": [ + { + "bbox": [ + 72, + 304, + 207, + 315 + ], + "type": "text", + "content": "2. The edited video (second GIF)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 72, + 316, + 245, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 316, + 245, + 327 + ], + "spans": [ + { + "bbox": [ + 72, + 316, + 245, + 327 + ], + "type": "text", + "content": "3. The user's instruction: [user instruction]" + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 72, + 329, + 315, + 339 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 329, + 315, + 339 + ], + "spans": [ + { + "bbox": [ + 72, + 329, + 315, + 339 + ], + "type": "text", + "content": "Please evaluate the editing result using the following format:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 72, + 340, + 264, + 351 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 340, + 264, + 351 + ], + "spans": [ + { + "bbox": [ + 72, + 340, + 264, + 351 + ], + "type": "text", + "content": "INSTRUCTION: [Repeat the user's instruction]" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 72, + 352, + 138, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 352, + 138, + 361 + ], + "spans": [ + { + "bbox": [ + 72, + 352, + 138, + 361 + ], + "type": "text", + "content": "EVALUATION:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 71, + 364, + 249, + 411 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 71, + 364, + 222, + 375 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 364, + 222, + 375 + ], + "spans": [ + { + "bbox": [ + 71, + 364, + 222, + 375 + ], + "type": "text", + "content": "- Accuracy score (1-10): 
[Your score]" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 71, + 376, + 214, + 387 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 376, + 214, + 387 + ], + "spans": [ + { + "bbox": [ + 71, + 376, + 214, + 387 + ], + "type": "text", + "content": "- Quality score (1-10): [Your score]" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 71, + 388, + 249, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 388, + 249, + 399 + ], + "spans": [ + { + "bbox": [ + 71, + 388, + 249, + 399 + ], + "type": "text", + "content": "- Appropriateness score (1-10): [Your score]" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 71, + 400, + 234, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 400, + 234, + 411 + ], + "spans": [ + { + "bbox": [ + 71, + 400, + 234, + 411 + ], + "type": "text", + "content": "- Overall score (1-10): [Your final score]" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 71, + 423, + 535, + 446 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 423, + 535, + 446 + ], + "spans": [ + { + "bbox": [ + 71, + 423, + 535, + 446 + ], + "type": "text", + "content": "EXPLANATION: [Provide a brief justification for your scores, highlighting specific strengths and weaknesses of the edit]" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 72, + 448, + 333, + 459 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 448, + 333, + 459 + ], + "spans": [ + { + "bbox": [ + 72, + 448, + 333, + 459 + ], + "type": "text", + "content": "RECOMMENDATION: [Optional suggestions for improvement]" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 72, + 472, + 170, + 482 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 472, + 170, + 482 + ], + "spans": [ + { + "bbox": [ + 72, + 472, + 170, + 482 + ], + "type": "text", + "content": "When scoring, consider:" + } + ] + } + ], + 
"index": 18 + }, + { + "bbox": [ + 71, + 483, + 533, + 507 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 483, + 533, + 507 + ], + "spans": [ + { + "bbox": [ + 71, + 483, + 533, + 507 + ], + "type": "text", + "content": "- Accuracy: Does the edit precisely follow the given instruction? - Quality: Is the edit visually seamless and natural-looking? - Appropriateness: Does the edit maintain coherence with the original video context?" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 72, + 519, + 153, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 519, + 153, + 529 + ], + "spans": [ + { + "bbox": [ + 72, + 519, + 153, + 529 + ], + "type": "text", + "content": "The overall scale is:" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 72, + 531, + 324, + 578 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 72, + 531, + 222, + 542 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 531, + 222, + 542 + ], + "spans": [ + { + "bbox": [ + 72, + 531, + 222, + 542 + ], + "type": "text", + "content": "1-3: Poor - Major issues with the edit" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 72, + 544, + 324, + 554 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 544, + 324, + 554 + ], + "spans": [ + { + "bbox": [ + 72, + 544, + 324, + 554 + ], + "type": "text", + "content": "4-6: Acceptable - Follows instruction but with noticeable flaws" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 72, + 555, + 274, + 566 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 555, + 274, + 566 + ], + "spans": [ + { + "bbox": [ + 72, + 555, + 274, + 566 + ], + "type": "text", + "content": "7-8: Good - Clear, effective edit with minor issues" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 72, + 567, + 290, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 567, + 290, + 578 + ], + 
"spans": [ + { + "bbox": [ + 72, + 567, + 290, + 578 + ], + "type": "text", + "content": "9-10: Excellent - Flawless execution of the instruction" + } + ] + } + ], + "index": 24 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 72, + 591, + 113, + 601 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 591, + 113, + 601 + ], + "spans": [ + { + "bbox": [ + 72, + 591, + 113, + 601 + ], + "type": "text", + "content": "Assistant" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 72, + 603, + 229, + 614 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 603, + 229, + 614 + ], + "spans": [ + { + "bbox": [ + 72, + 603, + 229, + 614 + ], + "type": "text", + "content": "Scores, Explanation, Recommendation" + } + ] + } + ], + "index": 27 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 59, + 86, + 232, + 352 + ], + "blocks": [ + { + "bbox": [ + 59, + 86, + 232, + 352 + ], + "lines": [ + { + "bbox": [ + 59, + 86, + 232, + 352 + ], + "spans": [ + { + "bbox": [ + 59, + 86, + 232, + 352 + ], + "type": "image", + "image_path": "898b5c247cabf3586adb7f29b21e553779aa2d29ee0be6fa5155a19233834d09.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 216, + 360, + 394, + 372 + ], + "lines": [ + { + "bbox": [ + 216, + 360, + 394, + 372 + ], + "spans": [ + { + "bbox": [ + 216, + 360, + 394, + 372 + ], + "type": "text", + "content": "Figure 11. More Examples of Concept Removal." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 234, + 87, + 392, + 351 + ], + "blocks": [ + { + "bbox": [ + 234, + 87, + 392, + 351 + ], + "lines": [ + { + "bbox": [ + 234, + 87, + 392, + 351 + ], + "spans": [ + { + "bbox": [ + 234, + 87, + 392, + 351 + ], + "type": "image", + "image_path": "a2d9cf4b40f420ebc7ee5761705d95f8a45f39fffee54758e9f4300b770235fa.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 392, + 86, + 553, + 351 + ], + "blocks": [ + { + "bbox": [ + 392, + 86, + 553, + 351 + ], + "lines": [ + { + "bbox": [ + 392, + 86, + 553, + 351 + ], + "spans": [ + { + "bbox": [ + 392, + 86, + 553, + 351 + ], + "type": "image", + "image_path": "dff3a5c584a585ae6935604dbf379d8dda7efa447ac5855c81317330be2c93ed.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 58, + 409, + 232, + 674 + ], + "blocks": [ + { + "bbox": [ + 58, + 409, + 232, + 674 + ], + "lines": [ + { + "bbox": [ + 58, + 409, + 232, + 674 + ], + "spans": [ + { + "bbox": [ + 58, + 409, + 232, + 674 + ], + "type": "image", + "image_path": "3f4b79d95de3df3c6f13c06609009e729bf430bbafcfbde1a4c1fa43b302c53d.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 219, + 683, + 391, + 696 + ], + "lines": [ + { + "bbox": [ + 219, + 683, + 391, + 696 + ], + "spans": [ + { + "bbox": [ + 219, + 683, + 391, + 696 + ], + "type": "text", + "content": "Figure 12. More Examples of Object Changes." 
+ } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 234, + 409, + 392, + 674 + ], + "blocks": [ + { + "bbox": [ + 234, + 409, + 392, + 674 + ], + "lines": [ + { + "bbox": [ + 234, + 409, + 392, + 674 + ], + "spans": [ + { + "bbox": [ + 234, + 409, + 392, + 674 + ], + "type": "image", + "image_path": "c8db04c3c4081c16c71effca6b95f22414b3b755bb96289cd405d314e86121e6.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 392, + 409, + 553, + 674 + ], + "blocks": [ + { + "bbox": [ + 392, + 409, + 553, + 674 + ], + "lines": [ + { + "bbox": [ + 392, + 409, + 553, + 674 + ], + "spans": [ + { + "bbox": [ + 392, + 409, + 553, + 674 + ], + "type": "image", + "image_path": "f34bb0162cf39f27cfe5a820676923452b4f9c7ec312b17e8153f0754ef01982.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 58, + 86, + 232, + 350 + ], + "blocks": [ + { + "bbox": [ + 58, + 86, + 232, + 350 + ], + "lines": [ + { + "bbox": [ + 58, + 86, + 232, + 350 + ], + "spans": [ + { + "bbox": [ + 58, + 86, + 232, + 350 + ], + "type": "image", + "image_path": "65f4482a764a319b6cd11cf875fc5fe37281b580a14bcf9f6fbdb6669e30fcb8.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 228, + 360, + 382, + 372 + ], + "lines": [ + { + "bbox": [ + 228, + 360, + 382, + 372 + ], + "spans": [ + { + "bbox": [ + 228, + 360, + 382, + 372 + ], + "type": "text", + "content": "Figure 13. More Examples of Stylization." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 234, + 86, + 392, + 351 + ], + "blocks": [ + { + "bbox": [ + 234, + 86, + 392, + 351 + ], + "lines": [ + { + "bbox": [ + 234, + 86, + 392, + 351 + ], + "spans": [ + { + "bbox": [ + 234, + 86, + 392, + 351 + ], + "type": "image", + "image_path": "859b2b54f7e4ea3e5f151b40abda2a47c11a5e041a6aaf089e7c57ae94b7af8c.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 392, + 86, + 553, + 351 + ], + "blocks": [ + { + "bbox": [ + 392, + 86, + 553, + 351 + ], + "lines": [ + { + "bbox": [ + 392, + 86, + 553, + 351 + ], + "spans": [ + { + "bbox": [ + 392, + 86, + 553, + 351 + ], + "type": "image", + "image_path": "eb5bbecd6f86f818c07a1fe8931a93f16ea43d8ea49eec4634c2689b86050898.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 58, + 408, + 232, + 674 + ], + "blocks": [ + { + "bbox": [ + 58, + 408, + 232, + 674 + ], + "lines": [ + { + "bbox": [ + 58, + 408, + 232, + 674 + ], + "spans": [ + { + "bbox": [ + 58, + 408, + 232, + 674 + ], + "type": "image", + "image_path": "b124f1339966a72d4c67b2ab4b76301ff8a7af2fc60b52d2755cbe999b6fb7fa.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 179, + 683, + 430, + 695 + ], + "lines": [ + { + "bbox": [ + 179, + 683, + 430, + 695 + ], + "spans": [ + { + "bbox": [ + 179, + 683, + 430, + 695 + ], + "type": "text", + "content": "Figure 14. More Examples of Environment and Background Editing." 
+ } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 234, + 409, + 392, + 674 + ], + "blocks": [ + { + "bbox": [ + 234, + 409, + 392, + 674 + ], + "lines": [ + { + "bbox": [ + 234, + 409, + 392, + 674 + ], + "spans": [ + { + "bbox": [ + 234, + 409, + 392, + 674 + ], + "type": "image", + "image_path": "b8d1ff15c2cfbed82a207580b53d1b76e1ed13920065e6313097883e59d67805.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 392, + 409, + 553, + 674 + ], + "blocks": [ + { + "bbox": [ + 392, + 409, + 553, + 674 + ], + "lines": [ + { + "bbox": [ + 392, + 409, + 553, + 674 + ], + "spans": [ + { + "bbox": [ + 392, + 409, + 553, + 674 + ], + "type": "image", + "image_path": "eddea6f13444e5092d3c40d6739d4fdaba2cf2dc9765b24e0c382a4a6bc599e4.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 59, + 83, + 553, + 350 + ], + "blocks": [ + { + "bbox": [ + 59, + 83, + 553, + 350 + ], + "lines": [ + { + "bbox": [ + 59, + 83, + 553, + 350 + ], + "spans": [ + { + "bbox": [ + 59, + 83, + 553, + 350 + ], + "type": "image", + "image_path": "42d74fb3a6be2224e7ee95810b48350e42a1b56d9e77e28004e90ee22ded0ed0.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 206, + 358, + 403, + 369 + ], + "lines": [ + { + "bbox": [ + 206, + 358, + 403, + 369 + ], + "spans": [ + { + "bbox": [ + 206, + 358, + 403, + 369 + ], + "type": "text", + "content": "Figure 15. More Examples of Visual Features Editing." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 58, + 403, + 552, + 674 + ], + "blocks": [ + { + "bbox": [ + 58, + 403, + 552, + 674 + ], + "lines": [ + { + "bbox": [ + 58, + 403, + 552, + 674 + ], + "spans": [ + { + "bbox": [ + 58, + 403, + 552, + 674 + ], + "type": "image", + "image_path": "4328af1b9cf4afc2d45f60b901b77275b097f605ff43bd121e485851d5fd828d.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 215, + 685, + 394, + 696 + ], + "lines": [ + { + "bbox": [ + 215, + 685, + 394, + 696 + ], + "spans": [ + { + "bbox": [ + 215, + 685, + 394, + 696 + ], + "type": "text", + "content": "Figure 16. More Examples of Object Grounding." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 59, + 292, + 555, + 468 + ], + "blocks": [ + { + "bbox": [ + 59, + 292, + 555, + 468 + ], + "lines": [ + { + "bbox": [ + 59, + 292, + 555, + 468 + ], + "spans": [ + { + "bbox": [ + 59, + 292, + 555, + 468 + ], + "type": "image", + "image_path": "41ce929097f8588a945d953564c371ef38f463eb2eb718e38767cc9dce0eee6b.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 189, + 476, + 420, + 488 + ], + "lines": [ + { + "bbox": [ + 189, + 476, + 420, + 488 + ], + "spans": [ + { + "bbox": [ + 189, + 476, + 420, + 488 + ], + "type": "text", + "content": "Figure 17. More Examples of Object Reasoning Segmentation." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file