diff --git a/.gitattributes b/.gitattributes index a0347ea6ea0c6057255e14e1f16152b52fc9fbb6..0799978f8c8ad8913f69e8385e1425aafbec23e9 100644 --- a/.gitattributes +++ b/.gitattributes @@ -3742,3 +3742,9 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text 2025/gRNAde_[[:space:]]Geometric[[:space:]]Deep[[:space:]]Learning[[:space:]]for[[:space:]]3D[[:space:]]RNA[[:space:]]inverse[[:space:]]design/c299f76d-d075-4d18-96c9-c5ab59a25415_origin.pdf filter=lfs diff=lfs merge=lfs -text 2025/u-$_mu$P_[[:space:]]The[[:space:]]Unit-Scaled[[:space:]]Maximal[[:space:]]Update[[:space:]]Parametrization/8f19909b-5011-4030-bfce-534524dc855c_origin.pdf filter=lfs diff=lfs merge=lfs -text 2025/uniINF_[[:space:]]Best-of-Both-Worlds[[:space:]]Algorithm[[:space:]]for[[:space:]]Parameter-Free[[:space:]]Heavy-Tailed[[:space:]]MABs/7c6dea0d-64f2-4b07-8426-d6f9deaeadcd_origin.pdf filter=lfs diff=lfs merge=lfs -text +2025/TimeKAN_[[:space:]]KAN-based[[:space:]]Frequency[[:space:]]Decomposition[[:space:]]Learning[[:space:]]Architecture[[:space:]]for[[:space:]]Long-term[[:space:]]Time[[:space:]]Series[[:space:]]Forecasting/c4acf521-4bf1-41df-95d9-c37b967e30fc_origin.pdf filter=lfs diff=lfs merge=lfs -text +2025/TimeSuite_[[:space:]]Improving[[:space:]]MLLMs[[:space:]]for[[:space:]]Long[[:space:]]Video[[:space:]]Understanding[[:space:]]via[[:space:]]Grounded[[:space:]]Tuning/f48bb6a8-358b-46f9-aa7b-783937ea3be0_origin.pdf filter=lfs diff=lfs merge=lfs -text +2025/Timer-XL_[[:space:]]Long-Context[[:space:]]Transformers[[:space:]]for[[:space:]]Unified[[:space:]]Time[[:space:]]Series[[:space:]]Forecasting/1000abc3-3f82-4c7b-a0aa-1b66e4569e7b_origin.pdf filter=lfs diff=lfs merge=lfs -text +2025/To[[:space:]]CoT[[:space:]]or[[:space:]]not[[:space:]]to[[:space:]]CoT_[[:space:]]Chain-of-thought[[:space:]]helps[[:space:]]mainly[[:space:]]on[[:space:]]math[[:space:]]and[[:space:]]symbolic[[:space:]]reasoning/78080855-33d6-4037-9b8c-edc307a2e575_origin.pdf filter=lfs diff=lfs merge=lfs -text +2025/To[[:space:]]Code[[:space:]]or[[:space:]]Not[[:space:]]To[[:space:]]Code_[[:space:]]Exploring[[:space:]]Impact[[:space:]]of[[:space:]]Code[[:space:]]in[[:space:]]Pre-training/e6b439cb-3b05-45ee-8c52-561b8f255560_origin.pdf filter=lfs diff=lfs merge=lfs -text +2025/To[[:space:]]Tackle[[:space:]]Adversarial[[:space:]]Transferability_[[:space:]]A[[:space:]]Novel[[:space:]]Ensemble[[:space:]]Training[[:space:]]Method[[:space:]]with[[:space:]]Fourier[[:space:]]Transformation/11b8de53-d193-4b48-bf31-fc86f1bab485_origin.pdf filter=lfs diff=lfs merge=lfs -text diff --git a/2025/TimeKAN_ KAN-based Frequency Decomposition Learning Architecture for Long-term Time Series Forecasting/c4acf521-4bf1-41df-95d9-c37b967e30fc_content_list.json b/2025/TimeKAN_ KAN-based Frequency Decomposition Learning Architecture for Long-term Time Series Forecasting/c4acf521-4bf1-41df-95d9-c37b967e30fc_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..dfa6ac4e221f9566a474b7ee40e7ad266f7795f3 --- /dev/null +++ b/2025/TimeKAN_ KAN-based Frequency Decomposition Learning Architecture for Long-term Time Series Forecasting/c4acf521-4bf1-41df-95d9-c37b967e30fc_content_list.json @@ -0,0 +1,1975 @@ +[ + { + "type": "text", + "text": "TIMEKAN: KAN-BASED FREQUENCY DECOMPOSITION LEARNING ARCHITECTURE FOR LONG-TERM TIME SERIES FORECASTING", + "text_level": 1, + "bbox": [ + 171, + 98, + 823, + 172 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Songtao Huang $^{1,2}$ , Zhen Zhao $^{1}$ , Can Li $^{3}$ , Lei Bai $^{4}$", + "bbox": [ + 
179, + 193, + 545, + 209 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ Shanghai Artificial Intelligence Laboratory, Shanghai, China", + "bbox": [ + 179, + 209, + 589, + 223 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{2}$ School of Information Science and Engineering, Lanzhou University, Lanzhou, China", + "bbox": [ + 179, + 223, + 751, + 238 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{3}$ The Key Laboratory of Road and Traffic Engineering of the Ministry of Education, Tongji University, Shanghai, China", + "bbox": [ + 179, + 238, + 733, + 267 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "huangsongtao@pjlab.org.cn, zhen.zhao@outlook.com,", + "bbox": [ + 179, + 267, + 614, + 281 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "lchelen1005@gmail.com, baisanshi@gmail.com", + "bbox": [ + 179, + 281, + 557, + 294 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ABSTRACT", + "text_level": 1, + "bbox": [ + 450, + 330, + 547, + 345 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Real-world time series often have multiple frequency components that are intertwined with each other, making accurate time series forecasting challenging. Decomposing the mixed frequency components into multiple single frequency components is a natural choice. However, the information density of patterns varies across different frequencies, and employing a uniform modeling approach for different frequency components can lead to inaccurate characterization. To address these challenges, inspired by the flexibility of the recent Kolmogorov-Arnold Network (KAN), we propose a KAN-based Frequency Decomposition Learning architecture (TimeKAN) to tackle the complex forecasting challenges caused by multiple frequency mixtures. Specifically, TimeKAN mainly consists of three components: Cascaded Frequency Decomposition (CFD) blocks, Multi-order KAN Representation Learning (M-KAN) blocks and Frequency Mixing blocks. CFD blocks adopt a bottom-up cascading approach to obtain series representations for each frequency band. Benefiting from the high flexibility of KAN, we design a novel M-KAN block to learn and represent specific temporal patterns within each frequency band. Finally, Frequency Mixing blocks are used to recombine the frequency bands into the original format. Extensive experimental results across multiple real-world time series datasets demonstrate that TimeKAN achieves state-of-the-art performance as an extremely lightweight architecture. Code is available at https://github.com/huangst21/TimeKAN.", + "bbox": [ + 228, + 359, + 767, + 638 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 INTRODUCTION", + "text_level": 1, + "bbox": [ + 171, + 662, + 336, + 679 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Time series forecasting (TSF) has garnered significant interest due to its wide range of applications, including finance (Huang et al., 2024), energy management (Yin et al., 2023), traffic flow planning (Jiang & Luo, 2022), and weather forecasting (Lam et al., 2023). 
Recently, deep learning has led to substantial advancements in TSF, with state-of-the-art performance achieved by CNN-based methods (Wang et al., 2023; donghao & wang xue, 2024), Transformer-based methods (Nie et al., 2023; Liu et al., 2024b) and MLP-based methods (Zeng et al., 2023; Wang et al., 2024a).", + "bbox": [ + 169, + 694, + 823, + 779 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Due to the complex nature of the real world, observed multivariate time series are often nonstationary and exhibit diverse patterns. These intertwined patterns complicate the internal relationships within the time series, making it challenging to capture and establish connections between historical observations and future targets. To address the complex temporal patterns in time series, an increasing number of studies focus on leveraging prior knowledge to decompose time series into simpler components that provide a basis for forecasting. For instance, Autoformer (Wu et al., 2021) decomposes time series into seasonal and trend components. This idea is also adopted by DLinear (Zeng et al., 2023) and FEDformer (Zhou et al., 2022b). Building on this foundation, TimeMixer (Wang et al., 2024a) further introduces multi-scale seasonal-trend decomposition and highlights the importance of interactions between different scales. Recent models like TimesNet (Wu et al.,", + "bbox": [ + 169, + 784, + 826, + 925 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "2023), PDF (Dai et al., 2024), and SparseTSF (Lin et al., 2024) emphasize the inherent periodicity in time series and decompose long sequences into multiple shorter ones based on the period length, thereby enabling the separate modeling of inter-period and intra-period dependencies within temporal patterns. In summary, these different decomposition methods share a common goal: utilizing the simplified subsequences to provide critical information for future predictions, thereby achieving accurate forecasting.", + "bbox": [ + 169, + 103, + 823, + 188 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "It is worth noting that time series are often composed of multiple frequency components, where the low-frequency components represent long-term periodic variations and the high-frequency components capture certain abrupt events. The mixture of different frequency components makes accurate forecasting particularly challenging. The aforementioned decomposition approaches motivate us to design a frequency decomposition framework that decouples different frequency components in a time series and independently learns the temporal patterns associated with each frequency. However, this introduces another challenge: the information density of patterns varies across different frequencies, and employing a uniform modeling approach for different frequency components can lead to inaccurate characterizations, resulting in sub-optimal results. Fortunately, a new neural network architecture, known as Kolmogorov-Arnold Networks (KAN) (Liu et al., 2024c), has recently gained significant attention in the deep learning community due to its outstanding data-fitting capabilities and flexibility, showing potential as a substitute for traditional MLP. 
Compared to MLP, KAN offers optional kernels and allows for the adjustment of kernel order to control its fitting capacity. This consideration leads us to explore the use of Multi-order KANs to represent temporal patterns across different frequencies, thereby providing more accurate information for forecasting.", + "bbox": [ + 169, + 194, + 826, + 402 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Motivated by these observations, we propose a KAN-based Frequency Decomposition Learning architecture (TimeKAN) to address the complex prediction challenges caused by multiple frequency mixtures. Specifically, TimeKAN first employs a moving average to progressively remove relatively high-frequency components from the sequence. Subsequently, Cascaded Frequency Decomposition (CFD) blocks adopt a bottom-up cascading approach to obtain sequence representations for each frequency band. Multi-order KAN Representation Learning (M-KAN) blocks leverage the high flexibility of KAN to learn and represent specific temporal patterns within each frequency band. Finally, Frequency Mixing blocks recombine the frequency bands into the original format, ensuring that this Decomposition-Learning-Mixing process is repeatable, thereby modeling different temporal patterns at various frequencies more accurately. The final high-level sequence is then mapped to the desired forecasting output via a simple linear mapping. With our meticulously designed architecture, TimeKAN achieves state-of-the-art performance across multiple long-term time series forecasting tasks, while also being a lightweight architecture that outperforms complex TSF models with fewer computational resources.", + "bbox": [ + 169, + 409, + 826, + 603 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our contributions are summarized as follows:", + "bbox": [ + 171, + 609, + 473, + 625 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We revisit time series forecasting from the perspective of frequency decoupling, effectively disentangling time series characteristics through a frequency Decomposition-Learning-Mixing architecture to address challenges caused by complex information coupling in time series.", + "- We introduce TimeKAN as a lightweight yet effective forecasting model and design novel M-KAN blocks to effectively model and represent patterns at different frequencies by maximizing the flexibility of KAN.", + "- TimeKAN demonstrates superior performance across multiple TSF prediction tasks, while having a parameter count significantly lower than that of state-of-the-art TSF models." + ], + "bbox": [ + 215, + 638, + 823, + 781 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 RELATED WORK", + "text_level": 1, + "bbox": [ + 171, + 806, + 346, + 821 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1 KOLMOGOROV-ARNOLD NETWORK", + "text_level": 1, + "bbox": [ + 171, + 840, + 460, + 854 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The Kolmogorov-Arnold representation theorem states that any multivariate continuous function can be expressed as a combination of univariate functions and addition operations. Kolmogorov-Arnold Network (KAN) (Liu et al., 2024c) leverages this theorem to propose an innovative alternative to traditional MLP. 
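For reference, the classical form of the theorem states that every continuous function $f:[0,1]^{n}\to\mathbb{R}$ can be written as

$$
f(x_{1},\dots ,x_{n})=\sum_{q=1}^{2n+1}\Phi_{q}\left(\sum_{p=1}^{n}\phi_{q,p}(x_{p})\right)
$$

for suitable continuous univariate functions $\Phi_{q}$ and $\phi_{q,p}$; KAN makes such univariate functions learnable and stacks them into trainable layers.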
Unlike MLPs, which use fixed activation functions at the nodes, KAN introduces", + "bbox": [ + 169, + 867, + 823, + 924 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "learnable activation functions along the edges. Due to this flexibility and adaptability, KAN is considered a promising alternative to MLP.", + "bbox": [ + 169, + 103, + 823, + 133 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The original KAN was parameterized using spline functions. However, due to the inherent complexity of spline functions, the speed and scalability of the original KAN were not satisfactory. Consequently, subsequent research explored the use of simpler basis functions to replace splines, thereby achieving higher efficiency. ChebyshevKAN (SS, 2024) incorporates Chebyshev polynomials to parametrize the learnable functions. FastKAN (Li, 2024) uses faster Gaussian radial basis functions to approximate third-order B-spline functions.", + "bbox": [ + 169, + 138, + 826, + 224 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Moreover, KAN has been applied as an alternative to MLP in various domains. Convolutional KAN (Bodner et al., 2024) replaces the linear weight matrices in traditional convolutional networks with learnable spline function matrices. U-KAN (Li et al., 2024) integrates KAN layers into the U-Net architecture, demonstrating impressive accuracy and efficiency in several medical image segmentation tasks. KAN has also been used to bridge the gap between AI and science. Works such as PIKAN (Shukla et al., 2024) and PINN (Wang et al., 2024b) utilize KAN to build physics-informed machine learning models. This paper aims to introduce KAN into TSF and demonstrate the strong potential of KAN in representing time series data.", + "bbox": [ + 169, + 229, + 826, + 343 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.2 TIME SERIES FORECASTING", + "text_level": 1, + "bbox": [ + 171, + 361, + 413, + 376 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Traditional time series forecasting (TSF) methods, such as ARIMA (Zhang, 2003), can provide sufficient interpretability for the forecasting results but often fail to achieve satisfactory accuracy. In recent years, deep learning methods have dominated the field of TSF, mainly including CNN-based, Transformer-based, and MLP-based approaches. CNN-based models primarily apply convolution operations along the temporal dimension to extract temporal patterns. For example, MICN (Wang et al., 2023) and TimesNet (Wu et al., 2023) enhance the precision of sequence modeling by adjusting the receptive field to capture both short-term and long-term views within the sequences. ModernTCN (donghao & wang xue, 2024) advocates using large convolution kernels along the temporal dimension and captures both cross-time and cross-variable dependencies. Compared to CNN-based methods, which have limited receptive fields, Transformer-based methods offer global modeling capabilities, making them more suitable for handling long and complex sequence data. They have become the cornerstone of modern time series forecasting. 
Informer (Zhou et al., 2021) is one of the early implementations of Transformer models in TSF, making efficient forecasting possible by carefully modifying the internal Transformer architecture. PatchTST (Nie et al., 2023) divides the sequence into multiple patches along the temporal dimension, which are then fed into the Transformer, establishing it as an important benchmark in the time series domain. In contrast, iTransformer (Liu et al., 2024b) treats each variable as an independent token to capture cross-variable dependencies in multivariate time series. However, Transformer-based methods face challenges due to the large number of parameters and high memory consumption. Recent research on MLP-based methods has shown that with appropriately designed architectures leveraging prior knowledge, simple MLPs can outperform complex Transformer-based methods. DLinear (Zeng et al., 2023), for instance, preprocesses sequences using a trend-seasonal decomposition strategy. FITS (Xu et al., 2024b) performs linear transformations in the frequency domain, while TimeMixer (Wang et al., 2024a) uses MLP to facilitate information interaction at different scales. These MLP-based methods have demonstrated strong performance regarding both forecasting accuracy and efficiency. Unlike the aforementioned methods, this paper introduces the novel KAN to TSF to represent time series data more accurately. It also proposes a well-designed Decomposition-Learning-Mixing architecture to fully unlock the potential of KAN for time series forecasting.", + "bbox": [ + 169, + 388, + 826, + 779 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.3 TIME SERIES DECOMPOSITION", + "text_level": 1, + "bbox": [ + 171, + 799, + 431, + 811 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Real-world time series often consist of various underlying patterns. To leverage the characteristics of different patterns, recent approaches tend to decompose the series into multiple subcomponents, including trend-seasonal decomposition, multi-scale decomposition, and multi-period decomposition. DLinear (Zeng et al., 2023) employs moving averages to decouple the seasonal and trend components. SCINet (Liu et al., 2022) uses a hierarchical downsampling tree to iteratively extract and exchange information at multiple temporal resolutions. TimeMixer (Wang et al., 2024a) follows a fine-to-coarse principle to decompose the sequence into multiple scales across different", + "bbox": [ + 169, + 825, + 826, + 926 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/60360e90e37b535a3c68ba4c2afa0235d4eda70752a48627a4173e1bc04fa0df.jpg", + "image_caption": [ + "Figure 1: The architecture of TimeKAN, which mainly consists of Cascaded Frequency Decomposition block, Multi-order KAN Representation Learning block, and Frequency Mixing block. Here, we divide the frequency range of the time series into three frequency bands as an example." + ], + "image_footnote": [], + "bbox": [ + 178, + 104, + 823, + 262 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "time spans and further splits each scale into seasonal and periodic components. 
TimesNet (Wu et al., 2023) and PDF (Dai et al., 2024) utilize Fourier periodic analysis to decouple sequences into multiple sub-period sequences based on the calculated period. Inspired by these works, this paper proposes a novel Decomposition-Learning-Mixing architecture, which examines time series from a multi-frequency perspective to accurately model the complex patterns within time series.", + "bbox": [ + 169, + 359, + 823, + 429 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3 TIMEKAN", + "text_level": 1, + "bbox": [ + 171, + 450, + 295, + 465 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1 OVERALL ARCHITECTURE", + "text_level": 1, + "bbox": [ + 171, + 482, + 398, + 494 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Given a historical multivariate time series input $\mathbf{X} \in \mathbb{R}^{N \times T}$ , the aim of time series forecasting is to predict the future output series $\mathbf{X}_O \in \mathbb{R}^{N \times F}$ , where $T$ and $F$ are the look-back window length and the future window length, and $N$ represents the number of variates. In this paper, we propose TimeKAN to tackle the challenges arising from the complex mixture of multi-frequency components in time series. The overall architecture of TimeKAN is shown in Figure 1. We adopt a variate-independent manner (Nie et al., 2023) to predict each univariate series independently. Each univariate input time series is denoted as $X \in \mathbb{R}^T$ and we consider a univariate time series as the instance in the following calculations. In our TimeKAN, the first step is to progressively remove the relatively high-frequency components using moving averages and generate multi-level sequences, followed by projecting each sequence into a high-dimensional space. Next, adhering to the Decomposition-Learning-Mixing architecture design principle, we first design Cascaded Frequency Decomposition (CFD) blocks to obtain sequence representations for each frequency band, adopting a bottom-up cascading approach. Then, we propose Multi-order KAN Representation Learning (M-KAN) blocks to learn and represent specific temporal patterns within each frequency band. Finally, Frequency Mixing blocks recombine the frequency bands into the original format, ensuring that the Decomposition-Learning-Mixing process is repeatable. More details about our TimeKAN are described as follows.", + "bbox": [ + 169, + 508, + 826, + 731 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2 HIERARCHICAL SEQUENCE PREPROCESSING", + "text_level": 1, + "bbox": [ + 171, + 748, + 524, + 762 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Assume that we divide the frequency range of the raw time series $X$ into $k$ predefined frequency bands. We first use a moving average to progressively remove the relatively high-frequency components and generate multi-level sequences $\{x_{1},\dots ,x_{k}\}$ , where $x_{i}\in \mathbb{R}^{\frac{T}{d^{i - 1}}}\left(i\in \{1,\dots ,k\}\right)$ . $x_{1}$ is equal to the input series $X$ and $d$ denotes the length of the moving average window. 
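As a concrete illustration of this preprocessing (formalized in Eqs. (1)-(2) below), a minimal PyTorch sketch might look as follows. This is our own rendering rather than the released implementation: the function name, the replicate padding for non-divisible lengths, and the pointwise per-level Linear embedding are assumptions.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

def build_multilevel_sequences(x: torch.Tensor, k: int, d: int) -> list:
    """x: (batch, T) univariate series -> [x_1, ..., x_k], where x_i has
    length T / d**(i-1) and is a moving-average-downsampled x_{i-1}."""
    levels = [x]
    for _ in range(k - 1):
        s = levels[-1].unsqueeze(1)                   # (batch, 1, L)
        if s.size(-1) % d:                            # pad so the window tiles the series
            s = F.pad(s, (0, d - s.size(-1) % d), mode="replicate")
        levels.append(F.avg_pool1d(s, kernel_size=d, stride=d).squeeze(1))
    return levels

# Each level is then embedded independently: every scalar time step is
# lifted to D dimensions by that level's own Linear layer.
k, d, D = 3, 2, 32
embed = nn.ModuleList(nn.Linear(1, D) for _ in range(k))
x = torch.randn(8, 96)                                # a batch of univariate inputs
multilevel = [emb(s.unsqueeze(-1))                    # (batch, T / d**(i-1), D)
              for emb, s in zip(embed, build_multilevel_sequences(x, k, d))]
```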
The process of producing multi-level sequences is as follows:", + "bbox": [ + 169, + 773, + 823, + 848 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nx _ {i} = \operatorname {A v g P o o l} (\text {P a d d i n g} (x _ {i - 1})) \tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 387, + 857, + 823, + 873 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "After obtaining the multi-level sequences, each sequence is independently embedded into a higher dimension through a Linear layer:", + "bbox": [ + 169, + 880, + 823, + 907 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nx _ {i} = \operatorname {L i n e a r} \left(x _ {i}\right) \tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 439, + 909, + 823, + 925 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $x_{i} \in \mathbb{R}^{\frac{T}{d^{i - 1}} \times D}$ and $D$ is the embedding dimension. We define $x_{1}$ as the highest level sequence and $x_{k}$ as the lowest level sequence. Notably, each lower-level sequence is derived from the sequence one level higher by removing a portion of the high-frequency information. The above preprocessing occurs only once in TimeKAN.", + "bbox": [ + 169, + 102, + 823, + 161 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3 CASCADED FREQUENCY DECOMPOSITION", + "text_level": 1, + "bbox": [ + 171, + 178, + 509, + 191 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Real-world time series are often composed of multiple frequency components, with the low-frequency component representing long-term changes in the time series and the high-frequency component representing short-term fluctuations or unexpected events. These different frequency components complement each other and provide a comprehensive perspective for accurately modeling time series. Therefore, we design the Cascaded Frequency Decomposition (CFD) block to accurately decompose each frequency component in a cascaded manner, thus laying the foundation for accurately modeling different frequency components.", + "bbox": [ + 169, + 204, + 823, + 303 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The aim of the CFD block is to obtain the representation of each frequency component. Here, we take obtaining the representation of the $i$ -th frequency band as an example. To achieve it, we first employ the Fast Fourier Transform (FFT) to obtain the representation of $x_{i+1}$ in the frequency domain. Then, Zero-Padding is used to extend the length of the frequency domain sequence, so that it can have the same length as the upper sequence $x_i$ after transforming back to the time domain. Next, we use the Inverse Fast Fourier Transform (IFFT) to transform it back into the time domain. We refer to this upsampling process as Frequency Upsampling, which ensures that the frequency information remains unchanged before and after the upsampling. 
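In code, Frequency Upsampling amounts to an FFT, zero-padding of the spectrum, and an inverse FFT; the formal statement follows in Eqs. (3)-(4). The sketch below is a minimal illustration under our own conventions (real-valued series, time axis at dimension 1, and an amplitude rescaling that compensates for the 1/n normalization of the inverse transform), not the official implementation.

```python
import torch

def frequency_upsample(x_next: torch.Tensor, out_len: int) -> torch.Tensor:
    """x_next: (batch, L, D) lower-level sequence -> (batch, out_len, D).
    Zero-padding in the frequency domain adds no new frequency content,
    so the existing spectrum is carried over unchanged."""
    in_len = x_next.size(1)
    spec = torch.fft.rfft(x_next, dim=1)              # (batch, L // 2 + 1, D)
    pad = out_len // 2 + 1 - spec.size(1)
    zeros = torch.zeros(spec.size(0), pad, spec.size(2),
                        dtype=spec.dtype, device=spec.device)
    spec = torch.cat([spec, zeros], dim=1)            # the 'Padding' step
    # rescale so the shared frequencies keep their time-domain amplitude
    return torch.fft.irfft(spec, n=out_len, dim=1) * (out_len / in_len)

# The i-th frequency band is then the residual against the upper level:
# f_i = x_i - frequency_upsample(x_next, x_i.size(1))
```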
The process of Frequency Upsampling can be described as:", + "bbox": [ + 169, + 308, + 823, + 431 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\hat {x} _ {i} = \operatorname {I F F T} (\text {P a d d i n g} (\operatorname {F F T} (x _ {i + 1}))) \tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 375, + 433, + 823, + 450 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Here, $\hat{x}_i$ and $x_i$ have the same sequence length. Notably, compared to $x_i$ , $\hat{x}_i$ lacks the $i$ -th frequency component. The reason is that $x_{i+1}$ is originally formed by removing the $i$ -th frequency component from $x_i$ in the hierarchical sequence preprocessing and $x_{i+1}$ is now transformed into $\hat{x}_i$ through a lossless frequency conversion process, thereby aligning its length with $x_i$ in the time domain. Therefore, to get the series representation of the $i$ -th frequency component $f_i$ in the time domain, we only need to take the residual between $x_i$ and $\hat{x}_i$ :", + "bbox": [ + 169, + 453, + 823, + 537 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nf _ {i} = x _ {i} - \hat {x} _ {i} \tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 452, + 545, + 823, + 560 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.4 MULTI-ORDER KAN REPRESENTATION LEARNING", + "text_level": 1, + "bbox": [ + 171, + 577, + 568, + 592 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Given the multi-level frequency component representation $\{f_1, \dots, f_k\}$ generated by the CFD block, we propose Multi-order KAN Representation Learning (M-KAN) blocks to learn specific representations and temporal dependencies at each frequency. M-KAN adopts a dual-branch parallel architecture to separately model temporal representation learning and temporal dependency learning in a frequency-specific way, using Multi-order KANs to learn the representation of each frequency component and employing Depthwise Convolution to capture the temporal dependency. The details of Depthwise Convolution and Multi-order KAN will be given as follows.", + "bbox": [ + 169, + 603, + 823, + 702 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Depthwise Convolution To separate the modeling of temporal dependency from learning sequence representation, we adopt a specific type of group convolution known as Depthwise Convolution, in which the number of groups matches the embedding dimension. Depthwise Convolution employs $D$ groups of convolution kernels to perform independent convolution operations on the series of each channel. This allows the model to focus on capturing temporal patterns without interference from inter-channel relationships. The process of Depthwise Convolution is:", + "bbox": [ + 169, + 717, + 823, + 801 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nf _ {i, 1} = \operatorname {C o n v} _ {D \rightarrow D} \left(f _ {i}, \text {g r o u p} = D\right) \tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 379, + 809, + 823, + 825 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Multi-order KANs Compared with traditional MLP, KAN replaces linear weights with learnable univariate functions, allowing complex nonlinear relationships to be modeled with fewer parameters and greater interpretability (Xu et al., 2024a). Assume that a KAN is composed of $L + 1$ layers of neurons and the number of neurons in layer $l$ is $n_{l}$ . 
The transmission relationship between the $j$ -th neuron in layer $l + 1$ and all neurons in layer $l$ can be expressed as $z_{l + 1,j} = \sum_{i = 1}^{n_l}\phi_{l,j,i}(z_{l,i})$ , where $z_{l + 1,j}$ is the $j$ -th neuron at layer $l + 1$ and $z_{l,i}$ is the $i$ -th neuron at layer $l$ . We can simply understand", + "bbox": [ + 169, + 839, + 825, + 925 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "that each neuron is connected to other neurons in the previous layer through a learnable univariate function $\phi$ . The vanilla KAN (Liu et al., 2024c) employs spline functions as the learnable univariate basis functions $\phi$ , but suffers from a complex recursive computation process, which hinders the efficiency of KAN. Here, we adopt ChebyshevKAN (SS, 2024) to learn the representation of each frequency component, i.e., channel learning. ChebyshevKAN is constructed from linear combinations of Chebyshev polynomials. That is, linear combinations of Chebyshev polynomials of different orders are used to generate the learnable univariate functions $\phi$ . The Chebyshev polynomial is defined by:", + "bbox": [ + 169, + 104, + 826, + 203 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nT _ {n} (x) = \cos (n \operatorname {a r c c o s} (x)) \tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 406, + 210, + 823, + 227 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $n$ is the highest order of Chebyshev polynomials and the complexity of Chebyshev polynomials increases with the order. A 1-layer ChebyshevKAN applied to the channel dimension can be expressed as:", + "bbox": [ + 169, + 234, + 823, + 277 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\phi_ {o} (x) = \sum_ {j = 1} ^ {D} \sum_ {i = 0} ^ {n} \Theta_ {o, j, i} T _ {i} (\tanh (x _ {j})) \tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 374, + 279, + 825, + 321 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\operatorname {K A N} (x) = \left\{ \begin{array}{c} \phi_ {1} (x) \\ \dots \\ \phi_ {D} (x) \end{array} \right\} \tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 411, + 339, + 825, + 382 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $o$ is the index of the output neuron and $\Theta \in \mathbb{R}^{D\times D\times (n + 1)}$ are the learnable coefficients used to linearly combine the Chebyshev polynomials. It is worth noting that the frequency components within the time series exhibit increasingly complex temporal dynamics as the frequency increases, necessitating a network with stronger representation capabilities to learn these characteristics. ChebyshevKAN allows for the adjustment of the highest order of Chebyshev polynomials $n$ to enhance its representation ability. Therefore, from the low-frequency to high-frequency components, we adopt an increasing order of Chebyshev polynomials to align the frequency components with the complexity of the KAN, thereby accurately learning the representations of different frequency components. We refer to this group of KANs with varying highest Chebyshev polynomial orders as Multi-order KANs. 
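As a concrete illustration, a one-layer ChebyshevKAN over the channel dimension (Eqs. (6)-(8)) and the dual-branch M-KAN block built around it (Eqs. (5) and (10)) can be sketched as follows. This is our own minimal PyTorch rendering, not the released code: the initialization scale and the depthwise kernel size of 3 are assumptions.

```python
import torch
import torch.nn as nn

class ChebyKANLayer(nn.Module):
    """One KAN layer parameterized by Chebyshev polynomials."""
    def __init__(self, dim: int, order: int):
        super().__init__()
        self.order = order
        self.theta = nn.Parameter(                     # Theta in R^{D x D x (n+1)}
            torch.randn(dim, dim, order + 1) / (dim * (order + 1)) ** 0.5)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = torch.tanh(x)                              # squash into (-1, 1) so arccos is defined
        n = torch.arange(self.order + 1, device=x.device)
        cheb = torch.cos(n * torch.acos(x).unsqueeze(-1))   # T_n(x) = cos(n arccos x)
        return torch.einsum("...ji,oji->...o", cheb, self.theta)

class MKANBlock(nn.Module):
    """Dual-branch M-KAN block: depthwise convolution for temporal
    dependencies plus a ChebyshevKAN for channel-wise representation."""
    def __init__(self, d_model: int, order: int):
        super().__init__()
        self.dwconv = nn.Conv1d(d_model, d_model, kernel_size=3,
                                padding=1, groups=d_model)   # groups == channels
        self.kan = ChebyKANLayer(d_model, order)

    def forward(self, f: torch.Tensor) -> torch.Tensor:      # f: (batch, length, d_model)
        f1 = self.dwconv(f.transpose(1, 2)).transpose(1, 2)  # temporal branch
        f2 = self.kan(f)                                     # channel branch
        return f1 + f2                                       # sum of the two branches
```

For the $i$-th frequency level, the block would be instantiated with order $b + k - i$, so that higher-frequency bands receive more expressive polynomials, matching Eq. (9) below.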
We set a lower bound order $b$ , and the representation learning process for $f_{i}$ can be expressed as:", + "bbox": [ + 169, + 397, + 823, + 551 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nf _ {i, 2} = \mathrm {K A N} \left(f _ {i}, \text {o r d e r} = b + k - i\right) \tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 375, + 560, + 823, + 575 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The final output of the M-KAN block is the sum of the outputs from the Multi-order KANs and the Depthwise Convolution.", + "bbox": [ + 169, + 592, + 823, + 619 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\hat {f} _ {i} = f _ {i, 1} + f _ {i, 2} \tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 444, + 622, + 823, + 640 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.5 FREQUENCY MIXING", + "text_level": 1, + "bbox": [ + 171, + 657, + 362, + 672 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "After specifically learning the representation of each frequency component, we need to re-transform the frequency representations into the form of multi-level sequences before entering the next CFD block, ensuring that the Decomposition-Learning-Mixing process is repeatable. Therefore, we design Frequency Mixing blocks to convert the frequency component at the $i$ -th level $\hat{f}_i$ into the multi-level sequence $x_i$ , enabling it to serve as input for the next CFD block. To achieve this, we simply need to supplement the frequency information from levels $i + 1$ to $k$ back into the $i$ -th level. Thus, we employ Frequency Upsampling again to incrementally reintegrate the information into the higher frequency components:", + "bbox": [ + 169, + 684, + 823, + 800 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nx _ {i} = \operatorname {I F F T} (\text {P a d d i n g} (\operatorname {F F T} (x _ {i + 1}))) + \hat {f} _ {i} \tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 359, + 808, + 823, + 825 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "For the last Frequency Mixing block, we extract the highest-level sequence $x_{1}$ and use a simple linear layer to produce the forecasting results $X_{O}$ .", + "bbox": [ + 169, + 833, + 823, + 862 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nX _ {O} = \operatorname {L i n e a r} \left(x _ {1}\right) \tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 434, + 869, + 823, + 887 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Due to the use of a variate-independent strategy, we also need to stack the predicted results of all variables together to obtain the final multivariate prediction $\mathbf{X}_{\mathrm{O}}$ .", + "bbox": [ + 169, + 895, + 823, + 925 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/bd2650de916f0276ce0ddda006581d17498b5ea606d1ab136a0c4928828b55c0.jpg", + "table_caption": [ + "Table 1: Full results of the multivariate long-term forecasting result comparison. The input sequence length is set to 96 for all baselines and the prediction lengths $F \in \{96, 192, 336, 720\}$ . 
Avg means the average results from all four prediction lengths." + ], + "table_footnote": [], + "table_body": "
Models | TimeKAN (Ours) | TimeMixer (2024a) | iTransformer (2024b) | Time-FFM (2024a) | PatchTST (2023) | TimesNet (2023) | MICN (2023) | DLinear (2023) | FreTS (2024) | FiLM (2022a) | FEDformer (2022b) | Autoformer (2021)
Metric | MSE MAE | MSE MAE | MSE MAE | MSE MAE | MSE MAE | MSE MAE | MSE MAE | MSE MAE | MSE MAE | MSE MAE | MSE MAE | MSE MAE
ETTh1 | 96 | 0.367 0.395 | 0.385 0.402 | 0.386 0.405 | 0.385 0.400 | 0.460 0.447 | 0.384 0.402 | 0.426 0.446 | 0.397 0.412 | 0.395 0.407 | 0.438 0.433 | 0.395 0.424 | 0.449 0.459
ETTh1 | 192 | 0.414 0.420 | 0.443 0.430 | 0.441 0.436 | 0.439 0.430 | 0.512 0.477 | 0.436 0.429 | 0.454 0.464 | 0.446 0.441 | 0.490 0.477 | 0.494 0.466 | 0.469 0.470 | 0.500 0.482
ETTh1 | 336 | 0.445 0.434 | 0.512 0.470 | 0.487 0.458 | 0.480 0.449 | 0.546 0.496 | 0.638 0.469 | 0.493 0.487 | 0.489 0.467 | 0.510 0.480 | 0.547 0.495 | 0.490 0.477 | 0.521 0.496
ETTh1 | 720 | 0.444 0.459 | 0.497 0.476 | 0.503 0.491 | 0.462 0.456 | 0.544 0.517 | 0.521 0.500 | 0.526 0.526 | 0.513 0.510 | 0.568 0.538 | 0.586 0.538 | 0.598 0.544 | 0.514 0.512
ETTh1 | Avg | 0.417 0.427 | 0.459 0.444 | 0.454 0.447 | 0.442 0.434 | 0.516 0.484 | 0.495 0.450 | 0.475 0.480 | 0.461 0.457 | 0.491 0.475 | 0.516 0.483 | 0.498 0.484 | 0.496 0.487
ETTh2 | 96 | 0.290 0.340 | 0.289 0.342 | 0.297 0.349 | 0.301 0.351 | 0.308 0.355 | 0.340 0.374 | 0.372 0.424 | 0.340 0.394 | 0.332 0.387 | 0.322 0.364 | 0.358 0.397 | 0.346 0.388
ETTh2 | 192 | 0.375 0.392 | 0.378 0.397 | 0.380 0.400 | 0.378 0.397 | 0.393 0.405 | 0.402 0.414 | 0.492 0.492 | 0.482 0.479 | 0.451 0.457 | 0.405 0.414 | 0.429 0.439 | 0.456 0.452
ETTh2 | 336 | 0.423 0.435 | 0.432 0.434 | 0.428 0.432 | 0.422 0.431 | 0.427 0.436 | 0.452 0.452 | 0.607 0.555 | 0.591 0.541 | 0.466 0.473 | 0.435 0.445 | 0.496 0.487 | 0.482 0.486
ETTh2 | 720 | 0.443 0.449 | 0.464 0.464 | 0.427 0.445 | 0.427 0.444 | 0.436 0.450 | 0.462 0.468 | 0.824 0.655 | 0.839 0.661 | 0.485 0.471 | 0.445 0.457 | 0.463 0.474 | 0.515 0.511
ETTh2 | Avg | 0.383 0.404 | 0.390 0.409 | 0.383 0.407 | 0.382 0.406 | 0.391 0.411 | 0.414 0.427 | 0.574 0.531 | 0.563 0.519 | 0.433 0.446 | 0.402 0.420 | 0.437 0.449 | 0.450 0.459
ETTm1 | 96 | 0.322 0.361 | 0.317 0.356 | 0.334 0.368 | 0.336 0.369 | 0.352 0.374 | 0.338 0.375 | 0.365 0.387 | 0.346 0.374 | 0.337 0.374 | 0.353 0.370 | 0.379 0.419 | 0.505 0.475
ETTm1 | 192 | 0.357 0.383 | 0.367 0.384 | 0.377 0.391 | 0.378 0.389 | 0.390 0.393 | 0.374 0.387 | 0.403 0.408 | 0.382 0.391 | 0.382 0.398 | 0.387 — | 0.426 0.441 | 0.553 0.496
ETTm1 | 336 | 0.382 0.401 | 0.391 0.406 | 0.426 0.420 | 0.411 0.410 | 0.421 0.414 | 0.410 0.411 | 0.436 0.431 | 0.415 0.415 | 0.420 0.423 | 0.421 0.408 | 0.445 0.459 | 0.621 0.537
ETTm1 | 720 | 0.445 0.435 | 0.454 0.441 | 0.491 0.459 | 0.469 0.441 | 0.462 0.449 | 0.478 0.450 | 0.489 0.462 | 0.473 0.451 | 0.490 0.471 | 0.481 0.441 | 0.543 0.490 | 0.671 0.561
ETTm1 | Avg | 0.376 0.395 | 0.382 0.397 | 0.407 0.410 | 0.399 0.402 | 0.406 0.407 | 0.400 0.406 | 0.423 0.422 | 0.404 0.408 | 0.407 0.417 | 0.412 0.402 | 0.448 0.452 | 0.588 0.517
ETTm2 | 96 | 0.174 0.255 | 0.175 0.257 | 0.180 0.264 | 0.181 0.267 | 0.183 0.270 | 0.187 0.267 | 0.197 0.296 | 0.193 0.293 | 0.186 0.275 | 0.183 0.266 | 0.203 0.287 | 0.255 0.339
ETTm2 | 192 | 0.239 0.299 | 0.240 0.302 | 0.250 0.309 | 0.247 0.308 | 0.255 0.314 | 0.249 0.309 | 0.284 0.361 | 0.284 0.361 | 0.259 0.323 | 0.248 0.305 | 0.269 0.328 | 0.281 0.340
ETTm2 | 336 | 0.301 0.340 | 0.303 0.343 | 0.311 0.348 | 0.309 0.347 | 0.309 0.347 | 0.321 0.351 | 0.381 0.429 | 0.382 0.429 | 0.349 0.386 | 0.309 0.343 | 0.325 0.366 | 0.339 0.372
ETTm2 | 720 | 0.395 0.396 | 0.392 0.396 | 0.412 0.407 | 0.406 0.404 | 0.412 0.404 | 0.408 0.403 | 0.549 0.522 | 0.558 0.525 | 0.559 0.511 | 0.410 0.400 | 0.421 0.415 | 0.433 0.432
ETTm2 | Avg | 0.277 0.322 | 0.277 0.324 | 0.288 0.332 | 0.286 0.332 | 0.290 0.334 | 0.291 0.333 | 0.353 0.402 | 0.354 0.402 | 0.339 0.374 | 0.288 0.328 | 0.305 0.349 | 0.327 0.371
Weather | 96 | 0.162 0.208 | 0.163 0.209 | 0.174 0.214 | 0.191 0.230 | 0.186 0.227 | 0.172 0.220 | 0.198 0.261 | 0.195 0.252 | 0.171 0.227 | 0.195 0.236 | 0.217 0.296 | 0.266 0.336
Weather | 192 | 0.207 0.249 | 0.211 0.254 | 0.221 0.254 | 0.236 0.267 | 0.234 0.265 | 0.219 0.261 | 0.239 0.299 | 0.237 0.295 | 0.218 0.280 | 0.239 0.271 | 0.276 0.336 | 0.307 0.367
Weather | 336 | 0.263 0.290 | 0.263 0.293 | 0.278 0.296 | 0.289 0.303 | 0.284 0.301 | 0.246 0.337 | 0.285 0.336 | 0.282 0.331 | 0.265 0.317 | 0.289 0.306 | 0.339 0.380 | 0.359 —
Weather | 720 | 0.338 0.340 | 0.344 0.348 | 0.358 0.347 | 0.362 0.350 | 0.356 0.349 | 0.365 0.359 | 0.351 0.388 | 0.345 0.382 | 0.326 0.351 | 0.360 0.351 | 0.403 0.428 | 0.419 0.428
Weather | Avg | 0.242 0.272 | 0.245 0.276 | 0.258 0.278 | 0.270 0.288 | 0.265 0.285 | 0.251 0.294 | 0.268 0.321 | 0.265 0.315 | 0.245 0.294 | 0.271 0.290 | 0.309 0.360 | 0.338 0.382
Electricity | 96 | 0.174 0.266 | 0.153 0.245 | 0.148 0.240 | 0.198 0.282 | 0.190 0.296 | 0.168 0.272 | 0.180 0.293 | 0.210 0.302 | 0.171 0.260 | 0.198 0.274 | 0.193 0.308 | 0.201 0.317
Electricity | 192 | 0.182 0.273 | 0.166 0.257 | 0.162 0.253 | 0.199 0.285 | 0.199 0.304 | 0.184 0.322 | 0.189 0.302 | 0.210 0.305 | 0.177 0.268 | 0.198 0.278 | 0.201 0.315 | 0.222 0.334
Electricity | 336 | 0.197 0.286 | 0.185 0.275 | 0.178 0.269 | 0.212 0.298 | 0.217 0.319 | 0.198 0.300 | 0.198 0.312 | 0.223 0.319 | 0.190 0.284 | 0.217 0.300 | 0.214 0.329 | 0.231 0.443
Electricity | 720 | 0.236 0.320 | 0.224 0.312 | 0.225 0.317 | 0.253 0.330 | 0.258 0.352 | 0.220 0.320 | 0.217 0.330 | 0.258 0.350 | 0.228 0.316 | 0.278 0.356 | 0.246 0.355 | 0.254 0.361
Electricity | Avg | 0.197 0.286 | 0.182 0.272 | 0.178 0.270 | 0.216 0.299 | 0.216 0.318 | 0.193 0.304 | 0.196 0.309 | 0.225 0.319 | 0.192 0.282 | 0.223 0.302 | 0.214 0.327 | 0.227 0.338
", + "bbox": [ + 174, + 157, + 823, + 478 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4 EXPERIMENTS", + "text_level": 1, + "bbox": [ + 171, + 500, + 328, + 513 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Datasets We conduct extensive experiments on six real-world time series datasets, including Weather, ETTh1, ETTh2, ETTm1, ETTm2 and Electricity for long-term forecasting. Following previous work (Wu et al., 2021), we split the ETT series dataset into training, validation, and test sets in a ratio of 6:2:2. For the remaining datasets, we adopt a split ratio of 7:1:2.", + "bbox": [ + 169, + 530, + 823, + 585 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Baseline We carefully select eleven well-acknowledged methods in the field of long-term time series forecasting as our baselines, including (1) Transformer-based methods: Autoformer (2021), FEDformer (2022b), PatchTST (2023), iTransformer (2024b); (2) MLP-based methods: DLinear (2023) and TimeMixer (2024a) (3) CNN-based method: MICN (2023), TimesNet (2023); (4) Frequency-based methods: FreTS (2024) and FiLM (2022a). And a time series foundation model Time-FFM (2024a).", + "bbox": [ + 169, + 601, + 823, + 686 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Experimental Settings To ensure fair comparisons, we adopt the same look-back window length $T = 96$ and the same prediction length $F = \\{96,192,336,720\\}$ . We utilize the L2 loss for model training and use Mean Square Error (MSE) and Mean Absolute Error (MAE) metrics to evaluate the performance of each method.", + "bbox": [ + 169, + 700, + 823, + 757 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.1 MAIN RESULTS", + "text_level": 1, + "bbox": [ + 171, + 772, + 323, + 786 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The comprehensive forecasting results are presented in Table 1, where the best results are highlighted in bold red and the second-best are underlined in blue. A lower MSE/MAE indicates a more accurate prediction result. We observe that TimeKAN demonstrates superior predictive performance across all datasets, except for the Electricity dataset, where iTransformer achieves the best result. This is due to iTransformer's use of channel-wise self-attention mechanisms to model inter-variable dependencies, which is particularly effective for high-dimensional datasets like Electricity. Additionally, both TimeKAN and TimeMixer perform consistently well in long-term forecasting tasks, showcasing the generalizability of well-designed time-series decomposition architectures for accurate predictions. Compared with other state-of-the-art methods, TimeKAN introduces a novel", + "bbox": [ + 169, + 799, + 825, + 924 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/5c77d357baa2f67e10bbff3063cef2bd45f772e6987c4003fead093d73c3040d.jpg", + "table_caption": [ + "Table 2: Ablation study of the Frequency Upsampling. The best results are in bold." + ], + "table_footnote": [], + "table_body": "
Datasets | ETTh1 | ETTh2 | ETTm1 | ETTm2 | Weather | Electricity
Metric | MSE MAE | MSE MAE | MSE MAE | MSE MAE | MSE MAE | MSE MAE
Linear Mapping | 0.401 0.413 | 0.312 0.362 | 0.328 0.365 | 0.180 0.263 | 0.164 0.211 | 0.184 0.275
Linear Interpolation | 0.383 0.398 | 0.296 0.347 | 0.336 0.370 | 0.181 0.263 | 0.165 0.210 | 0.196 0.277
Transposed Convolution | 0.377 0.407 | 0.290 0.344 | 0.326 0.366 | 0.178 0.261 | 0.163 0.211 | 0.188 0.274
Frequency Upsampling | 0.367 0.395 | 0.290 0.340 | 0.322 0.361 | 0.174 0.255 | 0.162 0.208 | 0.174 0.266
", + "bbox": [ + 174, + 130, + 823, + 227 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/4b7a1ce71709331718e998581dae0bbeeac94f55ab0dc7ac780ad781a1329780.jpg", + "table_caption": [ + "Table 3: Ablation study of the Multi-order KANs. The best results are in bold." + ], + "table_footnote": [], + "table_body": "
Datasets | ETTh1 | ETTh2 | ETTm1 | ETTm2 | Weather
Metric | MSE MAE | MSE MAE | MSE MAE | MSE MAE | MSE MAE
MLPs | 0.376 0.397 | 0.298 0.348 | 0.319 0.361 | 0.178 0.264 | 0.162 0.211
Fixed Low-order KANs | 0.376 0.398 | 0.292 0.341 | 0.327 0.366 | 0.175 0.257 | 0.164 0.211
Fixed High-order KANs | 0.380 0.407 | 0.310 0.363 | 0.327 0.269 | 0.176 0.257 | 0.164 0.212
Multi-order KANs | 0.367 0.395 | 0.290 0.340 | 0.322 0.361 | 0.174 0.255 | 0.162 0.208
", + "bbox": [ + 210, + 271, + 785, + 368 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Decomposition-Learning-Mixing framework, closely integrating the characteristics of Multi-order KANs with this hierarchical architecture, enabling superior performance in a wide range of long-term forecasting tasks.", + "bbox": [ + 169, + 395, + 823, + 438 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.2 ABLATION STUDY", + "text_level": 1, + "bbox": [ + 171, + 455, + 341, + 469 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this section, we investigate several key components of TimeKAN, including Frequency Upsampling, Depthwise Convolution and Multi-order KANs.", + "bbox": [ + 169, + 482, + 823, + 512 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Frequency Upsampling To investigate the effectiveness of Frequency Upsampling, we compared it with three alternative upsampling methods that may not preserve frequency information before and after transformation: (1) Linear Mapping; (2) Linear Interpolation; and (3) Transposed Convolution. As shown in Table 2, replacing Frequency Upsampling with any of these three methods resulted in a decline in performance. This indicates that these upsampling techniques fail to maintain the integrity of frequency information after transforming, leading to the Decomposition-Learning-Mixing framework ineffective. This strongly demonstrates that the chosen Frequency Upsampling, as a non-parametric method, is an irreplaceable component of the TimeKAN framework.", + "bbox": [ + 169, + 527, + 826, + 642 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Multi-order KANs We designed the following modules to investigate the effectiveness of Multi-order KANs: (1) MLPs, which means using MLP to replace each KAN; (2) Fixed Low-order KANs, which means using a KAN of order 2 at each frequency level; and (3) Fixed High-order KANs, which means using a KAN of order 5 at each frequency level. The comparison results are shown in Table 3. Overall, Multi-order KANs achieved the best performance. Compared to MLPs, Multi-order KANs perform significantly better, demonstrating that well-designed KANs possess stronger representation capabilities than MLPs and are a compelling alternative. Both Low-order KANs and High-order KANs performed worse than Multi-order KANs, indicating the validity of our design choice to incrementally increase the order of KANs to adapt to the representation of different frequency components. Thus, the learnable functions of KANs are indeed a double-edged sword; achieving satisfactory results requires selecting the appropriate level of function complexity for specific tasks.", + "bbox": [ + 169, + 656, + 826, + 824 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Depthwise Convolution To assess the effectiveness of Depthwise Convolution, we replace it with the following choice: (1) w/o Depthwise Convolution; (2) Standard Convolution; (3) Multi-head Self-Attention. The results are shown in Table 4. Overall, Depthwise Convolution is the best choice. We clearly observe that removing Depthwise Convolution or replacing it with Multi-head Self-Attention leads to a significant drop in performance, highlighting the effectiveness of using convolution to learn temporal dependencies. 
When Depthwise Convolution is replaced with Standard", + "bbox": [ + 169, + 839, + 826, + 926 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/5140572fdcf0b103f43c0a59f6fc6a674f7fa68a9b3190106ec11f1644c9e00e.jpg", + "table_caption": [ + "Table 4: Ablation study of the Depthwise Convolution. The best results are in bold." + ], + "table_footnote": [], + "table_body": "
Datasets | ETTh1 | ETTh2 | ETTm1 | ETTm2 | Weather
Metric | MSE MAE | MSE MAE | MSE MAE | MSE MAE | MSE MAE
w/o Depthwise Conv | 0.379 0.397 | 0.296 0.343 | 0.337 0.373 | 0.180 0.263 | 0.168 0.211
Standard Conv | 0.364 0.393 | 0.295 0.345 | 0.323 0.364 | 0.180 0.264 | 0.162 0.210
Self-Attention | 0.377 0.406 | 0.293 0.342 | 0.329 0.365 | 0.184 0.272 | 0.174 0.225
Depthwise Conv | 0.367 0.395 | 0.290 0.340 | 0.322 0.361 | 0.174 0.255 | 0.162 0.208
", + "bbox": [ + 220, + 130, + 777, + 228 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/cb2d292288b505849be1ebccd78db2174a67eb9322af381215248d8bf02f49c6.jpg", + "image_caption": [ + "Figure 2: Comparison of forecasting performance between TimeKAN and other three models with varying look-back windows on ETTm2 and Weather datasets. The look-back windows are selected to be $T \\in \\{48,96,192,336,512,720\\}$ , and the prediction length is fixed to $F = 96$ ." + ], + "image_footnote": [], + "bbox": [ + 176, + 243, + 500, + 401 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/5406045b8d3dd7cdbaee74bf072ecba19610f7495c9691ff5b71a89f25be987c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 243, + 821, + 400 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Convolution, there are declines in most metrics, which implies that focusing on extracting temporal dependencies individually with Depthwise Convolution, without interference from inter-channel relationships, is a reasonable design.", + "bbox": [ + 169, + 502, + 823, + 546 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Varing Look-back Window In principle, extending the look-back window can provide more information for predicting future, leading to a potential improvement in forecasting performance. A effective long-term TSF method equipped with a strong temporal relation extraction capability should be able to improve forecasting performance when look-back window length increasing (Zeng et al., 2023). As a model based on frequency decomposition learning, TimeKAN should achieve better predictive performance as the look-back window lengths, since more incremental frequency information is available for prediction. To demonstrate that TimeKAN benefits from a larger look-back window, we select look-back window lengths from $T = \\{48,96,192,336,512,720\\}$ while keeping the prediction length fixed at 96. As demonstrated in Figure 2, our TimeKAN consistently reduces the MSE scores as the look-back window increases, indicating that TimeKAN can effectively learn from long time series.", + "bbox": [ + 169, + 560, + 826, + 715 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.3 MODEL EFFICIENCY", + "text_level": 1, + "bbox": [ + 171, + 729, + 357, + 744 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We compare TimeKAN with MLP-based method TimeMier and Transformer-based methods iTransformer and PatchTST, in terms of model parameters and Multiply-Accumulate Operations (MACs), to validate that TimeKAN is a lightweight and efficient architecture. To ensure a fair comparison, we fix the prediction length $F = 96$ and input length $T = 96$ , and set the input batch size to 32. The comparison results are summarized in Table 5. It is clear that our TimeKAN demonstrates significant advantages in both model parameter size and MACs, particularly when compared to Transformer-based models. For instance, on the Electricity dataset, the parameter count of PatchTST is nearly 295 times that of TimeKAN, and its MACs are almost 118 times greater. Even when compared to the relatively lightweight MLP-based method TimeMixer, TimeKAN shows superior efficiency. On the Weather dataset, TimeKAN requires only $20.05\\%$ of the parameters needed by TimeMixer and only $36.14\\%$ of the MACs. This remarkable efficiency advantage is primarily attributed to the lightweight architectural design. 
The main computations of the TimeKAN model are concentrated", + "bbox": [ + 169, + 757, + 826, + 925 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/5f15770d99924dd348277e9a8fc7ffae4b18e431b48c93fc67aac8efba01159d.jpg", + "table_caption": [ + "Table 5: A comparison of model parameters (Params) and multiply-accumulate operations (MACs) for TimeKAN and three other models. To ensure a fair comparison, we fix the prediction length $F = 96$ and the input length $T = 96$ , and set the input batch size to 32. The lowest computational cost is highlighted in bold." + ], + "table_footnote": [], + "table_body": "
Datasets | ETTh1 | ETTh2 | ETTm1 | ETTm2 | Weather | Electricity
Metric | Params MACs | Params MACs | Params MACs | Params MACs | Params MACs | Params MACs
TimeMixer | 75.50K 20.37M | 75.50K 20.37M | 75.50K 20.37M | 77.77K 24.18M | 104.43K 82.62M | 106.83K 1.26G
iTransformer | 841.57K 77.46M | 224.22K 19.86M | 224.22K 19.86M | 224.22K 19.86M | 4.83M 1.16G | 4.83M 16.29G
PatchTST | 3.75M 5.90G | 10.06M 17.66G | 3.75M 5.90G | 10.06M 17.66G | 6.90M 35.30G | 6.90M 539.38G
TimeKAN | 12.84K 7.63M | 15.00K 8.02M | 14.38K 7.63M | 38.12K 16.66M | 20.94K 29.86M | 23.34K 456.50M
", + "bbox": [ + 181, + 169, + 828, + 265 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "in the M-KAN block, and the Depthwise Convolution we employed significantly reduces the number of parameters through grouped operations. Additionally, the powerful representation capabilities afforded by Multi-order KANs allow us to represent time series with very few neurons. Therefore, we cannot overlook that TimeKAN achieves outstanding forecasting performance while requiring minimal computational resources.", + "bbox": [ + 169, + 292, + 826, + 364 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "5 CONCLUSION", + "text_level": 1, + "bbox": [ + 171, + 386, + 320, + 402 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "We proposed an efficient KAN-based Frequency Decomposition Learning architecture (TimeKAN) for long-term time series forecasting. Based on Decomposition-Learning-Mixing architecture, TimeKAN obtains series representations for each frequency band using a Cascaded Frequency Decomposition blocks. Additionally, a Multi-order KAN Representation Learning blocks further leverage the high flexibility of KAN to learn and represent specific temporal patterns within each frequency band. Finally, Frequency Mixing blocks recombine the frequency bands into the original format. Extensive experiments on real-world datasets demonstrate that TimeKAN achieves the state of the art forecasting performance and extremely lightweight computational consumption.", + "bbox": [ + 169, + 417, + 823, + 532 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "ACKNOWLEDGEMENTS", + "text_level": 1, + "bbox": [ + 171, + 553, + 369, + 566 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "This work is supported by Shanghai Artificial Intelligence Laboratory. This work was done during Songtao Huang's internship at Shanghai Artificial Intelligence Laboratory.", + "bbox": [ + 169, + 584, + 823, + 613 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 171, + 636, + 287, + 651 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Alexander Dylan Bodner, Antonio Santiago Tepsich, Jack Natan Spolski, and Santiago Pourteau. Convolutional kolmogorov-arnold networks. arXiv preprint arXiv:2406.13155, 2024.", + "Tao Dai, Beiliang Wu, Peiyuan Liu, Naiqi Li, Jigang Bao, Yong Jiang, and Shu-Tao Xia. Periodicity decoupling framework for long-term series forecasting. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=dp27P5HBBt.", + "Luo donghao and wang xue. ModernTCN: A modern pure convolution structure for general time series analysis. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=vpJMJerXHU.", + "Mononito Goswami, Konrad Szafer, Arjun Choudhry, Yifu Cai, Shuo Li, and Artur Dubrawski. Moment: A family of open time-series foundation models. In ICML, 2024. URL https://openreview.net/forum?id=FVvf69a5rx.", + "Hongbin Huang, Minghua Chen, and Xiao Qiao. Generative learning for financial time series with irregular and scale-invariant patterns. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=CdjnzWsQax." 
+ ], + "bbox": [ + 169, + 660, + 825, + 924 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 946, + 509, + 960 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Weiwei Jiang and Jiayun Luo. Graph neural network for traffic forecasting: A survey. Expert Systems with Applications, 207:117921, 2022. ISSN 0957-4174. doi: https://doi.org/10.1016/j.eswa.2022.117921. URL https://www.sciencedirect.com/science/article/pii/S0957417422011654.", + "Remi Lam, Alvaro Sanchez-Gonzalez, Matthew Willson, Peter Wirnsberger, Meire Fortunato, Ferran Alet, Suman Ravuri, Timo Ewalds, Zach Eaton-Rosen, Weihua Hu, Alexander Merose, Stephan Hoyer, George Holland, Oriol Vinyals, Jacklynn Stott, Alexander Pritzel, Shakir Mohamed, and Peter Battaglia. Learning skillful medium-range global weather forecasting. Science, 382(6677):1416-1421, 2023. doi: 10.1126/science.adi2336. URL https://www.science.org/doi/abs/10.1126/science.adi2336.", + "Chenxin Li, Xinyu Liu, Wuyang Li, Cheng Wang, Hengyu Liu, and Yixuan Yuan. U-kan makes strong backbone for medical image segmentation and generation. arXiv preprint arXiv:2406.02918, 2024.", + "Ziyao Li. Kolmogorov-arnold networks are radial basis function networks. arXiv preprint arXiv:2405.06721, 2024.", + "Shengsheng Lin, Weiwei Lin, Wentai Wu, Haojun Chen, and Junjie Yang. SparseTSF: Modeling long-term time series forecasting with $^{*}1\\mathrm{k}^{*}$ parameters. In *Forty-first International Conference on Machine Learning*, 2024. URL https://openreview.net/forum?id=54NSHO01Fe.", + "Minhao Liu, Ailing Zeng, Muxi Chen, Zhijian Xu, Qiuxia Lai, Lingna Ma, and Qiang Xu. Scinet: Time series modeling and forecasting with sample convolution and interaction. Advances in Neural Information Processing Systems, 35:5816-5828, 2022.", + "Qingxiang Liu, Xu Liu, Chenghao Liu, Qingsong Wen, and Yuxuan Liang. Time-FFM: Towards LM-empowered federated foundation model for time series forecasting. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024a. URL https://openreview.net/forum?id=HS0faHRhWD.", + "Yong Liu, Tengge Hu, Haoran Zhang, Haixu Wu, Shiyu Wang, Lintao Ma, and Mingsheng Long. itransformer: Inverted transformers are effective for time series forecasting. In The Twelfth International Conference on Learning Representations, 2024b. URL https://openreview.net/forum?id=JePfAI8fah.", + "Ziming Liu, Yixuan Wang, Sachin Vaidya, Fabian Ruehle, James Halverson, Marin Soljacic, Thomas Y Hou, and Max Tegmark. Kan: Kolmogorov-arnold networks. arXiv preprint arXiv:2404.19756, 2024c.", + "Yuqi Nie, Nam H Nguyen, Phanwadee Sinthong, and Jayant Kalagnanam. A time series is worth 64 words: Long-term forecasting with transformers. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=Jbdc0vTOcol.", + "Khemraj Shukla, Juan Diego Toscano, Zhicheng Wang, Zongren Zou, and George Em Karniadakis. A comprehensive and fair comparison between mlp and kan representations for differential equations and operator networks. arXiv preprint arXiv:2406.02917, 2024.", + "Sidharth SS. Chebyshev polynomial-based kolmogorov-arnold networks: An efficient architecture for nonlinear function approximation. 
arXiv preprint arXiv:2405.07200, 2024.", + "Huiqiang Wang, Jian Peng, Feihu Huang, Jince Wang, Junhui Chen, and Yifei Xiao. MICN: Multiscale local and global context modeling for long-term series forecasting. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=zt53IDUR1U.", + "Shiyu Wang, Haixu Wu, Xiaoming Shi, Tengge Hu, Huakun Luo, Lintao Ma, James Y. Zhang, and JUN ZHOU. Timemixer: Decomposable multiscale mixing for time series forecasting. In The Twelfth International Conference on Learning Representations, 2024a. URL https://openreview.net/forum?id=7oLshfEIC2." + ], + "bbox": [ + 171, + 102, + 826, + 924 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 948, + 506, + 959 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Yizheng Wang, Jia Sun, Jinshuai Bai, Cosmin Anitescu, Mohammad Sadegh Eshaghi, Xiaoying Zhuang, Timon Rabczuk, and Yinghua Liu. Kolmogorov arnold informed neural network: A physics-informed deep learning framework for solving pdes based on kolmogorov arnold networks. arXiv preprint arXiv:2406.11045, 2024b.", + "Haixu Wu, Jiehui Xu, Jianmin Wang, and Mingsheng Long. Autoformer: Decomposition transformers with auto-correlation for long-term series forecasting. In M. Ranzato, A. Beygelzimer, Y. Dauphin, P.S. Liang, and J. Wortman Vaughan (eds.), Advances in Neural Information Processing Systems, volume 34, pp. 22419-22430. Curran Associates, Inc., 2021. URL https://proceedings.neurips.cc/paper_files/paper/2021/file/bcc0d400288793e8bcdcd7c19a8ac0c2b-Paper.pdf.", + "Haixu Wu, Tengge Hu, Yong Liu, Hang Zhou, Jianmin Wang, and Mingsheng Long. Timesnet: Temporal 2d-variation modeling for general time series analysis. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=ju_Uqw384Oq.", + "Kunpeng Xu, Lifei Chen, and Shengrui Wang. Are kan effective for identifying and tracking concept drift in time series? arXiv preprint arXiv:2410.10041, 2024a.", + "Zhijian Xu, Ailing Zeng, and Qiang Xu. FITS: Modeling time series with $10k$ parameters. In The Twelfth International Conference on Learning Representations, 2024b. URL https://openreview.net/forum?id=bWcvvZ3qMb.", + "Kun Yi, Qi Zhang, Wei Fan, Shoujin Wang, Pengyang Wang, Hui He, Ning An, Defu Lian, Longbing Cao, and Zhendong Niu. Frequency-domain mlps are more effective learners in time series forecasting. Advances in Neural Information Processing Systems, 36, 2024.", + "Linfei Yin, Xinghui Cao, and Dongduan Liu. Weighted fully-connected regression networks for one-day-ahead hourly photovoltaic power forecasting. Applied Energy, 332:120527, 2023. ISSN 0306-2619. doi: https://doi.org/10.1016/j.apenergy.2022.120527. URL https://www.sciencedirect.com/science/article/pii/S0306261922017846.", + "Ailing Zeng, Muxi Chen, Lei Zhang, and Qiang Xu. Are transformers effective for time series forecasting? In Proceedings of the AAAI conference on artificial intelligence, volume 37, pp. 11121-11128, 2023.", + "G.Peter Zhang. Time series forecasting using a hybrid arima and neural network model. Neurocomputing, 50:159-175, 2003. ISSN 0925-2312. doi: https://doi.org/10.1016/S0925-2312(01)00702-0. 
URL https://www.sciencedirect.com/science/article/pii/S0925231201007020.", + "Haoyi Zhou, Shanghang Zhang, Jieqi Peng, Shuai Zhang, Jianxin Li, Hui Xiong, and Wancai Zhang. Informer: Beyond efficient transformer for long sequence time-series forecasting. In Proceedings of the AAAI conference on artificial intelligence, volume 35, pp. 11106-11115, 2021.", + "Tian Zhou, Ziqing Ma, Qingsong Wen, Liang Sun, Tao Yao, Wotao Yin, Rong Jin, et al. Film: Frequency improved legendre memory model for long-term time series forecasting. Advances in neural information processing systems, 35:12677-12690, 2022a.", + "Tian Zhou, Ziqing Ma, Qingsong Wen, Xue Wang, Liang Sun, and Rong Jin. Fedformer: Frequency enhanced decomposed transformer for long-term series forecasting. In International conference on machine learning, pp. 27268-27286. PMLR, 2022b." + ], + "bbox": [ + 171, + 102, + 825, + 797 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 946, + 509, + 959 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "A ADDITIONAL MODEL ANALYSIS", + "text_level": 1, + "bbox": [ + 171, + 102, + 480, + 118 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/8158e74fad2c54f26135ce46a28fc89e6c8304f8ee7fccf6f6c159ce516f523b.jpg", + "table_caption": [ + "Table 6: Full comparison results of model parameters (Params) and multiply-accumulate operations (MACs) for TimeKAN and other models. To ensure a fair comparison, we fix the prediction length $F = 96$ and the input length $T = 96$ , and set the input batch size to 32. The lowest computational cost is highlighted in bold." + ], + "table_footnote": [], + "table_body": "
Datasets | ETTh1 | ETTh2 | ETTm1 | ETTm2 | Weather | Electricity
Metric | Params | MACs | Params | MACs | Params | MACs | Params | MACs | Params | MACs | Params | MACs
TimeMixer | 75.50K | 20.37M | 75.50K | 20.37M | 75.50K | 20.37M | 77.77K | 24.18M | 104.43K | 82.62M | 106.83K | 1.26G
iTransformer | 841.57K | 77.46M | 224.22K | 19.86M | 224.22K | 19.86M | 224.22K | 19.86M | 4.83M | 1.16G | 4.83M | 16.29G
PatchTST | 3.75M | 5.90G | 10.06M | 17.66G | 3.75M | 5.90G | 10.06M | 17.66G | 6.90M | 35.30G | 6.90M | 539.38G
TimesNet | 605.48K | 18.13G | 1.19M | 36.28G | 4.71M | 144G | 1.19M | 36.28G | 1.19M | 36.28G | 150.30M | 4.61T
MICN | 25.20M | 71.95G | 25.20M | 71.95G | 25.20M | 71.95G | 25.20M | 71.95G | 111.03K | 295.07M | 6.64M | 19.5G
DLinear | 18.62K | 0.6M | 18.62K | 0.6M | 18.62K | 0.6M | 18.62K | 0.6M | 18.62K | 0.6M | 18.62K | 0.6M
FreTS | 3.24M | 101.46M | 3.24M | 101.46M | 3.24M | 101.46M | 3.24M | 101.46M | 3.24M | 101.46M | 3.24M | 101.46M
FiLM | 12.58M | 2.82G | 12.58M | 2.82G | 12.58M | 2.82G | 12.58M | 2.82G | 12.58M | 8.46G | 12.58M | 8.46G
FEDformer | 23.38M | 24.96G | 23.38M | 24.96G | 23.38M | 24.96G | 23.38M | 24.96G | 23.45M | 25.23G | 24.99M | 30.89G
Autoformer | 10.54M | 22.82G | 10.54M | 22.82G | 10.54M | 22.82G | 10.54M | 22.82G | 10.61M | 23.08G | 12.14M | 28.75G
TimeKAN | 12.84K | 7.63M | 15.00K | 8.02M | 14.38K | 7.63M | 38.12K | 16.66M | 20.94K | 29.86M | 23.34K | 456.50M
", + "bbox": [ + 181, + 218, + 861, + 405 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A.1 COMPUTATIONAL COMPLEXITY ANALYSIS", + "text_level": 1, + "bbox": [ + 171, + 449, + 514, + 462 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "In our TimeKAN, the main computational complexity lies in Fast Fourier Transform (FFT), Depthwise Convolution block and Multi-order KAN block. Consider a time series with length $L$ and the hidden state of each time point is $D$ . For FFT, the computation complexity is $\\mathcal{O}(L\\log L)$ . For Depthwise Convolution block, if we set the convolutional kernel to $M$ and stride to 1, the complexity is $\\mathcal{O}(LDM)$ . Finally, assuming that the highest order of Chebyshev polynomials is $K$ , the complexity of Multi-order KAN block is $\\mathcal{O}(LD^2K)$ . Since $M, D, K$ are constants that are independent of the input length $L$ , the computational complexity of both the Depthwise Convolution block and the Multi-order KAN block can be reduced to $\\mathcal{O}(L)$ , which is linear about the sequence length. In summary, the overall computational complexity is $\\max(\\mathcal{O}(L\\log L), \\mathcal{O}(L) = \\mathcal{O}(L\\log L)$ . When the input is a multivariate sequence with $M$ variables, the computational complexity will expand to $\\mathcal{O}(ML\\log L)$ due to our variable-independent strategy.", + "bbox": [ + 169, + 478, + 826, + 632 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A.2 MODEL EFFICIENCY", + "text_level": 1, + "bbox": [ + 171, + 657, + 361, + 672 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Here, we provide the complete results of model efficiency in terms of parameters and MACs in Table 6. As can be seen, except for DLinear, our TimeKAN consistently demonstrates a significant advantage in both parameter count and MACs compared to any other model. DLinear is a model consisting of only a single linear layer, which makes it the most lightweight in terms of parameters and MACs. However, the performance of DLinear already shows a significant gap when compared to state-of-the-art methods. Therefore, our TimeKAN actually achieves superior performance in both forecasting accuracy and efficiency.", + "bbox": [ + 169, + 686, + 826, + 787 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A.3 ERROR BARS", + "text_level": 1, + "bbox": [ + 171, + 810, + 310, + 824 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "To evaluate the robustness of TimeKAN, we repeated the experiments on three randomly selected seeds and compared it with the second-best model (TimeMixer). We report the mean and standard deviation of the results across the three experiments, as well as the confidence level of TimeKAN's superiority over TimeMixer. The results are averaged over four prediction horizons (96, 192, 336, and 720). 
As shown in Table 7, in most cases we have over $90\%$ confidence that TimeKAN outperforms the second-best model, which also demonstrates the good robustness of TimeKAN.", + "bbox": [ + 169, + 839, + 826, + 925 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/fcf89873993419466062669ffd52ede44eb6974539327d9184ab21e59164c34d.jpg", + "table_caption": [ + "Table 7: Standard deviation and statistical tests for our TimeKAN method and the second-best method (TimeMixer) on five datasets." + ], + "table_footnote": [], + "table_body": "
Metric | MSE | MAE
Dataset | TimeKAN | TimeMixer | Confidence | TimeKAN | TimeMixer | Confidence
ETTh1 | 0.422±0.004 | 0.462±0.006 | 99% | 0.430±0.002 | 0.448±0.004 | 99%
ETTh2 | 0.387±0.003 | 0.392±0.003 | 99% | 0.408±0.003 | 0.412±0.004 | 90%
ETTm1 | 0.378±0.002 | 0.386±0.003 | 99% | 0.396±0.001 | 0.399±0.001 | 99%
ETTm2 | 0.278±0.001 | 0.278±0.001 | - | 0.324±0.001 | 0.325±0.001 | 90%
Weather | 0.243±0.001 | 0.245±0.001 | 99% | 0.273±0.001 | 0.276±0.001 | 99%
", + "bbox": [ + 233, + 140, + 764, + 252 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/03656b0acd40951533c21c03f799d3d3cf850937be9909523840600027e3d014.jpg", + "table_caption": [ + "Table 8: Comparison on the Electricity dataset when the look back window is expanded to 512." + ], + "table_footnote": [], + "table_body": "
Models | 96 | 192 | 336 | 720
Metric | MSE | MAE | MSE | MAE | MSE | MAE | MSE | MAE
MOMENT | 0.136 | 0.233 | 0.152 | 0.247 | 0.167 | 0.264 | 0.205 | 0.295
TimeMixer | 0.135 | 0.231 | 0.149 | 0.245 | 0.172 | 0.268 | 0.203 | 0.295
TimeKAN | 0.133 | 0.230 | 0.149 | 0.247 | 0.165 | 0.261 | 0.203 | 0.294
", + "bbox": [ + 305, + 311, + 691, + 400 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A.4 FREQUENCY LEARNING WITH LONGER WINDOW", + "text_level": 1, + "bbox": [ + 171, + 445, + 558, + 459 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "In Table 1, TimeKAN performs relatively poorly on the Electricity dataset. We infer that its poor performance on the electricity dataset is due to the overly short look-back window ( $T = 96$ ), which cannot provide sufficient frequency information. To verify this, we compare the average number of effective frequency components under a specific look-back window. Specifically, we randomly select a sequence of length $T$ from the electricity dataset and transform it into the frequency domain using FFT. We define effective frequencies as those with amplitudes greater than 0.1 times the maximum amplitude. Then, we take the average number of effective frequencies obtained across all variables to reflect the amount of effective frequency information provided by the sequence. When $T = 96$ (the setting in this paper), the average number of effective frequencies is 10.69. When we extend the sequence length to 512, the average number of effective frequencies becomes 19.74. Therefore, the effective frequency information provided by 512 time steps is nearly twice that of 96 time steps. This indicates that $T = 96$ loses a substantial amount of effective information.", + "bbox": [ + 169, + 479, + 826, + 647 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "To validate whether using $T = 512$ allows us to leverage more frequency information, we extend the look-back window of TimeKAN to 512 on the electricity dataset and compare it with the state-of-the-art methods TimeMixer and time series foundation model MOMENT (Goswami et al., 2024). The results are shown in Table 8. Although TimeKAN performs significantly worse than TimeMixer when $T = 96$ , it achieves the best performance on the electricity dataset when the look-back window is extended to 512. This also demonstrates that TimeKAN can benefit significantly from richer frequency information.", + "bbox": [ + 169, + 652, + 826, + 752 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A.5 IMPACT OF NUMBER OF FREQUENCY BANDS", + "text_level": 1, + "bbox": [ + 171, + 791, + 529, + 806 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "To explore the impact of the number of frequency bands on performance, we set the number of frequency bands to 2, 3, 4, and 5. The effects of different frequency band divisions on performance are shown in the Table 9. As we can see, in most cases, dividing the frequency bands into 3 or 4 layers yields the best performance. 
This aligns with our prior intuition: dividing into only two bands leaves too many frequency components mixed within each band, while dividing into five bands leaves too little information within each band, making it difficult to accurately model each frequency range.", + "bbox": [ + 169, + 825, + 826, + 925 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 490, + 946, + 509, + 959 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/0073a41c665bce2da16e409a0311de07c167ee109ec36819ab56b2a80eb2e03d.jpg", + "table_caption": [ + "Table 9: Impact of the number of frequency bands on performance under the 96-to-96 prediction setting." + ], + "table_footnote": [], + "table_body": "
Number of Frequency Bands | ETTh2 | Weather | Electricity
Metric | MSE | MAE | MSE | MAE | MSE | MAE
2 | 0.292 | 0.340 | 0.164 | 0.209 | 0.183 | 0.270
3 | 0.290 | 0.339 | 0.163 | 0.209 | 0.177 | 0.268
4 | 0.290 | 0.340 | 0.162 | 0.208 | 0.174 | 0.266
5 | 0.295 | 0.346 | 0.164 | 0.211 | 0.177 | 0.273
", + "bbox": [ + 307, + 128, + 691, + 220 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "B MATHEMATICAL DETAILS", + "text_level": 1, + "bbox": [ + 171, + 241, + 426, + 257 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "B.1 KOLMOGOROV-ARNOLD NETWORK", + "text_level": 1, + "bbox": [ + 171, + 272, + 464, + 286 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Kolmogorov-Arnold representation theorem states that any multivariate continuous function can be expressed as a combination of univariate functions and addition operations. More specifically, a multivariate continuous function $g:[0,1]^n\\Rightarrow \\mathbb{R}$ can be defined as:", + "bbox": [ + 169, + 297, + 823, + 340 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\ng (x) = g \\left(x _ {1}, \\dots , x _ {n}\\right) = \\sum_ {i = 1} ^ {2 n + 1} \\Phi_ {i} \\left(\\sum_ {j = 1} ^ {n} \\phi_ {i j} \\left(x _ {j}\\right)\\right) \\tag {13}\n$$\n", + "text_format": "latex", + "bbox": [ + 334, + 345, + 821, + 386 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "where $\\phi_{ij}$ and $\\Phi_i$ are univariate functions. Following the pattern of MLP, Kolmogorov-Arnold Network (KAN) (Liu et al., 2024c) extends the Kolmogorov-Arnoldtheorem to deep representations, i.e., stacked multilayer Kolmogorov-Arnold representations. Assume that KAN is composed of $L + 1$ layer neurons and the number of neurons in layer $l$ is $n_l$ . The transmission relationship between the $j$ -th neuron in layer $l + 1$ and all neurons in layer $l$ can be expressed as:", + "bbox": [ + 169, + 390, + 823, + 460 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\nx _ {l + 1, j} = \\sum_ {i = 1} ^ {n _ {l}} \\phi_ {l, j, i} \\left(x _ {l, i}\\right) \\tag {14}\n$$\n", + "text_format": "latex", + "bbox": [ + 415, + 464, + 823, + 503 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "We can simply understand that each neuron is connected to other neurons in the previous layer through a univariate function $\\phi$ . Similar to MLP, the computation of all neurons at layer $l$ can be reorganized as a function matrix multiplication $\\Phi_{l-1}$ . Therefore, given a input vector $x \\in \\mathbb{R}^{n_0}$ , the final output of KAN network is:", + "bbox": [ + 169, + 506, + 823, + 561 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {K A N} (x) = \\left(\\Phi_ {L - 1} \\circ \\dots \\circ \\Phi_ {1} \\circ \\Phi_ {0}\\right) x \\tag {15}\n$$\n", + "text_format": "latex", + "bbox": [ + 369, + 566, + 823, + 582 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "In vanilla KAN (Liu et al., 2024c), the univariate function $\\phi_{l,j,i}$ is parametrized using B-splines, which is a class of smooth curves constructed via segmented polynomial basis functions. 
To ensure stability and enhance the representational capacity, KAN overlays the spline function on a fixed basis function $b$ , which is typically the SiLU function:", + "bbox": [ + 169, + 585, + 823, + 642 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\phi(x) = w_{b} b(x) + w_{s} \operatorname{spline}(x) \tag{16}\n$$\n", + "text_format": "latex", + "bbox": [ + 393, + 645, + 823, + 662 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\operatorname{spline}(x) = \sum_{i} c_{i} B_{i}(x) \tag{17}\n$$\n", + "text_format": "latex", + "bbox": [ + 413, + 664, + 823, + 695 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "where $w_{b}$ and $w_{s}$ are learnable weights and $\operatorname{spline}(x)$ is the spline function constructed from the linear combination of B-spline basis functions $B_{i}$ . However, the complex recursive computation process of high-order B-spline functions hinders the efficiency of KAN. Therefore, in this work, we adopt the simpler Chebyshev polynomial as the univariate function to replace the B-spline function (SS, 2024). The Chebyshev polynomial is defined as follows:", + "bbox": [ + 169, + 696, + 823, + 768 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\nT_{k}(x) = \cos(k \arccos(x)) \tag{18}\n$$\n", + "text_format": "latex", + "bbox": [ + 406, + 771, + 823, + 787 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Here, $k$ represents the order of the polynomial. Then, we consider the univariate function $\phi$ as a linear combination of Chebyshev polynomials with different orders:", + "bbox": [ + 169, + 790, + 823, + 820 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\nx_{l + 1, j} = \sum_{i = 1}^{n_{l}} \phi_{l, j, i}\left(x_{l, i}\right) = \sum_{i = 1}^{n_{l}} \sum_{k = 0}^{K} \Theta_{i, k} T_{k}\left(\tanh\left(x_{l, i}\right)\right) \tag{19}\n$$\n", + "text_format": "latex", + "bbox": [ + 315, + 824, + 823, + 864 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "where $\Theta_{i,k}$ are the coefficients of the $k$ -th order Chebyshev polynomial acting on $x_{l,i}$ , and $\tanh$ is used to normalize the inputs to between -1 and 1. By adjusting the highest order of the Chebyshev polynomial $K$ , we can control the fitting capability of KAN. This also inspires our design of the Multi-order KAN to dynamically represent different frequencies.", + "bbox": [ + 169, + 867, + 823, + 925 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "B.2 FOURIER TRANSFORM", + "text_level": 1, + "bbox": [ + 171, + 103, + 375, + 118 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Time series are often composed of multiple frequency components superimposed on each other, and it is difficult to observe these individual frequency components directly in the time domain. Therefore, transforming a time series from the time domain to the frequency domain for analysis is often necessary. 
The Discrete Fourier Transform (DFT) is a commonly used domain transformation algorithm that converts a discrete-time signal from the time domain to the complex frequency domain. Mathematically, given a sequence of real numbers $x[n]$ in the time domain, where $n = 0,1,\dots,N - 1$ , the DFT process can be described as:", + "bbox": [ + 169, + 128, + 826, + 227 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\nX[k] = \sum_{n = 0}^{N - 1} x[n] \cdot e^{-i \frac{2\pi}{N} kn} = \sum_{n = 0}^{N - 1} x[n]\left(\cos\left(\frac{2\pi}{N} kn\right) - i \sin\left(\frac{2\pi}{N} kn\right)\right), \quad k = 0, 1, \dots, N - 1 \tag{20}\n$$\n", + "text_format": "latex", + "bbox": [ + 171, + 234, + 823, + 287 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "where $X[k]$ is the $k$ -th frequency component of the frequency-domain signal and $i$ is the imaginary unit. Similarly, we can use the Inverse DFT (iDFT) to convert a frequency-domain signal back to the time domain:", + "bbox": [ + 169, + 287, + 823, + 330 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\nx[n] = \frac{1}{N} \sum_{k = 0}^{N - 1} X[k] \cdot e^{i \frac{2\pi}{N} kn} = \frac{1}{N} \sum_{k = 0}^{N - 1} X[k]\left(\cos\left(\frac{2\pi}{N} kn\right) + i \sin\left(\frac{2\pi}{N} kn\right)\right) \tag{21}\n$$\n", + "text_format": "latex", + "bbox": [ + 228, + 335, + 825, + 377 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "The computational complexity of the DFT is typically $\mathcal{O}(N^2)$ (Zhou et al., 2022b). In practice, we use the Fast Fourier Transform (FFT) to efficiently compute the DFT, which reduces the computational complexity to $\mathcal{O}(N\log N)$ . 
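As a quick numerical check of Eqs. (20) and (21), consider the following minimal sketch (NumPy is used here purely as an illustrative tool and is not part of the paper):

```python
import numpy as np

N = 96
x = np.random.randn(N)             # a real-valued time-domain sequence x[n]

X = np.fft.fft(x)                  # DFT of Eq. (20), computed via the FFT in O(N log N)
x_rec = np.fft.ifft(X)             # inverse DFT of Eq. (21)
assert np.allclose(x, x_rec.real)  # the round trip recovers the original series

# For real inputs the spectrum is conjugate-symmetric, so the real FFT keeps
# only the N//2 + 1 non-redundant frequency components.
print(np.fft.rfft(x).shape)        # (49,) for N = 96
```

The rfft output length printed above is exactly the compression discussed next.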
Additionally, by employing the Real FFT (rFFT), we can compress an input sequence of $N$ real numbers into a signal sequence in the complex frequency domain containing $N / 2 + 1$ frequency components.", + "bbox": [ + 169, + 385, + 826, + 457 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 490, + 946, + 509, + 960 + ], + "page_idx": 15 + } +] \ No newline at end of file diff --git a/2025/TimeKAN_ KAN-based Frequency Decomposition Learning Architecture for Long-term Time Series Forecasting/c4acf521-4bf1-41df-95d9-c37b967e30fc_model.json b/2025/TimeKAN_ KAN-based Frequency Decomposition Learning Architecture for Long-term Time Series Forecasting/c4acf521-4bf1-41df-95d9-c37b967e30fc_model.json new file mode 100644 index 0000000000000000000000000000000000000000..099941dfce98ccf7099e29f22e81bf7f981b5ea1 --- /dev/null +++ b/2025/TimeKAN_ KAN-based Frequency Decomposition Learning Architecture for Long-term Time Series Forecasting/c4acf521-4bf1-41df-95d9-c37b967e30fc_model.json @@ -0,0 +1,2355 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.099, + 0.825, + 0.173 + ], + "angle": 0, + "content": "TIMEKAN: KAN-BASED FREQUENCY DECOMPOSITION LEARNING ARCHITECTURE FOR LONG-TERM TIME SERIES FORECASTING" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.194, + 0.546, + 0.21 + ], + "angle": 0, + "content": "Songtao Huang\\(^{1,2}\\), Zhen Zhao\\(^{1}\\), Can Li\\(^{3}\\), Lei Bai\\(^{4}\\)" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.21, + 0.59, + 0.224 + ], + "angle": 0, + "content": "\\(^{1}\\)Shanghai Artificial Intelligence Laboratory, Shanghai, China" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.224, + 0.753, + 0.239 + ], + "angle": 0, + "content": "\\(^{2}\\)School of Information Science and Engineering, Lanzhou University, Lanzhou, China" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.239, + 0.735, + 0.268 + ], + "angle": 0, + "content": "3The Key Laboratory of Road and Traffic Engineering of the Ministry of Education, Tongji University, Shanghai, China" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.268, + 0.615, + 0.282 + ], + "angle": 0, + "content": "huangsongtao@pjlab.org.cn, zhen.zhao@outlook.com," + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.282, + 0.558, + 0.295 + ], + "angle": 0, + "content": "lchelen1005@gmail.com, baisanshi@gmail.com" + }, + { + "type": "title", + "bbox": [ + 0.451, + 0.331, + 0.548, + 0.347 + ], + "angle": 0, + "content": "ABSTRACT" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.361, + 0.768, + 0.64 + ], + "angle": 0, + "content": "Real-world time series often have multiple frequency components that are intertwined with each other, making accurate time series forecasting challenging. Decomposing the mixed frequency components into multiple single frequency components is a natural choice. However, the information density of patterns varies across different frequencies, and employing a uniform modeling approach for different frequency components can lead to inaccurate characterization. 
To address these challenges, inspired by the flexibility of the recent Kolmogorov-Arnold Network (KAN), we propose a KAN-based Frequency Decomposition Learning architecture (TimeKAN) to tackle the complex forecasting challenges caused by multiple frequency mixtures. Specifically, TimeKAN mainly consists of three components: Cascaded Frequency Decomposition (CFD) blocks, Multi-order KAN Representation Learning (M-KAN) blocks, and Frequency Mixing blocks. CFD blocks adopt a bottom-up cascading approach to obtain series representations for each frequency band. Benefiting from the high flexibility of KAN, we design a novel M-KAN block to learn and represent specific temporal patterns within each frequency band. Finally, Frequency Mixing blocks are used to recombine the frequency bands into the original format. Extensive experimental results across multiple real-world time series datasets demonstrate that TimeKAN achieves state-of-the-art performance as an extremely lightweight architecture. Code is available at https://github.com/huangst21/TimeKAN." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.664, + 0.338, + 0.68 + ], + "angle": 0, + "content": "1 INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.695, + 0.825, + 0.78 + ], + "angle": 0, + "content": "Time series forecasting (TSF) has garnered significant interest due to its wide range of applications, including finance (Huang et al., 2024), energy management (Yin et al., 2023), traffic flow planning (Jiang & Luo, 2022), and weather forecasting (Lam et al., 2023). Recently, deep learning has led to substantial advancements in TSF, with state-of-the-art performance achieved by CNN-based methods (Wang et al., 2023; donghao & wang xue, 2024), Transformer-based methods (Nie et al., 2023; Liu et al., 2024b), and MLP-based methods (Zeng et al., 2023; Wang et al., 2024a)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.785, + 0.827, + 0.926 + ], + "angle": 0, + "content": "Due to the complex nature of the real world, observed multivariate time series are often nonstationary and exhibit diverse patterns. These intertwined patterns complicate the internal relationships within the time series, making it challenging to capture and establish connections between historical observations and future targets. To address the complex temporal patterns in time series, an increasing number of studies focus on leveraging prior knowledge to decompose time series into simpler components that provide a basis for forecasting. For instance, Autoformer (Wu et al., 2021) decomposes time series into seasonal and trend components. This idea is also adopted by DLinear (Zeng et al., 2023) and FEDformer (Zhou et al., 2022b). Building on this foundation, TimeMixer (Wang et al., 2024a) further introduces multi-scale seasonal-trend decomposition and highlights the importance of interactions between different scales. 
Recent models like TimesNet (Wu et al.," + }, + { + "type": "page_number", + "bbox": [ + 0.495, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.189 + ], + "angle": 0, + "content": "2023), PDF (Dai et al., 2024), and SparseTSF (Lin et al., 2024) emphasize the inherent periodicity in time series and decompose long sequences into multiple shorter ones based on the period length, thereby enabling the separate modeling of inter-period and intra-period dependencies within temporal patterns. In summary, these different decomposition methods share a common goal: utilizing the simplified subsequences to provide critical information for future predictions, thereby achieving accurate forecasting." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.195, + 0.827, + 0.403 + ], + "angle": 0, + "content": "It is worth noting that time series are often composed of multiple frequency components, where the low-frequency components represent long-term periodic variations and the high-frequency components capture certain abrupt events. The mixture of different frequency components makes accurate forecasting particularly challenging. The aforementioned decomposition approaches motivate us to design a frequency decomposition framework that decouples different frequency components in a time series and independently learns the temporal patterns associated with each frequency. However, this introduces another challenge: the information density of patterns varies across different frequencies, and employing a uniform modeling approach for different frequency components can lead to inaccurate characterizations, resulting in sub-optimal results. Fortunately, a new neural network architecture, known as Kolmogorov-Arnold Networks (KAN) (Liu et al., 2024c), has recently gained significant attention in the deep learning community due to its outstanding data-fitting capabilities and flexibility, showing potential as a substitute for traditional MLP. Compared to MLP, KAN offers optional kernels and allows for the adjustment of kernel order to control its fitting capacity. This consideration leads us to explore the use of Multi-order KANs to represent temporal patterns across different frequencies, thereby providing more accurate information for forecasting." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.41, + 0.827, + 0.604 + ], + "angle": 0, + "content": "Motivated by these observations, we propose a KAN-based Frequency Decomposition Learning architecture (TimeKAN) to address the complex prediction challenges caused by multiple frequency mixtures. Specifically, TimeKAN first employs moving average to progressively remove relatively high-frequency components from the sequence. Subsequently, Cascaded Frequency Decomposition (CFD) blocks adopt a bottom-up cascading approach to obtain sequence representations for each frequency band. Multi-order KAN Representation Learning (M-KAN) blocks leverage the high flexibility of KAN to learn and represent specific temporal patterns within each frequency band. Finally, Frequency Mixing blocks recombine the frequency bands into the original format, ensuring that this Decomposition-Learning-Mixing process is repeatable, thereby modeling different temporal patterns at various frequencies more accurately. 
The final high-level sequence is then mapped to the desired forecasting output via a simple linear mapping. With our meticulously designed architecture, TimeKAN achieves state-of-the-art performance across multiple long-term time series forecasting tasks, while also being a lightweight architecture that outperforms complex TSF models with fewer computational resources." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.611, + 0.475, + 0.625 + ], + "angle": 0, + "content": "Our contributions are summarized as follows:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.64, + 0.825, + 0.695 + ], + "angle": 0, + "content": "- We revisit time series forecasting from the perspective of frequency decoupling, effectively disentangling time series characteristics through a frequency Decomposition-Learning-Mixing architecture to address challenges caused by complex information coupling in time series." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.704, + 0.825, + 0.745 + ], + "angle": 0, + "content": "- We introduce TimeKAN as a lightweight yet effective forecasting model and design novel M-KAN blocks to effectively model and represent patterns at different frequencies by maximizing the flexibility of KAN." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.754, + 0.825, + 0.782 + ], + "angle": 0, + "content": "- TimeKAN demonstrates superior performance across multiple TSF prediction tasks, while having a parameter count significantly lower than that of state-of-the-art TSF models." + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.64, + 0.825, + 0.782 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.807, + 0.347, + 0.822 + ], + "angle": 0, + "content": "2 RELATED WORK" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.841, + 0.462, + 0.855 + ], + "angle": 0, + "content": "2.1 KOLMOGOROV-ARNOLD NETWORK" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.868, + 0.825, + 0.925 + ], + "angle": 0, + "content": "The Kolmogorov-Arnold representation theorem states that any multivariate continuous function can be expressed as a combination of univariate functions and addition operations. The Kolmogorov-Arnold Network (KAN) (Liu et al., 2024c) leverages this theorem to propose an innovative alternative to traditional MLP. Unlike MLPs, which use fixed activation functions at the nodes, KAN introduces" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.135 + ], + "angle": 0, + "content": "learnable activation functions along the edges. Due to its flexibility and adaptability, KAN is considered a promising alternative to MLP." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.14, + 0.827, + 0.225 + ], + "angle": 0, + "content": "The original KAN was parameterized using spline functions. However, due to the inherent complexity of spline functions, the speed and scalability of the original KAN were not satisfactory. Consequently, subsequent research explored the use of simpler basis functions to replace splines, thereby achieving higher efficiency. ChebyshevKAN (SS, 2024) incorporates Chebyshev polynomials to parametrize the learnable functions. FastKAN (Li, 2024) uses faster Gaussian radial basis functions to approximate third-order B-spline functions."
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.23, + 0.828, + 0.344 + ], + "angle": 0, + "content": "Moreover, KAN has been applied as alternatives to MLP in various domains. Convolutional KAN (Bodner et al., 2024) replaces the linear weight matrices in traditional convolutional networks with learnable spline function matrices. U-KAN (Li et al., 2024) integrates KAN layers into the U-Net architecture, demonstrating impressive accuracy and efficiency in several medical image segmentation tasks. KAN has also been used to bridge the gap between AI and science. Works such as PIKAN (Shukla et al., 2024) and PINN (Wang et al., 2024b) utilize KAN to build physics-informed machine learning models. This paper aims to introduce KAN into TSF and demonstrate the strong potential of KAN in representing time series data." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.362, + 0.414, + 0.377 + ], + "angle": 0, + "content": "2.2 TIME SERIES FORECASTING" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.39, + 0.828, + 0.78 + ], + "angle": 0, + "content": "Traditional time series forecasting (TSF) methods, such as ARIMA (Zhang, 2003), can provide sufficient interpretability for the forecasting results but often fail to achieve satisfactory accuracy. In recent years, deep learning methods have dominated the field of TSF, mainly including CNN-based, Transformer-based, and MLP-based approaches. CNN-based models primarily apply convolution operations along the temporal dimension to extract temporal patterns. For example, MICN (Wang et al., 2023) and TimesNet (Wu et al., 2023) enhance the precision of sequence modeling by adjusting the receptive field to capture both short-term and long-term views within the sequences. ModernTCN (donghao & wang xue, 2024) advocates using large convolution kernels along the temporal dimension and capture both cross-time and cross-variable dependencies. Compared to CNN-based methods, which have limited receptive field, Transformer-based methods offer global modeling capabilities, making them more suitable for handling long and complex sequence data. They have become the cornerstone of modern time series forecasting. Informer (Zhou et al., 2021) is one of the early implementations of Transformer models in TSF, making efficient forecasting possible by carefully modifying the internal Transformer architecture. PatchTST (Nie et al., 2023) divides the sequence into multiple patches along the temporal dimension, which are then fed into the Transformer, establishing it as an important benchmark in the time series domain. In contrast, iTransformer (Liu et al., 2024b) treats each variable as an independent token to capture cross-variable dependencies in multivariate time series. However, Transformer-based methods face challenges due to the large number of parameters and high memory consumption. Recent research on MLP-based methods has shown that with appropriately designed architectures leveraging prior knowledge, simple MLPs can outperform complex Transformer-based methods. DLinear (Zeng et al., 2023), for instance, preprocesses sequences using a trend-season decomposition strategy. FITS (Xu et al., 2024b) performs linear transformations in the frequency domain, while TimeMixer (Wang et al., 2024a) uses MLP to facilitate information interaction at different scales. These MLP-based methods have demonstrated strong performance regarding both forecasting accuracy and efficiency. 
Unlike the aforementioned methods, this paper introduces the novel KAN to TSF to represent time series data more accurately. It also proposes a well-designed Decomposition-Learning-Mixing architecture to fully unlock the potential of KAN for time series forecasting." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.8, + 0.433, + 0.813 + ], + "angle": 0, + "content": "2.3 TIME SERIES DECOMPOSITION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.827, + 0.827, + 0.927 + ], + "angle": 0, + "content": "Real-world time series often consist of various underlying patterns. To leverage the characteristics of different patterns, recent approaches tend to decompose the series into multiple subcomponents, including trend-seasonal decomposition, multi-scale decomposition, and multi-period decomposition. DLinear (Zeng et al., 2023) employs moving averages to decouple the seasonal and trend components. SCINet (Liu et al., 2022) uses a hierarchical downsampling tree to iteratively extract and exchange information at multiple temporal resolutions. TimeMixer (Wang et al., 2024a) follows a fine-to-coarse principle to decompose the sequence into multiple scales across different" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.105, + 0.825, + 0.263 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.29, + 0.825, + 0.334 + ], + "angle": 0, + "content": "Figure 1: The architecture of TimeKAN, which mainly consists of Cascaded Frequency Decomposition block, Multi-order KAN Representation Learning block, and Frequency Mixing block. Here, we divide the frequency range of the time series into three frequency bands as an example." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.36, + 0.825, + 0.43 + ], + "angle": 0, + "content": "time spans and further splits each scale into seasonal and periodic components. TimesNet (Wu et al., 2023) and PDF (Dai et al., 2024) utilize Fourier periodic analysis to decouple sequence into multiple sub-period sequences based on the calculated period. Inspired by these works, this paper proposes a novel Decomposition-Learning-Mixing architecture, which examines time series from a multi-frequency perspective to accurately model the complex patterns within time series." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.451, + 0.296, + 0.466 + ], + "angle": 0, + "content": "3 TIMEKAN" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.483, + 0.4, + 0.496 + ], + "angle": 0, + "content": "3.1 OVERALL ARCHITECTURE" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.509, + 0.827, + 0.732 + ], + "angle": 0, + "content": "Given a historical multivariate time series input \\(\\mathbf{X} \\in \\mathbb{R}^{N \\times T}\\), the aim of time series forecasting is to predict the future output series \\(\\mathbf{X}_O \\in \\mathbb{R}^{N \\times F}\\), where \\(T, F\\) is the look-back window length and the future window length, and \\(N\\) represents the number of variates. In this paper, we propose TimeKAN to tackle the challenges arising from the complex mixture of multi-frequency components in time series. The overall architecture of TimeKAN is shown in Figure 1. 
We adopt a variate-independent manner (Nie et al., 2023) to predict each univariate series independently. Each univariate input time series is denoted as \\(X \\in \\mathbb{R}^T\\) and we consider a univariate time series as the instance in the following calculations. In our TimeKAN, the first step is to progressively remove the relatively high-frequency components using moving averages and generate multi-level sequences, followed by projecting each sequence into a high-dimensional space. Next, adhering to the Decomposition-Learning-Mixing architecture design principle, we first design Cascaded Frequency Decomposition (CFD) blocks to obtain sequence representations for each frequency band, adopting a bottom-up cascading approach. Then, we propose Multi-order KAN Representation Learning (M-KAN) blocks to learn and represent specific temporal patterns within each frequency band. Finally, Frequency Mixing blocks recombine the frequency bands into the original format, ensuring that the Decomposition-Learning-Mixing process is repeatable. More details about our TimeKAN are described as follows." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.749, + 0.525, + 0.763 + ], + "angle": 0, + "content": "3.2 HIERARCHICAL SEQUENCE PREPROCESSING" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.775, + 0.825, + 0.849 + ], + "angle": 0, + "content": "Assume that we divide the frequency range of the raw time series \\(X\\) into predefined \\(k\\) frequency bands. We first use a moving average to progressively remove the relatively high-frequency components and generate multi-level sequences \\(\\{x_{1},\\dots ,x_{k}\\}\\), where \\(x_{i}\\in \\mathbb{R}^{\\frac{T}{d^{i - 1}}}\\left(i\\in \\{1,\\dots ,k\\}\\right)\\). \\(x_{1}\\) is equal to the input series \\(X\\) and \\(d\\) denotes the length of the moving average window. The process of producing multi-level sequences is as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.388, + 0.858, + 0.825, + 0.874 + ], + "angle": 0, + "content": "\\[\nx_{i} = \\operatorname{AvgPool}(\\text{Padding}(x_{i - 1})) \\tag{1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.881, + 0.825, + 0.909 + ], + "angle": 0, + "content": "After obtaining the multi-level sequences, each sequence is independently embedded into a higher dimension through a Linear layer:" + }, + { + "type": "equation", + "bbox": [ + 0.441, + 0.91, + 0.825, + 0.926 + ], + "angle": 0, + "content": "\\[\nx_{i} = \\operatorname{Linear}\\left(x_{i}\\right) \\tag{2}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.103, + 0.825, + 0.162 + ], + "angle": 0, + "content": "where \\( x_{i} \\in \\mathbb{R}^{\\frac{T}{d^{i - 1}} \\times D} \\) and \\( D \\) is the embedding dimension. We define \\( x_{1} \\) as the highest-level sequence and \\( x_{k} \\) as the lowest-level sequence. Notably, each lower-level sequence is derived from the sequence one level higher by removing a portion of the high-frequency information. This preprocessing is performed only once in TimeKAN."
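As a concrete illustration of Eqs. (1)-(2), here is a minimal PyTorch sketch of the hierarchical preprocessing (the class name and default hyperparameters are ours for illustration; this is not the official implementation):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class HierarchicalPreprocess(nn.Module):
    """Eqs. (1)-(2): moving-average downsampling plus per-level linear embedding."""
    def __init__(self, k: int = 3, d: int = 2, dim: int = 32):
        super().__init__()
        self.k, self.d = k, d
        # one independent embedding per level: scalar value -> dim channels
        self.embeds = nn.ModuleList(nn.Linear(1, dim) for _ in range(k))

    def forward(self, x: torch.Tensor):            # x: (batch, T), one variate
        levels = [x]
        for _ in range(self.k - 1):
            # AvgPool(Padding(.)); padding is omitted here since T is divisible by d
            pooled = F.avg_pool1d(levels[-1].unsqueeze(1), kernel_size=self.d)
            levels.append(pooled.squeeze(1))
        # Eq. (2): embed each level independently, (batch, T/d^i) -> (batch, T/d^i, dim)
        return [emb(lv.unsqueeze(-1)) for lv, emb in zip(levels, self.embeds)]

for level in HierarchicalPreprocess()(torch.randn(8, 96)):
    print(level.shape)  # (8, 96, 32), (8, 48, 32), (8, 24, 32)
```

Whether the Linear layers are shared across levels is not specified above; the sketch assumes an independent layer per level.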
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.179, + 0.51, + 0.193 + ], + "angle": 0, + "content": "3.3 CASCADED FREQUENCY DECOMPOSITION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.205, + 0.825, + 0.304 + ], + "angle": 0, + "content": "Real-world time series are often composed of multiple frequency components, with the low-frequency component representing long-term changes in the time series and the high-frequency component representing short-term fluctuations or unexpected events. These different frequency components complement each other and provide a comprehensive perspective for accurately modeling time series. Therefore, we design the Cascaded Frequency Decomposition (CFD) block to accurately decompose each frequency component in a cascade way, thus laying the foundation for accurately modeling different frequency components." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.309, + 0.825, + 0.433 + ], + "angle": 0, + "content": "The aim of CFD block is to obtain the representation of each frequency component. Here, we take obtaining the representation of the \\(i\\)-th frequency band as an example. To achieve it, we first employ the Fast Fourier Transform (FFT) to obtain the representation of \\(x_{i+1}\\) in the frequency domain. Then, Zero-Padding is used to extend the length of the frequency domain sequence, so that it can have the same length as the upper sequence \\(x_i\\) after transforming back to the time domain. Next, we use Inverse Fast Fourier Transform (IFFT) to transform it back into the time domain. We refer to this upsampling process as Frequency Upsampling, which ensures that the frequency information remains unchanged before and after the upsampling. The process of Frequency Upsampling can be described as:" + }, + { + "type": "equation", + "bbox": [ + 0.377, + 0.434, + 0.825, + 0.451 + ], + "angle": 0, + "content": "\\[\n\\hat {x} _ {i} = \\operatorname {I F F T} (\\text {P a d d i n g} (\\operatorname {F F T} (x _ {i + 1}))) \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.454, + 0.825, + 0.538 + ], + "angle": 0, + "content": "Here, \\(\\hat{x}_i\\) and \\(x_i\\) have the same sequence length. Notably, compared to \\(x_i\\), \\(\\hat{x}_i\\) lacks the \\(i\\)-th frequency component. The reason is that \\(x_{i+1}\\) is originally formed by removing \\(i\\)-th frequency component from \\(x_i\\) in the hierarchical sequence preprocessing and \\(x_{i+1}\\) is now transformed into \\(\\hat{x}_i\\) through a lossless frequency conversion process, thereby aligning length with \\(x_i\\) in the time domain. Therefore, to get the series representation of the \\(i\\)-th frequency component \\(f_i\\) in time domain, we only need to get the residuals between \\(x_i\\) and \\(\\hat{x}_i\\):" + }, + { + "type": "equation", + "bbox": [ + 0.454, + 0.546, + 0.825, + 0.561 + ], + "angle": 0, + "content": "\\[\nf _ {i} = x _ {i} - \\hat {x} _ {i} \\tag {4}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.578, + 0.569, + 0.593 + ], + "angle": 0, + "content": "3.4 MULTI-ORDER KAN REPRESENTATION LEARNING" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.604, + 0.825, + 0.703 + ], + "angle": 0, + "content": "Given the multi-level frequency component representation \\(\\{f_1, \\dots, f_k\\}\\) generated by the CFD block, we propose Multi-order KAN Representation Learning (M-KAN) blocks to learn specific representations and temporal dependencies at each frequency. 
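As a brief aside before detailing the M-KAN branches, the Frequency Upsampling of Eq. (3) and the residual of Eq. (4) from the CFD block can be sketched as follows (a simplified PyTorch reading; in particular, the amplitude rescaling after zero-padding is our assumption rather than a stated detail):

```python
import torch

def frequency_upsample(x_next: torch.Tensor, target_len: int) -> torch.Tensor:
    """Eq. (3): IFFT(Padding(FFT(x_{i+1}))) -- lengthen a level-(i+1) series to the
    length of level i without introducing new frequency content."""
    spec = torch.fft.rfft(x_next, dim=0)                   # (L/2 + 1, D) complex
    extra = target_len // 2 + 1 - spec.shape[0]
    zeros = torch.zeros(extra, spec.shape[1], dtype=spec.dtype)
    spec = torch.cat([spec, zeros], dim=0)                 # zero-pad high frequencies
    up = torch.fft.irfft(spec, n=target_len, dim=0)
    # irfft normalizes by the (longer) output length, so rescale to keep amplitudes
    # comparable; whether TimeKAN applies this normalization is our assumption.
    return up * (target_len / x_next.shape[0])

x_i, x_next = torch.randn(96, 32), torch.randn(48, 32)    # levels i and i+1
f_i = x_i - frequency_upsample(x_next, 96)                # Eq. (4): the i-th band
```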
M-KAN adopts a dual-branch parallel architecture to separately model temporal representation learning and temporal dependency learning in a frequency-specific way, using Multi-order KANs to learn the representation of each frequency component and employing Depthwise Convolution to capture the temporal dependency. The details of Depthwise Convolution and Multi-order KAN will be given as follows." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.718, + 0.825, + 0.802 + ], + "angle": 0, + "content": "Depthwise Convolution To separate the modeling of temporal dependency from learning sequence representation, we adopt a specific type of group convolution known as Depthwise Convolution, in which the number of groups matches the embedding dimension. Depthwise Convolution employs \\( D \\) groups of convolution kernels to perform independent convolution operations on the series of each channel. This allows the model to focus on capturing temporal patterns without interference from inter channel relationships. The process of Depthwise Convolution is:" + }, + { + "type": "equation", + "bbox": [ + 0.38, + 0.81, + 0.825, + 0.827 + ], + "angle": 0, + "content": "\\[\nf _ {i, 1} = \\operatorname {C o n v} _ {D \\rightarrow D} \\left(f _ {i}, \\text {g r o u p} = D\\right) \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.84, + 0.826, + 0.926 + ], + "angle": 0, + "content": "Multi-order KANs Compared with traditional MLP, KAN replaces linear weights with learnable univariate functions, allowing complex nonlinear relationships to be modeled with fewer parameters and greater interpretability. (Xu et al., 2024a). Assume that KAN is composed of \\( L + 1 \\) layer neurons and the number of neurons in layer \\( l \\) is \\( n_{l} \\). The transmission relationship between the \\( j \\)-th neuron in layer \\( l + 1 \\) and all neurons in layer \\( l \\) can be expressed as \\( z_{l + 1,j} = \\sum_{i = 1}^{n_l}\\phi_{l,j,i}(z_{l,i}) \\), where \\( z_{l + 1,j} \\) is the \\( j \\)-th neuron at layer \\( l + 1 \\) and \\( z_{l,i} \\) is the \\( i \\)-th neuron at layer \\( l \\). We can simply understand" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.105, + 0.827, + 0.204 + ], + "angle": 0, + "content": "that each neuron is connected to other neurons in the previous layer through a learnable univariate function \\(\\phi\\). The vanilla KAN (Liu et al., 2024c) employs spline function as the learnable univariate basic functions \\(\\phi\\), but suffering from the complex recursive computation process, which hinders the efficiency of KAN. Here, we adopt ChebyshevKAN (SS, 2024) to learn the representation of each frequency component, i.e., channel learning. ChebyshevKAN is constructed from linear combinations of Chebyshev polynomial. That is, using the linear combination of Chebyshev polynomial with different order to generate learnable univariate function \\(\\phi\\). 
The Chebyshev polynomial is defined by:" + }, + { + "type": "equation", + "bbox": [ + 0.408, + 0.211, + 0.825, + 0.228 + ], + "angle": 0, + "content": "\\[\nT_{n}(x) = \\cos(n \\arccos(x)) \\tag{6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.235, + 0.825, + 0.278 + ], + "angle": 0, + "content": "where \\( n \\) is the highest order of the Chebyshev polynomials, whose complexity increases with the order. A 1-layer ChebyshevKAN applied to the channel dimension can be expressed as:" + }, + { + "type": "equation", + "bbox": [ + 0.375, + 0.28, + 0.826, + 0.322 + ], + "angle": 0, + "content": "\\[\n\\phi_{o}(x) = \\sum_{j = 1}^{D} \\sum_{i = 0}^{n} \\Theta_{o, j, i} T_{i}(\\tanh(x_{j})) \\tag{7}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.412, + 0.34, + 0.826, + 0.383 + ], + "angle": 0, + "content": "\\[\n\\operatorname{KAN}(x) = \\left\\{ \\begin{array}{c} \\phi_{1}(x) \\\\ \\dots \\\\ \\phi_{D}(x) \\end{array} \\right\\} \\tag{8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.398, + 0.825, + 0.552 + ], + "angle": 0, + "content": "where \\(o\\) is the index of the output neuron and \\(\\Theta \\in \\mathbb{R}^{D\\times D\\times (n + 1)}\\) are the learnable coefficients used to linearly combine the Chebyshev polynomials. It is worth noting that the frequency components within the time series exhibit increasingly complex temporal dynamics as the frequency increases, necessitating a network with stronger representation capabilities to learn these characteristics. ChebyshevKAN allows for the adjustment of the highest order of the Chebyshev polynomials \\(n\\) to enhance its representation ability. Therefore, from the low-frequency to the high-frequency components, we adopt an increasing order of Chebyshev polynomials to align the frequency components with the complexity of the KAN, thereby accurately learning the representations of different frequency components. We refer to this group of KANs with varying highest Chebyshev polynomial orders as Multi-order KANs. We set a lower bound order \\(b\\), and the representation learning process for \\(f_{i}\\) can be expressed as:" + }, + { + "type": "equation", + "bbox": [ + 0.377, + 0.561, + 0.825, + 0.577 + ], + "angle": 0, + "content": "\\[\nf_{i, 2} = \\mathrm{KAN}\\left(f_{i}, \\text{order} = b + k - i\\right) \\tag{9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.593, + 0.825, + 0.621 + ], + "angle": 0, + "content": "The final output of the M-KAN block is the sum of the outputs from the Multi-order KANs and the Depthwise Convolution:" + }, + { + "type": "equation", + "bbox": [ + 0.445, + 0.623, + 0.825, + 0.641 + ], + "angle": 0, + "content": "\\[\n\\hat{f}_{i} = f_{i, 1} + f_{i, 2} \\tag{10}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.658, + 0.364, + 0.673 + ], + "angle": 0, + "content": "3.5 FREQUENCY MIXING" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.685, + 0.825, + 0.801 + ], + "angle": 0, + "content": "After specifically learning the representation of each frequency component, we need to re-transform the frequency representations into the form of multi-level sequences before entering the next CFD block, ensuring that the Decomposition-Learning-Mixing process is repeatable. Therefore, we design Frequency Mixing blocks to convert the frequency component at the \\(i\\)-th level \\(\\hat{f}_i\\) into multi-level sequences \\(x_i\\), enabling it to serve as input for the next CFD block. 
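Before continuing with the mixing step, the M-KAN block of Eqs. (5)-(10) above admits a compact sketch (our simplified reading, not the official code; the Chebyshev recurrence below is equivalent to Eq. (6) on [-1, 1], and the initialization scale and kernel size are illustrative choices):

```python
import torch
import torch.nn as nn

class ChebyKAN(nn.Module):
    """One-layer ChebyshevKAN over the channel dimension, Eqs. (6)-(8)."""
    def __init__(self, dim: int, order: int):
        super().__init__()
        self.order = order
        self.theta = nn.Parameter(torch.randn(dim, dim, order + 1) * 0.1)

    def forward(self, f):                        # f: (batch, L, D)
        x = torch.tanh(f)                        # squash inputs into [-1, 1]
        T = [torch.ones_like(x), x]              # T_0 = 1, T_1 = x
        for _ in range(2, self.order + 1):
            T.append(2 * x * T[-1] - T[-2])      # T_i = 2x T_{i-1} - T_{i-2}
        basis = torch.stack(T[: self.order + 1], dim=-1)         # (batch, L, D, n+1)
        return torch.einsum('bldi,odi->blo', basis, self.theta)  # Eq. (7)

class MKANBlock(nn.Module):
    """Depthwise Conv branch (Eq. 5) + Multi-order KAN branch (Eq. 9), summed (Eq. 10)."""
    def __init__(self, dim: int, order: int, kernel: int = 3):
        super().__init__()
        self.dwconv = nn.Conv1d(dim, dim, kernel, padding=kernel // 2, groups=dim)
        self.kan = ChebyKAN(dim, order)

    def forward(self, f):                        # f: (batch, L, D)
        f1 = self.dwconv(f.transpose(1, 2)).transpose(1, 2)  # temporal dependencies
        f2 = self.kan(f)                                     # frequency-specific repr.
        return f1 + f2                                       # Eq. (10)

out = MKANBlock(dim=32, order=5)(torch.randn(8, 96, 32))     # order = b + k - i, Eq. (9)
```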
To transform the frequency component at the \(i\)-th level \(\hat{f}_i\) into the multi-level sequence \(x_i\), we simply need to supplement the frequency information from levels \(i + 1\) to \(k\) back into the \(i\)-th level. Thus, we employ Frequency Upsampling again to incrementally reintegrate the information into the higher frequency components:"
Each cell: MSE/MAE. — denotes a missing value.

| Dataset | F | TimeKAN (Ours) | TimeMixer (2024a) | iTransformer (2024b) | Time-FFM (2024a) | PatchTST (2023) | TimesNet (2023) | MICN (2023) | DLinear (2023) | FreTS (2024) | FiLM (2022a) | FEDformer (2022b) | Autoformer (2021) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| ETTh1 | 96 | 0.367/0.395 | 0.385/0.402 | 0.386/0.405 | 0.385/0.400 | 0.460/0.447 | 0.384/0.402 | 0.426/0.446 | 0.397/0.412 | 0.395/0.407 | 0.438/0.433 | 0.395/0.424 | 0.449/0.459 |
| ETTh1 | 192 | 0.414/0.420 | 0.443/0.430 | 0.441/0.436 | 0.439/0.430 | 0.512/0.477 | 0.436/0.429 | 0.454/0.464 | 0.446/0.441 | 0.490/0.477 | 0.494/0.466 | 0.469/0.470 | 0.500/0.482 |
| ETTh1 | 336 | 0.445/0.434 | 0.512/0.470 | 0.487/0.458 | 0.480/0.449 | 0.546/0.496 | 0.638/0.469 | 0.493/0.487 | 0.489/0.467 | 0.510/0.480 | 0.547/0.495 | 0.490/0.477 | 0.521/0.496 |
| ETTh1 | 720 | 0.444/0.459 | 0.497/0.476 | 0.503/0.491 | 0.462/0.456 | 0.544/0.517 | 0.521/0.500 | 0.526/0.526 | 0.513/0.510 | 0.568/0.538 | 0.586/0.538 | 0.598/0.544 | 0.514/0.512 |
| ETTh1 | Avg | 0.417/0.427 | 0.459/0.444 | 0.454/0.447 | 0.442/0.434 | 0.516/0.484 | 0.495/0.450 | 0.475/0.480 | 0.461/0.457 | 0.491/0.475 | 0.516/0.483 | 0.498/0.484 | 0.496/0.487 |
| ETTh2 | 96 | 0.290/0.340 | 0.289/0.342 | 0.297/0.349 | 0.301/0.351 | 0.308/0.355 | 0.340/0.374 | 0.372/0.424 | 0.340/0.394 | 0.332/0.387 | 0.322/0.364 | 0.358/0.397 | 0.346/0.388 |
| ETTh2 | 192 | 0.375/0.392 | 0.378/0.397 | 0.380/0.400 | 0.378/0.397 | 0.393/0.405 | 0.402/0.414 | 0.492/0.492 | 0.482/0.479 | 0.451/0.457 | 0.405/0.414 | 0.429/0.439 | 0.456/0.452 |
| ETTh2 | 336 | 0.423/0.435 | 0.432/0.434 | 0.428/0.432 | 0.422/0.431 | 0.427/0.436 | 0.452/0.452 | 0.607/0.555 | 0.591/0.541 | 0.466/0.473 | 0.435/0.445 | 0.496/0.487 | 0.482/0.486 |
| ETTh2 | 720 | 0.443/0.449 | 0.464/0.464 | 0.427/0.445 | 0.427/0.444 | 0.436/0.450 | 0.462/0.468 | 0.824/0.655 | 0.839/0.661 | 0.485/0.471 | 0.445/0.457 | 0.463/0.474 | 0.515/0.511 |
| ETTh2 | Avg | 0.383/0.404 | 0.390/0.409 | 0.383/0.407 | 0.382/0.406 | 0.391/0.411 | 0.414/0.427 | 0.574/0.531 | 0.563/0.519 | 0.433/0.446 | 0.402/0.420 | 0.437/0.449 | 0.450/0.459 |
| ETTm1 | 96 | 0.322/0.361 | 0.317/0.356 | 0.334/0.368 | 0.336/0.369 | 0.352/0.374 | 0.338/0.375 | 0.365/0.387 | 0.346/0.374 | 0.337/0.374 | 0.353/0.370 | 0.379/0.419 | 0.505/0.475 |
| ETTm1 | 192 | 0.357/0.383 | 0.367/0.384 | 0.377/0.391 | 0.378/0.389 | 0.390/0.393 | 0.374/0.387 | 0.403/0.408 | 0.382/0.391 | 0.382/0.398 | 0.387/— | 0.426/0.441 | 0.553/0.496 |
| ETTm1 | 336 | 0.382/0.401 | 0.391/0.406 | 0.426/0.420 | 0.411/0.410 | 0.421/0.414 | 0.410/0.411 | 0.436/0.431 | 0.415/0.415 | 0.420/0.423 | 0.421/0.408 | 0.445/0.459 | 0.621/0.537 |
| ETTm1 | 720 | 0.445/0.435 | 0.454/0.441 | 0.491/0.459 | 0.469/0.441 | 0.462/0.449 | 0.478/0.450 | 0.489/0.462 | 0.473/0.451 | 0.490/0.471 | 0.481/0.441 | 0.543/0.490 | 0.671/0.561 |
| ETTm1 | Avg | 0.376/0.395 | 0.382/0.397 | 0.407/0.410 | 0.399/0.402 | 0.406/0.407 | 0.400/0.406 | 0.423/0.422 | 0.404/0.408 | 0.407/0.417 | 0.412/0.402 | 0.448/0.452 | 0.588/0.517 |
| ETTm2 | 96 | 0.174/0.255 | 0.175/0.257 | 0.180/0.264 | 0.181/0.267 | 0.183/0.270 | 0.187/0.267 | 0.197/0.296 | 0.193/0.293 | 0.186/0.275 | 0.183/0.266 | 0.203/0.287 | 0.255/0.339 |
| ETTm2 | 192 | 0.239/0.299 | 0.240/0.302 | 0.250/0.309 | 0.247/0.308 | 0.255/0.314 | 0.249/0.309 | 0.284/0.361 | 0.284/0.361 | 0.259/0.323 | 0.248/0.305 | 0.269/0.328 | 0.281/0.340 |
| ETTm2 | 336 | 0.301/0.340 | 0.303/0.343 | 0.311/0.348 | 0.309/0.347 | 0.309/0.347 | 0.321/0.351 | 0.381/0.429 | 0.382/0.429 | 0.349/0.386 | 0.309/0.343 | 0.325/0.366 | 0.339/0.372 |
| ETTm2 | 720 | 0.395/0.396 | 0.392/0.396 | 0.412/0.407 | 0.406/0.404 | 0.412/0.404 | 0.408/0.403 | 0.549/0.522 | 0.558/0.525 | 0.559/0.511 | 0.410/0.400 | 0.421/0.415 | 0.433/0.432 |
| ETTm2 | Avg | 0.277/0.322 | 0.277/0.324 | 0.288/0.332 | 0.286/0.332 | 0.290/0.334 | 0.291/0.333 | 0.353/0.402 | 0.354/0.402 | 0.339/0.374 | 0.288/0.328 | 0.305/0.349 | 0.327/0.371 |
| Weather | 96 | 0.162/0.208 | 0.163/0.209 | 0.174/0.214 | 0.191/0.230 | 0.186/0.227 | 0.172/0.220 | 0.198/0.261 | 0.195/0.252 | 0.171/0.227 | 0.195/0.236 | 0.217/0.296 | 0.266/0.336 |
| Weather | 192 | 0.207/0.249 | 0.211/0.254 | 0.221/0.254 | 0.236/0.267 | 0.234/0.265 | 0.219/0.261 | 0.239/0.299 | 0.237/0.295 | 0.218/0.280 | 0.239/0.271 | 0.276/0.336 | 0.307/0.367 |
| Weather | 336 | 0.263/0.290 | 0.263/0.293 | 0.278/0.296 | 0.289/0.303 | 0.284/0.301 | 0.246/0.337 | 0.285/0.336 | 0.282/0.331 | 0.265/0.317 | 0.289/0.306 | 0.339/0.380 | 0.359/— |
| Weather | 720 | 0.338/0.340 | 0.344/0.348 | 0.358/0.347 | 0.362/0.350 | 0.356/0.349 | 0.365/0.359 | 0.351/0.388 | 0.345/0.382 | 0.326/0.351 | 0.360/0.351 | 0.403/0.428 | 0.419/0.428 |
| Weather | Avg | 0.242/0.272 | 0.245/0.276 | 0.258/0.278 | 0.270/0.288 | 0.265/0.285 | 0.251/0.294 | 0.268/0.321 | 0.265/0.315 | 0.245/0.294 | 0.271/0.290 | 0.309/0.360 | 0.338/0.382 |
| Electricity | 96 | 0.174/0.266 | 0.153/0.245 | 0.148/0.240 | 0.198/0.282 | 0.190/0.296 | 0.168/0.272 | 0.180/0.293 | 0.210/0.302 | 0.171/0.260 | 0.198/0.274 | 0.193/0.308 | 0.201/0.317 |
| Electricity | 192 | 0.182/0.273 | 0.166/0.257 | 0.162/0.253 | 0.199/0.285 | 0.199/0.304 | 0.184/0.322 | 0.189/0.302 | 0.210/0.305 | 0.177/0.268 | 0.198/0.278 | 0.201/0.315 | 0.222/0.334 |
| Electricity | 336 | 0.197/0.286 | 0.185/0.275 | 0.178/0.269 | 0.212/0.298 | 0.217/0.319 | 0.198/0.300 | 0.198/0.312 | 0.223/0.319 | 0.190/0.284 | 0.217/0.300 | 0.214/0.329 | 0.231/0.443 |
| Electricity | 720 | 0.236/0.320 | 0.224/0.312 | 0.225/0.317 | 0.253/0.330 | 0.258/0.352 | 0.220/0.320 | 0.217/0.330 | 0.258/0.350 | 0.228/0.316 | 0.278/0.356 | 0.246/0.355 | 0.254/0.361 |
| Electricity | Avg | 0.197/0.286 | 0.182/0.272 | 0.178/0.270 | 0.270/0.288 | 0.216/0.318 | 0.193/0.304 | 0.196/0.309 | 0.225/0.319 | 0.192/0.282 | 0.223/0.302 | 0.214/0.327 | 0.227/0.338 |
" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.5, + 0.329, + 0.515 + ], + "angle": 0, + "content": "4 EXPERIMENTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.531, + 0.825, + 0.587 + ], + "angle": 0, + "content": "Datasets We conduct extensive experiments on six real-world time series datasets, including Weather, ETTh1, ETTh2, ETTm1, ETTm2 and Electricity for long-term forecasting. Following previous work (Wu et al., 2021), we split the ETT series dataset into training, validation, and test sets in a ratio of 6:2:2. For the remaining datasets, we adopt a split ratio of 7:1:2." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.602, + 0.825, + 0.687 + ], + "angle": 0, + "content": "Baseline We carefully select eleven well-acknowledged methods in the field of long-term time series forecasting as our baselines, including (1) Transformer-based methods: Autoformer (2021), FEDformer (2022b), PatchTST (2023), iTransformer (2024b); (2) MLP-based methods: DLinear (2023) and TimeMixer (2024a) (3) CNN-based method: MICN (2023), TimesNet (2023); (4) Frequency-based methods: FreTS (2024) and FiLM (2022a). And a time series foundation model Time-FFM (2024a)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.701, + 0.825, + 0.758 + ], + "angle": 0, + "content": "Experimental Settings To ensure fair comparisons, we adopt the same look-back window length \\( T = 96 \\) and the same prediction length \\( F = \\{96,192,336,720\\} \\). We utilize the L2 loss for model training and use Mean Square Error (MSE) and Mean Absolute Error (MAE) metrics to evaluate the performance of each method." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.773, + 0.325, + 0.787 + ], + "angle": 0, + "content": "4.1 MAIN RESULTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.8, + 0.826, + 0.925 + ], + "angle": 0, + "content": "The comprehensive forecasting results are presented in Table 1, where the best results are highlighted in bold red and the second-best are underlined in blue. A lower MSE/MAE indicates a more accurate prediction result. We observe that TimeKAN demonstrates superior predictive performance across all datasets, except for the Electricity dataset, where iTransformer achieves the best result. This is due to iTransformer's use of channel-wise self-attention mechanisms to model inter-variable dependencies, which is particularly effective for high-dimensional datasets like Electricity. Additionally, both TimeKAN and TimeMixer perform consistently well in long-term forecasting tasks, showcasing the generalizability of well-designed time-series decomposition architectures for accurate predictions. Compared with other state-of-the-art methods, TimeKAN introduces a novel" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "table_caption", + "bbox": [ + 0.225, + 0.113, + 0.772, + 0.129 + ], + "angle": 0, + "content": "Table 2: Ablation study of the Frequency Upsampling. The best results are in bold." + }, + { + "type": "table", + "bbox": [ + 0.175, + 0.131, + 0.825, + 0.228 + ], + "angle": 0, + "content": "
Each cell: MSE/MAE.

| Method | ETTh1 | ETTh2 | ETTm1 | ETTm2 | Weather | Electricity |
|---|---|---|---|---|---|---|
| Linear Mapping | 0.401/0.413 | 0.312/0.362 | 0.328/0.365 | 0.180/0.263 | 0.164/0.211 | 0.184/0.275 |
| Linear Interpolation | 0.383/0.398 | 0.296/0.347 | 0.336/0.370 | 0.181/0.263 | 0.165/0.210 | 0.196/0.277 |
| Transposed Convolution | 0.377/0.407 | 0.290/0.344 | 0.326/0.366 | 0.178/0.261 | 0.163/0.211 | 0.188/0.274 |
| Frequency Upsampling | 0.367/0.395 | 0.290/0.340 | 0.322/0.361 | 0.174/0.255 | 0.162/0.208 | 0.174/0.266 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.24, + 0.254, + 0.757, + 0.269 + ], + "angle": 0, + "content": "Table 3: Ablation study of the Multi-order KANs. The best results are in bold." + }, + { + "type": "table", + "bbox": [ + 0.212, + 0.272, + 0.787, + 0.369 + ], + "angle": 0, + "content": "
Each cell: MSE/MAE.

| Method | ETTh1 | ETTh2 | ETTm1 | ETTm2 | Weather |
|---|---|---|---|---|---|
| MLPs | 0.376/0.397 | 0.298/0.348 | 0.319/0.361 | 0.178/0.264 | 0.162/0.211 |
| Fixed Low-order KANs | 0.376/0.398 | 0.292/0.341 | 0.327/0.366 | 0.175/0.257 | 0.164/0.211 |
| Fixed High-order KANs | 0.380/0.407 | 0.310/0.363 | 0.327/0.269 | 0.176/0.257 | 0.164/0.212 |
| Multi-order KANs | 0.367/0.395 | 0.290/0.340 | 0.322/0.361 | 0.174/0.255 | 0.162/0.208 |
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.396, + 0.825, + 0.439 + ], + "angle": 0, + "content": "Decomposition-Learning-Mixing framework, closely integrating the characteristics of Multi-order KANs with this hierarchical architecture, enabling superior performance in a wide range of long-term forecasting tasks." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.457, + 0.343, + 0.47 + ], + "angle": 0, + "content": "4.2 ABLATION STUDY" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.483, + 0.825, + 0.513 + ], + "angle": 0, + "content": "In this section, we investigate several key components of TimeKAN, including Frequency Upsampling, Depthwise Convolution and Multi-order KANs." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.529, + 0.827, + 0.643 + ], + "angle": 0, + "content": "Frequency Upsampling To investigate the effectiveness of Frequency Upsampling, we compared it with three alternative upsampling methods that may not preserve frequency information before and after transformation: (1) Linear Mapping; (2) Linear Interpolation; and (3) Transposed Convolution. As shown in Table 2, replacing Frequency Upsampling with any of these three methods resulted in a decline in performance. This indicates that these upsampling techniques fail to maintain the integrity of frequency information after transforming, leading to the Decomposition-Learning-Mixing framework ineffective. This strongly demonstrates that the chosen Frequency Upsampling, as a non-parametric method, is an irreplaceable component of the TimeKAN framework." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.657, + 0.827, + 0.825 + ], + "angle": 0, + "content": "Multi-order KANs We designed the following modules to investigate the effectiveness of Multi-order KANs: (1) MLPs, which means using MLP to replace each KAN; (2) Fixed Low-order KANs, which means using a KAN of order 2 at each frequency level; and (3) Fixed High-order KANs, which means using a KAN of order 5 at each frequency level. The comparison results are shown in Table 3. Overall, Multi-order KANs achieved the best performance. Compared to MLPs, Multi-order KANs perform significantly better, demonstrating that well-designed KANs possess stronger representation capabilities than MLPs and are a compelling alternative. Both Low-order KANs and High-order KANs performed worse than Multi-order KANs, indicating the validity of our design choice to incrementally increase the order of KANs to adapt to the representation of different frequency components. Thus, the learnable functions of KANs are indeed a double-edged sword; achieving satisfactory results requires selecting the appropriate level of function complexity for specific tasks." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.84, + 0.828, + 0.927 + ], + "angle": 0, + "content": "Depthwise Convolution To assess the effectiveness of Depthwise Convolution, we replace it with the following choice: (1) w/o Depthwise Convolution; (2) Standard Convolution; (3) Multi-head Self-Attention. The results are shown in Table 4. Overall, Depthwise Convolution is the best choice. We clearly observe that removing Depthwise Convolution or replacing it with Multi-head Self-Attention leads to a significant drop in performance, highlighting the effectiveness of using convolution to learn temporal dependencies. 
When Depthwise Convolution is replaced with Standard" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "table_caption", + "bbox": [ + 0.223, + 0.113, + 0.773, + 0.129 + ], + "angle": 0, + "content": "Table 4: Ablation study of the Depthwise Convolution. The best results are in bold." + }, + { + "type": "table", + "bbox": [ + 0.221, + 0.131, + 0.778, + 0.229 + ], + "angle": 0, + "content": "
Each cell: MSE/MAE.

| Method | ETTh1 | ETTh2 | ETTm1 | ETTm2 | Weather |
|---|---|---|---|---|---|
| w/o Depthwise Conv | 0.379/0.397 | 0.296/0.343 | 0.337/0.373 | 0.180/0.263 | 0.168/0.211 |
| Standard Conv | 0.364/0.393 | 0.295/0.345 | 0.323/0.364 | 0.180/0.264 | 0.162/0.210 |
| Self-Attention | 0.377/0.406 | 0.293/0.342 | 0.329/0.365 | 0.184/0.272 | 0.174/0.225 |
| Depthwise Conv | 0.367/0.395 | 0.290/0.340 | 0.322/0.361 | 0.174/0.255 | 0.162/0.208 |
" + }, + { + "type": "image", + "bbox": [ + 0.177, + 0.244, + 0.501, + 0.402 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.244, + 0.822, + 0.401 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.435, + 0.825, + 0.478 + ], + "angle": 0, + "content": "Figure 2: Comparison of forecasting performance between TimeKAN and other three models with varying look-back windows on ETTm2 and Weather datasets. The look-back windows are selected to be \\( T \\in \\{48,96,192,336,512,720\\} \\), and the prediction length is fixed to \\( F = 96 \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.503, + 0.825, + 0.547 + ], + "angle": 0, + "content": "Convolution, there are declines in most metrics, which implies that focusing on extracting temporal dependencies individually with Depthwise Convolution, without interference from inter-channel relationships, is a reasonable design." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.561, + 0.827, + 0.716 + ], + "angle": 0, + "content": "Varing Look-back Window In principle, extending the look-back window can provide more information for predicting future, leading to a potential improvement in forecasting performance. A effective long-term TSF method equipped with a strong temporal relation extraction capability should be able to improve forecasting performance when look-back window length increasing (Zeng et al., 2023). As a model based on frequency decomposition learning, TimeKAN should achieve better predictive performance as the look-back window lengths, since more incremental frequency information is available for prediction. To demonstrate that TimeKAN benefits from a larger look-back window, we select look-back window lengths from \\( T = \\{48,96,192,336,512,720\\} \\) while keeping the prediction length fixed at 96. As demonstrated in Figure 2, our TimeKAN consistently reduces the MSE scores as the look-back window increases, indicating that TimeKAN can effectively learn from long time series." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.731, + 0.358, + 0.745 + ], + "angle": 0, + "content": "4.3 MODEL EFFICIENCY" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.758, + 0.827, + 0.926 + ], + "angle": 0, + "content": "We compare TimeKAN with MLP-based method TimeMier and Transformer-based methods iTransformer and PatchTST, in terms of model parameters and Multiply-Accumulate Operations (MACs), to validate that TimeKAN is a lightweight and efficient architecture. To ensure a fair comparison, we fix the prediction length \\( F = 96 \\) and input length \\( T = 96 \\), and set the input batch size to 32. The comparison results are summarized in Table 5. It is clear that our TimeKAN demonstrates significant advantages in both model parameter size and MACs, particularly when compared to Transformer-based models. For instance, on the Electricity dataset, the parameter count of PatchTST is nearly 295 times that of TimeKAN, and its MACs are almost 118 times greater. Even when compared to the relatively lightweight MLP-based method TimeMixer, TimeKAN shows superior efficiency. On the Weather dataset, TimeKAN requires only \\( 20.05\\% \\) of the parameters needed by TimeMixer and only \\( 36.14\\% \\) of the MACs. This remarkable efficiency advantage is primarily attributed to the lightweight architectural design. 
The main computations of the TimeKAN model are concentrated" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.113, + 0.827, + 0.169 + ], + "angle": 0, + "content": "Table 5: A comparison of model parameters (Params) and multiply-accumulate operations (MACs) for TimeKAN and three other models. To ensure a fair comparison, we fix the prediction length \\( F = 96 \\) and the input length \\( T = 96 \\), and set the input batch size to 32. The lowest computational cost is highlighted in bold." + }, + { + "type": "table", + "bbox": [ + 0.183, + 0.17, + 0.83, + 0.266 + ], + "angle": 0, + "content": "
Each cell: Params/MACs.

| Model | ETTh1 | ETTh2 | ETTm1 | ETTm2 | Weather | Electricity |
|---|---|---|---|---|---|---|
| TimeMixer | 75.50K / 20.37M | 75.50K / 20.37M | 75.50K / 20.37M | 77.77K / 24.18M | 104.43K / 82.62M | 106.83K / 1.26G |
| iTransformer | 841.57K / 77.46M | 224.22K / 19.86M | 224.22K / 19.86M | 224.22K / 19.86M | 4.83M / 1.16G | 4.83M / 16.29G |
| PatchTST | 3.75M / 5.90G | 10.06M / 17.66G | 3.75M / 5.90G | 10.06M / 17.66G | 6.90M / 35.30G | 6.90M / 539.38G |
| TimeKAN | 12.84K / 7.63M | 15.00K / 8.02M | 14.38K / 7.63M | 38.12K / 16.66M | 20.94K / 29.86M | 23.34K / 456.50M |
" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.294, + 0.827, + 0.366 + ], + "angle": 0, + "content": "in the M-KAN block, and the Depthwise Convolution we employed significantly reduces the number of parameters through grouped operations. Additionally, the powerful representation capabilities afforded by Multi-order KANs allow us to represent time series with very few neurons. Therefore, we cannot overlook that TimeKAN achieves outstanding forecasting performance while requiring minimal computational resources." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.387, + 0.321, + 0.403 + ], + "angle": 0, + "content": "5 CONCLUSION" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.419, + 0.825, + 0.533 + ], + "angle": 0, + "content": "We proposed an efficient KAN-based Frequency Decomposition Learning architecture (TimeKAN) for long-term time series forecasting. Based on Decomposition-Learning-Mixing architecture, TimeKAN obtains series representations for each frequency band using a Cascaded Frequency Decomposition blocks. Additionally, a Multi-order KAN Representation Learning blocks further leverage the high flexibility of KAN to learn and represent specific temporal patterns within each frequency band. Finally, Frequency Mixing blocks recombine the frequency bands into the original format. Extensive experiments on real-world datasets demonstrate that TimeKAN achieves the state of the art forecasting performance and extremely lightweight computational consumption." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.554, + 0.37, + 0.568 + ], + "angle": 0, + "content": "ACKNOWLEDGEMENTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.585, + 0.825, + 0.614 + ], + "angle": 0, + "content": "This work is supported by Shanghai Artificial Intelligence Laboratory. This work was done during Songtao Huang's internship at Shanghai Artificial Intelligence Laboratory." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.637, + 0.289, + 0.652 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.661, + 0.825, + 0.691 + ], + "angle": 0, + "content": "Alexander Dylan Bodner, Antonio Santiago Tepsich, Jack Natan Spolski, and Santiago Pourteau. Convolutional kolmogorov-arnold networks. arXiv preprint arXiv:2406.13155, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.702, + 0.826, + 0.759 + ], + "angle": 0, + "content": "Tao Dai, Beiliang Wu, Peiyuan Liu, Naiqi Li, Jigang Bao, Yong Jiang, and Shu-Tao Xia. Periodicity decoupling framework for long-term series forecasting. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=dp27P5HBBt." + }, + { + "type": "ref_text", + "bbox": [ + 0.171, + 0.771, + 0.825, + 0.814 + ], + "angle": 0, + "content": "Luo donghao and wang xue. ModernTCN: A modern pure convolution structure for general time series analysis. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=vpJMJerXHU." + }, + { + "type": "ref_text", + "bbox": [ + 0.171, + 0.826, + 0.825, + 0.87 + ], + "angle": 0, + "content": "Mononito Goswami, Konrad Szafer, Arjun Choudhry, Yifu Cai, Shuo Li, and Artur Dubrawski. Moment: A family of open time-series foundation models. In ICML, 2024. URL https://openreview.net/forum?id=FVvf69a5rx." + }, + { + "type": "ref_text", + "bbox": [ + 0.171, + 0.882, + 0.825, + 0.925 + ], + "angle": 0, + "content": "Hongbin Huang, Minghua Chen, and Xiao Qiao. 
Generative learning for financial time series with irregular and scale-invariant patterns. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=CdjnzWsQax." + }, + { + "type": "list", + "bbox": [ + 0.171, + 0.661, + 0.826, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.511, + 0.961 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.828, + 0.162 + ], + "angle": 0, + "content": "Weiwei Jiang and Jiayun Luo. Graph neural network for traffic forecasting: A survey. Expert Systems with Applications, 207:117921, 2022. ISSN 0957-4174. doi: https://doi.org/10.1016/j.eswa.2022.117921. URL https://www.sciencedirect.com/science/article/pii/S0957417422011654." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.169, + 0.827, + 0.256 + ], + "angle": 0, + "content": "Remi Lam, Alvaro Sanchez-Gonzalez, Matthew Willson, Peter Wirnsberger, Meire Fortunato, Ferran Alet, Suman Ravuri, Timo Ewalds, Zach Eaton-Rosen, Weihua Hu, Alexander Merose, Stephan Hoyer, George Holland, Oriol Vinyals, Jacklynn Stott, Alexander Pritzel, Shakir Mohamed, and Peter Battaglia. Learning skillful medium-range global weather forecasting. Science, 382(6677):1416-1421, 2023. doi: 10.1126/science.adi2336. URL https://www.science.org/doi/abs/10.1126/science.adi2336." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.263, + 0.825, + 0.308 + ], + "angle": 0, + "content": "Chenxin Li, Xinyu Liu, Wuyang Li, Cheng Wang, Hengyu Liu, and Yixuan Yuan. U-kan makes strong backbone for medical image segmentation and generation. arXiv preprint arXiv:2406.02918, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.316, + 0.825, + 0.346 + ], + "angle": 0, + "content": "Ziyao Li. Kolmogorov-arnold networks are radial basis function networks. arXiv preprint arXiv:2405.06721, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.354, + 0.825, + 0.399 + ], + "angle": 0, + "content": "Shengsheng Lin, Weiwei Lin, Wentai Wu, Haojun Chen, and Junjie Yang. SparseTSF: Modeling long-term time series forecasting with \\( ^{*}1\\mathrm{k}^{*} \\) parameters. In *Forty-first International Conference on Machine Learning*, 2024. URL https://openreview.net/forum?id=54NSHO01Fe." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.407, + 0.825, + 0.451 + ], + "angle": 0, + "content": "Minhao Liu, Ailing Zeng, Muxi Chen, Zhijian Xu, Qiuxia Lai, Lingna Ma, and Qiang Xu. Scinet: Time series modeling and forecasting with sample convolution and interaction. Advances in Neural Information Processing Systems, 35:5816-5828, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.459, + 0.825, + 0.516 + ], + "angle": 0, + "content": "Qingxiang Liu, Xu Liu, Chenghao Liu, Qingsong Wen, and Yuxuan Liang. Time-FFM: Towards LM-empowered federated foundation model for time series forecasting. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024a. URL https://openreview.net/forum?id=HS0faHRhWD." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.525, + 0.825, + 0.582 + ], + "angle": 0, + "content": "Yong Liu, Tengge Hu, Haoran Zhang, Haixu Wu, Shiyu Wang, Lintao Ma, and Mingsheng Long. itransformer: Inverted transformers are effective for time series forecasting. 
In The Twelfth International Conference on Learning Representations, 2024b. URL https://openreview.net/forum?id=JePfAI8fah." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.592, + 0.825, + 0.635 + ], + "angle": 0, + "content": "Ziming Liu, Yixuan Wang, Sachin Vaidya, Fabian Ruehle, James Halverson, Marin Soljacic, Thomas Y Hou, and Max Tegmark. Kan: Kolmogorov-arnold networks. arXiv preprint arXiv:2404.19756, 2024c." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.644, + 0.825, + 0.701 + ], + "angle": 0, + "content": "Yuqi Nie, Nam H Nguyen, Phanwadee Sinthong, and Jayant Kalagnanam. A time series is worth 64 words: Long-term forecasting with transformers. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=Jbdc0vTOcol." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.71, + 0.825, + 0.755 + ], + "angle": 0, + "content": "Khemraj Shukla, Juan Diego Toscano, Zhicheng Wang, Zongren Zou, and George Em Karniadakis. A comprehensive and fair comparison between mlp and kan representations for differential equations and operator networks. arXiv preprint arXiv:2406.02917, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.763, + 0.825, + 0.793 + ], + "angle": 0, + "content": "Sidharth SS. Chebyshev polynomial-based kolmogorov-arnold networks: An efficient architecture for nonlinear function approximation. arXiv preprint arXiv:2405.07200, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.801, + 0.825, + 0.858 + ], + "angle": 0, + "content": "Huiqiang Wang, Jian Peng, Feihu Huang, Jince Wang, Junhui Chen, and Yifei Xiao. MICN: Multiscale local and global context modeling for long-term series forecasting. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=zt53IDUR1U." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.867, + 0.825, + 0.925 + ], + "angle": 0, + "content": "Shiyu Wang, Haixu Wu, Xiaoming Shi, Tengge Hu, Huakun Luo, Lintao Ma, James Y. Zhang, and JUN ZHOU. Timemixer: Decomposable multiscale mixing for time series forecasting. In The Twelfth International Conference on Learning Representations, 2024a. URL https://openreview.net/forum?id=7oLshfEIC2." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.828, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.826, + 0.162 + ], + "angle": 0, + "content": "Yizheng Wang, Jia Sun, Jinshuai Bai, Cosmin Anitescu, Mohammad Sadegh Eshaghi, Xiaoying Zhuang, Timon Rabczuk, and Yinghua Liu. Kolmogorov arnold informed neural network: A physics-informed deep learning framework for solving pdes based on kolmogorov arnold networks. arXiv preprint arXiv:2406.11045, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.169, + 0.826, + 0.255 + ], + "angle": 0, + "content": "Haixu Wu, Jiehui Xu, Jianmin Wang, and Mingsheng Long. Autoformer: Decomposition transformers with auto-correlation for long-term series forecasting. In M. Ranzato, A. Beygelzimer, Y. Dauphin, P.S. Liang, and J. Wortman Vaughan (eds.), Advances in Neural Information Processing Systems, volume 34, pp. 22419-22430. Curran Associates, Inc., 2021. 
URL https://proceedings.neurips.cc/paper_files/paper/2021/file/bcc0d400288793e8bcdcd7c19a8ac0c2b-Paper.pdf." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.262, + 0.826, + 0.32 + ], + "angle": 0, + "content": "Haixu Wu, Tengge Hu, Yong Liu, Hang Zhou, Jianmin Wang, and Mingsheng Long. Timesnet: Temporal 2d-variation modeling for general time series analysis. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=ju_Uqw384Oq." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.328, + 0.826, + 0.357 + ], + "angle": 0, + "content": "Kunpeng Xu, Lifei Chen, and Shengrui Wang. Are kan effective for identifying and tracking concept drift in time series? arXiv preprint arXiv:2410.10041, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.366, + 0.826, + 0.41 + ], + "angle": 0, + "content": "Zhijian Xu, Ailing Zeng, and Qiang Xu. FITS: Modeling time series with $10k$ parameters. In The Twelfth International Conference on Learning Representations, 2024b. URL https://openreview.net/forum?id=bWcvvZ3qMb." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.417, + 0.826, + 0.461 + ], + "angle": 0, + "content": "Kun Yi, Qi Zhang, Wei Fan, Shoujin Wang, Pengyang Wang, Hui He, Ning An, Defu Lian, Longbing Cao, and Zhendong Niu. Frequency-domain mlps are more effective learners in time series forecasting. Advances in Neural Information Processing Systems, 36, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.469, + 0.826, + 0.526 + ], + "angle": 0, + "content": "Linfei Yin, Xinghui Cao, and Dongduan Liu. Weighted fully-connected regression networks for one-day-ahead hourly photovoltaic power forecasting. Applied Energy, 332:120527, 2023. ISSN 0306-2619. doi: https://doi.org/10.1016/j.apenergy.2022.120527. URL https://www.sciencedirect.com/science/article/pii/S0306261922017846." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.534, + 0.826, + 0.577 + ], + "angle": 0, + "content": "Ailing Zeng, Muxi Chen, Lei Zhang, and Qiang Xu. Are transformers effective for time series forecasting? In Proceedings of the AAAI conference on artificial intelligence, volume 37, pp. 11121-11128, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.586, + 0.826, + 0.642 + ], + "angle": 0, + "content": "G.Peter Zhang. Time series forecasting using a hybrid arima and neural network model. Neurocomputing, 50:159-175, 2003. ISSN 0925-2312. doi: https://doi.org/10.1016/S0925-2312(01)00702-0. URL https://www.sciencedirect.com/science/article/pii/S0925231201007020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.651, + 0.826, + 0.695 + ], + "angle": 0, + "content": "Haoyi Zhou, Shanghang Zhang, Jieqi Peng, Shuai Zhang, Jianxin Li, Hui Xiong, and Wancai Zhang. Informer: Beyond efficient transformer for long sequence time-series forecasting. In Proceedings of the AAAI conference on artificial intelligence, volume 35, pp. 11106-11115, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.702, + 0.826, + 0.746 + ], + "angle": 0, + "content": "Tian Zhou, Ziqing Ma, Qingsong Wen, Liang Sun, Tao Yao, Wotao Yin, Rong Jin, et al. Film: Frequency improved legendre memory model for long-term time series forecasting. Advances in neural information processing systems, 35:12677-12690, 2022a." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.754, + 0.826, + 0.798 + ], + "angle": 0, + "content": "Tian Zhou, Ziqing Ma, Qingsong Wen, Xue Wang, Liang Sun, and Rong Jin. 
Fedformer: Frequency enhanced decomposed transformer for long-term series forecasting. In International conference on machine learning, pp. 27268-27286. PMLR, 2022b." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.798 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.103, + 0.481, + 0.119 + ], + "angle": 0, + "content": "A ADDITIONAL MODEL ANALYSIS" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.162, + 0.827, + 0.218 + ], + "angle": 0, + "content": "Table 6: Full comparison results of model parameters (Params) and multiply-accumulate operations (MACs) for TimeKAN and other models. To ensure a fair comparison, we fix the prediction length \\( F = 96 \\) and the input length \\( T = 96 \\), and set the input batch size to 32. The lowest computational cost is highlighted in bold." + }, + { + "type": "table", + "bbox": [ + 0.182, + 0.219, + 0.862, + 0.406 + ], + "angle": 0, + "content": "
Each cell: Params/MACs.

| Model | ETTh1 | ETTh2 | ETTm1 | ETTm2 | Weather | Electricity |
|---|---|---|---|---|---|---|
| TimeMixer | 75.50K / 20.37M | 75.50K / 20.37M | 75.50K / 20.37M | 77.77K / 24.18M | 104.43K / 82.62M | 106.83K / 1.26G |
| iTransformer | 841.57K / 77.46M | 224.22K / 19.86M | 224.22K / 19.86M | 224.22K / 19.86M | 4.83M / 1.16G | 4.83M / 16.29G |
| PatchTST | 3.75M / 5.90G | 10.06M / 17.66G | 3.75M / 5.90G | 10.06M / 17.66G | 6.90M / 35.30G | 6.90M / 539.38G |
| TimesNet | 605.48K / 18.13G | 1.19M / 36.28G | 4.71M / 144G | 1.19M / 36.28G | 1.19M / 36.28G | 150.30M / 4.61T |
| MICN | 25.20M / 71.95G | 25.20M / 71.95G | 25.20M / 71.95G | 25.20M / 71.95G | 111.03K / 295.07M | 6.64M / 19.5G |
| DLinear | 18.62K / 0.6M | 18.62K / 0.6M | 18.62K / 0.6M | 18.62K / 0.6M | 18.62K / 0.6M | 18.62K / 0.6M |
| FreTS | 3.24M / 101.46M | 3.24M / 101.46M | 3.24M / 101.46M | 3.24M / 101.46M | 3.24M / 101.46M | 3.24M / 101.46M |
| FiLM | 12.58M / 2.82G | 12.58M / 2.82G | 12.58M / 2.82G | 12.58M / 2.82G | 12.58M / 8.46G | 12.58M / 8.46G |
| FEDformer | 23.38M / 24.96G | 23.38M / 24.96G | 23.38M / 24.96G | 23.38M / 24.96G | 23.45M / 25.23G | 24.99M / 30.89G |
| Autoformer | 10.54M / 22.82G | 10.54M / 22.82G | 10.54M / 22.82G | 10.54M / 22.82G | 10.61M / 23.08G | 12.14M / 28.75G |
| TimeKAN | 12.84K / 7.63M | 15.00K / 8.02M | 14.38K / 7.63M | 38.12K / 16.66M | 20.94K / 29.86M | 23.34K / 456.50M |
" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.45, + 0.516, + 0.463 + ], + "angle": 0, + "content": "A.1 COMPUTATIONAL COMPLEXITY ANALYSIS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.479, + 0.828, + 0.633 + ], + "angle": 0, + "content": "In our TimeKAN, the main computational complexity lies in Fast Fourier Transform (FFT), Depthwise Convolution block and Multi-order KAN block. Consider a time series with length \\( L \\) and the hidden state of each time point is \\( D \\). For FFT, the computation complexity is \\( \\mathcal{O}(L\\log L) \\). For Depthwise Convolution block, if we set the convolutional kernel to \\( M \\) and stride to 1, the complexity is \\( \\mathcal{O}(LDM) \\). Finally, assuming that the highest order of Chebyshev polynomials is \\( K \\), the complexity of Multi-order KAN block is \\( \\mathcal{O}(LD^2K) \\). Since \\( M, D, K \\) are constants that are independent of the input length \\( L \\), the computational complexity of both the Depthwise Convolution block and the Multi-order KAN block can be reduced to \\( \\mathcal{O}(L) \\), which is linear about the sequence length. In summary, the overall computational complexity is \\( \\max(\\mathcal{O}(L\\log L), \\mathcal{O}(L) = \\mathcal{O}(L\\log L) \\). When the input is a multivariate sequence with \\( M \\) variables, the computational complexity will expand to \\( \\mathcal{O}(ML\\log L) \\) due to our variable-independent strategy." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.659, + 0.362, + 0.673 + ], + "angle": 0, + "content": "A.2 MODEL EFFICIENCY" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.687, + 0.827, + 0.788 + ], + "angle": 0, + "content": "Here, we provide the complete results of model efficiency in terms of parameters and MACs in Table 6. As can be seen, except for DLinear, our TimeKAN consistently demonstrates a significant advantage in both parameter count and MACs compared to any other model. DLinear is a model consisting of only a single linear layer, which makes it the most lightweight in terms of parameters and MACs. However, the performance of DLinear already shows a significant gap when compared to state-of-the-art methods. Therefore, our TimeKAN actually achieves superior performance in both forecasting accuracy and efficiency." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.811, + 0.312, + 0.825 + ], + "angle": 0, + "content": "A.3 ERROR BARS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.84, + 0.827, + 0.926 + ], + "angle": 0, + "content": "To evaluate the robustness of TimeKAN, we repeated the experiments on three randomly selected seeds and compared it with the second-best model (TimeMixer). We report the mean and standard deviation of the results across the three experiments, as well as the confidence level of TimeKAN's superiority over TimeMixer. The results are averaged over four prediction horizons (96, 192, 336, and 720). As shown in the Table 7, in most cases, we have over \\(90\\%\\) confidence that TimeKAN outperforms the second-best model and demonstrates good robustne of TimeKAN." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.113, + 0.825, + 0.14 + ], + "angle": 0, + "content": "Table 7: Standard deviation and statistical tests for our TimeKAN method and second-best method (TimeMixer) on five datasets." + }, + { + "type": "table", + "bbox": [ + 0.234, + 0.141, + 0.766, + 0.253 + ], + "angle": 0, + "content": "
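One plausible way to produce confidence levels like those reported in Table 7 below is a one-sided Welch t-test over per-seed scores; the paper does not specify its exact test, so both the test and the per-seed numbers in this sketch are assumptions.

```python
import numpy as np
from scipy import stats

timekan   = np.array([0.418, 0.422, 0.426])   # hypothetical per-seed MSEs
timemixer = np.array([0.456, 0.462, 0.468])   # hypothetical per-seed MSEs

# Welch t-test (unequal variances), then convert to a one-sided p-value
t, p_two_sided = stats.ttest_ind(timekan, timemixer, equal_var=False)
p_one_sided = p_two_sided / 2 if t < 0 else 1 - p_two_sided / 2
print(f"confidence that TimeKAN beats TimeMixer: {1 - p_one_sided:.1%}")
```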
| Dataset | MSE: TimeKAN | MSE: TimeMixer | Confidence | MAE: TimeKAN | MAE: TimeMixer | Confidence |
|---|---|---|---|---|---|---|
| ETTh1 | 0.422±0.004 | 0.462±0.006 | 99% | 0.430±0.002 | 0.448±0.004 | 99% |
| ETTh2 | 0.387±0.003 | 0.392±0.003 | 99% | 0.408±0.003 | 0.412±0.004 | 90% |
| ETTm1 | 0.378±0.002 | 0.386±0.003 | 99% | 0.396±0.001 | 0.399±0.001 | 99% |
| ETTm2 | 0.278±0.001 | 0.278±0.001 | — | 0.324±0.001 | 0.325±0.001 | 90% |
| Weather | 0.243±0.001 | 0.245±0.001 | 99% | 0.273±0.001 | 0.276±0.001 | 99% |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.185, + 0.296, + 0.812, + 0.311 + ], + "angle": 0, + "content": "Table 8: Comparison on the Electricity dataset when the look back window is expanded to 512." + }, + { + "type": "table", + "bbox": [ + 0.307, + 0.312, + 0.692, + 0.401 + ], + "angle": 0, + "content": "
Each cell: MSE/MAE.

| Model | 96 | 192 | 336 | 720 |
|---|---|---|---|---|
| MOMENT | 0.136/0.233 | 0.152/0.247 | 0.167/0.264 | 0.205/0.295 |
| TimeMixer | 0.135/0.231 | 0.149/0.245 | 0.172/0.268 | 0.203/0.295 |
| TimeKAN | 0.133/0.230 | 0.149/0.247 | 0.165/0.261 | 0.203/0.294 |
" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.446, + 0.56, + 0.46 + ], + "angle": 0, + "content": "A.4 FREQUENCY LEARNING WITH LONGER WINDOW" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.481, + 0.827, + 0.648 + ], + "angle": 0, + "content": "In Table 1, TimeKAN performs relatively poorly on the Electricity dataset. We infer that its poor performance on the electricity dataset is due to the overly short look-back window (\\(T = 96\\)), which cannot provide sufficient frequency information. To verify this, we compare the average number of effective frequency components under a specific look-back window. Specifically, we randomly select a sequence of length \\(T\\) from the electricity dataset and transform it into the frequency domain using FFT. We define effective frequencies as those with amplitudes greater than 0.1 times the maximum amplitude. Then, we take the average number of effective frequencies obtained across all variables to reflect the amount of effective frequency information provided by the sequence. When \\(T = 96\\) (the setting in this paper), the average number of effective frequencies is 10.69. When we extend the sequence length to 512, the average number of effective frequencies becomes 19.74. Therefore, the effective frequency information provided by 512 time steps is nearly twice that of 96 time steps. This indicates that \\(T = 96\\) loses a substantial amount of effective information." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.654, + 0.828, + 0.753 + ], + "angle": 0, + "content": "To validate whether using \\( T = 512 \\) allows us to leverage more frequency information, we extend the look-back window of TimeKAN to 512 on the electricity dataset and compare it with the state-of-the-art methods TimeMixer and time series foundation model MOMENT (Goswami et al., 2024). The results are shown in Table 8. Although TimeKAN performs significantly worse than TimeMixer when \\( T = 96 \\), it achieves the best performance on the electricity dataset when the look-back window is extended to 512. This also demonstrates that TimeKAN can benefit significantly from richer frequency information." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.792, + 0.53, + 0.807 + ], + "angle": 0, + "content": "A.5 IMPACT OF NUMBER OF FREQUENCY BANDS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.827, + 0.827, + 0.926 + ], + "angle": 0, + "content": "To explore the impact of the number of frequency bands on performance, we set the number of frequency bands to 2, 3, 4, and 5. The effects of different frequency band divisions on performance are shown in the Table 9. As we can see, in most cases, dividing the frequency bands into 3 or 4 layers yields the best performance. This aligns with our prior intuition: dividing into two bands results in excessive frequency overlap, while dividing into five bands leads to too little information within each band, making it difficult to accurately model the information within that frequency range." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "table_caption", + "bbox": [ + 0.172, + 0.113, + 0.825, + 0.128 + ], + "angle": 0, + "content": "Table 9: Impact of number of frequency bands on performance under the 96-to-96 prediction setting." 
+ }, + { + "type": "table", + "bbox": [ + 0.308, + 0.129, + 0.692, + 0.222 + ], + "angle": 0, + "content": "
Each cell: MSE/MAE.

| Number of frequency bands | ETTh2 | Weather | Electricity |
|---|---|---|---|
| 2 | 0.292/0.340 | 0.164/0.209 | 0.183/0.270 |
| 3 | 0.290/0.339 | 0.163/0.209 | 0.177/0.268 |
| 4 | 0.290/0.340 | 0.162/0.208 | 0.174/0.266 |
| 5 | 0.295/0.346 | 0.164/0.211 | 0.177/0.273 |
" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.242, + 0.427, + 0.258 + ], + "angle": 0, + "content": "B MATHEMATICAL DETAILS" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.273, + 0.465, + 0.287 + ], + "angle": 0, + "content": "B.1 KOLMOGOROV-ARNOLD NETWORK" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.299, + 0.825, + 0.342 + ], + "angle": 0, + "content": "Kolmogorov-Arnold representation theorem states that any multivariate continuous function can be expressed as a combination of univariate functions and addition operations. More specifically, a multivariate continuous function \\( g:[0,1]^n\\Rightarrow \\mathbb{R} \\) can be defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.335, + 0.346, + 0.823, + 0.387 + ], + "angle": 0, + "content": "\\[\ng (x) = g \\left(x _ {1}, \\dots , x _ {n}\\right) = \\sum_ {i = 1} ^ {2 n + 1} \\Phi_ {i} \\left(\\sum_ {j = 1} ^ {n} \\phi_ {i j} \\left(x _ {j}\\right)\\right) \\tag {13}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.391, + 0.825, + 0.462 + ], + "angle": 0, + "content": "where \\(\\phi_{ij}\\) and \\(\\Phi_i\\) are univariate functions. Following the pattern of MLP, Kolmogorov-Arnold Network (KAN) (Liu et al., 2024c) extends the Kolmogorov-Arnoldtheorem to deep representations, i.e., stacked multilayer Kolmogorov-Arnold representations. Assume that KAN is composed of \\(L + 1\\) layer neurons and the number of neurons in layer \\(l\\) is \\(n_l\\). The transmission relationship between the \\(j\\)-th neuron in layer \\(l + 1\\) and all neurons in layer \\(l\\) can be expressed as:" + }, + { + "type": "equation", + "bbox": [ + 0.416, + 0.465, + 0.825, + 0.504 + ], + "angle": 0, + "content": "\\[\nx _ {l + 1, j} = \\sum_ {i = 1} ^ {n _ {l}} \\phi_ {l, j, i} \\left(x _ {l, i}\\right) \\tag {14}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.507, + 0.825, + 0.563 + ], + "angle": 0, + "content": "We can simply understand that each neuron is connected to other neurons in the previous layer through a univariate function \\(\\phi\\). Similar to MLP, the computation of all neurons at layer \\(l\\) can be reorganized as a function matrix multiplication \\(\\Phi_{l-1}\\). Therefore, given a input vector \\(x \\in \\mathbb{R}^{n_0}\\), the final output of KAN network is:" + }, + { + "type": "equation", + "bbox": [ + 0.37, + 0.567, + 0.825, + 0.583 + ], + "angle": 0, + "content": "\\[\n\\mathrm {K A N} (x) = \\left(\\Phi_ {L - 1} \\circ \\dots \\circ \\Phi_ {1} \\circ \\Phi_ {0}\\right) x \\tag {15}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.586, + 0.825, + 0.643 + ], + "angle": 0, + "content": "In vanilla KAN (Liu et al., 2024c), the univariate function \\(\\phi_{l,j,i}\\) is parametrized using B-splines, which is a class of smooth curves constructed via segmented polynomial basis functions. 
To ensure stability and enhance the representational capacity, KAN overlays the spline function on a fixed basis function \(b\), which is typically the SiLU function:"
The Discrete Fourier Transform (DFT) is a commonly used domain transformation algorithm that converts a discrete-time signal from the time domain to the complex frequency domain. Mathematically, given a sequence of real numbers \( x[n] \) in the time domain, where \( n = 0,1,\dots ,N - 1 \), the DFT process can be described as:"
+ }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.51, + 0.961 + ], + "angle": 0, + "content": "16" + } + ] +] \ No newline at end of file diff --git a/2025/TimeKAN_ KAN-based Frequency Decomposition Learning Architecture for Long-term Time Series Forecasting/c4acf521-4bf1-41df-95d9-c37b967e30fc_origin.pdf b/2025/TimeKAN_ KAN-based Frequency Decomposition Learning Architecture for Long-term Time Series Forecasting/c4acf521-4bf1-41df-95d9-c37b967e30fc_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..a84fc5a7dcd265a98c13848a573feb6c938f079e --- /dev/null +++ b/2025/TimeKAN_ KAN-based Frequency Decomposition Learning Architecture for Long-term Time Series Forecasting/c4acf521-4bf1-41df-95d9-c37b967e30fc_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c1fe83e24893205002db28e925d90ff60c6939578ff5298ce5c8ac380d6e2279 +size 562757 diff --git a/2025/TimeKAN_ KAN-based Frequency Decomposition Learning Architecture for Long-term Time Series Forecasting/full.md b/2025/TimeKAN_ KAN-based Frequency Decomposition Learning Architecture for Long-term Time Series Forecasting/full.md new file mode 100644 index 0000000000000000000000000000000000000000..59bff7e37fff1d4854cf259abbf7e21584b1e2d8 --- /dev/null +++ b/2025/TimeKAN_ KAN-based Frequency Decomposition Learning Architecture for Long-term Time Series Forecasting/full.md @@ -0,0 +1,359 @@ +# TIMEKAN: KAN-BASED FREQUENCY DECOMPOSITION LEARNING ARCHITECTURE FOR LONG-TERM TIME SERIES FORECASTING + +Songtao Huang $^{1,2}$ , Zhen Zhao $^{1}$ , Can Li $^{3}$ , Lei Bai $^{4}$ + +$^{1}$ Shanghai Artificial Intelligence Laboratory, Shanghai, China + +$^{2}$ School of Information Science and Engineering, Lanzhou University, Lanzhou, China + +3The Key Laboratory of Road and Traffic Engineering of the Ministry of Education, Tongji University, Shanghai, China + +huangsongtao@pjlab.org.cn, zhen.zhao@outlook.com, + +lchelen1005@gmail.com, baisanshi@gmail.com + +# ABSTRACT + +Real-world time series often have multiple frequency components that are intertwined with each other, making accurate time series forecasting challenging. Decomposing the mixed frequency components into multiple single frequency components is a natural choice. However, the information density of patterns varies across different frequencies, and employing a uniform modeling approach for different frequency components can lead to inaccurate characterization. To address this challenges, inspired by the flexibility of the recent Kolmogorov-Arnold Network (KAN), we propose a KAN-based Frequency Decomposition Learning architecture (TimeKAN) to address the complex forecasting challenges caused by multiple frequency mixtures. Specifically, TimeKAN mainly consists of three components: Cascaded Frequency Decomposition (CFD) blocks, Multi-order KAN Representation Learning (M-KAN) blocks and Frequency Mixing blocks. CFD blocks adopt a bottom-up cascading approach to obtain series representations for each frequency band. Benefiting from the high flexibility of KAN, we design a novel M-KAN block to learn and represent specific temporal patterns within each frequency band. Finally, Frequency Mixing blocks is used to recombine the frequency bands into the original format. Extensive experimental results across multiple real-world time series datasets demonstrate that TimeKAN achieves state-of-the-art performance as an extremely lightweight architecture. Code is available at https://github.com/huangst21/TimeKAN. 
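A quick numerical check of Eqs. (20)-(21) and the rFFT remark above, assuming numpy's FFT conventions: the naive \( \mathcal{O}(N^2) \) DFT matrix matches the FFT, the inverse transform recovers the signal, and the real-input rFFT keeps only \( N/2 + 1 \) bins.

```python
import numpy as np

N = 8
x = np.random.default_rng(1).standard_normal(N)
n = np.arange(N)
W = np.exp(-2j * np.pi * np.outer(n, n) / N)   # DFT matrix, e^{-i 2pi kn / N}

X_naive = W @ x                                # Eq. (20), O(N^2)
assert np.allclose(X_naive, np.fft.fft(x))     # matches the O(N log N) FFT
assert np.allclose(x, np.fft.ifft(X_naive).real)  # Eq. (21) round trip
print(len(np.fft.rfft(x)))                     # N//2 + 1 = 5 bins
```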
+ +# 1 INTRODUCTION + +Time series forecasting (TSF) has garnered significant interest due to its wide range of applications, including finance (Huang et al., 2024), energy management (Yin et al., 2023), traffic flow planning (Jiang & Luo, 2022), and weather forecasting (Lam et al., 2023). Recently, deep learning has led to substantial advancements in TSF, with the most state-of-the-art performances achieved by CNN-based methods (Wang et al., 2023; donghao & wang xue, 2024), Transformer-based methods(Nie et al., 2023; Liu et al., 2024b) and MLP-based methods (Zeng et al., 2023; Wang et al., 2024a). + +Due to the complex nature of the real world, observed multivariate time series are often nonstationary and exhibit diverse patterns. These intertwined patterns complicate the internal relationships within the time series, making it challenging to capture and establish connections between historical observations and future targets. To address the complex temporal patterns in time series, an increasing number of studies focus on leveraging prior knowledge to decompose time series into simpler components that provide a basis for forecasting. For instance, Autoformer (Wu et al., 2021) decomposes time series into seasonal and trend components. This idea is also adopted by DLinear (Zeng et al., 2023) and FEDFormer (Zhou et al., 2022b). Building on this foundation, TimeMixer (Wang et al., 2024a) further introduces multi-scale seasonal-trend decomposition and highlights the importance of interactions between different scales. Recent models like TimesNet (Wu et al., + +2023), PDF (Dai et al., 2024), and SparseTSF (Lin et al., 2024) emphasize the inherent periodicity in time series and decompose long sequences into multiple shorter ones based on the period length, thereby enabling the separate modeling of inter-period and intra-period dependencies within temporal patterns. In summary, these different decomposition methods share a common goal: utilizing the simplified subsequences to provide critical information for future predictions, thereby achieving accurate forecasting. + +It is worth noting that time series are often composed of multiple frequency components, where the low-frequency components represent long-term periodic variations and the high-frequency components capture certain abrupt events. The mixture of different frequency components makes accurate forecasting particularly challenging. The aforementioned decomposition approaches motivate us to design a frequency decomposition framework that decouples different frequency components in a time series and independently learns the temporal patterns associated with each frequency. However, this introduces another challenge: the information density of patterns varies across different frequencies, and employing a uniform modeling approach for different frequency components can lead to inaccurate characterizations, resulting in sub-optimal results. Fortunately, a new neural network architecture, known as Kolmogorov-Arnold Networks (KAN) (Liu et al., 2024c), has recently gained significant attention in the deep learning community due to its outstanding data-fitting capabilities and flexibility, showing potential as a substitute for traditional MLP. Compared to MLP, KAN offers optional kernels and allows for the adjustment of kernel order to control its fitting capacity. 
This consideration leads us to explore the use of Multi-order KANs to represent temporal patterns across different frequencies, thereby providing more accurate information for forecasting.

Motivated by these observations, we propose a KAN-based Frequency Decomposition Learning architecture (TimeKAN) to address the complex prediction challenges caused by multiple frequency mixtures. Specifically, TimeKAN first employs moving averages to progressively remove the relatively high-frequency components from the sequence. Subsequently, Cascaded Frequency Decomposition (CFD) blocks adopt a bottom-up cascading approach to obtain sequence representations for each frequency band. Multi-order KAN Representation Learning (M-KAN) blocks leverage the high flexibility of KAN to learn and represent specific temporal patterns within each frequency band. Finally, Frequency Mixing blocks recombine the frequency bands into the original format, ensuring that this Decomposition-Learning-Mixing process is repeatable, thereby modeling different temporal patterns at various frequencies more accurately. The final high-level sequence is then mapped to the desired forecasting output via a simple linear mapping. With our meticulously designed architecture, TimeKAN achieves state-of-the-art performance across multiple long-term time series forecasting tasks, while also being a lightweight architecture that outperforms complex TSF models with fewer computational resources.

Our contributions are summarized as follows:

- We revisit time series forecasting from the perspective of frequency decoupling, effectively disentangling time series characteristics through a frequency Decomposition-Learning-Mixing architecture to address challenges caused by complex information coupling in time series.
- We introduce TimeKAN as a lightweight yet effective forecasting model and design novel M-KAN blocks that effectively model and represent patterns at different frequencies by maximizing the flexibility of KAN.
- TimeKAN demonstrates superior performance across multiple TSF prediction tasks, while having a parameter count significantly lower than that of state-of-the-art TSF models.

# 2 RELATED WORK

# 2.1 KOLMOGOROV-ARNOLD NETWORK

The Kolmogorov-Arnold representation theorem states that any multivariate continuous function can be expressed as a combination of univariate functions and addition operations. The Kolmogorov-Arnold Network (KAN) (Liu et al., 2024c) leverages this theorem to propose an innovative alternative to the traditional MLP. Unlike MLPs, which use fixed activation functions at the nodes, KAN introduces learnable activation functions along the edges. Due to its flexibility and adaptability, KAN is considered a promising alternative to the MLP.

The original KAN was parameterized using spline functions. However, due to the inherent complexity of spline functions, the speed and scalability of the original KAN were not satisfactory. Consequently, subsequent research explored the use of simpler basis functions to replace splines, thereby achieving higher efficiency. ChebyshevKAN (SS, 2024) incorporates Chebyshev polynomials to parametrize the learnable functions. FastKAN (Li, 2024) uses faster Gaussian radial basis functions to approximate third-order B-spline functions.

Moreover, KAN has been applied as an alternative to the MLP in various domains. Convolutional KAN (Bodner et al., 2024) replaces the linear weight matrices in traditional convolutional networks with learnable spline function matrices.
U-KAN (Li et al., 2024) integrates KAN layers into the U-Net architecture, demonstrating impressive accuracy and efficiency in several medical image segmentation tasks. KAN has also been used to bridge the gap between AI and science. Works such as PIKAN (Shukla et al., 2024) and PINN (Wang et al., 2024b) utilize KAN to build physics-informed machine learning models. This paper aims to introduce KAN into TSF and demonstrate the strong potential of KAN in representing time series data.

# 2.2 TIME SERIES FORECASTING

Traditional time series forecasting (TSF) methods, such as ARIMA (Zhang, 2003), can provide sufficient interpretability for the forecasting results but often fail to achieve satisfactory accuracy. In recent years, deep learning methods have dominated the field of TSF, mainly including CNN-based, Transformer-based, and MLP-based approaches. CNN-based models primarily apply convolution operations along the temporal dimension to extract temporal patterns. For example, MICN (Wang et al., 2023) and TimesNet (Wu et al., 2023) enhance the precision of sequence modeling by adjusting the receptive field to capture both short-term and long-term views within the sequences. ModernTCN (donghao & wang xue, 2024) advocates using large convolution kernels along the temporal dimension to capture both cross-time and cross-variable dependencies. Compared to CNN-based methods, which have limited receptive fields, Transformer-based methods offer global modeling capabilities, making them more suitable for handling long and complex sequence data; they have become a cornerstone of modern time series forecasting. Informer (Zhou et al., 2021) is one of the early implementations of Transformer models in TSF, making efficient forecasting possible by carefully modifying the internal Transformer architecture. PatchTST (Nie et al., 2023) divides the sequence into multiple patches along the temporal dimension, which are then fed into the Transformer, establishing it as an important benchmark in the time series domain. In contrast, iTransformer (Liu et al., 2024b) treats each variable as an independent token to capture cross-variable dependencies in multivariate time series. However, Transformer-based methods face challenges due to their large number of parameters and high memory consumption. Recent research on MLP-based methods has shown that, with appropriately designed architectures leveraging prior knowledge, simple MLPs can outperform complex Transformer-based methods. DLinear (Zeng et al., 2023), for instance, preprocesses sequences using a trend-season decomposition strategy. FITS (Xu et al., 2024b) performs linear transformations in the frequency domain, while TimeMixer (Wang et al., 2024a) uses MLPs to facilitate information interaction at different scales. These MLP-based methods have demonstrated strong performance regarding both forecasting accuracy and efficiency. Unlike the aforementioned methods, this paper introduces the novel KAN to TSF to represent time series data more accurately. It also proposes a well-designed Decomposition-Learning-Mixing architecture to fully unlock the potential of KAN for time series forecasting.

# 2.3 TIME SERIES DECOMPOSITION

Real-world time series often consist of various underlying patterns. To leverage the characteristics of different patterns, recent approaches tend to decompose the series into multiple subcomponents, including trend-seasonal decomposition, multi-scale decomposition, and multi-period decomposition.
DLinear (Zeng et al., 2023) employs moving averages to decouple the seasonal and trend components. SCINet (Liu et al., 2022) uses a hierarchical downsampling tree to iteratively extract and exchange information at multiple temporal resolutions. TimeMixer (Wang et al., 2024a) follows a fine-to-coarse principle to decompose the sequence into multiple scales across different time spans and further splits each scale into seasonal and periodic components. TimesNet (Wu et al., 2023) and PDF (Dai et al., 2024) utilize Fourier periodic analysis to decouple a sequence into multiple sub-period sequences based on the calculated period. Inspired by these works, this paper proposes a novel Decomposition-Learning-Mixing architecture, which examines time series from a multi-frequency perspective to accurately model the complex patterns within time series.

![](images/60360e90e37b535a3c68ba4c2afa0235d4eda70752a48627a4173e1bc04fa0df.jpg)
Figure 1: The architecture of TimeKAN, which mainly consists of the Cascaded Frequency Decomposition block, the Multi-order KAN Representation Learning block, and the Frequency Mixing block. Here, we divide the frequency range of the time series into three frequency bands as an example.

# 3 TIMEKAN

# 3.1 OVERALL ARCHITECTURE

Given a historical multivariate time series input $\mathbf{X} \in \mathbb{R}^{N \times T}$, the aim of time series forecasting is to predict the future output series $\mathbf{X}_O \in \mathbb{R}^{N \times F}$, where $T$ and $F$ are the look-back window length and the future window length, and $N$ represents the number of variates. In this paper, we propose TimeKAN to tackle the challenges arising from the complex mixture of multi-frequency components in time series. The overall architecture of TimeKAN is shown in Figure 1. We adopt a variate-independent manner (Nie et al., 2023) to predict each univariate series independently. Each univariate input time series is denoted as $X \in \mathbb{R}^T$, and we treat a univariate series as the instance in the following calculations. In our TimeKAN, the first step is to progressively remove the relatively high-frequency components using moving averages and generate multi-level sequences, followed by projecting each sequence into a high-dimensional space. Next, adhering to the Decomposition-Learning-Mixing architecture design principle, we first design Cascaded Frequency Decomposition (CFD) blocks to obtain sequence representations for each frequency band, adopting a bottom-up cascading approach. Then, we propose Multi-order KAN Representation Learning (M-KAN) blocks to learn and represent specific temporal patterns within each frequency band. Finally, Frequency Mixing blocks recombine the frequency bands into the original format, ensuring that the Decomposition-Learning-Mixing process is repeatable. More details about our TimeKAN are described as follows.

# 3.2 HIERARCHICAL SEQUENCE PREPROCESSING

Assume that we divide the frequency range of the raw time series $X$ into $k$ predefined frequency bands. We first use moving averages to progressively remove the relatively high-frequency components and generate multi-level sequences $\{x_{1},\dots ,x_{k}\}$, where $x_{i}\in \mathbb{R}^{\frac{T}{d^{i - 1}}}\left(i\in \{1,\dots ,k\}\right)$. Here, $x_{1}$ is equal to the input series $X$ and $d$ denotes the length of the moving-average window.
The process of producing the multi-level sequences is as follows:

$$
x_{i} = \operatorname{AvgPool}(\operatorname{Padding}(x_{i-1})) \tag{1}
$$

After obtaining the multi-level sequences, each sequence is independently embedded into a higher dimension through a Linear layer:

$$
x_{i} = \operatorname{Linear}(x_{i}) \tag{2}
$$

where $x_{i} \in \mathbb{R}^{\frac{T}{d^{i-1}} \times D}$ and $D$ is the embedding dimension. We define $x_{1}$ as the highest-level sequence and $x_{k}$ as the lowest-level sequence. Notably, each lower-level sequence is derived from the sequence one level higher by removing a portion of the high-frequency information. The above is a preprocessing step and occurs only once in TimeKAN.

# 3.3 CASCADED FREQUENCY DECOMPOSITION

Real-world time series are often composed of multiple frequency components, with the low-frequency components representing long-term changes in the time series and the high-frequency components representing short-term fluctuations or unexpected events. These different frequency components complement each other and provide a comprehensive perspective for accurately modeling time series. Therefore, we design the Cascaded Frequency Decomposition (CFD) block to accurately decompose each frequency component in a cascaded way, thus laying the foundation for accurately modeling different frequency components.

The aim of the CFD block is to obtain the representation of each frequency component. Here, we take obtaining the representation of the $i$-th frequency band as an example. To achieve it, we first employ the Fast Fourier Transform (FFT) to obtain the representation of $x_{i+1}$ in the frequency domain. Then, Zero-Padding is used to extend the length of the frequency-domain sequence, so that it has the same length as the upper sequence $x_i$ after transforming back to the time domain. Next, we use the Inverse Fast Fourier Transform (IFFT) to transform it back into the time domain. We refer to this upsampling process as Frequency Upsampling, which ensures that the frequency information remains unchanged before and after the upsampling. The process of Frequency Upsampling can be described as:

$$
\hat{x}_{i} = \operatorname{IFFT}(\operatorname{Padding}(\operatorname{FFT}(x_{i+1}))) \tag{3}
$$

Here, $\hat{x}_i$ and $x_i$ have the same sequence length. Notably, compared to $x_i$, $\hat{x}_i$ lacks the $i$-th frequency component. The reason is that $x_{i+1}$ was originally formed by removing the $i$-th frequency component from $x_i$ in the hierarchical sequence preprocessing, and $x_{i+1}$ is now transformed into $\hat{x}_i$ through a lossless frequency conversion process, thereby aligning its length with $x_i$ in the time domain. Therefore, to get the series representation of the $i$-th frequency component $f_i$ in the time domain, we only need the residual between $x_i$ and $\hat{x}_i$:

$$
f_{i} = x_{i} - \hat{x}_{i} \tag{4}
$$

# 3.4 MULTI-ORDER KAN REPRESENTATION LEARNING

Given the multi-level frequency component representations $\{f_1, \dots, f_k\}$ generated by the CFD blocks, we propose Multi-order KAN Representation Learning (M-KAN) blocks to learn specific representations and temporal dependencies at each frequency.
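Before detailing the two branches of M-KAN, here is a minimal PyTorch sketch of the preprocessing and CFD steps above (Eqs. 1, 3, 4) that produce the band representations $f_i$ consumed in this section. The function names, the use of the real FFT, and the amplitude rescaling after zero-padding are illustrative assumptions for this sketch, not the authors' released code; tensors carry the sequence along the last axis.

```python
import torch

def hierarchical_decompose(x, k, d):
    """Eq. (1): generate k levels by repeatedly moving-average pooling the
    last axis with window d (x_1 = x is the finest, x_k the coarsest)."""
    levels = [x]
    for _ in range(k - 1):
        cur = levels[-1]
        pad = (-cur.shape[-1]) % d          # right-pad by repeating the last value
        if pad:
            cur = torch.cat([cur, cur[..., -1:].expand(*cur.shape[:-1], pad)], dim=-1)
        levels.append(cur.reshape(*cur.shape[:-1], -1, d).mean(dim=-1))
    return levels

def frequency_upsample(x_low, target_len):
    """Eq. (3): rFFT -> zero-pad the spectrum -> irFFT, stretching x_low to
    target_len without adding any new frequency content."""
    spec = torch.fft.rfft(x_low, dim=-1)
    pad = target_len // 2 + 1 - spec.shape[-1]
    zeros = torch.zeros(*spec.shape[:-1], pad, dtype=spec.dtype, device=spec.device)
    spec = torch.cat([spec, zeros], dim=-1)
    # rescale so amplitudes survive the length change in the inverse transform
    return torch.fft.irfft(spec, n=target_len, dim=-1) * (target_len / x_low.shape[-1])

def band_residual(x_i, x_next):
    """Eq. (4): the i-th band is the residual between level i and the
    frequency-upsampled level below it."""
    return x_i - frequency_upsample(x_next, x_i.shape[-1])
```

In the full model these operations are applied per embedding channel after the Linear projection of Eq. (2).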
M-KAN adopts a dual-branch parallel architecture to separately model temporal representation learning and temporal dependency learning in a frequency-specific way, using Multi-order KANs to learn the representation of each frequency component and employing Depthwise Convolution to capture the temporal dependencies. The details of Depthwise Convolution and Multi-order KANs are given as follows.

Depthwise Convolution To separate the modeling of temporal dependencies from the learning of sequence representations, we adopt a specific type of group convolution known as Depthwise Convolution, in which the number of groups matches the embedding dimension. Depthwise Convolution employs $D$ groups of convolution kernels to perform independent convolution operations on the series of each channel. This allows the model to focus on capturing temporal patterns without interference from inter-channel relationships. The process of Depthwise Convolution is:

$$
f_{i, 1} = \operatorname{Conv}_{D \rightarrow D}(f_{i}, \text{group} = D) \tag{5}
$$

Multi-order KANs Compared with the traditional MLP, KAN replaces linear weights with learnable univariate functions, allowing complex nonlinear relationships to be modeled with fewer parameters and greater interpretability (Xu et al., 2024a). Assume that a KAN is composed of $L + 1$ layers of neurons and the number of neurons in layer $l$ is $n_{l}$. The transmission relationship between the $j$-th neuron in layer $l + 1$ and all neurons in layer $l$ can be expressed as $z_{l + 1,j} = \sum_{i = 1}^{n_l}\phi_{l,j,i}(z_{l,i})$, where $z_{l + 1,j}$ is the $j$-th neuron at layer $l + 1$ and $z_{l,i}$ is the $i$-th neuron at layer $l$. We can simply understand that each neuron is connected to the neurons in the previous layer through a learnable univariate function $\phi$. The vanilla KAN (Liu et al., 2024c) employs spline functions as the learnable univariate basis functions $\phi$, but suffers from a complex recursive computation process, which hinders its efficiency. Here, we adopt ChebyshevKAN (SS, 2024) to learn the representation of each frequency component, i.e., channel learning. ChebyshevKAN is constructed from linear combinations of Chebyshev polynomials; that is, the learnable univariate functions $\phi$ are generated as linear combinations of Chebyshev polynomials of different orders. The Chebyshev polynomial is defined by:

$$
T_{n}(x) = \cos(n \arccos(x)) \tag{6}
$$

where $n$ is the order of the Chebyshev polynomial, and the complexity of the polynomial increases with its order. A 1-layer ChebyshevKAN applied to the channel dimension can be expressed as:

$$
\phi_{o}(x) = \sum_{j = 1}^{D} \sum_{i = 0}^{n} \Theta_{o, j, i} T_{i}(\tanh(x_{j})) \tag{7}
$$

$$
\operatorname{KAN}(x) = \left\{ \begin{array}{c} \phi_{1}(x) \\ \vdots \\ \phi_{D}(x) \end{array} \right\} \tag{8}
$$

where $o$ is the index of the output neuron and $\Theta \in \mathbb{R}^{D\times D\times (n + 1)}$ are the learnable coefficients used to linearly combine the Chebyshev polynomials. It is worth noting that the frequency components within the time series exhibit increasingly complex temporal dynamics as the frequency increases, necessitating a network with stronger representation capabilities to learn these characteristics. ChebyshevKAN allows for the adjustment of the highest order of the Chebyshev polynomials $n$ to enhance its representation ability.
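As a concrete illustration of Eqs. (7)-(8), a 1-layer ChebyshevKAN can be sketched in a few lines of PyTorch. The class name `ChebyKANLayer`, its initialization scale, and the use of the stable three-term recurrence (instead of the cos-arccos closed form) are assumptions made for this sketch, not the reference implementation.

```python
import torch
import torch.nn as nn

class ChebyKANLayer(nn.Module):
    """One ChebyshevKAN layer (Eqs. 7-8): each output is a learnable linear
    combination of Chebyshev polynomials of the tanh-squashed inputs."""
    def __init__(self, dim_in, dim_out, order):
        super().__init__()
        self.order = order
        # Theta in R^{dim_out x dim_in x (order + 1)}, as in Eq. (7)
        self.theta = nn.Parameter(torch.randn(dim_out, dim_in, order + 1)
                                  / (dim_in * (order + 1)) ** 0.5)

    def forward(self, x):                     # x: (..., dim_in)
        x = torch.tanh(x)                     # squash inputs into [-1, 1]
        # Chebyshev recurrence: T_0 = 1, T_1 = x, T_i = 2x T_{i-1} - T_{i-2}
        T = [torch.ones_like(x), x]
        for _ in range(2, self.order + 1):
            T.append(2 * x * T[-1] - T[-2])
        T = torch.stack(T[: self.order + 1], dim=-1)  # (..., dim_in, order+1)
        return torch.einsum('...ji,oji->...o', T, self.theta)
```

Stacking such layers, with the order chosen per frequency band, yields the Multi-order KANs described next.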
Therefore, from the low-frequency to the high-frequency components, we adopt an increasing order of Chebyshev polynomials to align the frequency components with the complexity of the KAN, thereby accurately learning the representations of different frequency components. We refer to this group of KANs with varying highest Chebyshev polynomial orders as Multi-order KANs. We set a lower-bound order $b$, and the representation learning process for $f_{i}$ can be expressed as:

$$
f_{i, 2} = \operatorname{KAN}(f_{i}, \text{order} = b + k - i) \tag{9}
$$

The final output of the M-KAN block is the sum of the outputs from the Multi-order KANs and the Depthwise Convolution:

$$
\hat{f}_{i} = f_{i, 1} + f_{i, 2} \tag{10}
$$

# 3.5 FREQUENCY MIXING

After specifically learning the representation of each frequency component, we need to re-transform the frequency representations into the form of multi-level sequences before entering the next CFD block, ensuring that the Decomposition-Learning-Mixing process is repeatable. Therefore, we design Frequency Mixing blocks to convert the frequency component at the $i$-th level, $\hat{f}_i$, back into the multi-level sequence $x_i$, enabling it to serve as input for the next CFD block. To achieve this, we simply need to supplement the frequency information from levels $i + 1$ to $k$ back into the $i$-th level. Thus, we employ Frequency Upsampling again to incrementally reintegrate the information into the higher frequency components:

$$
x_{i} = \operatorname{IFFT}(\operatorname{Padding}(\operatorname{FFT}(x_{i + 1}))) + \hat{f}_{i} \tag{11}
$$

For the last Frequency Mixing block, we extract the highest-level sequence $x_{1}$ and use a simple linear layer to produce the forecasting results $X_{O}$:

$$
X_{O} = \operatorname{Linear}(x_{1}) \tag{12}
$$

Due to the use of a variate-independent strategy, we also need to stack the predicted results of all variables together to obtain the final multivariate prediction $\mathbf{X}_{\mathrm{O}}$.
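Putting Sections 3.3-3.5 together, one Decomposition-Learning-Mixing stage could be composed as sketched below, reusing `frequency_upsample` and `ChebyKANLayer` from the sketches above. The dual-branch block follows Eqs. (5), (9) and (10), and the mixing loop follows Eq. (11); treating the coarsest level as its own band and the exact module wiring are our reading of Figure 1, not the released implementation.

```python
import torch
import torch.nn as nn

class MKANBlock(nn.Module):
    """Dual-branch M-KAN (Eqs. 5, 9, 10) for one band f of shape (B, L, D)."""
    def __init__(self, dim, order, kernel_size=3):
        super().__init__()
        self.kan = ChebyKANLayer(dim, dim, order)       # channel branch (Eq. 9)
        self.dwconv = nn.Conv1d(dim, dim, kernel_size,  # temporal branch (Eq. 5)
                                padding=kernel_size // 2, groups=dim)

    def forward(self, f):
        temporal = self.dwconv(f.transpose(1, 2)).transpose(1, 2)
        return temporal + self.kan(f)                   # Eq. (10)

def decomposition_learning_mixing(xs, blocks):
    """One stage on multi-level sequences xs[0] (finest) .. xs[-1] (coarsest),
    each of shape (B, L_i, D); per Eq. (9), e.g.
    blocks = [MKANBlock(D, order=b + k - 1 - i) for i in range(k)]."""
    k = len(xs)
    up = lambda lo, L: frequency_upsample(lo.transpose(1, 2), L).transpose(1, 2)
    # CFD (Eqs. 3-4): residual bands; the coarsest level is kept as its own band
    bands = [xs[i] - up(xs[i + 1], xs[i].shape[1]) for i in range(k - 1)] + [xs[-1]]
    # M-KAN learning, with KAN order growing toward the high-frequency bands
    bands = [blk(f) for blk, f in zip(blocks, bands)]
    # Frequency Mixing (Eq. 11): reintegrate lower levels bottom-up
    mixed, outs = bands[-1], [None] * k
    outs[-1] = mixed
    for i in range(k - 2, -1, -1):
        mixed = up(mixed, bands[i].shape[1]) + bands[i]
        outs[i] = mixed
    return outs  # outs[0] is x_1, fed to the final Linear head (Eq. 12)
```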
Table 1: Full results of the multivariate long-term forecasting comparison. The input sequence length is set to 96 for all baselines and the prediction lengths $F \in \{96, 192, 336, 720\}$. Avg means the average results from all four prediction lengths. Each cell reports MSE / MAE; a "–" marks a value missing from the source table.

| Dataset | $F$ | TimeKAN (Ours) | TimeMixer (2024a) | iTransformer (2024b) | Time-FFM (2024a) | PatchTST (2023) | TimesNet (2023) | MICN (2023) | DLinear (2023) | FreTS (2024) | FiLM (2022a) | FEDformer (2022b) | Autoformer (2021) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| ETTh1 | 96 | 0.367 / 0.395 | 0.385 / 0.402 | 0.386 / 0.405 | 0.385 / 0.400 | 0.460 / 0.447 | 0.384 / 0.402 | 0.426 / 0.446 | 0.397 / 0.412 | 0.395 / 0.407 | 0.438 / 0.433 | 0.395 / 0.424 | 0.449 / 0.459 |
| ETTh1 | 192 | 0.414 / 0.420 | 0.443 / 0.430 | 0.441 / 0.436 | 0.439 / 0.430 | 0.512 / 0.477 | 0.436 / 0.429 | 0.454 / 0.464 | 0.446 / 0.441 | 0.490 / 0.477 | 0.494 / 0.466 | 0.469 / 0.470 | 0.500 / 0.482 |
| ETTh1 | 336 | 0.445 / 0.434 | 0.512 / 0.470 | 0.487 / 0.458 | 0.480 / 0.449 | 0.546 / 0.496 | 0.638 / 0.469 | 0.493 / 0.487 | 0.489 / 0.467 | 0.510 / 0.480 | 0.547 / 0.495 | 0.490 / 0.477 | 0.521 / 0.496 |
| ETTh1 | 720 | 0.444 / 0.459 | 0.497 / 0.476 | 0.503 / 0.491 | 0.462 / 0.456 | 0.544 / 0.517 | 0.521 / 0.500 | 0.526 / 0.526 | 0.513 / 0.510 | 0.568 / 0.538 | 0.586 / 0.538 | 0.598 / 0.544 | 0.514 / 0.512 |
| ETTh1 | Avg | 0.417 / 0.427 | 0.459 / 0.444 | 0.454 / 0.447 | 0.442 / 0.434 | 0.516 / 0.484 | 0.495 / 0.450 | 0.475 / 0.480 | 0.461 / 0.457 | 0.491 / 0.475 | 0.516 / 0.483 | 0.498 / 0.484 | 0.496 / 0.487 |
| ETTh2 | 96 | 0.290 / 0.340 | 0.289 / 0.342 | 0.297 / 0.349 | 0.301 / 0.351 | 0.308 / 0.355 | 0.340 / 0.374 | 0.372 / 0.424 | 0.340 / 0.394 | 0.332 / 0.387 | 0.322 / 0.364 | 0.358 / 0.397 | 0.346 / 0.388 |
| ETTh2 | 192 | 0.375 / 0.392 | 0.378 / 0.397 | 0.380 / 0.400 | 0.378 / 0.397 | 0.393 / 0.405 | 0.402 / 0.414 | 0.492 / 0.492 | 0.482 / 0.479 | 0.451 / 0.457 | 0.405 / 0.414 | 0.429 / 0.439 | 0.456 / 0.452 |
| ETTh2 | 336 | 0.423 / 0.435 | 0.432 / 0.434 | 0.428 / 0.432 | 0.422 / 0.431 | 0.427 / 0.436 | 0.452 / 0.452 | 0.607 / 0.555 | 0.591 / 0.541 | 0.466 / 0.473 | 0.435 / 0.445 | 0.496 / 0.487 | 0.482 / 0.486 |
| ETTh2 | 720 | 0.443 / 0.449 | 0.464 / 0.464 | 0.427 / 0.445 | 0.427 / 0.444 | 0.436 / 0.450 | 0.462 / 0.468 | 0.824 / 0.655 | 0.839 / 0.661 | 0.485 / 0.471 | 0.445 / 0.457 | 0.463 / 0.474 | 0.515 / 0.511 |
| ETTh2 | Avg | 0.383 / 0.404 | 0.390 / 0.409 | 0.383 / 0.407 | 0.382 / 0.406 | 0.391 / 0.411 | 0.414 / 0.427 | 0.574 / 0.531 | 0.563 / 0.519 | 0.433 / 0.446 | 0.402 / 0.420 | 0.437 / 0.449 | 0.450 / 0.459 |
| ETTm1 | 96 | 0.322 / 0.361 | 0.317 / 0.356 | 0.334 / 0.368 | 0.336 / 0.369 | 0.352 / 0.374 | 0.338 / 0.375 | 0.365 / 0.387 | 0.346 / 0.374 | 0.337 / 0.374 | 0.353 / 0.370 | 0.379 / 0.419 | 0.505 / 0.475 |
| ETTm1 | 192 | 0.357 / 0.383 | 0.367 / 0.384 | 0.377 / 0.391 | 0.378 / 0.389 | 0.390 / 0.393 | 0.374 / 0.387 | 0.403 / 0.408 | 0.382 / 0.391 | 0.382 / 0.398 | 0.387 / – | 0.426 / 0.441 | 0.553 / 0.496 |
| ETTm1 | 336 | 0.382 / 0.401 | 0.391 / 0.406 | 0.426 / 0.420 | 0.411 / 0.410 | 0.421 / 0.414 | 0.410 / 0.411 | 0.436 / 0.431 | 0.415 / 0.415 | 0.420 / 0.423 | 0.421 / 0.408 | 0.445 / 0.459 | 0.621 / 0.537 |
| ETTm1 | 720 | 0.445 / 0.435 | 0.454 / 0.441 | 0.491 / 0.459 | 0.469 / 0.441 | 0.462 / 0.449 | 0.478 / 0.450 | 0.489 / 0.462 | 0.473 / 0.451 | 0.490 / 0.471 | 0.481 / 0.441 | 0.543 / 0.490 | 0.671 / 0.561 |
| ETTm1 | Avg | 0.376 / 0.395 | 0.382 / 0.397 | 0.407 / 0.410 | 0.399 / 0.402 | 0.406 / 0.407 | 0.400 / 0.406 | 0.423 / 0.422 | 0.404 / 0.408 | 0.407 / 0.417 | 0.412 / 0.402 | 0.448 / 0.452 | 0.588 / 0.517 |
| ETTm2 | 96 | 0.174 / 0.255 | 0.175 / 0.257 | 0.180 / 0.264 | 0.181 / 0.267 | 0.183 / 0.270 | 0.187 / 0.267 | 0.197 / 0.296 | 0.193 / 0.293 | 0.186 / 0.275 | 0.183 / 0.266 | 0.203 / 0.287 | 0.255 / 0.339 |
| ETTm2 | 192 | 0.239 / 0.299 | 0.240 / 0.302 | 0.250 / 0.309 | 0.247 / 0.308 | 0.255 / 0.314 | 0.249 / 0.309 | 0.284 / 0.361 | 0.284 / 0.361 | 0.259 / 0.323 | 0.248 / 0.305 | 0.269 / 0.328 | 0.281 / 0.340 |
| ETTm2 | 336 | 0.301 / 0.340 | 0.303 / 0.343 | 0.311 / 0.348 | 0.309 / 0.347 | 0.309 / 0.347 | 0.321 / 0.351 | 0.381 / 0.429 | 0.382 / 0.429 | 0.349 / 0.386 | 0.309 / 0.343 | 0.325 / 0.366 | 0.339 / 0.372 |
| ETTm2 | 720 | 0.395 / 0.396 | 0.392 / 0.396 | 0.412 / 0.407 | 0.406 / 0.404 | 0.412 / 0.404 | 0.408 / 0.403 | 0.549 / 0.522 | 0.558 / 0.525 | 0.559 / 0.511 | 0.410 / 0.400 | 0.421 / 0.415 | 0.433 / 0.432 |
| ETTm2 | Avg | 0.277 / 0.322 | 0.277 / 0.324 | 0.288 / 0.332 | 0.286 / 0.332 | 0.290 / 0.334 | 0.291 / 0.333 | 0.353 / 0.402 | 0.354 / 0.402 | 0.339 / 0.374 | 0.288 / 0.328 | 0.305 / 0.349 | 0.327 / 0.371 |
| Weather | 96 | 0.162 / 0.208 | 0.163 / 0.209 | 0.174 / 0.214 | 0.191 / 0.230 | 0.186 / 0.227 | 0.172 / 0.220 | 0.198 / 0.261 | 0.195 / 0.252 | 0.171 / 0.227 | 0.195 / 0.236 | 0.217 / 0.296 | 0.266 / 0.336 |
| Weather | 192 | 0.207 / 0.249 | 0.211 / 0.254 | 0.221 / 0.254 | 0.236 / 0.267 | 0.234 / 0.265 | 0.219 / 0.261 | 0.239 / 0.299 | 0.237 / 0.295 | 0.218 / 0.280 | 0.239 / 0.271 | 0.276 / 0.336 | 0.307 / 0.367 |
| Weather | 336 | 0.263 / 0.290 | 0.263 / 0.293 | 0.278 / 0.296 | 0.289 / 0.303 | 0.284 / 0.301 | 0.246 / 0.337 | 0.285 / 0.336 | 0.282 / 0.331 | 0.265 / 0.317 | 0.289 / 0.306 | 0.339 / 0.380 | 0.359 / – |
| Weather | 720 | 0.338 / 0.340 | 0.344 / 0.348 | 0.358 / 0.347 | 0.362 / 0.350 | 0.356 / 0.349 | 0.365 / 0.359 | 0.351 / 0.388 | 0.345 / 0.382 | 0.326 / 0.351 | 0.360 / 0.351 | 0.403 / 0.428 | 0.419 / 0.428 |
| Weather | Avg | 0.242 / 0.272 | 0.245 / 0.276 | 0.258 / 0.278 | 0.270 / 0.288 | 0.265 / 0.285 | 0.251 / 0.294 | 0.268 / 0.321 | 0.265 / 0.315 | 0.245 / 0.294 | 0.271 / 0.290 | 0.309 / 0.360 | 0.338 / 0.382 |
| Electricity | 96 | 0.174 / 0.266 | 0.153 / 0.245 | 0.148 / 0.240 | 0.198 / 0.282 | 0.190 / 0.296 | 0.168 / 0.272 | 0.180 / 0.293 | 0.210 / 0.302 | 0.171 / 0.260 | 0.198 / 0.274 | 0.193 / 0.308 | 0.201 / 0.317 |
| Electricity | 192 | 0.182 / 0.273 | 0.166 / 0.257 | 0.162 / 0.253 | 0.199 / 0.285 | 0.199 / 0.304 | 0.184 / 0.322 | 0.189 / 0.302 | 0.210 / 0.305 | 0.177 / 0.268 | 0.198 / 0.278 | 0.201 / 0.315 | 0.222 / 0.334 |
| Electricity | 336 | 0.197 / 0.286 | 0.185 / 0.275 | 0.178 / 0.269 | 0.212 / 0.298 | 0.217 / 0.319 | 0.198 / 0.300 | 0.198 / 0.312 | 0.223 / 0.319 | 0.190 / 0.284 | 0.217 / 0.300 | 0.214 / 0.329 | 0.231 / 0.443 |
| Electricity | 720 | 0.236 / 0.320 | 0.224 / 0.312 | 0.225 / 0.317 | 0.253 / 0.330 | 0.258 / 0.352 | 0.220 / 0.320 | 0.217 / 0.330 | 0.258 / 0.350 | 0.228 / 0.316 | 0.278 / 0.356 | 0.246 / 0.355 | 0.254 / 0.361 |
| Electricity | Avg | 0.197 / 0.286 | 0.182 / 0.272 | 0.178 / 0.270 | 0.270 / 0.288 | 0.216 / 0.318 | 0.193 / 0.304 | 0.196 / 0.309 | 0.225 / 0.319 | 0.192 / 0.282 | 0.223 / 0.302 | 0.214 / 0.327 | 0.227 / 0.338 |
# 4 EXPERIMENTS

Datasets We conduct extensive experiments on six real-world time series datasets, including Weather, ETTh1, ETTh2, ETTm1, ETTm2 and Electricity, for long-term forecasting. Following previous work (Wu et al., 2021), we split the ETT-series datasets into training, validation, and test sets in a ratio of 6:2:2. For the remaining datasets, we adopt a split ratio of 7:1:2.

Baselines We carefully select eleven well-acknowledged methods in the field of long-term time series forecasting as our baselines, including (1) Transformer-based methods: Autoformer (2021), FEDformer (2022b), PatchTST (2023), and iTransformer (2024b); (2) MLP-based methods: DLinear (2023) and TimeMixer (2024a); (3) CNN-based methods: MICN (2023) and TimesNet (2023); and (4) frequency-based methods: FreTS (2024) and FiLM (2022a). We additionally include a time series foundation model, Time-FFM (2024a).

Experimental Settings To ensure fair comparisons, we adopt the same look-back window length $T = 96$ and the same prediction lengths $F \in \{96,192,336,720\}$. We utilize the L2 loss for model training and use the Mean Square Error (MSE) and Mean Absolute Error (MAE) metrics to evaluate the performance of each method.

# 4.1 MAIN RESULTS

The comprehensive forecasting results are presented in Table 1; a lower MSE/MAE indicates a more accurate prediction result. We observe that TimeKAN demonstrates superior predictive performance across all datasets, except for the Electricity dataset, where iTransformer achieves the best result. This is due to iTransformer's use of channel-wise self-attention mechanisms to model inter-variable dependencies, which is particularly effective for high-dimensional datasets like Electricity. Additionally, both TimeKAN and TimeMixer perform consistently well in long-term forecasting tasks, showcasing the generalizability of well-designed time-series decomposition architectures for accurate predictions. Compared with other state-of-the-art methods, TimeKAN introduces a novel Decomposition-Learning-Mixing framework, closely integrating the characteristics of Multi-order KANs with this hierarchical architecture, enabling superior performance in a wide range of long-term forecasting tasks.

Table 2: Ablation study of the Frequency Upsampling. Each cell reports MSE / MAE.
| Method | ETTh1 | ETTh2 | ETTm1 | ETTm2 | Weather | Electricity |
|---|---|---|---|---|---|---|
| Linear Mapping | 0.401 / 0.413 | 0.312 / 0.362 | 0.328 / 0.365 | 0.180 / 0.263 | 0.164 / 0.211 | 0.184 / 0.275 |
| Linear Interpolation | 0.383 / 0.398 | 0.296 / 0.347 | 0.336 / 0.370 | 0.181 / 0.263 | 0.165 / 0.210 | 0.196 / 0.277 |
| Transposed Convolution | 0.377 / 0.407 | 0.290 / 0.344 | 0.326 / 0.366 | 0.178 / 0.261 | 0.163 / 0.211 | 0.188 / 0.274 |
| Frequency Upsampling | 0.367 / 0.395 | 0.290 / 0.340 | 0.322 / 0.361 | 0.174 / 0.255 | 0.162 / 0.208 | 0.174 / 0.266 |
Table 3: Ablation study of the Multi-order KANs. Each cell reports MSE / MAE.
| Method | ETTh1 | ETTh2 | ETTm1 | ETTm2 | Weather |
|---|---|---|---|---|---|
| MLPs | 0.376 / 0.397 | 0.298 / 0.348 | 0.319 / 0.361 | 0.178 / 0.264 | 0.162 / 0.211 |
| Fixed Low-order KANs | 0.376 / 0.398 | 0.292 / 0.341 | 0.327 / 0.366 | 0.175 / 0.257 | 0.164 / 0.211 |
| Fixed High-order KANs | 0.380 / 0.407 | 0.310 / 0.363 | 0.327 / 0.269 | 0.176 / 0.257 | 0.164 / 0.212 |
| Multi-order KANs | 0.367 / 0.395 | 0.290 / 0.340 | 0.322 / 0.361 | 0.174 / 0.255 | 0.162 / 0.208 |
# 4.2 ABLATION STUDY

In this section, we investigate several key components of TimeKAN, including Frequency Upsampling, Depthwise Convolution, and Multi-order KANs.

Frequency Upsampling To investigate the effectiveness of Frequency Upsampling, we compare it with three alternative upsampling methods that may not preserve frequency information before and after the transformation: (1) Linear Mapping; (2) Linear Interpolation; and (3) Transposed Convolution. As shown in Table 2, replacing Frequency Upsampling with any of these three methods results in a decline in performance. This indicates that these upsampling techniques fail to maintain the integrity of the frequency information after the transformation, rendering the Decomposition-Learning-Mixing framework ineffective. This strongly demonstrates that Frequency Upsampling, as a non-parametric method, is an irreplaceable component of the TimeKAN framework.

Multi-order KANs We design the following variants to investigate the effectiveness of Multi-order KANs: (1) MLPs, which replace each KAN with an MLP; (2) Fixed Low-order KANs, which use a KAN of order 2 at every frequency level; and (3) Fixed High-order KANs, which use a KAN of order 5 at every frequency level. The comparison results are shown in Table 3. Overall, Multi-order KANs achieve the best performance. Compared to MLPs, Multi-order KANs perform significantly better, demonstrating that well-designed KANs possess stronger representation capabilities than MLPs and are a compelling alternative. Both Fixed Low-order KANs and Fixed High-order KANs perform worse than Multi-order KANs, validating our design choice to incrementally increase the order of the KANs to adapt to the representations of different frequency components. Thus, the learnable functions of KANs are indeed a double-edged sword; achieving satisfactory results requires selecting the appropriate level of function complexity for the specific task.

Depthwise Convolution To assess the effectiveness of Depthwise Convolution, we replace it with the following choices: (1) w/o Depthwise Convolution; (2) Standard Convolution; (3) Multi-head Self-Attention. The results are shown in Table 4. Overall, Depthwise Convolution is the best choice. We clearly observe that removing Depthwise Convolution or replacing it with Multi-head Self-Attention leads to a significant drop in performance, highlighting the effectiveness of using convolution to learn temporal dependencies. When Depthwise Convolution is replaced with Standard Convolution, there are declines in most metrics, which implies that focusing on extracting temporal dependencies individually with Depthwise Convolution, without interference from inter-channel relationships, is a reasonable design.

Table 4: Ablation study of the Depthwise Convolution. Each cell reports MSE / MAE.
| Method | ETTh1 | ETTh2 | ETTm1 | ETTm2 | Weather |
|---|---|---|---|---|---|
| w/o Depthwise Conv | 0.379 / 0.397 | 0.296 / 0.343 | 0.337 / 0.373 | 0.180 / 0.263 | 0.168 / 0.211 |
| Standard Conv | 0.364 / 0.393 | 0.295 / 0.345 | 0.323 / 0.364 | 0.180 / 0.264 | 0.162 / 0.210 |
| Self-Attention | 0.377 / 0.406 | 0.293 / 0.342 | 0.329 / 0.365 | 0.184 / 0.272 | 0.174 / 0.225 |
| Depthwise Conv | 0.367 / 0.395 | 0.290 / 0.340 | 0.322 / 0.361 | 0.174 / 0.255 | 0.162 / 0.208 |
![](images/cb2d292288b505849ebccd78db2174a67eb9322af381215248d8bf02f49c6.jpg)
Figure 2: Comparison of forecasting performance between TimeKAN and three other models with varying look-back windows on the ETTm2 and Weather datasets. The look-back windows are selected to be $T \in \{48,96,192,336,512,720\}$, and the prediction length is fixed to $F = 96$.

![](images/5406045b8d3dd7cdbaee74bf072ecba19610f7495c9691ff5b71a89f25be987c.jpg)

Varying Look-back Window In principle, extending the look-back window provides more information for predicting the future, leading to a potential improvement in forecasting performance. An effective long-term TSF method equipped with strong temporal relation extraction capability should be able to improve its forecasting performance as the look-back window length increases (Zeng et al., 2023). As a model based on frequency decomposition learning, TimeKAN should achieve better predictive performance as the look-back window lengthens, since more incremental frequency information is available for prediction. To demonstrate that TimeKAN benefits from a larger look-back window, we select look-back window lengths from $T \in \{48,96,192,336,512,720\}$ while keeping the prediction length fixed at 96. As demonstrated in Figure 2, our TimeKAN consistently reduces the MSE scores as the look-back window increases, indicating that TimeKAN can effectively learn from long time series.

# 4.3 MODEL EFFICIENCY

We compare TimeKAN with the MLP-based method TimeMixer and the Transformer-based methods iTransformer and PatchTST in terms of model parameters and Multiply-Accumulate Operations (MACs), to validate that TimeKAN is a lightweight and efficient architecture. To ensure a fair comparison, we fix the prediction length $F = 96$ and the input length $T = 96$, and set the input batch size to 32. The comparison results are summarized in Table 5. It is clear that our TimeKAN demonstrates significant advantages in both model parameter size and MACs, particularly when compared to Transformer-based models. For instance, on the Electricity dataset, the parameter count of PatchTST is nearly 295 times that of TimeKAN, and its MACs are almost 118 times greater. Even when compared to the relatively lightweight MLP-based method TimeMixer, TimeKAN shows superior efficiency. On the Weather dataset, TimeKAN requires only $20.05\%$ of the parameters needed by TimeMixer and only $36.14\%$ of the MACs. This remarkable efficiency advantage is primarily attributed to the lightweight architectural design. The main computations of TimeKAN are concentrated in the M-KAN block, and the Depthwise Convolution we employ significantly reduces the number of parameters through grouped operations. Additionally, the powerful representation capability afforded by Multi-order KANs allows us to represent time series with very few neurons. TimeKAN thus achieves outstanding forecasting performance while requiring minimal computational resources.
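For reference, the Params column can be reproduced with a standard PyTorch parameter count, sketched below. We do not know which profiler the authors used for the MACs column; tools such as `thop` or `ptflops` are commonly applied to a fixed (batch, length, variables) input for this purpose.

```python
import torch

def count_parameters(model: torch.nn.Module) -> int:
    """Total number of trainable parameters, as reported in a Params column."""
    return sum(p.numel() for p in model.parameters() if p.requires_grad)

# Example usage (hypothetical model constructor and arguments):
# model = TimeKAN(seq_len=96, pred_len=96)
# print(f"Params: {count_parameters(model) / 1e3:.2f}K")
```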
Table 5: A comparison of model parameters (Params) and multiply-accumulate operations (MACs) for TimeKAN and three other models. To ensure a fair comparison, we fix the prediction length $F = 96$ and the input length $T = 96$, and set the input batch size to 32. Each cell reports Params / MACs.

| Model | ETTh1 | ETTh2 | ETTm1 | ETTm2 | Weather | Electricity |
|---|---|---|---|---|---|---|
| TimeMixer | 75.50K / 20.37M | 75.50K / 20.37M | 75.50K / 20.37M | 77.77K / 24.18M | 104.43K / 82.62M | 106.83K / 1.26G |
| iTransformer | 841.57K / 77.46M | 224.22K / 19.86M | 224.22K / 19.86M | 224.22K / 19.86M | 4.83M / 1.16G | 4.83M / 16.29G |
| PatchTST | 3.75M / 5.90G | 10.06M / 17.66G | 3.75M / 5.90G | 10.06M / 17.66G | 6.90M / 35.30G | 6.90M / 539.38G |
| TimeKAN | 12.84K / 7.63M | 15.00K / 8.02M | 14.38K / 7.63M | 38.12K / 16.66M | 20.94K / 29.86M | 23.34K / 456.50M |
# 5 CONCLUSION

We proposed an efficient KAN-based Frequency Decomposition Learning architecture (TimeKAN) for long-term time series forecasting. Based on a Decomposition-Learning-Mixing architecture, TimeKAN obtains series representations for each frequency band using Cascaded Frequency Decomposition blocks. Multi-order KAN Representation Learning blocks then leverage the high flexibility of KAN to learn and represent specific temporal patterns within each frequency band. Finally, Frequency Mixing blocks recombine the frequency bands into the original format. Extensive experiments on real-world datasets demonstrate that TimeKAN achieves state-of-the-art forecasting performance with extremely lightweight computational consumption.

# ACKNOWLEDGEMENTS

This work is supported by Shanghai Artificial Intelligence Laboratory. This work was done during Songtao Huang's internship at Shanghai Artificial Intelligence Laboratory.

# REFERENCES

Alexander Dylan Bodner, Antonio Santiago Tepsich, Jack Natan Spolski, and Santiago Pourteau. Convolutional kolmogorov-arnold networks. arXiv preprint arXiv:2406.13155, 2024.

Tao Dai, Beiliang Wu, Peiyuan Liu, Naiqi Li, Jigang Bao, Yong Jiang, and Shu-Tao Xia. Periodicity decoupling framework for long-term series forecasting. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=dp27P5HBBt.

Luo donghao and wang xue. ModernTCN: A modern pure convolution structure for general time series analysis. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=vpJMJerXHU.

Mononito Goswami, Konrad Szafer, Arjun Choudhry, Yifu Cai, Shuo Li, and Artur Dubrawski. Moment: A family of open time-series foundation models. In ICML, 2024. URL https://openreview.net/forum?id=FVvf69a5rx.

Hongbin Huang, Minghua Chen, and Xiao Qiao. Generative learning for financial time series with irregular and scale-invariant patterns. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=CdjnzWsQax.

Weiwei Jiang and Jiayun Luo. Graph neural network for traffic forecasting: A survey. Expert Systems with Applications, 207:117921, 2022. ISSN 0957-4174. doi: https://doi.org/10.1016/j.eswa.2022.117921. URL https://www.sciencedirect.com/science/article/pii/S0957417422011654.

Remi Lam, Alvaro Sanchez-Gonzalez, Matthew Willson, Peter Wirnsberger, Meire Fortunato, Ferran Alet, Suman Ravuri, Timo Ewalds, Zach Eaton-Rosen, Weihua Hu, Alexander Merose, Stephan Hoyer, George Holland, Oriol Vinyals, Jacklynn Stott, Alexander Pritzel, Shakir Mohamed, and Peter Battaglia. Learning skillful medium-range global weather forecasting. Science, 382(6677):1416-1421, 2023. doi: 10.1126/science.adi2336. URL https://www.science.org/doi/abs/10.1126/science.adi2336.

Chenxin Li, Xinyu Liu, Wuyang Li, Cheng Wang, Hengyu Liu, and Yixuan Yuan. U-kan makes strong backbone for medical image segmentation and generation. arXiv preprint arXiv:2406.02918, 2024.

Ziyao Li. Kolmogorov-arnold networks are radial basis function networks. arXiv preprint arXiv:2405.06721, 2024.
Shengsheng Lin, Weiwei Lin, Wentai Wu, Haojun Chen, and Junjie Yang. SparseTSF: Modeling long-term time series forecasting with 1k parameters. In Forty-first International Conference on Machine Learning, 2024. URL https://openreview.net/forum?id=54NSHO01Fe.

Minhao Liu, Ailing Zeng, Muxi Chen, Zhijian Xu, Qiuxia Lai, Lingna Ma, and Qiang Xu. Scinet: Time series modeling and forecasting with sample convolution and interaction. Advances in Neural Information Processing Systems, 35:5816-5828, 2022.

Qingxiang Liu, Xu Liu, Chenghao Liu, Qingsong Wen, and Yuxuan Liang. Time-FFM: Towards LM-empowered federated foundation model for time series forecasting. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024a. URL https://openreview.net/forum?id=HS0faHRhWD.

Yong Liu, Tengge Hu, Haoran Zhang, Haixu Wu, Shiyu Wang, Lintao Ma, and Mingsheng Long. itransformer: Inverted transformers are effective for time series forecasting. In The Twelfth International Conference on Learning Representations, 2024b. URL https://openreview.net/forum?id=JePfAI8fah.

Ziming Liu, Yixuan Wang, Sachin Vaidya, Fabian Ruehle, James Halverson, Marin Soljacic, Thomas Y Hou, and Max Tegmark. Kan: Kolmogorov-arnold networks. arXiv preprint arXiv:2404.19756, 2024c.

Yuqi Nie, Nam H Nguyen, Phanwadee Sinthong, and Jayant Kalagnanam. A time series is worth 64 words: Long-term forecasting with transformers. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=Jbdc0vTOcol.

Khemraj Shukla, Juan Diego Toscano, Zhicheng Wang, Zongren Zou, and George Em Karniadakis. A comprehensive and fair comparison between mlp and kan representations for differential equations and operator networks. arXiv preprint arXiv:2406.02917, 2024.

Sidharth SS. Chebyshev polynomial-based kolmogorov-arnold networks: An efficient architecture for nonlinear function approximation. arXiv preprint arXiv:2405.07200, 2024.

Huiqiang Wang, Jian Peng, Feihu Huang, Jince Wang, Junhui Chen, and Yifei Xiao. MICN: Multiscale local and global context modeling for long-term series forecasting. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=zt53IDUR1U.

Shiyu Wang, Haixu Wu, Xiaoming Shi, Tengge Hu, Huakun Luo, Lintao Ma, James Y. Zhang, and Jun Zhou. Timemixer: Decomposable multiscale mixing for time series forecasting. In The Twelfth International Conference on Learning Representations, 2024a. URL https://openreview.net/forum?id=7oLshfEIC2.

Yizheng Wang, Jia Sun, Jinshuai Bai, Cosmin Anitescu, Mohammad Sadegh Eshaghi, Xiaoying Zhuang, Timon Rabczuk, and Yinghua Liu. Kolmogorov arnold informed neural network: A physics-informed deep learning framework for solving pdes based on kolmogorov arnold networks. arXiv preprint arXiv:2406.11045, 2024b.

Haixu Wu, Jiehui Xu, Jianmin Wang, and Mingsheng Long. Autoformer: Decomposition transformers with auto-correlation for long-term series forecasting. In M. Ranzato, A. Beygelzimer, Y. Dauphin, P.S. Liang, and J. Wortman Vaughan (eds.), Advances in Neural Information Processing Systems, volume 34, pp. 22419-22430. Curran Associates, Inc., 2021. URL https://proceedings.neurips.cc/paper_files/paper/2021/file/bcc0d400288793e8bcdcd7c19a8ac0c2b-Paper.pdf.

Haixu Wu, Tengge Hu, Yong Liu, Hang Zhou, Jianmin Wang, and Mingsheng Long. Timesnet: Temporal 2d-variation modeling for general time series analysis. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=ju_Uqw384Oq.
Kunpeng Xu, Lifei Chen, and Shengrui Wang. Are kan effective for identifying and tracking concept drift in time series? arXiv preprint arXiv:2410.10041, 2024a.

Zhijian Xu, Ailing Zeng, and Qiang Xu. FITS: Modeling time series with $10k$ parameters. In The Twelfth International Conference on Learning Representations, 2024b. URL https://openreview.net/forum?id=bWcvvZ3qMb.

Kun Yi, Qi Zhang, Wei Fan, Shoujin Wang, Pengyang Wang, Hui He, Ning An, Defu Lian, Longbing Cao, and Zhendong Niu. Frequency-domain mlps are more effective learners in time series forecasting. Advances in Neural Information Processing Systems, 36, 2024.

Linfei Yin, Xinghui Cao, and Dongduan Liu. Weighted fully-connected regression networks for one-day-ahead hourly photovoltaic power forecasting. Applied Energy, 332:120527, 2023. ISSN 0306-2619. doi: https://doi.org/10.1016/j.apenergy.2022.120527. URL https://www.sciencedirect.com/science/article/pii/S0306261922017846.

Ailing Zeng, Muxi Chen, Lei Zhang, and Qiang Xu. Are transformers effective for time series forecasting? In Proceedings of the AAAI Conference on Artificial Intelligence, volume 37, pp. 11121-11128, 2023.

G. Peter Zhang. Time series forecasting using a hybrid arima and neural network model. Neurocomputing, 50:159-175, 2003. ISSN 0925-2312. doi: https://doi.org/10.1016/S0925-2312(01)00702-0. URL https://www.sciencedirect.com/science/article/pii/S0925231201007020.

Haoyi Zhou, Shanghang Zhang, Jieqi Peng, Shuai Zhang, Jianxin Li, Hui Xiong, and Wancai Zhang. Informer: Beyond efficient transformer for long sequence time-series forecasting. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 35, pp. 11106-11115, 2021.

Tian Zhou, Ziqing Ma, Qingsong Wen, Liang Sun, Tao Yao, Wotao Yin, Rong Jin, et al. Film: Frequency improved legendre memory model for long-term time series forecasting. Advances in Neural Information Processing Systems, 35:12677-12690, 2022a.

Tian Zhou, Ziqing Ma, Qingsong Wen, Xue Wang, Liang Sun, and Rong Jin. Fedformer: Frequency enhanced decomposed transformer for long-term series forecasting. In International Conference on Machine Learning, pp. 27268-27286. PMLR, 2022b.

# A ADDITIONAL MODEL ANALYSIS

Table 6: Full comparison results of model parameters (Params) and multiply-accumulate operations (MACs) for TimeKAN and other models. To ensure a fair comparison, we fix the prediction length $F = 96$ and the input length $T = 96$, and set the input batch size to 32. Each cell reports Params / MACs.
| Model | ETTh1 | ETTh2 | ETTm1 | ETTm2 | Weather | Electricity |
|---|---|---|---|---|---|---|
| TimeMixer | 75.50K / 20.37M | 75.50K / 20.37M | 75.50K / 20.37M | 77.77K / 24.18M | 104.43K / 82.62M | 106.83K / 1.26G |
| iTransformer | 841.57K / 77.46M | 224.22K / 19.86M | 224.22K / 19.86M | 224.22K / 19.86M | 4.83M / 1.16G | 4.83M / 16.29G |
| PatchTST | 3.75M / 5.90G | 10.06M / 17.66G | 3.75M / 5.90G | 10.06M / 17.66G | 6.90M / 35.30G | 6.90M / 539.38G |
| TimesNet | 605.48K / 18.13G | 1.19M / 36.28G | 4.71M / 144G | 1.19M / 36.28G | 1.19M / 36.28G | 150.30M / 4.61T |
| MICN | 25.20M / 71.95G | 25.20M / 71.95G | 25.20M / 71.95G | 25.20M / 71.95G | 111.03K / 295.07M | 6.64M / 19.5G |
| DLinear | 18.62K / 0.6M | 18.62K / 0.6M | 18.62K / 0.6M | 18.62K / 0.6M | 18.62K / 0.6M | 18.62K / 0.6M |
| FreTS | 3.24M / 101.46M | 3.24M / 101.46M | 3.24M / 101.46M | 3.24M / 101.46M | 3.24M / 101.46M | 3.24M / 101.46M |
| FiLM | 12.58M / 2.82G | 12.58M / 2.82G | 12.58M / 2.82G | 12.58M / 2.82G | 12.58M / 8.46G | 12.58M / 8.46G |
| FEDformer | 23.38M / 24.96G | 23.38M / 24.96G | 23.38M / 24.96G | 23.38M / 24.96G | 23.45M / 25.23G | 24.99M / 30.89G |
| Autoformer | 10.54M / 22.82G | 10.54M / 22.82G | 10.54M / 22.82G | 10.54M / 22.82G | 10.61M / 23.08G | 12.14M / 28.75G |
| TimeKAN | 12.84K / 7.63M | 15.00K / 8.02M | 14.38K / 7.63M | 38.12K / 16.66M | 20.94K / 29.86M | 23.34K / 456.50M |
# A.1 COMPUTATIONAL COMPLEXITY ANALYSIS

In our TimeKAN, the main computational complexity lies in the Fast Fourier Transform (FFT), the Depthwise Convolution block, and the Multi-order KAN block. Consider a time series of length $L$ where the hidden state of each time point has dimension $D$. For the FFT, the computational complexity is $\mathcal{O}(L\log L)$. For the Depthwise Convolution block, if we set the convolutional kernel size to $M$ and the stride to 1, the complexity is $\mathcal{O}(LDM)$. Finally, assuming that the highest order of the Chebyshev polynomials is $K$, the complexity of the Multi-order KAN block is $\mathcal{O}(LD^2K)$. Since $M, D, K$ are constants that are independent of the input length $L$, the computational complexity of both the Depthwise Convolution block and the Multi-order KAN block reduces to $\mathcal{O}(L)$, which is linear in the sequence length. In summary, the overall computational complexity is $\max(\mathcal{O}(L\log L), \mathcal{O}(L)) = \mathcal{O}(L\log L)$. When the input is a multivariate sequence with $N$ variables, the computational complexity expands to $\mathcal{O}(NL\log L)$ due to our variate-independent strategy.

# A.2 MODEL EFFICIENCY

Here, we provide the complete results of model efficiency in terms of parameters and MACs in Table 6. As can be seen, except for DLinear, our TimeKAN consistently demonstrates a significant advantage in both parameter count and MACs compared to any other model. DLinear is a model consisting of only a single linear layer, which makes it the most lightweight in terms of parameters and MACs. However, the performance of DLinear already shows a significant gap when compared to state-of-the-art methods. Therefore, our TimeKAN actually achieves superior performance in both forecasting accuracy and efficiency.

# A.3 ERROR BARS

To evaluate the robustness of TimeKAN, we repeat the experiments with three randomly selected seeds and compare against the second-best model (TimeMixer). We report the mean and standard deviation of the results across the three runs, as well as the confidence level of TimeKAN's superiority over TimeMixer. The results are averaged over four prediction horizons (96, 192, 336, and 720). As shown in Table 7, in most cases we have over $90\%$ confidence that TimeKAN outperforms the second-best model, demonstrating the good robustness of TimeKAN.

Table 7: Standard deviation and statistical tests for our TimeKAN method and the second-best method (TimeMixer) on five datasets. A "–" marks a value missing from the source table.
| Dataset | TimeKAN (MSE) | TimeMixer (MSE) | Confidence | TimeKAN (MAE) | TimeMixer (MAE) | Confidence |
|---|---|---|---|---|---|---|
| ETTh1 | 0.422±0.004 | 0.462±0.006 | 99% | 0.430±0.002 | 0.448±0.004 | 99% |
| ETTh2 | 0.387±0.003 | 0.392±0.003 | 99% | 0.408±0.003 | 0.412±0.004 | 90% |
| ETTm1 | 0.378±0.002 | 0.386±0.003 | 99% | 0.396±0.001 | 0.399±0.001 | 99% |
| ETTm2 | 0.278±0.001 | 0.278±0.001 | – | 0.324±0.001 | 0.325±0.001 | 90% |
| Weather | 0.243±0.001 | 0.245±0.001 | 99% | 0.273±0.001 | 0.276±0.001 | 99% |
Table 8: Comparison on the Electricity dataset when the look-back window is expanded to 512. Each cell reports MSE / MAE.
| Model | 96 | 192 | 336 | 720 |
|---|---|---|---|---|
| MOMENT | 0.136 / 0.233 | 0.152 / 0.247 | 0.167 / 0.264 | 0.205 / 0.295 |
| TimeMixer | 0.135 / 0.231 | 0.149 / 0.245 | 0.172 / 0.268 | 0.203 / 0.295 |
| TimeKAN | 0.133 / 0.230 | 0.149 / 0.247 | 0.165 / 0.261 | 0.203 / 0.294 |
# A.4 FREQUENCY LEARNING WITH LONGER WINDOW

In Table 1, TimeKAN performs relatively poorly on the Electricity dataset. We infer that this is due to the overly short look-back window ($T = 96$), which cannot provide sufficient frequency information. To verify this, we compare the average number of effective frequency components under a given look-back window. Specifically, we randomly select a sequence of length $T$ from the Electricity dataset and transform it into the frequency domain using the FFT. We define effective frequencies as those with amplitudes greater than 0.1 times the maximum amplitude. We then average the number of effective frequencies obtained across all variables to reflect the amount of effective frequency information provided by the sequence (a short sketch of this counting procedure is given at the end of this section). When $T = 96$ (the setting in this paper), the average number of effective frequencies is 10.69. When we extend the sequence length to 512, the average number of effective frequencies becomes 19.74. Therefore, the effective frequency information provided by 512 time steps is nearly twice that of 96 time steps. This indicates that $T = 96$ loses a substantial amount of effective information.

To validate whether using $T = 512$ allows us to leverage more frequency information, we extend the look-back window of TimeKAN to 512 on the Electricity dataset and compare it with the state-of-the-art method TimeMixer and the time series foundation model MOMENT (Goswami et al., 2024). The results are shown in Table 8. Although TimeKAN performs significantly worse than TimeMixer when $T = 96$, it achieves the best performance on the Electricity dataset when the look-back window is extended to 512. This also demonstrates that TimeKAN can benefit significantly from richer frequency information.

# A.5 IMPACT OF THE NUMBER OF FREQUENCY BANDS

To explore the impact of the number of frequency bands on performance, we set the number of frequency bands to 2, 3, 4, and 5. The effects of the different frequency band divisions on performance are shown in Table 9. As we can see, in most cases, dividing the frequency range into 3 or 4 bands yields the best performance. This aligns with our prior intuition: dividing into two bands results in excessive frequency overlap, while dividing into five bands leaves too little information within each band, making it difficult to accurately model the information within that frequency range.
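Returning to the effective-frequency count of Appendix A.4, a minimal sketch is given below. The 0.1 amplitude threshold and the averaging over variables follow the text above; the single random window and the NumPy implementation are our assumptions.

```python
import numpy as np

def avg_effective_frequencies(data, T, threshold=0.1):
    """data: array of shape (length, num_vars). For one random window of
    length T, count rFFT components whose amplitude exceeds threshold times
    the maximum amplitude, then average the count over variables (A.4)."""
    start = np.random.randint(0, data.shape[0] - T + 1)
    window = data[start:start + T]                 # (T, num_vars)
    amp = np.abs(np.fft.rfft(window, axis=0))      # (T // 2 + 1, num_vars)
    effective = amp > threshold * amp.max(axis=0, keepdims=True)
    return effective.sum(axis=0).mean()
```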
Table 9: Impact of the number of frequency bands on performance under the 96-to-96 prediction setting. Each cell reports MSE / MAE.

| Number of Frequency Bands | ETTh2 | Weather | Electricity |
|---|---|---|---|
| 2 | 0.292 / 0.340 | 0.164 / 0.209 | 0.183 / 0.270 |
| 3 | 0.290 / 0.339 | 0.163 / 0.209 | 0.177 / 0.268 |
| 4 | 0.290 / 0.340 | 0.162 / 0.208 | 0.174 / 0.266 |
| 5 | 0.295 / 0.346 | 0.164 / 0.211 | 0.177 / 0.273 |
# B MATHEMATICAL DETAILS

# B.1 KOLMOGOROV-ARNOLD NETWORK

The Kolmogorov-Arnold representation theorem states that any multivariate continuous function can be expressed as a combination of univariate functions and addition operations. More specifically, a multivariate continuous function $g:[0,1]^n \rightarrow \mathbb{R}$ can be written as:

$$
g(x) = g(x_{1}, \dots, x_{n}) = \sum_{i = 1}^{2n + 1} \Phi_{i} \left( \sum_{j = 1}^{n} \phi_{ij}(x_{j}) \right) \tag{13}
$$

where $\phi_{ij}$ and $\Phi_i$ are univariate functions. Following the pattern of the MLP, the Kolmogorov-Arnold Network (KAN) (Liu et al., 2024c) extends the Kolmogorov-Arnold theorem to deep representations, i.e., stacked multilayer Kolmogorov-Arnold representations. Assume that a KAN is composed of $L + 1$ layers of neurons and the number of neurons in layer $l$ is $n_l$. The transmission relationship between the $j$-th neuron in layer $l + 1$ and all neurons in layer $l$ can be expressed as:

$$
x_{l + 1, j} = \sum_{i = 1}^{n_{l}} \phi_{l, j, i}(x_{l, i}) \tag{14}
$$

We can simply understand that each neuron is connected to the neurons in the previous layer through a univariate function $\phi$. Similar to the MLP, the computation of all neurons at layer $l$ can be reorganized as a function-matrix multiplication $\Phi_{l-1}$. Therefore, given an input vector $x \in \mathbb{R}^{n_0}$, the final output of the KAN network is:

$$
\operatorname{KAN}(x) = (\Phi_{L - 1} \circ \dots \circ \Phi_{1} \circ \Phi_{0})\, x \tag{15}
$$

In the vanilla KAN (Liu et al., 2024c), the univariate function $\phi_{l,j,i}$ is parametrized using B-splines, a class of smooth curves constructed via piecewise polynomial basis functions. To ensure stability and enhance the representational capacity, KAN overlays the spline function on a fixed basis function $b$, which is typically the SiLU function:

$$
\phi(x) = w_{b} b(x) + w_{s} \operatorname{spline}(x) \tag{16}
$$

$$
\operatorname{spline}(x) = \sum_{i} c_{i} B_{i}(x) \tag{17}
$$

where $w_{b}$ and $w_{s}$ are learnable weights and $\operatorname{spline}(x)$ is the spline function constructed from the linear combination of B-spline basis functions $B_{i}$. However, the complex recursive computation process of high-order B-spline functions hinders the efficiency of KAN. Therefore, in this work, we adopt the simpler Chebyshev polynomial as the univariate function to replace the B-spline function (SS, 2024). The univariate function defined by the Chebyshev polynomial is given as follows:

$$
T_{k}(x) = \cos(k \arccos(x)) \tag{18}
$$

Here, $k$ represents the order of the polynomial. Then, we consider the univariate function $\phi$ as a linear combination of Chebyshev polynomials of different orders:

$$
x_{l + 1, j} = \sum_{i = 1}^{n_{l}} \phi_{l, j, i}(x_{l, i}) = \sum_{i = 1}^{n_{l}} \sum_{k = 0}^{K} \Theta_{i, k} T_{k}(\tanh(x_{l, i})) \tag{19}
$$

where $\Theta_{i,k}$ are the coefficients of the $k$-th order Chebyshev polynomial acting on $x_{l,i}$ and $\tanh$ is the activation function used to normalize the inputs to between -1 and 1. By adjusting the highest order of the Chebyshev polynomial $K$, we can control the fitting capability of KAN. This also inspires our design of the Multi-order KANs to dynamically represent different frequencies.
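As a quick numerical sanity check (ours, not from the paper), the closed form of Eq. (18) agrees with the standard Chebyshev three-term recurrence on $[-1, 1]$:

```python
import numpy as np

x = np.linspace(-1, 1, 5)
T = [np.ones_like(x), x]                # T_0, T_1
for k in range(2, 6):
    T.append(2 * x * T[-1] - T[-2])     # recurrence: T_k = 2x T_{k-1} - T_{k-2}
for k in range(6):
    closed = np.cos(k * np.arccos(x))   # Eq. (18)
    assert np.allclose(T[k], closed)    # both definitions agree on [-1, 1]
```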
# B.2 FOURIER TRANSFORM

Time series are often composed of multiple frequency components superimposed on each other, and it is difficult to observe these individual frequency components directly in the time domain. Therefore, transforming a time series from the time domain to the frequency domain for analysis is often necessary. The Discrete Fourier Transform (DFT) is a commonly used domain transformation algorithm that converts a discrete-time signal from the time domain to the complex frequency domain. Mathematically, given a sequence of real numbers $x[n]$ in the time domain, where $n = 0,1,\dots ,N - 1$, the DFT can be described as:

$$
X[k] = \sum_{n = 0}^{N - 1} x[n] \cdot e^{-i \frac{2\pi}{N} k n} = \sum_{n = 0}^{N - 1} x[n] \left( \cos\left( \frac{2\pi}{N} k n \right) - i \sin\left( \frac{2\pi}{N} k n \right) \right), \quad k = 0, 1, \dots, N - 1 \tag{20}
$$

where $X[k]$ is the $k$-th frequency component of the frequency-domain signal and $i$ is the imaginary unit. Similarly, we can use the Inverse DFT (iDFT) to convert a frequency-domain signal back to the time domain:

$$
x[n] = \frac{1}{N} \sum_{k = 0}^{N - 1} X[k] \cdot e^{i \frac{2\pi}{N} k n} = \frac{1}{N} \sum_{k = 0}^{N - 1} X[k] \left( \cos\left( \frac{2\pi}{N} k n \right) + i \sin\left( \frac{2\pi}{N} k n \right) \right) \tag{21}
$$

The computational complexity of the DFT is typically $\mathcal{O}(N^2)$ (Zhou et al., 2022b). In practice, we use the Fast Fourier Transform (FFT) to efficiently compute the DFT, which reduces the computational complexity to $\mathcal{O}(N\log N)$. Additionally, by employing the Real FFT (rFFT), we can compress an input sequence of $N$ real numbers into a complex frequency-domain sequence containing $N / 2 + 1$ frequency components.
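The rFFT compression noted above is easy to verify directly (a minimal NumPy check, ours):

```python
import numpy as np

N = 96
x = np.random.randn(N)                       # real-valued time series
X = np.fft.rfft(x)                           # complex spectrum
assert len(X) == N // 2 + 1                  # N/2 + 1 frequency components
assert np.allclose(np.fft.irfft(X, n=N), x)  # lossless round trip
```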
\ No newline at end of file diff --git a/2025/TimeKAN_ KAN-based Frequency Decomposition Learning Architecture for Long-term Time Series Forecasting/images.zip b/2025/TimeKAN_ KAN-based Frequency Decomposition Learning Architecture for Long-term Time Series Forecasting/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..ea4a81013ae8694b9294a8af77bc0ab7761b0e90 --- /dev/null +++ b/2025/TimeKAN_ KAN-based Frequency Decomposition Learning Architecture for Long-term Time Series Forecasting/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ba76e8d0cfed4082f01f49bcbedc4ca9e37d41a9e2079888d455b4e18c0476a4 +size 913973 diff --git a/2025/TimeKAN_ KAN-based Frequency Decomposition Learning Architecture for Long-term Time Series Forecasting/layout.json b/2025/TimeKAN_ KAN-based Frequency Decomposition Learning Architecture for Long-term Time Series Forecasting/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..a0d7b99be2a6c8d24aff2044ad0e0d503cf9c79c --- /dev/null +++ b/2025/TimeKAN_ KAN-based Frequency Decomposition Learning Architecture for Long-term Time Series Forecasting/layout.json @@ -0,0 +1,10133 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 105, + 78, + 504, + 137 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 78, + 504, + 137 + ], + "spans": [ + { + "bbox": [ + 105, + 78, + 504, + 137 + ], + "type": "text", + "content": "TIMEKAN: KAN-BASED FREQUENCY DECOMPOSITION LEARNING ARCHITECTURE FOR LONG-TERM TIME SERIES FORECASTING" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 110, + 153, + 334, + 166 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 153, + 334, + 166 + ], + "spans": [ + { + "bbox": [ + 110, + 153, + 334, + 166 + ], + "type": "text", + "content": "Songtao Huang" + }, + { + "bbox": [ + 110, + 153, + 334, + 166 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 110, + 153, + 334, + 166 + ], + "type": "text", + "content": ", Zhen Zhao" + }, + { + "bbox": [ + 110, + 153, + 334, + 166 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 110, + 153, + 334, + 166 + ], + "type": "text", + "content": ", Can Li" + }, + { + "bbox": [ + 110, + 153, + 334, + 166 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 110, + 153, + 334, + 166 + ], + "type": "text", + "content": ", Lei Bai" + }, + { + "bbox": [ + 110, + 153, + 334, + 166 + ], + "type": "inline_equation", + "content": "^{4}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 110, + 166, + 361, + 177 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 166, + 361, + 177 + ], + "spans": [ + { + "bbox": [ + 110, + 166, + 361, + 177 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 110, + 166, + 361, + 177 + ], + "type": "text", + "content": "Shanghai Artificial Intelligence Laboratory, Shanghai, China" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 110, + 177, + 460, + 189 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 177, + 460, + 189 + ], + "spans": [ + { + "bbox": [ + 110, + 177, + 460, + 189 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 110, + 177, + 460, + 189 + ], + "type": "text", + "content": "School of Information Science and Engineering, Lanzhou University, Lanzhou, China" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 110, + 189, + 449, + 212 + ], + "type": "text", 
+ "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 189, + 449, + 212 + ], + "spans": [ + { + "bbox": [ + 110, + 189, + 449, + 212 + ], + "type": "text", + "content": "3The Key Laboratory of Road and Traffic Engineering of the Ministry of Education, Tongji University, Shanghai, China" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 110, + 212, + 376, + 223 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 212, + 376, + 223 + ], + "spans": [ + { + "bbox": [ + 110, + 212, + 376, + 223 + ], + "type": "text", + "content": "huangsongtao@pjlab.org.cn, zhen.zhao@outlook.com," + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 110, + 223, + 341, + 233 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 223, + 341, + 233 + ], + "spans": [ + { + "bbox": [ + 110, + 223, + 341, + 233 + ], + "type": "text", + "content": "lchelen1005@gmail.com, baisanshi@gmail.com" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 276, + 262, + 335, + 274 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 262, + 335, + 274 + ], + "spans": [ + { + "bbox": [ + 276, + 262, + 335, + 274 + ], + "type": "text", + "content": "ABSTRACT" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 140, + 285, + 470, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 285, + 470, + 506 + ], + "spans": [ + { + "bbox": [ + 140, + 285, + 470, + 506 + ], + "type": "text", + "content": "Real-world time series often have multiple frequency components that are intertwined with each other, making accurate time series forecasting challenging. Decomposing the mixed frequency components into multiple single frequency components is a natural choice. However, the information density of patterns varies across different frequencies, and employing a uniform modeling approach for different frequency components can lead to inaccurate characterization. To address this challenges, inspired by the flexibility of the recent Kolmogorov-Arnold Network (KAN), we propose a KAN-based Frequency Decomposition Learning architecture (TimeKAN) to address the complex forecasting challenges caused by multiple frequency mixtures. Specifically, TimeKAN mainly consists of three components: Cascaded Frequency Decomposition (CFD) blocks, Multi-order KAN Representation Learning (M-KAN) blocks and Frequency Mixing blocks. CFD blocks adopt a bottom-up cascading approach to obtain series representations for each frequency band. Benefiting from the high flexibility of KAN, we design a novel M-KAN block to learn and represent specific temporal patterns within each frequency band. Finally, Frequency Mixing blocks is used to recombine the frequency bands into the original format. Extensive experimental results across multiple real-world time series datasets demonstrate that TimeKAN achieves state-of-the-art performance as an extremely lightweight architecture. Code is available at https://github.com/huangst21/TimeKAN." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 525, + 206, + 538 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 525, + 206, + 538 + ], + "spans": [ + { + "bbox": [ + 105, + 525, + 206, + 538 + ], + "type": "text", + "content": "1 INTRODUCTION" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 550, + 504, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 550, + 504, + 617 + ], + "spans": [ + { + "bbox": [ + 104, + 550, + 504, + 617 + ], + "type": "text", + "content": "Time series forecasting (TSF) has garnered significant interest due to its wide range of applications, including finance (Huang et al., 2024), energy management (Yin et al., 2023), traffic flow planning (Jiang & Luo, 2022), and weather forecasting (Lam et al., 2023). Recently, deep learning has led to substantial advancements in TSF, with the most state-of-the-art performances achieved by CNN-based methods (Wang et al., 2023; donghao & wang xue, 2024), Transformer-based methods(Nie et al., 2023; Liu et al., 2024b) and MLP-based methods (Zeng et al., 2023; Wang et al., 2024a)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": "Due to the complex nature of the real world, observed multivariate time series are often nonstationary and exhibit diverse patterns. These intertwined patterns complicate the internal relationships within the time series, making it challenging to capture and establish connections between historical observations and future targets. To address the complex temporal patterns in time series, an increasing number of studies focus on leveraging prior knowledge to decompose time series into simpler components that provide a basis for forecasting. For instance, Autoformer (Wu et al., 2021) decomposes time series into seasonal and trend components. This idea is also adopted by DLinear (Zeng et al., 2023) and FEDFormer (Zhou et al., 2022b). Building on this foundation, TimeMixer (Wang et al., 2024a) further introduces multi-scale seasonal-trend decomposition and highlights the importance of interactions between different scales. 
Recent models like TimesNet (Wu et al.," + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 149 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 149 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 149 + ], + "type": "text", + "content": "2023), PDF (Dai et al., 2024), and SparseTSF (Lin et al., 2024) emphasize the inherent periodicity in time series and decompose long sequences into multiple shorter ones based on the period length, thereby enabling the separate modeling of inter-period and intra-period dependencies within temporal patterns. In summary, these different decomposition methods share a common goal: utilizing the simplified subsequences to provide critical information for future predictions, thereby achieving accurate forecasting." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 154, + 506, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 154, + 506, + 319 + ], + "spans": [ + { + "bbox": [ + 104, + 154, + 506, + 319 + ], + "type": "text", + "content": "It is worth noting that time series are often composed of multiple frequency components, where the low-frequency components represent long-term periodic variations and the high-frequency components capture certain abrupt events. The mixture of different frequency components makes accurate forecasting particularly challenging. The aforementioned decomposition approaches motivate us to design a frequency decomposition framework that decouples different frequency components in a time series and independently learns the temporal patterns associated with each frequency. However, this introduces another challenge: the information density of patterns varies across different frequencies, and employing a uniform modeling approach for different frequency components can lead to inaccurate characterizations, resulting in sub-optimal results. Fortunately, a new neural network architecture, known as Kolmogorov-Arnold Networks (KAN) (Liu et al., 2024c), has recently gained significant attention in the deep learning community due to its outstanding data-fitting capabilities and flexibility, showing potential as a substitute for traditional MLP. Compared to MLP, KAN offers optional kernels and allows for the adjustment of kernel order to control its fitting capacity. This consideration leads us to explore the use of Multi-order KANs to represent temporal patterns across different frequencies, thereby providing more accurate information for forecasting." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 324, + 506, + 478 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 324, + 506, + 478 + ], + "spans": [ + { + "bbox": [ + 104, + 324, + 506, + 478 + ], + "type": "text", + "content": "Motivated by these observations, we propose a KAN-based Frequency Decomposition Learning architecture (TimeKAN) to address the complex prediction challenges caused by multiple frequency mixtures. Specifically, TimeKAN first employs moving average to progressively remove relatively high-frequency components from the sequence. Subsequently, Cascaded Frequency Decomposition (CFD) blocks adopt a bottom-up cascading approach to obtain sequence representations for each frequency band. Multi-order KAN Representation Learning (M-KAN) blocks leverage the high flexibility of KAN to learn and represent specific temporal patterns within each frequency band. Finally, Frequency Mixing blocks recombine the frequency bands into the original format, ensuring that this Decomposition-Learning-Mixing process is repeatable, thereby modeling different temporal patterns at various frequencies more accurately. The final high-level sequence is then mapped to the desired forecasting output via a simple linear mapping. With our meticulously designed architecture, TimeKAN achieves state-of-the-art performance across multiple long-term time series forecasting tasks, while also being a lightweight architecture that outperforms complex TSF models with fewer computational resources." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 483, + 290, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 483, + 290, + 495 + ], + "spans": [ + { + "bbox": [ + 105, + 483, + 290, + 495 + ], + "type": "text", + "content": "Our contributions are summarized as follows:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 506, + 504, + 619 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 132, + 506, + 504, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 506, + 504, + 550 + ], + "spans": [ + { + "bbox": [ + 132, + 506, + 504, + 550 + ], + "type": "text", + "content": "- We revisit time series forecasting from the perspective of frequency decoupling, effectively disentangling time series characteristics through a frequency Decomposition-Learning-Mixing architecture to address challenges caused by complex information coupling in time series." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 557, + 504, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 557, + 504, + 590 + ], + "spans": [ + { + "bbox": [ + 132, + 557, + 504, + 590 + ], + "type": "text", + "content": "- We introduce TimeKAN as a lightweight yet effective forecasting model and design a novel M-KAN blocks to effectively modeling and representing patterns at different frequencies by maximizing the flexibility of KAN." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 597, + 504, + 619 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 597, + 504, + 619 + ], + "spans": [ + { + "bbox": [ + 132, + 597, + 504, + 619 + ], + "type": "text", + "content": "- TimeKAN demonstrates superior performance across multiple TSF prediction tasks, while having a parameter count significantly lower than that of state-of-the-art TSF models." 
+ } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 639, + 212, + 651 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 639, + 212, + 651 + ], + "spans": [ + { + "bbox": [ + 105, + 639, + 212, + 651 + ], + "type": "text", + "content": "2 RELATED WORK" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 666, + 282, + 677 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 666, + 282, + 677 + ], + "spans": [ + { + "bbox": [ + 105, + 666, + 282, + 677 + ], + "type": "text", + "content": "2.1 KOLMOGOROV-ARNOLD NETWORK" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 687, + 504, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 687, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 687, + 504, + 732 + ], + "type": "text", + "content": "Kolmogorov-Arnold representation theorem states that any multivariate continuous function can be expressed as a combination of univariate functions and addition operations. Kolmogorov-Arnold Network (KAN) (Liu et al., 2024c) leverages this theorem to propose an innovative alternative to traditional MLP. Unlike MLP, which use fixed activation functions at the nodes, KAN introduces" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "content": "learnable activation functions along the edges. Due to the flexibility and adaptability, KAN is considered as a promising alternative to MLP." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 110, + 506, + 178 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 110, + 506, + 178 + ], + "spans": [ + { + "bbox": [ + 104, + 110, + 506, + 178 + ], + "type": "text", + "content": "The original KAN was parameterized using spline functions. However, due to the inherent complexity of spline functions, the speed and scalability of the original KAN were not satisfactory. Consequently, subsequent research explored the use of simpler basis functions to replace splines, thereby achieving higher efficiency. ChebyshevKAN (SS, 2024) incorporates Chebyshev polynomials to parametrize the learnable functions. FastKAN (Li, 2024) uses faster Gaussian radial basis functions to approximate third-order B-spline functions." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 182, + 506, + 272 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 182, + 506, + 272 + ], + "spans": [ + { + "bbox": [ + 104, + 182, + 506, + 272 + ], + "type": "text", + "content": "Moreover, KAN has been applied as alternatives to MLP in various domains. 
Convolutional KAN (Bodner et al., 2024) replaces the linear weight matrices in traditional convolutional networks with learnable spline function matrices. U-KAN (Li et al., 2024) integrates KAN layers into the U-Net architecture, demonstrating impressive accuracy and efficiency in several medical image segmentation tasks. KAN has also been used to bridge the gap between AI and science. Works such as PIKAN (Shukla et al., 2024) and PINN (Wang et al., 2024b) utilize KAN to build physics-informed machine learning models. This paper aims to introduce KAN into TSF and demonstrate the strong potential of KAN in representing time series data." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 286, + 253, + 298 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 286, + 253, + 298 + ], + "spans": [ + { + "bbox": [ + 105, + 286, + 253, + 298 + ], + "type": "text", + "content": "2.2 TIME SERIES FORECASTING" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 308, + 506, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 308, + 506, + 617 + ], + "spans": [ + { + "bbox": [ + 104, + 308, + 506, + 617 + ], + "type": "text", + "content": "Traditional time series forecasting (TSF) methods, such as ARIMA (Zhang, 2003), can provide sufficient interpretability for the forecasting results but often fail to achieve satisfactory accuracy. In recent years, deep learning methods have dominated the field of TSF, mainly including CNN-based, Transformer-based, and MLP-based approaches. CNN-based models primarily apply convolution operations along the temporal dimension to extract temporal patterns. For example, MICN (Wang et al., 2023) and TimesNet (Wu et al., 2023) enhance the precision of sequence modeling by adjusting the receptive field to capture both short-term and long-term views within the sequences. ModernTCN (donghao & wang xue, 2024) advocates using large convolution kernels along the temporal dimension and capture both cross-time and cross-variable dependencies. Compared to CNN-based methods, which have limited receptive field, Transformer-based methods offer global modeling capabilities, making them more suitable for handling long and complex sequence data. They have become the cornerstone of modern time series forecasting. Informer (Zhou et al., 2021) is one of the early implementations of Transformer models in TSF, making efficient forecasting possible by carefully modifying the internal Transformer architecture. PatchTST (Nie et al., 2023) divides the sequence into multiple patches along the temporal dimension, which are then fed into the Transformer, establishing it as an important benchmark in the time series domain. In contrast, iTransformer (Liu et al., 2024b) treats each variable as an independent token to capture cross-variable dependencies in multivariate time series. However, Transformer-based methods face challenges due to the large number of parameters and high memory consumption. Recent research on MLP-based methods has shown that with appropriately designed architectures leveraging prior knowledge, simple MLPs can outperform complex Transformer-based methods. DLinear (Zeng et al., 2023), for instance, preprocesses sequences using a trend-season decomposition strategy. FITS (Xu et al., 2024b) performs linear transformations in the frequency domain, while TimeMixer (Wang et al., 2024a) uses MLP to facilitate information interaction at different scales. 
These MLP-based methods have demonstrated strong performance regarding both forecasting accuracy and efficiency. Unlike the aforementioned methods, this paper introduces the novel KAN to TSF to represent time series data more accurately. It also proposes a well-designed Decomposition-Learning-Mixing architecture to fully unlock the potential of KAN for time series forecasting." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 633, + 264, + 643 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 633, + 264, + 643 + ], + "spans": [ + { + "bbox": [ + 105, + 633, + 264, + 643 + ], + "type": "text", + "content": "2.3 TIME SERIES DECOMPOSITION" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 654, + 506, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 654, + 506, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 654, + 506, + 734 + ], + "type": "text", + "content": "Real-world time series often consist of various underlying patterns. To leverage the characteristics of different patterns, recent approaches tend to decompose the series into multiple subcomponents, including trend-seasonal decomposition, multi-scale decomposition, and multi-period decomposition. DLinear (Zeng et al., 2023) employs moving averages to decouple the seasonal and trend components. SCINet (Liu et al., 2022) uses a hierarchical downsampling tree to iteratively extract and exchange information at multiple temporal resolutions. TimeMixer (Wang et al., 2024a) follows a fine-to-coarse principle to decompose the sequence into multiple scales across different" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 109, + 83, + 504, + 208 + ], + "blocks": [ + { + "bbox": [ + 109, + 83, + 504, + 208 + ], + "lines": [ + { + "bbox": [ + 109, + 83, + 504, + 208 + ], + "spans": [ + { + "bbox": [ + 109, + 83, + 504, + 208 + ], + "type": "image", + "image_path": "60360e90e37b535a3c68ba4c2afa0235d4eda70752a48627a4173e1bc04fa0df.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 229, + 504, + 264 + ], + "lines": [ + { + "bbox": [ + 104, + 229, + 504, + 264 + ], + "spans": [ + { + "bbox": [ + 104, + 229, + 504, + 264 + ], + "type": "text", + "content": "Figure 1: The architecture of TimeKAN, which mainly consists of Cascaded Frequency Decomposition block, Multi-order KAN Representation Learning block, and Frequency Mixing block. Here, we divide the frequency range of the time series into three frequency bands as an example." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 285, + 504, + 340 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 285, + 504, + 340 + ], + "spans": [ + { + "bbox": [ + 104, + 285, + 504, + 340 + ], + "type": "text", + "content": "time spans and further splits each scale into seasonal and periodic components. TimesNet (Wu et al., 2023) and PDF (Dai et al., 2024) utilize Fourier periodic analysis to decouple sequence into multiple sub-period sequences based on the calculated period. Inspired by these works, this paper proposes a novel Decomposition-Learning-Mixing architecture, which examines time series from a multi-frequency perspective to accurately model the complex patterns within time series." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 357, + 181, + 369 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 357, + 181, + 369 + ], + "spans": [ + { + "bbox": [ + 105, + 357, + 181, + 369 + ], + "type": "text", + "content": "3 TIMEKAN" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 382, + 244, + 392 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 382, + 244, + 392 + ], + "spans": [ + { + "bbox": [ + 105, + 382, + 244, + 392 + ], + "type": "text", + "content": "3.1 OVERALL ARCHITECTURE" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 403, + 506, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 403, + 506, + 579 + ], + "spans": [ + { + "bbox": [ + 104, + 403, + 506, + 579 + ], + "type": "text", + "content": "Given a historical multivariate time series input " + }, + { + "bbox": [ + 104, + 403, + 506, + 579 + ], + "type": "inline_equation", + "content": "\\mathbf{X} \\in \\mathbb{R}^{N \\times T}" + }, + { + "bbox": [ + 104, + 403, + 506, + 579 + ], + "type": "text", + "content": ", the aim of time series forecasting is to predict the future output series " + }, + { + "bbox": [ + 104, + 403, + 506, + 579 + ], + "type": "inline_equation", + "content": "\\mathbf{X}_O \\in \\mathbb{R}^{N \\times F}" + }, + { + "bbox": [ + 104, + 403, + 506, + 579 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 403, + 506, + 579 + ], + "type": "inline_equation", + "content": "T, F" + }, + { + "bbox": [ + 104, + 403, + 506, + 579 + ], + "type": "text", + "content": " is the look-back window length and the future window length, and " + }, + { + "bbox": [ + 104, + 403, + 506, + 579 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 403, + 506, + 579 + ], + "type": "text", + "content": " represents the number of variates. In this paper, we propose TimeKAN to tackle the challenges arising from the complex mixture of multi-frequency components in time series. The overall architecture of TimeKAN is shown in Figure 1. We adopt variate-independent manner (Nie et al., 2023) to predict each univariate series independently. Each univariate input time series is denoted as " + }, + { + "bbox": [ + 104, + 403, + 506, + 579 + ], + "type": "inline_equation", + "content": "X \\in \\mathbb{R}^T" + }, + { + "bbox": [ + 104, + 403, + 506, + 579 + ], + "type": "text", + "content": " and we consider univariate time series as the instance in the following calculation. 
In our TimeKAN, the first step is to progressively remove the relatively high-frequency components using moving averages and generate multi-level sequences followed by projecting each sequence into a high-dimensional space. Next, adhering to the Decomposition-Learning-Mixing architecture design principle, we first design Cascaded Frequency Decomposition (CFD) blocks to obtain sequence representations for each frequency band, adopting a bottom-up cascading approach. Then, we propose Multi-order KAN Representation Learning (M-KAN) blocks to learn and represent specific temporal patterns within each frequency band. Finally, Frequency Mixing blocks recombine the frequency bands into the original format, ensuring that the Decomposition-Learning-Mixing process is repeatable. More details about our TimeKAN are described as follow." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 593, + 321, + 604 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 593, + 321, + 604 + ], + "spans": [ + { + "bbox": [ + 105, + 593, + 321, + 604 + ], + "type": "text", + "content": "3.2 HIERARCHICAL SEQUENCE PREPROCESSING" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 613, + 504, + 672 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 613, + 504, + 672 + ], + "spans": [ + { + "bbox": [ + 104, + 613, + 504, + 672 + ], + "type": "text", + "content": "Assume that we divide the frequency range of raw time series " + }, + { + "bbox": [ + 104, + 613, + 504, + 672 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 104, + 613, + 504, + 672 + ], + "type": "text", + "content": " into predefined " + }, + { + "bbox": [ + 104, + 613, + 504, + 672 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 613, + 504, + 672 + ], + "type": "text", + "content": " frequency bands. We first use moving average to progressively remove the relatively high-frequency components and generate multi-level sequences " + }, + { + "bbox": [ + 104, + 613, + 504, + 672 + ], + "type": "inline_equation", + "content": "\\{x_{1},\\dots ,x_{k}\\}" + }, + { + "bbox": [ + 104, + 613, + 504, + 672 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 613, + 504, + 672 + ], + "type": "inline_equation", + "content": "x_{i}\\in \\mathbb{R}^{\\frac{T}{d^{i - 1}}}\\left(i\\in \\{1,\\dots ,k\\}\\right)" + }, + { + "bbox": [ + 104, + 613, + 504, + 672 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 104, + 613, + 504, + 672 + ], + "type": "inline_equation", + "content": "x_{1}" + }, + { + "bbox": [ + 104, + 613, + 504, + 672 + ], + "type": "text", + "content": " is equal to the input series " + }, + { + "bbox": [ + 104, + 613, + 504, + 672 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 104, + 613, + 504, + 672 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 613, + 504, + 672 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 613, + 504, + 672 + ], + "type": "text", + "content": " denotes the length of moving average window. 
The process of producing multi-level sequences is as follows:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 237, + 679, + 504, + 692 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 237, + 679, + 504, + 692 + ], + "spans": [ + { + "bbox": [ + 237, + 679, + 504, + 692 + ], + "type": "interline_equation", + "content": "x _ {i} = \\operatorname {A v g P o o l} (\\text {P a d d i n g} (x _ {i - 1})) \\tag {1}", + "image_path": "714921d2060bf021d0415d54bb98b0ccd14f745eed5b3ee75c531fc6f1b305f0.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 697, + 504, + 719 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 697, + 504, + 719 + ], + "spans": [ + { + "bbox": [ + 104, + 697, + 504, + 719 + ], + "type": "text", + "content": "After obtaining the multi-level sequences, each sequence is independently embedded into a higher dimension through a Linear layer:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 269, + 720, + 504, + 733 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 269, + 720, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 269, + 720, + 504, + 733 + ], + "type": "interline_equation", + "content": "x _ {i} = \\operatorname {L i n e a r} \\left(x _ {i}\\right) \\tag {2}", + "image_path": "2ae933d441f7c8f746186e116f687df1eca31e143bd79e039734993183e128ae.jpg" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 81, + 504, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 81, + 504, + 128 + ], + "spans": [ + { + "bbox": [ + 104, + 81, + 504, + 128 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 81, + 504, + 128 + ], + "type": "inline_equation", + "content": "x_{i} \\in \\mathbb{R}_{d^{i - 1}}^{\\frac{T}{T - 1} \\times D}" + }, + { + "bbox": [ + 104, + 81, + 504, + 128 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 81, + 504, + 128 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 81, + 504, + 128 + ], + "type": "text", + "content": " is embedding dimension. We define " + }, + { + "bbox": [ + 104, + 81, + 504, + 128 + ], + "type": "inline_equation", + "content": "x_{1}" + }, + { + "bbox": [ + 104, + 81, + 504, + 128 + ], + "type": "text", + "content": " as the highest level sequence and " + }, + { + "bbox": [ + 104, + 81, + 504, + 128 + ], + "type": "inline_equation", + "content": "x_{k}" + }, + { + "bbox": [ + 104, + 81, + 504, + 128 + ], + "type": "text", + "content": " as the lowest level sequence. Notably, each lower-level sequence is derived from the sequence one level higher by removing a portion of the high-frequency information. The above process is a preprocessing process and only occurs once in TimeKAN." 
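To make the preprocessing concrete, here is a minimal PyTorch sketch of Eqs. (1)-(2). The replicate padding mode, the pointwise Linear(1, D) embedding, and the defaults k=3, d=2 are assumptions for illustration rather than details taken from the paper (the authors' implementation is at https://github.com/huangst21/TimeKAN).

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

def hierarchical_preprocess(x, k=3, d=2, embed_dim=16):
    """Sketch of Eqs. (1)-(2) for a univariate series x of shape (B, T):
    build k levels via padded moving averages (level i has length T/d^(i-1)),
    then embed each level pointwise to embed_dim with its own Linear layer.
    Padding mode and the Linear(1, D) embedding are assumptions."""
    levels = [x]
    for _ in range(k - 1):
        prev = F.pad(levels[-1].unsqueeze(1), (d - 1, 0), mode="replicate")
        levels.append(F.avg_pool1d(prev, kernel_size=d, stride=d).squeeze(1))
    # fresh Linear modules here purely for illustration; a real model
    # would register them once in __init__
    embeds = [nn.Linear(1, embed_dim) for _ in levels]
    return [emb(lvl.unsqueeze(-1)) for emb, lvl in zip(embeds, levels)]

xs = hierarchical_preprocess(torch.randn(8, 96))  # lengths 96, 48, 24
```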
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 141, + 312, + 152 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 141, + 312, + 152 + ], + "spans": [ + { + "bbox": [ + 105, + 141, + 312, + 152 + ], + "type": "text", + "content": "3.3 CASCADED FREQUENCY DECOMPOSITION" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 162, + 504, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 162, + 504, + 240 + ], + "spans": [ + { + "bbox": [ + 104, + 162, + 504, + 240 + ], + "type": "text", + "content": "Real-world time series are often composed of multiple frequency components, with the low-frequency component representing long-term changes in the time series and the high-frequency component representing short-term fluctuations or unexpected events. These different frequency components complement each other and provide a comprehensive perspective for accurately modeling time series. Therefore, we design the Cascaded Frequency Decomposition (CFD) block to accurately decompose each frequency component in a cascade way, thus laying the foundation for accurately modeling different frequency components." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 244, + 504, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 244, + 504, + 342 + ], + "spans": [ + { + "bbox": [ + 104, + 244, + 504, + 342 + ], + "type": "text", + "content": "The aim of CFD block is to obtain the representation of each frequency component. Here, we take obtaining the representation of the " + }, + { + "bbox": [ + 104, + 244, + 504, + 342 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 244, + 504, + 342 + ], + "type": "text", + "content": "-th frequency band as an example. To achieve it, we first employ the Fast Fourier Transform (FFT) to obtain the representation of " + }, + { + "bbox": [ + 104, + 244, + 504, + 342 + ], + "type": "inline_equation", + "content": "x_{i+1}" + }, + { + "bbox": [ + 104, + 244, + 504, + 342 + ], + "type": "text", + "content": " in the frequency domain. Then, Zero-Padding is used to extend the length of the frequency domain sequence, so that it can have the same length as the upper sequence " + }, + { + "bbox": [ + 104, + 244, + 504, + 342 + ], + "type": "inline_equation", + "content": "x_i" + }, + { + "bbox": [ + 104, + 244, + 504, + 342 + ], + "type": "text", + "content": " after transforming back to the time domain. Next, we use Inverse Fast Fourier Transform (IFFT) to transform it back into the time domain. We refer to this upsampling process as Frequency Upsampling, which ensures that the frequency information remains unchanged before and after the upsampling. 
The process of Frequency Upsampling can be described as:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 230, + 343, + 504, + 357 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 343, + 504, + 357 + ], + "spans": [ + { + "bbox": [ + 230, + 343, + 504, + 357 + ], + "type": "interline_equation", + "content": "\\hat {x} _ {i} = \\operatorname {I F F T} (\\text {P a d d i n g} (\\operatorname {F F T} (x _ {i + 1}))) \\tag {3}", + "image_path": "278aef037dd8a022bebbffe0ca04c90f39d3320d54cc74739c2a7400eaccd991.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 359, + 504, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 359, + 504, + 426 + ], + "spans": [ + { + "bbox": [ + 104, + 359, + 504, + 426 + ], + "type": "text", + "content": "Here, " + }, + { + "bbox": [ + 104, + 359, + 504, + 426 + ], + "type": "inline_equation", + "content": "\\hat{x}_i" + }, + { + "bbox": [ + 104, + 359, + 504, + 426 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 359, + 504, + 426 + ], + "type": "inline_equation", + "content": "x_i" + }, + { + "bbox": [ + 104, + 359, + 504, + 426 + ], + "type": "text", + "content": " have the same sequence length. Notably, compared to " + }, + { + "bbox": [ + 104, + 359, + 504, + 426 + ], + "type": "inline_equation", + "content": "x_i" + }, + { + "bbox": [ + 104, + 359, + 504, + 426 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 359, + 504, + 426 + ], + "type": "inline_equation", + "content": "\\hat{x}_i" + }, + { + "bbox": [ + 104, + 359, + 504, + 426 + ], + "type": "text", + "content": " lacks the " + }, + { + "bbox": [ + 104, + 359, + 504, + 426 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 359, + 504, + 426 + ], + "type": "text", + "content": "-th frequency component. The reason is that " + }, + { + "bbox": [ + 104, + 359, + 504, + 426 + ], + "type": "inline_equation", + "content": "x_{i+1}" + }, + { + "bbox": [ + 104, + 359, + 504, + 426 + ], + "type": "text", + "content": " is originally formed by removing " + }, + { + "bbox": [ + 104, + 359, + 504, + 426 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 359, + 504, + 426 + ], + "type": "text", + "content": "-th frequency component from " + }, + { + "bbox": [ + 104, + 359, + 504, + 426 + ], + "type": "inline_equation", + "content": "x_i" + }, + { + "bbox": [ + 104, + 359, + 504, + 426 + ], + "type": "text", + "content": " in the hierarchical sequence preprocessing and " + }, + { + "bbox": [ + 104, + 359, + 504, + 426 + ], + "type": "inline_equation", + "content": "x_{i+1}" + }, + { + "bbox": [ + 104, + 359, + 504, + 426 + ], + "type": "text", + "content": " is now transformed into " + }, + { + "bbox": [ + 104, + 359, + 504, + 426 + ], + "type": "inline_equation", + "content": "\\hat{x}_i" + }, + { + "bbox": [ + 104, + 359, + 504, + 426 + ], + "type": "text", + "content": " through a lossless frequency conversion process, thereby aligning length with " + }, + { + "bbox": [ + 104, + 359, + 504, + 426 + ], + "type": "inline_equation", + "content": "x_i" + }, + { + "bbox": [ + 104, + 359, + 504, + 426 + ], + "type": "text", + "content": " in the time domain. 
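A minimal sketch of the Frequency Upsampling in Eq. (3), assuming a real FFT along the time axis; the explicit amplitude rescaling compensates for PyTorch's default 1/n inverse-FFT normalization, and the authors' scaling convention may differ.

```python
import torch

def frequency_upsample(x, target_len):
    """Eq. (3): IFFT(Padding(FFT(x))) over the time axis.
    x: (B, L, D) with L <= target_len; returns (B, target_len, D)."""
    B, L, D = x.shape
    spec = torch.fft.rfft(x, dim=1)                      # (B, L//2+1, D)
    pad = torch.zeros(B, target_len // 2 + 1 - spec.shape[1], D,
                      dtype=spec.dtype, device=spec.device)
    spec = torch.cat([spec, pad], dim=1)                 # zero-pad spectrum
    # rescale so amplitudes survive the length change (normalization
    # convention is an assumption, not stated in the paper)
    return torch.fft.irfft(spec, n=target_len, dim=1) * (target_len / L)
```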
Therefore, to get the series representation of the " + }, + { + "bbox": [ + 104, + 359, + 504, + 426 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 359, + 504, + 426 + ], + "type": "text", + "content": "-th frequency component " + }, + { + "bbox": [ + 104, + 359, + 504, + 426 + ], + "type": "inline_equation", + "content": "f_i" + }, + { + "bbox": [ + 104, + 359, + 504, + 426 + ], + "type": "text", + "content": " in time domain, we only need to get the residuals between " + }, + { + "bbox": [ + 104, + 359, + 504, + 426 + ], + "type": "inline_equation", + "content": "x_i" + }, + { + "bbox": [ + 104, + 359, + 504, + 426 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 359, + 504, + 426 + ], + "type": "inline_equation", + "content": "\\hat{x}_i" + }, + { + "bbox": [ + 104, + 359, + 504, + 426 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 277, + 432, + 504, + 444 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 277, + 432, + 504, + 444 + ], + "spans": [ + { + "bbox": [ + 277, + 432, + 504, + 444 + ], + "type": "interline_equation", + "content": "f _ {i} = x _ {i} - \\hat {x} _ {i} \\tag {4}", + "image_path": "dc1dae0fde3df24a1cbc99dddf2b7332b2ce532cd2bfbf70444c66b77b0762fa.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 457, + 348, + 469 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 457, + 348, + 469 + ], + "spans": [ + { + "bbox": [ + 105, + 457, + 348, + 469 + ], + "type": "text", + "content": "3.4 MULTI-ORDER KAN REPRESENTATION LEARNING" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 478, + 504, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 478, + 504, + 556 + ], + "spans": [ + { + "bbox": [ + 104, + 478, + 504, + 556 + ], + "type": "text", + "content": "Given the multi-level frequency component representation " + }, + { + "bbox": [ + 104, + 478, + 504, + 556 + ], + "type": "inline_equation", + "content": "\\{f_1, \\dots, f_k\\}" + }, + { + "bbox": [ + 104, + 478, + 504, + 556 + ], + "type": "text", + "content": " generated by the CFD block, we propose Multi-order KAN Representation Learning (M-KAN) blocks to learn specific representations and temporal dependencies at each frequency. M-KAN adopts a dual-branch parallel architecture to separately model temporal representation learning and temporal dependency learning in a frequency-specific way, using Multi-order KANs to learn the representation of each frequency component and employing Depthwise Convolution to capture the temporal dependency. The details of Depthwise Convolution and Multi-order KAN will be given as follows." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 568, + 504, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 568, + 504, + 635 + ], + "spans": [ + { + "bbox": [ + 104, + 568, + 504, + 635 + ], + "type": "text", + "content": "Depthwise Convolution To separate the modeling of temporal dependency from learning sequence representation, we adopt a specific type of group convolution known as Depthwise Convolution, in which the number of groups matches the embedding dimension. 
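Reusing the frequency_upsample helper sketched above, the residual extraction of Eq. (4) reduces to a short loop; treating the lowest level x_k itself as its band representation f_k is an assumption for the boundary case.

```python
def cascaded_frequency_decomposition(xs):
    """Sketch of Eq. (4): f_i = x_i - x_hat_i for each frequency band.
    xs: [x_1, ..., x_k], x_i of shape (B, L_i, D) with L_{i+1} = L_i // d."""
    fs = []
    for i in range(len(xs) - 1):
        x_hat = frequency_upsample(xs[i + 1], xs[i].shape[1])  # Eq. (3)
        fs.append(xs[i] - x_hat)                               # Eq. (4)
    fs.append(xs[-1])   # lowest band kept as-is (boundary assumption)
    return fs
```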
Depthwise Convolution employs " + }, + { + "bbox": [ + 104, + 568, + 504, + 635 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 568, + 504, + 635 + ], + "type": "text", + "content": " groups of convolution kernels to perform independent convolution operations on the series of each channel. This allows the model to focus on capturing temporal patterns without interference from inter channel relationships. The process of Depthwise Convolution is:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 232, + 641, + 504, + 654 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 232, + 641, + 504, + 654 + ], + "spans": [ + { + "bbox": [ + 232, + 641, + 504, + 654 + ], + "type": "interline_equation", + "content": "f _ {i, 1} = \\operatorname {C o n v} _ {D \\rightarrow D} \\left(f _ {i}, \\text {g r o u p} = D\\right) \\tag {5}", + "image_path": "810592c8549b8d265bb7c17a83b633d9213a39065f09cbb02a8863bff66a883f.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 665, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 665, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 665, + 505, + 733 + ], + "type": "text", + "content": "Multi-order KANs Compared with traditional MLP, KAN replaces linear weights with learnable univariate functions, allowing complex nonlinear relationships to be modeled with fewer parameters and greater interpretability. (Xu et al., 2024a). Assume that KAN is composed of " + }, + { + "bbox": [ + 104, + 665, + 505, + 733 + ], + "type": "inline_equation", + "content": "L + 1" + }, + { + "bbox": [ + 104, + 665, + 505, + 733 + ], + "type": "text", + "content": " layer neurons and the number of neurons in layer " + }, + { + "bbox": [ + 104, + 665, + 505, + 733 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 104, + 665, + 505, + 733 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 104, + 665, + 505, + 733 + ], + "type": "inline_equation", + "content": "n_{l}" + }, + { + "bbox": [ + 104, + 665, + 505, + 733 + ], + "type": "text", + "content": ". 
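Eq. (5) above is a standard depthwise convolution; a minimal sketch with an assumed kernel size of 3 (the paper does not fix this hyperparameter here):

```python
import torch
import torch.nn as nn

# Depthwise 1-D convolution (groups = D): one independent kernel per
# embedding channel along time, so temporal patterns are captured
# without mixing channels.
D = 16
dwconv = nn.Conv1d(in_channels=D, out_channels=D, kernel_size=3,
                   padding=1, groups=D)

f_i = torch.randn(8, 96, D)                          # (B, L, D)
f_i1 = dwconv(f_i.transpose(1, 2)).transpose(1, 2)   # (B, L, D), Eq. (5)
```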
The transmission relationship between the " + }, + { + "bbox": [ + 104, + 665, + 505, + 733 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 104, + 665, + 505, + 733 + ], + "type": "text", + "content": "-th neuron in layer " + }, + { + "bbox": [ + 104, + 665, + 505, + 733 + ], + "type": "inline_equation", + "content": "l + 1" + }, + { + "bbox": [ + 104, + 665, + 505, + 733 + ], + "type": "text", + "content": " and all neurons in layer " + }, + { + "bbox": [ + 104, + 665, + 505, + 733 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 104, + 665, + 505, + 733 + ], + "type": "text", + "content": " can be expressed as " + }, + { + "bbox": [ + 104, + 665, + 505, + 733 + ], + "type": "inline_equation", + "content": "z_{l + 1,j} = \\sum_{i = 1}^{n_l}\\phi_{l,j,i}(z_{l,i})" + }, + { + "bbox": [ + 104, + 665, + 505, + 733 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 665, + 505, + 733 + ], + "type": "inline_equation", + "content": "z_{l + 1,j}" + }, + { + "bbox": [ + 104, + 665, + 505, + 733 + ], + "type": "text", + "content": " is the " + }, + { + "bbox": [ + 104, + 665, + 505, + 733 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 104, + 665, + 505, + 733 + ], + "type": "text", + "content": "-th neuron at layer " + }, + { + "bbox": [ + 104, + 665, + 505, + 733 + ], + "type": "inline_equation", + "content": "l + 1" + }, + { + "bbox": [ + 104, + 665, + 505, + 733 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 665, + 505, + 733 + ], + "type": "inline_equation", + "content": "z_{l,i}" + }, + { + "bbox": [ + 104, + 665, + 505, + 733 + ], + "type": "text", + "content": " is the " + }, + { + "bbox": [ + 104, + 665, + 505, + 733 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 665, + 505, + 733 + ], + "type": "text", + "content": "-th neuron at layer " + }, + { + "bbox": [ + 104, + 665, + 505, + 733 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 104, + 665, + 505, + 733 + ], + "type": "text", + "content": ". We can simply understand" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 83, + 506, + 161 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 83, + 506, + 161 + ], + "spans": [ + { + "bbox": [ + 104, + 83, + 506, + 161 + ], + "type": "text", + "content": "that each neuron is connected to other neurons in the previous layer through a learnable univariate function " + }, + { + "bbox": [ + 104, + 83, + 506, + 161 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 104, + 83, + 506, + 161 + ], + "type": "text", + "content": ". 
The vanilla KAN (Liu et al., 2024c) employs spline function as the learnable univariate basic functions " + }, + { + "bbox": [ + 104, + 83, + 506, + 161 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 104, + 83, + 506, + 161 + ], + "type": "text", + "content": ", but suffering from the complex recursive computation process, which hinders the efficiency of KAN. Here, we adopt ChebyshevKAN (SS, 2024) to learn the representation of each frequency component, i.e., channel learning. ChebyshevKAN is constructed from linear combinations of Chebyshev polynomial. That is, using the linear combination of Chebyshev polynomial with different order to generate learnable univariate function " + }, + { + "bbox": [ + 104, + 83, + 506, + 161 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 104, + 83, + 506, + 161 + ], + "type": "text", + "content": ". The Chebyshev polynomial is defined by:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 249, + 167, + 504, + 180 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 249, + 167, + 504, + 180 + ], + "spans": [ + { + "bbox": [ + 249, + 167, + 504, + 180 + ], + "type": "interline_equation", + "content": "T _ {n} (x) = \\cos (n \\operatorname {a r c c o s} (x)) \\tag {6}", + "image_path": "f1bf0e93d902105359201956cedbecdd4578aef80e615d6d2fff474fa07daf93.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 186, + 504, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 186, + 504, + 220 + ], + "spans": [ + { + "bbox": [ + 104, + 186, + 504, + 220 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 186, + 504, + 220 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 186, + 504, + 220 + ], + "type": "text", + "content": " is the highest order of Chebyshev polynomials and the complexity of Chebyshev polynomials is increasing with increasing order. 
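In practice the Chebyshev polynomials of Eq. (6) are usually evaluated with the three-term recurrence rather than the cos/arccos form; a small sketch that checks the two against each other:

```python
import torch

def chebyshev(x, n):
    """T_0..T_n on x in [-1, 1] via T_m = 2x*T_{m-1} - T_{m-2},
    equivalent to Eq. (6): T_n(x) = cos(n * arccos(x))."""
    polys = [torch.ones_like(x), x]
    for _ in range(2, n + 1):
        polys.append(2 * x * polys[-1] - polys[-2])
    return torch.stack(polys[: n + 1], dim=-1)   # (..., n+1)

x = torch.rand(4) * 2 - 1
assert torch.allclose(chebyshev(x, 3)[..., 3],
                      torch.cos(3 * torch.arccos(x)), atol=1e-5)
```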
A 1-layer ChebyshevKAN applied to channel dimension can be expressed as:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 229, + 221, + 505, + 255 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 221, + 505, + 255 + ], + "spans": [ + { + "bbox": [ + 229, + 221, + 505, + 255 + ], + "type": "interline_equation", + "content": "\\phi_ {o} (x) = \\sum_ {j = 1} ^ {D} \\sum_ {i = 0} ^ {n} \\Theta_ {o, j, i} T _ {i} (\\tanh (x _ {j})) \\tag {7}", + "image_path": "fe5ea9b2128a91718d9dc673f8a30a40e16ba407e1573cd621bee1e25ddc2a03.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 252, + 269, + 505, + 303 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 252, + 269, + 505, + 303 + ], + "spans": [ + { + "bbox": [ + 252, + 269, + 505, + 303 + ], + "type": "interline_equation", + "content": "\\operatorname {K A N} (x) = \\left\\{ \\begin{array}{c} \\phi_ {1} (x) \\\\ \\dots \\\\ \\phi_ {D} (x) \\end{array} \\right\\} \\tag {8}", + "image_path": "f08feb65e588a30b53ef71e2d28233d3d4b53dffcaf8d44c0d4a8e27bbf83cd6.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 315, + 504, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 315, + 504, + 437 + ], + "spans": [ + { + "bbox": [ + 104, + 315, + 504, + 437 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 315, + 504, + 437 + ], + "type": "inline_equation", + "content": "o" + }, + { + "bbox": [ + 104, + 315, + 504, + 437 + ], + "type": "text", + "content": " is the index of output neuron and " + }, + { + "bbox": [ + 104, + 315, + 504, + 437 + ], + "type": "inline_equation", + "content": "\\Theta \\in \\mathbb{R}^{D\\times D\\times (n + 1)}" + }, + { + "bbox": [ + 104, + 315, + 504, + 437 + ], + "type": "text", + "content": " are the learnable coefficients used to linearly combine the Chebyshev polynomials. It is worth noting that the frequency components within the time series exhibit increasingly complex temporal dynamics as the frequency increases, necessitating a network with stronger representation capabilities to learn these characteristics. ChebyshevKAN allows for the adjustment of the highest order of Chebyshev polynomials " + }, + { + "bbox": [ + 104, + 315, + 504, + 437 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 315, + 504, + 437 + ], + "type": "text", + "content": " to enhance its representation ability. Therefore, from the low-frequency to high-frequency components, we adopt an increasing order of Chebyshev polynomials to align the frequency components with the complexity of the KAN, thereby accurately learning the representations of different frequency components. We refer to this group of KANs with varying highest Chebyshev polynomials orders as Multi-order KANs. 
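A minimal sketch of the one-layer ChebyshevKAN of Eqs. (7)-(8), reusing the chebyshev helper above; the random initialization scale is an arbitrary choice, not the paper's.

```python
import torch
import torch.nn as nn

class ChebyKANLayer(nn.Module):
    """Eqs. (7)-(8): Theta has shape (D_out, D_in, n+1); each output o is
    a linear combination of T_i(tanh(x_j)) over input channels j and
    orders i."""
    def __init__(self, dim, order):
        super().__init__()
        self.order = order
        self.theta = nn.Parameter(
            torch.randn(dim, dim, order + 1) / (dim * (order + 1)))

    def forward(self, x):                              # x: (..., D)
        t = chebyshev(torch.tanh(x), self.order)       # (..., D, n+1)
        return torch.einsum("...ji,oji->...o", t, self.theta)

y = ChebyKANLayer(16, order=4)(torch.randn(8, 96, 16))  # (8, 96, 16)
```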
We set an lower bound order " + }, + { + "bbox": [ + 104, + 315, + 504, + 437 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 104, + 315, + 504, + 437 + ], + "type": "text", + "content": ", and the representation learning process for " + }, + { + "bbox": [ + 104, + 315, + 504, + 437 + ], + "type": "inline_equation", + "content": "x_{i}" + }, + { + "bbox": [ + 104, + 315, + 504, + 437 + ], + "type": "text", + "content": " can be expressed as:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 230, + 444, + 504, + 456 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 444, + 504, + 456 + ], + "spans": [ + { + "bbox": [ + 230, + 444, + 504, + 456 + ], + "type": "interline_equation", + "content": "f _ {i, 2} = \\mathrm {K A N} \\left(f _ {i}, \\text {o r d e r} = b + k - i\\right) \\tag {9}", + "image_path": "1dec1e8dc7dc11e2a9b589fb490433a18c89fa968c503b76b1c3e7d365bbb391.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 469, + 504, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 469, + 504, + 491 + ], + "spans": [ + { + "bbox": [ + 104, + 469, + 504, + 491 + ], + "type": "text", + "content": "The final output of the M-KAN block is the sum of the outputs from the Multi-order KANs and the Depthwise Convolution." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 272, + 493, + 504, + 507 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 272, + 493, + 504, + 507 + ], + "spans": [ + { + "bbox": [ + 272, + 493, + 504, + 507 + ], + "type": "interline_equation", + "content": "\\hat {f} _ {i} = f _ {i, 1} + f _ {i, 2} \\tag {10}", + "image_path": "c08ecff67c2ab6974add7a08fb6516e1718ab20392424cbf10a67c9bb58a2dc2.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 521, + 222, + 533 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 521, + 222, + 533 + ], + "spans": [ + { + "bbox": [ + 105, + 521, + 222, + 533 + ], + "type": "text", + "content": "3.5 FREQUENCY MIXING" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 542, + 504, + 634 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 542, + 504, + 634 + ], + "spans": [ + { + "bbox": [ + 104, + 542, + 504, + 634 + ], + "type": "text", + "content": "After specifically learning the representation of each frequency component, we need to re-transform the frequency representations into the form of multi-level sequences before entering next CFD block, ensuring that the Decomposition-Learning-Mixing process is repeatable. Therefore, we designed Frequency Mixing blocks to convert the frequency component at " + }, + { + "bbox": [ + 104, + 542, + 504, + 634 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 542, + 504, + 634 + ], + "type": "text", + "content": "-th level " + }, + { + "bbox": [ + 104, + 542, + 504, + 634 + ], + "type": "inline_equation", + "content": "\\hat{f}_i" + }, + { + "bbox": [ + 104, + 542, + 504, + 634 + ], + "type": "text", + "content": " into multi-level sequences " + }, + { + "bbox": [ + 104, + 542, + 504, + 634 + ], + "type": "inline_equation", + "content": "x_i" + }, + { + "bbox": [ + 104, + 542, + 504, + 634 + ], + "type": "text", + "content": ", enabling it to serve as input for the next CFD block. 
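Putting Eqs. (9)-(10) together, a sketch of the M-KAN block under the assumptions above; reusing the single dwconv from the earlier sketch across all bands is a simplification for brevity, and in practice each band would likely own its convolution.

```python
import torch.nn as nn

# Band i gets a ChebyshevKAN of order b + k - i (higher frequency ->
# higher order), in parallel with the depthwise-convolution branch.
b, k, D = 2, 3, 16
kans = nn.ModuleList(ChebyKANLayer(D, order=b + k - i)
                     for i in range(1, k + 1))

def m_kan_block(f_i, i):                 # i in {1, ..., k}
    f1 = dwconv(f_i.transpose(1, 2)).transpose(1, 2)  # temporal branch, Eq. (5)
    f2 = kans[i - 1](f_i)                             # channel branch,  Eq. (9)
    return f1 + f2                                    # Eq. (10)
```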
To transform the frequency component at " + }, + { + "bbox": [ + 104, + 542, + 504, + 634 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 542, + 504, + 634 + ], + "type": "text", + "content": "-th level " + }, + { + "bbox": [ + 104, + 542, + 504, + 634 + ], + "type": "inline_equation", + "content": "\\hat{f}_i" + }, + { + "bbox": [ + 104, + 542, + 504, + 634 + ], + "type": "text", + "content": " into multi-level sequences " + }, + { + "bbox": [ + 104, + 542, + 504, + 634 + ], + "type": "inline_equation", + "content": "x_i" + }, + { + "bbox": [ + 104, + 542, + 504, + 634 + ], + "type": "text", + "content": ", we simply need to to supplement the frequency information from levels " + }, + { + "bbox": [ + 104, + 542, + 504, + 634 + ], + "type": "inline_equation", + "content": "i + 1" + }, + { + "bbox": [ + 104, + 542, + 504, + 634 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 542, + 504, + 634 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 542, + 504, + 634 + ], + "type": "text", + "content": " back into the " + }, + { + "bbox": [ + 104, + 542, + 504, + 634 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 542, + 504, + 634 + ], + "type": "text", + "content": "-th level. Thus, we employ Frequency Upsampling again to incrementally reintegrate the information into the higher frequency components:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 220, + 640, + 504, + 654 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 220, + 640, + 504, + 654 + ], + "spans": [ + { + "bbox": [ + 220, + 640, + 504, + 654 + ], + "type": "interline_equation", + "content": "x _ {i} = \\operatorname {I F F T} (\\text {P a d d i n g} (\\operatorname {F F T} (x _ {i + 1}))) + f _ {i} \\tag {11}", + "image_path": "f0f0a8fd7c8c2959f12ad4ff15360f655b51d94c0656149a9650b2dbf91e6c1d.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 660, + 504, + 683 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 660, + 504, + 683 + ], + "spans": [ + { + "bbox": [ + 104, + 660, + 504, + 683 + ], + "type": "text", + "content": "For the last Frequency Mixing block, we extract the highest-level sequence " + }, + { + "bbox": [ + 104, + 660, + 504, + 683 + ], + "type": "inline_equation", + "content": "x_{1}" + }, + { + "bbox": [ + 104, + 660, + 504, + 683 + ], + "type": "text", + "content": " and use a simple linear layer to produce the forecasting results " + }, + { + "bbox": [ + 104, + 660, + 504, + 683 + ], + "type": "inline_equation", + "content": "X_{O}" + }, + { + "bbox": [ + 104, + 660, + 504, + 683 + ], + "type": "text", + "content": "." 
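Finally, a sketch of the Frequency Mixing loop of Eq. (11) and the linear head of Eq. (12), reusing frequency_upsample from above; flattening the (T, D) sequence before the final Linear is an assumed realization of Eq. (12), not a detail stated in the paper.

```python
import torch.nn as nn

def frequency_mix(fs):
    """Eq. (11): rebuild the multi-level sequences bottom-up by adding
    each learned band back onto the upsampled lower level."""
    x = fs[-1]                                        # lowest level, (B, L_k, D)
    for i in range(len(fs) - 2, -1, -1):
        x = frequency_upsample(x, fs[i].shape[1]) + fs[i]   # Eq. (11)
    return x                                          # x_1: (B, T, D)

T, D, F_horizon = 96, 16, 96
head = nn.Linear(T * D, F_horizon)                    # Eq. (12), assumed shape
# X_O = head(frequency_mix(fs).flatten(1))            # (B, F_horizon)
```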
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 266, + 689, + 504, + 703 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 266, + 689, + 504, + 703 + ], + "spans": [ + { + "bbox": [ + 266, + 689, + 504, + 703 + ], + "type": "interline_equation", + "content": "X_{O} = \\operatorname{Linear}\\left(x_{1}\\right) \\tag{12}", + "image_path": "ad2d549249899faf120bcf3e76c5b73c4f943dbc12899bc944d71b522f8cceaa.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": "Due to the use of a variate-independent strategy, we also need to stack the predicted results of all variables together to obtain the final multivariate prediction " + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\mathbf{X}_{\\mathrm{O}}" + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 107, + 125, + 504, + 379 + ], + "blocks": [ + { + "bbox": [ + 104, + 89, + 504, + 123 + ], + "lines": [ + { + "bbox": [ + 104, + 89, + 504, + 123 + ], + "spans": [ + { + "bbox": [ + 104, + 89, + 504, + 123 + ], + "type": "text", + "content": "Table 1: Full results of the multivariate long-term forecasting comparison. The input sequence length is set to 96 for all baselines and the prediction lengths " + }, + { + "bbox": [ + 104, + 89, + 504, + 123 + ], + "type": "inline_equation", + "content": "F \\in \\{96, 192, 336, 720\\}" + }, + { + "bbox": [ + 104, + 89, + 504, + 123 + ], + "type": "text", + "content": ". Avg means the average results from all four prediction lengths." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 107, + 125, + 504, + 379 + ], + "lines": [ + { + "bbox": [ + 107, + 125, + 504, + 379 + ], + "spans": [ + { + "bbox": [ + 107, + 125, + 504, + 379 + ], + "type": "table", + "html": "
ModelsTimeKAN OursTimeMixer 2024aiTransformer 2024bTime-FFM 2024aPatchTST 2023TimesNet 2023MICN 2023DLinear 2023FreTS 2024FiLM 2022aFEDformer 2022bAutoformer 2021
MetricMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAE
ETTh1960.3670.3950.3850.4020.3860.4050.3850.4000.4600.4470.3840.4020.4260.4460.3970.4120.3950.4070.4380.4330.3950.4240.4490.459
1920.4140.4200.4430.4300.4410.4360.4390.4300.5120.4770.4360.4290.4540.4640.4460.4410.4900.4770.4940.4660.4690.4700.5000.482
3360.4450.4340.5120.4700.4870.4580.4800.4490.5460.4960.6380.4690.4930.4870.4890.4670.5100.4800.5470.4950.4900.4770.5210.496
7200.4440.4590.4970.4760.5030.4910.4620.4560.5440.5170.5210.5000.5260.5260.5130.5100.5680.5380.5860.5380.5980.5440.5140.512
Avg0.4170.4270.4590.4440.4540.4470.4420.4340.5160.4840.4950.4500.4750.4800.4610.4570.4910.4750.5160.4830.4980.4840.4960.487
ETTh2960.2900.3400.2890.3420.2970.3490.3010.3510.3080.3550.3400.3740.3720.4240.3400.3940.3320.3870.3220.3640.3580.3970.3460.388
1920.3750.3920.3780.3970.3800.4000.3780.3970.3930.4050.4020.4140.4920.4920.4820.4790.4510.4570.4050.4140.4290.4390.4560.452
3360.4230.4350.4320.4340.4280.4320.4220.4310.4270.4360.4520.4520.6070.5550.5910.5410.4660.4730.4350.4450.4960.4870.4820.486
7200.4430.4490.4640.4640.4270.4450.4270.4440.4360.4500.4620.4680.8240.6550.8390.6610.4850.4710.4450.4570.4630.4740.5150.511
Avg0.3830.4040.3900.4090.3830.4070.3820.4060.3910.4110.4140.4270.5740.5310.5630.5190.4330.4460.4020.4200.4370.4490.4500.459
ETTm1960.3220.3610.3170.3560.3340.3680.3360.3690.3520.3740.3380.3750.3650.3870.3460.3740.3370.3740.3530.3700.3790.4190.5050.475
1920.3570.3830.3670.3840.3770.3910.3780.3890.3900.3930.3740.3870.4030.4080.3820.3910.3820.3980.3870.4260.4410.5530.496
3360.3820.4010.3910.4060.4260.4200.4110.4100.4210.4140.4100.4110.4360.4310.4150.4150.4200.4230.4210.4080.4450.4590.6210.537
7200.4450.4350.4540.4410.4910.4590.4690.4410.4620.4490.4780.4500.4890.4620.4730.4510.4900.4710.4810.4410.5430.4900.6710.561
Avg0.3760.3950.3820.3970.4070.4100.3990.4020.4060.4070.4000.4060.4230.4220.4040.4080.4070.4170.4120.4020.4480.4520.5880.517
ETTm2960.1740.2550.1750.2570.1800.2640.1810.2670.1830.2700.1870.2670.1970.2960.1930.2930.1860.2750.1830.2660.2030.2870.2550.339
1920.2390.2990.2400.3020.2500.3090.2470.3080.2550.3140.2490.3090.2840.3610.2840.3610.2590.3230.2480.3050.2690.3280.2810.340
3360.3010.3400.3030.3430.3110.3480.3090.3470.3090.3470.3210.3510.3810.4290.3820.4290.3490.3860.3090.3430.3250.3660.3390.372
7200.3950.3960.3920.3960.4120.4070.4060.4040.4120.4040.4080.4030.5490.5220.5580.5250.5590.5110.4100.4000.4210.4150.4330.432
Avg0.2770.3220.2770.3240.2880.3320.2860.3320.2900.3340.2910.3330.3530.4020.3540.4020.3390.3740.2880.3280.3050.3490.3270.371
Weather960.1620.2080.1630.2090.1740.2140.1910.2300.1860.2270.1720.2200.1980.2610.1950.2520.1710.2270.1950.2360.2170.2960.2660.336
1920.2070.2490.2110.2540.2210.2540.2360.2670.2340.2650.2190.2610.2390.2990.2370.2950.2180.2800.2390.2710.2760.3360.3070.367
3360.2630.2900.2630.2930.2780.2960.2890.3030.2840.3010.2460.3370.2850.3360.2820.3310.2650.3170.2890.3060.3390.3800.359
7200.3380.3400.3440.3480.3580.3470.3620.3500.3560.3490.3650.3590.3510.3880.3450.3820.3260.3510.3600.3510.4030.4280.4190.428
Avg0.2420.2720.2450.2760.2580.2780.2700.2880.2650.2850.2510.2940.2680.3210.2650.3150.2450.2940.2710.2900.3090.3600.3380.382
Electricity960.1740.2660.1530.2450.1480.2400.1980.2820.1900.2960.1680.2720.1800.2930.2100.3020.1710.2600.1980.2740.1930.3080.2010.317
1920.1820.2730.1660.2570.1620.2530.1990.2850.1990.3040.1840.3220.1890.3020.2100.3050.1770.2680.1980.2780.2010.3150.2220.334
3360.1970.2860.1850.2750.1780.2690.2120.2980.2170.3190.1980.3000.1980.3120.2230.3190.1900.2840.2170.3000.2140.3290.2310.443
7200.2360.3200.2240.3120.2250.3170.2530.3300.2580.3520.2200.3200.2170.3300.2580.3500.2280.3160.2780.3560.2460.3550.2540.361
Avg0.1970.2860.1820.2720.1780.2700.2700.2880.2160.3180.1930.3040.1960.3090.2250.3190.1920.2820.2230.3020.2140.3270.2270.338
", + "image_path": "bd2650de916f0276ce0ddda006581d17498b5ea606d1ab136a0c4928828b55c0.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 396, + 201, + 407 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 396, + 201, + 407 + ], + "spans": [ + { + "bbox": [ + 105, + 396, + 201, + 407 + ], + "type": "text", + "content": "4 EXPERIMENTS" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 420, + 504, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 420, + 504, + 464 + ], + "spans": [ + { + "bbox": [ + 104, + 420, + 504, + 464 + ], + "type": "text", + "content": "Datasets We conduct extensive experiments on six real-world time series datasets, including Weather, ETTh1, ETTh2, ETTm1, ETTm2 and Electricity for long-term forecasting. Following previous work (Wu et al., 2021), we split the ETT series dataset into training, validation, and test sets in a ratio of 6:2:2. For the remaining datasets, we adopt a split ratio of 7:1:2." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 476, + 504, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 476, + 504, + 544 + ], + "spans": [ + { + "bbox": [ + 104, + 476, + 504, + 544 + ], + "type": "text", + "content": "Baseline We carefully select eleven well-acknowledged methods in the field of long-term time series forecasting as our baselines, including (1) Transformer-based methods: Autoformer (2021), FEDformer (2022b), PatchTST (2023), iTransformer (2024b); (2) MLP-based methods: DLinear (2023) and TimeMixer (2024a) (3) CNN-based method: MICN (2023), TimesNet (2023); (4) Frequency-based methods: FreTS (2024) and FiLM (2022a). And a time series foundation model Time-FFM (2024a)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 555, + 504, + 600 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 555, + 504, + 600 + ], + "spans": [ + { + "bbox": [ + 104, + 555, + 504, + 600 + ], + "type": "text", + "content": "Experimental Settings To ensure fair comparisons, we adopt the same look-back window length " + }, + { + "bbox": [ + 104, + 555, + 504, + 600 + ], + "type": "inline_equation", + "content": "T = 96" + }, + { + "bbox": [ + 104, + 555, + 504, + 600 + ], + "type": "text", + "content": " and the same prediction length " + }, + { + "bbox": [ + 104, + 555, + 504, + 600 + ], + "type": "inline_equation", + "content": "F = \\{96,192,336,720\\}" + }, + { + "bbox": [ + 104, + 555, + 504, + 600 + ], + "type": "text", + "content": ". We utilize the L2 loss for model training and use Mean Square Error (MSE) and Mean Absolute Error (MAE) metrics to evaluate the performance of each method." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 612, + 198, + 623 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 612, + 198, + 623 + ], + "spans": [ + { + "bbox": [ + 105, + 612, + 198, + 623 + ], + "type": "text", + "content": "4.1 MAIN RESULTS" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 633, + 505, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 633, + 505, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 633, + 505, + 732 + ], + "type": "text", + "content": "The comprehensive forecasting results are presented in Table 1, where the best results are highlighted in bold red and the second-best are underlined in blue. A lower MSE/MAE indicates a more accurate prediction result. 
We observe that TimeKAN demonstrates superior predictive performance across all datasets, except for the Electricity dataset, where iTransformer achieves the best result. This is due to iTransformer's use of channel-wise self-attention mechanisms to model inter-variable dependencies, which is particularly effective for high-dimensional datasets like Electricity. Additionally, both TimeKAN and TimeMixer perform consistently well in long-term forecasting tasks, showcasing the generalizability of well-designed time-series decomposition architectures for accurate predictions. Compared with other state-of-the-art methods, TimeKAN introduces a novel" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 107, + 103, + 504, + 180 + ], + "blocks": [ + { + "bbox": [ + 137, + 89, + 472, + 102 + ], + "lines": [ + { + "bbox": [ + 137, + 89, + 472, + 102 + ], + "spans": [ + { + "bbox": [ + 137, + 89, + 472, + 102 + ], + "type": "text", + "content": "Table 2: Ablation study of the Frequency Upsampling. The best results are in bold." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 107, + 103, + 504, + 180 + ], + "lines": [ + { + "bbox": [ + 107, + 103, + 504, + 180 + ], + "spans": [ + { + "bbox": [ + 107, + 103, + 504, + 180 + ], + "type": "table", + "html": "
DatasetsMetricETTh1ETTh2ETTm1ETTm2WeatherElectricity
MSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAE
Linear Mapping0.4010.4130.3120.3620.3280.3650.1800.2630.1640.2110.1840.275
Linear Interpolation0.3830.3980.2960.3470.3360.3700.1810.2630.1650.2100.1960.277
Transposed Convolution0.3770.4070.2900.3440.3260.3660.1780.2610.1630.2110.1880.274
Frequency Upsampling0.3670.3950.2900.3400.3220.3610.1740.2550.1620.2080.1740.266
", + "image_path": "5c77d357baa2f67e10bbff3063cef2bd45f772e6987c4003fead093d73c3040d.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 129, + 215, + 481, + 292 + ], + "blocks": [ + { + "bbox": [ + 146, + 201, + 463, + 213 + ], + "lines": [ + { + "bbox": [ + 146, + 201, + 463, + 213 + ], + "spans": [ + { + "bbox": [ + 146, + 201, + 463, + 213 + ], + "type": "text", + "content": "Table 3: Ablation study of the Multi-order KANs. The best results are in bold." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 129, + 215, + 481, + 292 + ], + "lines": [ + { + "bbox": [ + 129, + 215, + 481, + 292 + ], + "spans": [ + { + "bbox": [ + 129, + 215, + 481, + 292 + ], + "type": "table", + "html": "
DatasetsMetricETTh1ETTh2ETTm1ETTm2Weather
MSEMAEMSEMAEMSEMAEMSEMAEMSEMAE
MLPs0.3760.3970.2980.3480.3190.3610.1780.2640.1620.211
Fixed Low-order KANs0.3760.3980.2920.3410.3270.3660.1750.2570.1640.211
Fixed High-order KANs0.3800.4070.3100.3630.3270.2690.1760.2570.1640.212
Multi-order KANs0.3670.3950.2900.3400.3220.3610.1740.2550.1620.208
", + "image_path": "4b7a1ce71709331718e998581dae0bbeeac94f55ab0dc7ac780ad781a1329780.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 313, + 504, + 347 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 313, + 504, + 347 + ], + "spans": [ + { + "bbox": [ + 104, + 313, + 504, + 347 + ], + "type": "text", + "content": "Decomposition-Learning-Mixing framework, closely integrating the characteristics of Multi-order KANs with this hierarchical architecture, enabling superior performance in a wide range of long-term forecasting tasks." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 361, + 209, + 372 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 361, + 209, + 372 + ], + "spans": [ + { + "bbox": [ + 105, + 361, + 209, + 372 + ], + "type": "text", + "content": "4.2 ABLATION STUDY" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 382, + 504, + 406 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 382, + 504, + 406 + ], + "spans": [ + { + "bbox": [ + 104, + 382, + 504, + 406 + ], + "type": "text", + "content": "In this section, we investigate several key components of TimeKAN, including Frequency Upsampling, Depthwise Convolution and Multi-order KANs." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 418, + 506, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 418, + 506, + 509 + ], + "spans": [ + { + "bbox": [ + 104, + 418, + 506, + 509 + ], + "type": "text", + "content": "Frequency Upsampling To investigate the effectiveness of Frequency Upsampling, we compared it with three alternative upsampling methods that may not preserve frequency information before and after transformation: (1) Linear Mapping; (2) Linear Interpolation; and (3) Transposed Convolution. As shown in Table 2, replacing Frequency Upsampling with any of these three methods resulted in a decline in performance. This indicates that these upsampling techniques fail to maintain the integrity of frequency information after transforming, leading to the Decomposition-Learning-Mixing framework ineffective. This strongly demonstrates that the chosen Frequency Upsampling, as a non-parametric method, is an irreplaceable component of the TimeKAN framework." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 520, + 506, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 520, + 506, + 653 + ], + "spans": [ + { + "bbox": [ + 104, + 520, + 506, + 653 + ], + "type": "text", + "content": "Multi-order KANs We designed the following modules to investigate the effectiveness of Multi-order KANs: (1) MLPs, which means using MLP to replace each KAN; (2) Fixed Low-order KANs, which means using a KAN of order 2 at each frequency level; and (3) Fixed High-order KANs, which means using a KAN of order 5 at each frequency level. The comparison results are shown in Table 3. Overall, Multi-order KANs achieved the best performance. Compared to MLPs, Multi-order KANs perform significantly better, demonstrating that well-designed KANs possess stronger representation capabilities than MLPs and are a compelling alternative. Both Low-order KANs and High-order KANs performed worse than Multi-order KANs, indicating the validity of our design choice to incrementally increase the order of KANs to adapt to the representation of different frequency components. 
Thus, the learnable functions of KANs are indeed a double-edged sword; achieving satisfactory results requires selecting the appropriate level of function complexity for specific tasks." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 665, + 506, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 665, + 506, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 665, + 506, + 734 + ], + "type": "text", + "content": "Depthwise Convolution To assess the effectiveness of Depthwise Convolution, we replace it with the following alternatives: (1) w/o Depthwise Convolution; (2) Standard Convolution; (3) Multi-head Self-Attention. The results are shown in Table 4. Overall, Depthwise Convolution is the best choice. We clearly observe that removing Depthwise Convolution or replacing it with Multi-head Self-Attention leads to a significant drop in performance, highlighting the effectiveness of using convolution to learn temporal dependencies. When Depthwise Convolution is replaced with Standard" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 135, + 103, + 476, + 181 + ], + "blocks": [ + { + "bbox": [ + 136, + 89, + 473, + 102 + ], + "lines": [ + { + "bbox": [ + 136, + 89, + 473, + 102 + ], + "spans": [ + { + "bbox": [ + 136, + 89, + 473, + 102 + ], + "type": "text", + "content": "Table 4: Ablation study of the Depthwise Convolution. The best results are in bold." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 135, + 103, + 476, + 181 + ], + "lines": [ + { + "bbox": [ + 135, + 103, + 476, + 181 + ], + "spans": [ + { + "bbox": [ + 135, + 103, + 476, + 181 + ], + "type": "table", + "html": "
DatasetsMetricETTh1ETTh2ETTm1ETTm2Weather
MSEMAEMSEMAEMSEMAEMSEMAEMSEMAE
w/o Depthwise Conv0.3790.3970.2960.3430.3370.3730.1800.2630.1680.211
Standard Conv0.3640.3930.2950.3450.3230.3640.1800.2640.1620.210
Self-Attention0.3770.4060.2930.3420.3290.3650.1840.2720.1740.225
Depthwise Conv0.3670.3950.2900.3400.3220.3610.1740.2550.1620.208
", + "image_path": "5140572fdcf0b103f43c0a59f6fc6a674f7fa68a9b3190106ec11f1644c9e00e.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 108, + 193, + 306, + 318 + ], + "blocks": [ + { + "bbox": [ + 108, + 193, + 306, + 318 + ], + "lines": [ + { + "bbox": [ + 108, + 193, + 306, + 318 + ], + "spans": [ + { + "bbox": [ + 108, + 193, + 306, + 318 + ], + "type": "image", + "image_path": "cb2d292288b505849be1ebccd78db2174a67eb9322af381215248d8bf02f49c6.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 344, + 504, + 378 + ], + "lines": [ + { + "bbox": [ + 104, + 344, + 504, + 378 + ], + "spans": [ + { + "bbox": [ + 104, + 344, + 504, + 378 + ], + "type": "text", + "content": "Figure 2: Comparison of forecasting performance between TimeKAN and other three models with varying look-back windows on ETTm2 and Weather datasets. The look-back windows are selected to be " + }, + { + "bbox": [ + 104, + 344, + 504, + 378 + ], + "type": "inline_equation", + "content": "T \\in \\{48,96,192,336,512,720\\}" + }, + { + "bbox": [ + 104, + 344, + 504, + 378 + ], + "type": "text", + "content": ", and the prediction length is fixed to " + }, + { + "bbox": [ + 104, + 344, + 504, + 378 + ], + "type": "inline_equation", + "content": "F = 96" + }, + { + "bbox": [ + 104, + 344, + 504, + 378 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 309, + 193, + 503, + 317 + ], + "blocks": [ + { + "bbox": [ + 309, + 193, + 503, + 317 + ], + "lines": [ + { + "bbox": [ + 309, + 193, + 503, + 317 + ], + "spans": [ + { + "bbox": [ + 309, + 193, + 503, + 317 + ], + "type": "image", + "image_path": "5406045b8d3dd7cdbaee74bf072ecba19610f7495c9691ff5b71a89f25be987c.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 398, + 504, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 398, + 504, + 433 + ], + "spans": [ + { + "bbox": [ + 104, + 398, + 504, + 433 + ], + "type": "text", + "content": "Convolution, there are declines in most metrics, which implies that focusing on extracting temporal dependencies individually with Depthwise Convolution, without interference from inter-channel relationships, is a reasonable design." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 444, + 506, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 444, + 506, + 567 + ], + "spans": [ + { + "bbox": [ + 104, + 444, + 506, + 567 + ], + "type": "text", + "content": "Varing Look-back Window In principle, extending the look-back window can provide more information for predicting future, leading to a potential improvement in forecasting performance. A effective long-term TSF method equipped with a strong temporal relation extraction capability should be able to improve forecasting performance when look-back window length increasing (Zeng et al., 2023). As a model based on frequency decomposition learning, TimeKAN should achieve better predictive performance as the look-back window lengths, since more incremental frequency information is available for prediction. 
To demonstrate that TimeKAN benefits from a larger look-back window, we select look-back window lengths from " + }, + { + "bbox": [ + 104, + 444, + 506, + 567 + ], + "type": "inline_equation", + "content": "T = \\{48,96,192,336,512,720\\}" + }, + { + "bbox": [ + 104, + 444, + 506, + 567 + ], + "type": "text", + "content": " while keeping the prediction length fixed at 96. As demonstrated in Figure 2, our TimeKAN consistently reduces the MSE scores as the look-back window increases, indicating that TimeKAN can effectively learn from long time series." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 578, + 219, + 590 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 578, + 219, + 590 + ], + "spans": [ + { + "bbox": [ + 105, + 578, + 219, + 590 + ], + "type": "text", + "content": "4.3 MODEL EFFICIENCY" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 600, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 600, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 600, + 506, + 733 + ], + "type": "text", + "content": "We compare TimeKAN with the MLP-based method TimeMixer and the Transformer-based methods iTransformer and PatchTST in terms of model parameters and Multiply-Accumulate Operations (MACs), to validate that TimeKAN is a lightweight and efficient architecture. To ensure a fair comparison, we fix the prediction length " + }, + { + "bbox": [ + 104, + 600, + 506, + 733 + ], + "type": "inline_equation", + "content": "F = 96" + }, + { + "bbox": [ + 104, + 600, + 506, + 733 + ], + "type": "text", + "content": " and input length " + }, + { + "bbox": [ + 104, + 600, + 506, + 733 + ], + "type": "inline_equation", + "content": "T = 96" + }, + { + "bbox": [ + 104, + 600, + 506, + 733 + ], + "type": "text", + "content": ", and set the input batch size to 32. The comparison results are summarized in Table 5. It is clear that our TimeKAN demonstrates significant advantages in both model parameter size and MACs, particularly when compared to Transformer-based models. For instance, on the Electricity dataset, the parameter count of PatchTST is nearly 295 times that of TimeKAN, and its MACs are almost 118 times greater. Even when compared to the relatively lightweight MLP-based method TimeMixer, TimeKAN shows superior efficiency. On the Weather dataset, TimeKAN requires only " + }, + { + "bbox": [ + 104, + 600, + 506, + 733 + ], + "type": "inline_equation", + "content": "20.05\\%" + }, + { + "bbox": [ + 104, + 600, + 506, + 733 + ], + "type": "text", + "content": " of the parameters needed by TimeMixer and only " + }, + { + "bbox": [ + 104, + 600, + 506, + 733 + ], + "type": "inline_equation", + "content": "36.14\\%" + }, + { + "bbox": [ + 104, + 600, + 506, + 733 + ], + "type": "text", + "content": " of the MACs. This remarkable efficiency advantage is primarily attributed to the lightweight architectural design. 
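As a sketch, the Params column for any of the PyTorch models above can be reproduced by counting trainable tensors; the paper does not state which profiler produced the MACs column, so the thop call below is only one plausible choice and is left commented out.

import torch

def count_params(model: torch.nn.Module) -> int:
    """Number of trainable parameters, as reported in the Params column."""
    return sum(p.numel() for p in model.parameters() if p.requires_grad)

# MACs under the paper's setting (batch 32, input length T = 96, n_vars variables):
# from thop import profile
# x = torch.randn(32, 96, n_vars)
# macs, params = profile(model, inputs=(x,))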
The main computations of the TimeKAN model are concentrated" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 111, + 134, + 507, + 210 + ], + "blocks": [ + { + "bbox": [ + 104, + 89, + 506, + 133 + ], + "lines": [ + { + "bbox": [ + 104, + 89, + 506, + 133 + ], + "spans": [ + { + "bbox": [ + 104, + 89, + 506, + 133 + ], + "type": "text", + "content": "Table 5: A comparison of model parameters (Params) and multiply-accumulate operations (MACs) for TimeKAN and three other models. To ensure a fair comparison, we fix the prediction length " + }, + { + "bbox": [ + 104, + 89, + 506, + 133 + ], + "type": "inline_equation", + "content": "F = 96" + }, + { + "bbox": [ + 104, + 89, + 506, + 133 + ], + "type": "text", + "content": " and the input length " + }, + { + "bbox": [ + 104, + 89, + 506, + 133 + ], + "type": "inline_equation", + "content": "T = 96" + }, + { + "bbox": [ + 104, + 89, + 506, + 133 + ], + "type": "text", + "content": ", and set the input batch size to 32. The lowest computational cost is highlighted in bold." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 111, + 134, + 507, + 210 + ], + "lines": [ + { + "bbox": [ + 111, + 134, + 507, + 210 + ], + "spans": [ + { + "bbox": [ + 111, + 134, + 507, + 210 + ], + "type": "table", + "html": "
Datasets MetricETTh1ETTh2ETTm1ETTm2WeatherElectricity
ParamsMACsParamsMACsParamsMACsParamsMACsParamsMACsParamsMACs
TimeMixer75.50K20.37M75.50K20.37M75.50K20.37M77.77K24.18M104.43K82.62M106.83K1.26G
iTransformer841.57K77.46M224.22K19.86M224.22K19.86M224.22K19.86M4.83M1.16G4.83M16.29G
PatchTST3.75M5.90G10.06M17.66G3.75M5.90G10.06M17.66G6.90M35.30G6.90M539.38G
TimeKAN12.84K7.63M15.00K8.02M14.38K7.63M38.12K16.66M20.94K29.86M23.34K456.50M
", + "image_path": "5f15770d99924dd348277e9a8fc7ffae4b18e431b48c93fc67aac8efba01159d.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 232, + 506, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 232, + 506, + 289 + ], + "spans": [ + { + "bbox": [ + 104, + 232, + 506, + 289 + ], + "type": "text", + "content": "in the M-KAN block, and the Depthwise Convolution we employed significantly reduces the number of parameters through grouped operations. Additionally, the powerful representation capabilities afforded by Multi-order KANs allow us to represent time series with very few neurons. Therefore, we cannot overlook that TimeKAN achieves outstanding forecasting performance while requiring minimal computational resources." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 306, + 196, + 319 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 306, + 196, + 319 + ], + "spans": [ + { + "bbox": [ + 105, + 306, + 196, + 319 + ], + "type": "text", + "content": "5 CONCLUSION" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 331, + 504, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 331, + 504, + 422 + ], + "spans": [ + { + "bbox": [ + 104, + 331, + 504, + 422 + ], + "type": "text", + "content": "We proposed an efficient KAN-based Frequency Decomposition Learning architecture (TimeKAN) for long-term time series forecasting. Based on Decomposition-Learning-Mixing architecture, TimeKAN obtains series representations for each frequency band using a Cascaded Frequency Decomposition blocks. Additionally, a Multi-order KAN Representation Learning blocks further leverage the high flexibility of KAN to learn and represent specific temporal patterns within each frequency band. Finally, Frequency Mixing blocks recombine the frequency bands into the original format. Extensive experiments on real-world datasets demonstrate that TimeKAN achieves the state of the art forecasting performance and extremely lightweight computational consumption." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 438, + 226, + 449 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 438, + 226, + 449 + ], + "spans": [ + { + "bbox": [ + 105, + 438, + 226, + 449 + ], + "type": "text", + "content": "ACKNOWLEDGEMENTS" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 463, + 504, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 463, + 504, + 486 + ], + "spans": [ + { + "bbox": [ + 104, + 463, + 504, + 486 + ], + "type": "text", + "content": "This work is supported by Shanghai Artificial Intelligence Laboratory. This work was done during Songtao Huang's internship at Shanghai Artificial Intelligence Laboratory." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 504, + 176, + 516 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 504, + 176, + 516 + ], + "spans": [ + { + "bbox": [ + 105, + 504, + 176, + 516 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 523, + 505, + 732 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 105, + 523, + 504, + 547 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 523, + 504, + 547 + ], + "spans": [ + { + "bbox": [ + 105, + 523, + 504, + 547 + ], + "type": "text", + "content": "Alexander Dylan Bodner, Antonio Santiago Tepsich, Jack Natan Spolski, and Santiago Pourteau. Convolutional kolmogorov-arnold networks. arXiv preprint arXiv:2406.13155, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 555, + 505, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 555, + 505, + 601 + ], + "spans": [ + { + "bbox": [ + 105, + 555, + 505, + 601 + ], + "type": "text", + "content": "Tao Dai, Beiliang Wu, Peiyuan Liu, Naiqi Li, Jigang Bao, Yong Jiang, and Shu-Tao Xia. Periodicity decoupling framework for long-term series forecasting. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=dp27P5HBBt." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 610, + 504, + 644 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 610, + 504, + 644 + ], + "spans": [ + { + "bbox": [ + 104, + 610, + 504, + 644 + ], + "type": "text", + "content": "Luo donghao and wang xue. ModernTCN: A modern pure convolution structure for general time series analysis. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=vpJMJerXHU." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 654, + 504, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 654, + 504, + 689 + ], + "spans": [ + { + "bbox": [ + 104, + 654, + 504, + 689 + ], + "type": "text", + "content": "Mononito Goswami, Konrad Szafer, Arjun Choudhry, Yifu Cai, Shuo Li, and Artur Dubrawski. Moment: A family of open time-series foundation models. In ICML, 2024. URL https://openreview.net/forum?id=FVvf69a5rx." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 698, + 504, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 504, + 732 + ], + "type": "text", + "content": "Hongbin Huang, Minghua Chen, and Xiao Qiao. Generative learning for financial time series with irregular and scale-invariant patterns. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=CdjnzWsQax." 
+ } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 732 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 107, + 81, + 506, + 128 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 506, + 128 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 506, + 128 + ], + "type": "text", + "content": "Weiwei Jiang and Jiayun Luo. Graph neural network for traffic forecasting: A survey. Expert Systems with Applications, 207:117921, 2022. ISSN 0957-4174. doi: https://doi.org/10.1016/j.eswa.2022.117921. URL https://www.sciencedirect.com/science/article/pii/S0957417422011654." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 133, + 506, + 202 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 133, + 506, + 202 + ], + "spans": [ + { + "bbox": [ + 105, + 133, + 506, + 202 + ], + "type": "text", + "content": "Remi Lam, Alvaro Sanchez-Gonzalez, Matthew Willson, Peter Wirnsberger, Meire Fortunato, Ferran Alet, Suman Ravuri, Timo Ewalds, Zach Eaton-Rosen, Weihua Hu, Alexander Merose, Stephan Hoyer, George Holland, Oriol Vinyals, Jacklynn Stott, Alexander Pritzel, Shakir Mohamed, and Peter Battaglia. Learning skillful medium-range global weather forecasting. Science, 382(6677):1416-1421, 2023. doi: 10.1126/science.adi2336. URL https://www.science.org/doi/abs/10.1126/science.adi2336." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 208, + 504, + 243 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 208, + 504, + 243 + ], + "spans": [ + { + "bbox": [ + 105, + 208, + 504, + 243 + ], + "type": "text", + "content": "Chenxin Li, Xinyu Liu, Wuyang Li, Cheng Wang, Hengyu Liu, and Yixuan Yuan. U-kan makes strong backbone for medical image segmentation and generation. arXiv preprint arXiv:2406.02918, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 250, + 504, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 250, + 504, + 274 + ], + "spans": [ + { + "bbox": [ + 105, + 250, + 504, + 274 + ], + "type": "text", + "content": "Ziyao Li. Kolmogorov-arnold networks are radial basis function networks. arXiv preprint arXiv:2405.06721, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 280, + 504, + 316 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 280, + 504, + 316 + ], + "spans": [ + { + "bbox": [ + 105, + 280, + 504, + 316 + ], + "type": "text", + "content": "Shengsheng Lin, Weiwei Lin, Wentai Wu, Haojun Chen, and Junjie Yang. 
SparseTSF: Modeling long-term time series forecasting with " + }, + { + "bbox": [ + 105, + 280, + 504, + 316 + ], + "type": "inline_equation", + "content": "1\\mathrm{k}" + }, + { + "bbox": [ + 105, + 280, + 504, + 316 + ], + "type": "text", + "content": " parameters. In Forty-first International Conference on Machine Learning, 2024. URL https://openreview.net/forum?id=54NSHO01Fe." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 322, + 504, + 357 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 322, + 504, + 357 + ], + "spans": [ + { + "bbox": [ + 105, + 322, + 504, + 357 + ], + "type": "text", + "content": "Minhao Liu, Ailing Zeng, Muxi Chen, Zhijian Xu, Qiuxia Lai, Lingna Ma, and Qiang Xu. Scinet: Time series modeling and forecasting with sample convolution and interaction. Advances in Neural Information Processing Systems, 35:5816-5828, 2022." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 363, + 504, + 408 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 363, + 504, + 408 + ], + "spans": [ + { + "bbox": [ + 105, + 363, + 504, + 408 + ], + "type": "text", + "content": "Qingxiang Liu, Xu Liu, Chenghao Liu, Qingsong Wen, and Yuxuan Liang. Time-FFM: Towards LM-empowered federated foundation model for time series forecasting. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024a. URL https://openreview.net/forum?id=HS0faHRhWD." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 415, + 504, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 415, + 504, + 460 + ], + "spans": [ + { + "bbox": [ + 105, + 415, + 504, + 460 + ], + "type": "text", + "content": "Yong Liu, Tengge Hu, Haoran Zhang, Haixu Wu, Shiyu Wang, Lintao Ma, and Mingsheng Long. iTransformer: Inverted transformers are effective for time series forecasting. In The Twelfth International Conference on Learning Representations, 2024b. URL https://openreview.net/forum?id=JePfAI8fah." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 468, + 504, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 468, + 504, + 502 + ], + "spans": [ + { + "bbox": [ + 105, + 468, + 504, + 502 + ], + "type": "text", + "content": "Ziming Liu, Yixuan Wang, Sachin Vaidya, Fabian Ruehle, James Halverson, Marin Soljacic, Thomas Y Hou, and Max Tegmark. Kan: Kolmogorov-arnold networks. arXiv preprint arXiv:2404.19756, 2024c." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 510, + 504, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 510, + 504, + 555 + ], + "spans": [ + { + "bbox": [ + 105, + 510, + 504, + 555 + ], + "type": "text", + "content": "Yuqi Nie, Nam H Nguyen, Phanwadee Sinthong, and Jayant Kalagnanam. A time series is worth 64 words: Long-term forecasting with transformers. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=Jbdc0vTOcol." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 562, + 504, + 597 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 562, + 504, + 597 + ], + "spans": [ + { + "bbox": [ + 105, + 562, + 504, + 597 + ], + "type": "text", + "content": "Khemraj Shukla, Juan Diego Toscano, Zhicheng Wang, Zongren Zou, and George Em Karniadakis. A comprehensive and fair comparison between mlp and kan representations for differential equations and operator networks. 
arXiv preprint arXiv:2406.02917, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 604, + 504, + 628 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 604, + 504, + 628 + ], + "spans": [ + { + "bbox": [ + 105, + 604, + 504, + 628 + ], + "type": "text", + "content": "Sidharth SS. Chebyshev polynomial-based kolmogorov-arnold networks: An efficient architecture for nonlinear function approximation. arXiv preprint arXiv:2405.07200, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 634, + 504, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 634, + 504, + 679 + ], + "spans": [ + { + "bbox": [ + 105, + 634, + 504, + 679 + ], + "type": "text", + "content": "Huiqiang Wang, Jian Peng, Feihu Huang, Jince Wang, Junhui Chen, and Yifei Xiao. MICN: Multiscale local and global context modeling for long-term series forecasting. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=zt53IDUR1U." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 686, + 504, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 686, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 686, + 504, + 732 + ], + "type": "text", + "content": "Shiyu Wang, Haixu Wu, Xiaoming Shi, Tengge Hu, Huakun Luo, Lintao Ma, James Y. Zhang, and JUN ZHOU. Timemixer: Decomposable multiscale mixing for time series forecasting. In The Twelfth International Conference on Learning Representations, 2024a. URL https://openreview.net/forum?id=7oLshfEIC2." + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 632 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 107, + 81, + 505, + 128 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 505, + 128 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 505, + 128 + ], + "type": "text", + "content": "Yizheng Wang, Jia Sun, Jinshuai Bai, Cosmin Anitescu, Mohammad Sadegh Eshaghi, Xiaoying Zhuang, Timon Rabczuk, and Yinghua Liu. Kolmogorov arnold informed neural network: A physics-informed deep learning framework for solving pdes based on kolmogorov arnold networks. arXiv preprint arXiv:2406.11045, 2024b." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 133, + 505, + 201 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 133, + 505, + 201 + ], + "spans": [ + { + "bbox": [ + 105, + 133, + 505, + 201 + ], + "type": "text", + "content": "Haixu Wu, Jiehui Xu, Jianmin Wang, and Mingsheng Long. Autoformer: Decomposition transformers with auto-correlation for long-term series forecasting. In M. Ranzato, A. Beygelzimer, Y. Dauphin, P.S. Liang, and J. 
Wortman Vaughan (eds.), Advances in Neural Information Processing Systems, volume 34, pp. 22419-22430. Curran Associates, Inc., 2021. URL https://proceedings.neurips.cc/paper_files/paper/2021/file/bcc0d400288793e8bcdcd7c19a8ac0c2b-Paper.pdf." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 207, + 505, + 253 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 207, + 505, + 253 + ], + "spans": [ + { + "bbox": [ + 105, + 207, + 505, + 253 + ], + "type": "text", + "content": "Haixu Wu, Tengge Hu, Yong Liu, Hang Zhou, Jianmin Wang, and Mingsheng Long. Timesnet: Temporal 2d-variation modeling for general time series analysis. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=ju_Uqw384Oq." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 259, + 505, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 259, + 505, + 282 + ], + "spans": [ + { + "bbox": [ + 105, + 259, + 505, + 282 + ], + "type": "text", + "content": "Kunpeng Xu, Lifei Chen, and Shengrui Wang. Are kan effective for identifying and tracking concept drift in time series? arXiv preprint arXiv:2410.10041, 2024a." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 289, + 505, + 324 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 289, + 505, + 324 + ], + "spans": [ + { + "bbox": [ + 105, + 289, + 505, + 324 + ], + "type": "text", + "content": "Zhijian Xu, Ailing Zeng, and Qiang Xu. FITS: Modeling time series with $10k$ parameters. In The Twelfth International Conference on Learning Representations, 2024b. URL https://openreview.net/forum?id=bWcvvZ3qMb." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 330, + 505, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 330, + 505, + 365 + ], + "spans": [ + { + "bbox": [ + 105, + 330, + 505, + 365 + ], + "type": "text", + "content": "Kun Yi, Qi Zhang, Wei Fan, Shoujin Wang, Pengyang Wang, Hui He, Ning An, Defu Lian, Longbing Cao, and Zhendong Niu. Frequency-domain mlps are more effective learners in time series forecasting. Advances in Neural Information Processing Systems, 36, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 371, + 505, + 416 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 371, + 505, + 416 + ], + "spans": [ + { + "bbox": [ + 105, + 371, + 505, + 416 + ], + "type": "text", + "content": "Linfei Yin, Xinghui Cao, and Dongduan Liu. Weighted fully-connected regression networks for one-day-ahead hourly photovoltaic power forecasting. Applied Energy, 332:120527, 2023. ISSN 0306-2619. doi: https://doi.org/10.1016/j.apenergy.2022.120527. URL https://www.sciencedirect.com/science/article/pii/S0306261922017846." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 422, + 505, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 422, + 505, + 456 + ], + "spans": [ + { + "bbox": [ + 105, + 422, + 505, + 456 + ], + "type": "text", + "content": "Ailing Zeng, Muxi Chen, Lei Zhang, and Qiang Xu. Are transformers effective for time series forecasting? In Proceedings of the AAAI conference on artificial intelligence, volume 37, pp. 11121-11128, 2023." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 464, + 505, + 508 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 464, + 505, + 508 + ], + "spans": [ + { + "bbox": [ + 105, + 464, + 505, + 508 + ], + "type": "text", + "content": "G.Peter Zhang. Time series forecasting using a hybrid arima and neural network model. Neurocomputing, 50:159-175, 2003. ISSN 0925-2312. doi: https://doi.org/10.1016/S0925-2312(01)00702-0. URL https://www.sciencedirect.com/science/article/pii/S0925231201007020." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 515, + 505, + 550 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 515, + 505, + 550 + ], + "spans": [ + { + "bbox": [ + 105, + 515, + 505, + 550 + ], + "type": "text", + "content": "Haoyi Zhou, Shanghang Zhang, Jieqi Peng, Shuai Zhang, Jianxin Li, Hui Xiong, and Wancai Zhang. Informer: Beyond efficient transformer for long sequence time-series forecasting. In Proceedings of the AAAI conference on artificial intelligence, volume 35, pp. 11106-11115, 2021." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 555, + 505, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 555, + 505, + 590 + ], + "spans": [ + { + "bbox": [ + 105, + 555, + 505, + 590 + ], + "type": "text", + "content": "Tian Zhou, Ziqing Ma, Qingsong Wen, Liang Sun, Tao Yao, Wotao Yin, Rong Jin, et al. Film: Frequency improved legendre memory model for long-term time series forecasting. Advances in neural information processing systems, 35:12677-12690, 2022a." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 597, + 505, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 597, + 505, + 632 + ], + "spans": [ + { + "bbox": [ + 105, + 597, + 505, + 632 + ], + "type": "text", + "content": "Tian Zhou, Ziqing Ma, Qingsong Wen, Xue Wang, Liang Sun, and Rong Jin. Fedformer: Frequency enhanced decomposed transformer for long-term series forecasting. In International conference on machine learning, pp. 27268-27286. PMLR, 2022b." 
+ } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 294, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 294, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 294, + 94 + ], + "type": "text", + "content": "A ADDITIONAL MODEL ANALYSIS" + } + ] + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 111, + 173, + 527, + 321 + ], + "blocks": [ + { + "bbox": [ + 104, + 128, + 506, + 172 + ], + "lines": [ + { + "bbox": [ + 104, + 128, + 506, + 172 + ], + "spans": [ + { + "bbox": [ + 104, + 128, + 506, + 172 + ], + "type": "text", + "content": "Table 6: Full comparison results of model parameters (Params) and multiply-accumulate operations (MACs) for TimeKAN and other models. To ensure a fair comparison, we fix the prediction length " + }, + { + "bbox": [ + 104, + 128, + 506, + 172 + ], + "type": "inline_equation", + "content": "F = 96" + }, + { + "bbox": [ + 104, + 128, + 506, + 172 + ], + "type": "text", + "content": " and the input length " + }, + { + "bbox": [ + 104, + 128, + 506, + 172 + ], + "type": "inline_equation", + "content": "T = 96" + }, + { + "bbox": [ + 104, + 128, + 506, + 172 + ], + "type": "text", + "content": ", and set the input batch size to 32. The lowest computational cost is highlighted in bold." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 111, + 173, + 527, + 321 + ], + "lines": [ + { + "bbox": [ + 111, + 173, + 527, + 321 + ], + "spans": [ + { + "bbox": [ + 111, + 173, + 527, + 321 + ], + "type": "table", + "html": "
Datasets MetricETTh1ETTh2ETTm1ETTm2WeatherElectricity
ParamsMACsParamsMACsParamsMACsParamsMACsParamsMACsParamsMACs
TimeMixer75.50K20.37M75.50K20.37M75.50K20.37M77.77K24.18M104.43K82.62M106.83K1.26G
iTransformer841.57K77.46M224.22K19.86M224.22K19.86M224.22K19.86M4.83M1.16G4.83M16.29G
PatchTST3.75M5.90G10.06M17.66G3.75M5.90G10.06M17.66G6.90M35.30G6.90M539.38G
TimesNet605.48K18.13G1.19M36.28G4.71M144G1.19M36.28G1.19M36.28G150.30M4.61T
MICN25.20M71.95G25.20M71.95G25.20M71.95G25.20M71.95G111.03K295.07M6.64M19.5G
DLinear18.62K0.6M18.62K0.6M18.62K0.6M18.62K0.6M18.62K0.6M18.62K0.6M
FreTS3.24M101.46M3.24M101.46M3.24M101.46M3.24M101.46M3.24M101.46M3.24M101.46M
FiLM12.58M2.82G12.58M2.82G12.58M2.82G12.58M2.82G12.58M8.46G12.58M8.46G
FEDformer23.38M24.96G23.38M24.96G23.38M24.96G23.38M24.96G23.45M25.23G24.99M30.89G
Autoformer10.54M22.82G10.54M22.82G10.54M22.82G10.54M22.82G10.61M23.08G12.14M28.75G
TimeKAN12.84K7.63M15.00K8.02M14.38K7.63M38.12K16.66M20.94K29.86M23.34K456.50M
", + "image_path": "8158e74fad2c54f26135ce46a28fc89e6c8304f8ee7fccf6f6c159ce516f523b.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 356, + 315, + 366 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 356, + 315, + 366 + ], + "spans": [ + { + "bbox": [ + 105, + 356, + 315, + 366 + ], + "type": "text", + "content": "A.1 COMPUTATIONAL COMPLEXITY ANALYSIS" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 379, + 506, + 501 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 379, + 506, + 501 + ], + "spans": [ + { + "bbox": [ + 104, + 379, + 506, + 501 + ], + "type": "text", + "content": "In our TimeKAN, the main computational complexity lies in Fast Fourier Transform (FFT), Depthwise Convolution block and Multi-order KAN block. Consider a time series with length " + }, + { + "bbox": [ + 104, + 379, + 506, + 501 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 104, + 379, + 506, + 501 + ], + "type": "text", + "content": " and the hidden state of each time point is " + }, + { + "bbox": [ + 104, + 379, + 506, + 501 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 379, + 506, + 501 + ], + "type": "text", + "content": ". For FFT, the computation complexity is " + }, + { + "bbox": [ + 104, + 379, + 506, + 501 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(L\\log L)" + }, + { + "bbox": [ + 104, + 379, + 506, + 501 + ], + "type": "text", + "content": ". For Depthwise Convolution block, if we set the convolutional kernel to " + }, + { + "bbox": [ + 104, + 379, + 506, + 501 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 104, + 379, + 506, + 501 + ], + "type": "text", + "content": " and stride to 1, the complexity is " + }, + { + "bbox": [ + 104, + 379, + 506, + 501 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(LDM)" + }, + { + "bbox": [ + 104, + 379, + 506, + 501 + ], + "type": "text", + "content": ". Finally, assuming that the highest order of Chebyshev polynomials is " + }, + { + "bbox": [ + 104, + 379, + 506, + 501 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 379, + 506, + 501 + ], + "type": "text", + "content": ", the complexity of Multi-order KAN block is " + }, + { + "bbox": [ + 104, + 379, + 506, + 501 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(LD^2K)" + }, + { + "bbox": [ + 104, + 379, + 506, + 501 + ], + "type": "text", + "content": ". Since " + }, + { + "bbox": [ + 104, + 379, + 506, + 501 + ], + "type": "inline_equation", + "content": "M, D, K" + }, + { + "bbox": [ + 104, + 379, + 506, + 501 + ], + "type": "text", + "content": " are constants that are independent of the input length " + }, + { + "bbox": [ + 104, + 379, + 506, + 501 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 104, + 379, + 506, + 501 + ], + "type": "text", + "content": ", the computational complexity of both the Depthwise Convolution block and the Multi-order KAN block can be reduced to " + }, + { + "bbox": [ + 104, + 379, + 506, + 501 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(L)" + }, + { + "bbox": [ + 104, + 379, + 506, + 501 + ], + "type": "text", + "content": ", which is linear about the sequence length. 
In summary, the overall computational complexity is " + }, + { + "bbox": [ + 104, + 379, + 506, + 501 + ], + "type": "inline_equation", + "content": "\\max(\\mathcal{O}(L\\log L), \\mathcal{O}(L)) = \\mathcal{O}(L\\log L)" + }, + { + "bbox": [ + 104, + 379, + 506, + 501 + ], + "type": "text", + "content": ". When the input is a multivariate sequence with " + }, + { + "bbox": [ + 104, + 379, + 506, + 501 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 104, + 379, + 506, + 501 + ], + "type": "text", + "content": " variables, the computational complexity will expand to " + }, + { + "bbox": [ + 104, + 379, + 506, + 501 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(ML\\log L)" + }, + { + "bbox": [ + 104, + 379, + 506, + 501 + ], + "type": "text", + "content": " due to our variable-independent strategy." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 521, + 221, + 533 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 521, + 221, + 533 + ], + "spans": [ + { + "bbox": [ + 105, + 521, + 221, + 533 + ], + "type": "text", + "content": "A.2 MODEL EFFICIENCY" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 544, + 506, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 544, + 506, + 624 + ], + "spans": [ + { + "bbox": [ + 104, + 544, + 506, + 624 + ], + "type": "text", + "content": "Here, we provide the complete results of model efficiency in terms of parameters and MACs in Table 6. As can be seen, except for DLinear, our TimeKAN consistently demonstrates a significant advantage in both parameter count and MACs compared to any other model. DLinear is a model consisting of only a single linear layer, which makes it the most lightweight in terms of parameters and MACs. However, the performance of DLinear already shows a significant gap when compared to state-of-the-art methods. Therefore, our TimeKAN actually achieves superior performance in both forecasting accuracy and efficiency." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 642, + 190, + 653 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 642, + 190, + 653 + ], + "spans": [ + { + "bbox": [ + 105, + 642, + 190, + 653 + ], + "type": "text", + "content": "A.3 ERROR BARS" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 665, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 665, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 665, + 506, + 733 + ], + "type": "text", + "content": "To evaluate the robustness of TimeKAN, we repeated the experiments on three randomly selected seeds and compared it with the second-best model (TimeMixer). We report the mean and standard deviation of the results across the three experiments, as well as the confidence level of TimeKAN's superiority over TimeMixer. The results are averaged over four prediction horizons (96, 192, 336, and 720). As shown in Table 7, in most cases, we have over " + }, + { + "bbox": [ + 104, + 665, + 506, + 733 + ], + "type": "inline_equation", + "content": "90\\%" + }, + { + "bbox": [ + 104, + 665, + 506, + 733 + ], + "type": "text", + "content": " confidence that TimeKAN outperforms the second-best model, demonstrating the good robustness of TimeKAN."
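One plausible way (ours; the paper does not spell out its exact statistical test) to turn three per-seed scores into such a confidence level is a one-sided Welch t-test, sketched below with hypothetical numbers.

```python
# Sketch: one-sided confidence that the lower-MSE model is truly better,
# computed from three hypothetical per-seed results via a Welch t-test.
from scipy import stats

timekan_mse   = [0.418, 0.422, 0.426]   # hypothetical per-seed MSEs
timemixer_mse = [0.456, 0.462, 0.468]

t, p_two_sided = stats.ttest_ind(timekan_mse, timemixer_mse, equal_var=False)
p_one_sided = p_two_sided / 2 if t < 0 else 1 - p_two_sided / 2
print(f"confidence that TimeKAN is better: {1 - p_one_sided:.1%}")
```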
+ } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 143, + 111, + 468, + 200 + ], + "blocks": [ + { + "bbox": [ + 104, + 89, + 504, + 110 + ], + "lines": [ + { + "bbox": [ + 104, + 89, + 504, + 110 + ], + "spans": [ + { + "bbox": [ + 104, + 89, + 504, + 110 + ], + "type": "text", + "content": "Table 7: Standard deviation and statistical tests for our TimeKAN method and second-best method (TimeMixer) on five datasets." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 143, + 111, + 468, + 200 + ], + "lines": [ + { + "bbox": [ + 143, + 111, + 468, + 200 + ], + "spans": [ + { + "bbox": [ + 143, + 111, + 468, + 200 + ], + "type": "table", + "html": "
<tr><td>Metric</td><td colspan="3">MSE</td><td colspan="3">MAE</td></tr>
<tr><td>Dataset</td><td>TimeKAN</td><td>TimeMixer</td><td>Confidence</td><td>TimeKAN</td><td>TimeMixer</td><td>Confidence</td></tr>
<tr><td>ETTh1</td><td>0.422±0.004</td><td>0.462±0.006</td><td>99%</td><td>0.430±0.002</td><td>0.448±0.004</td><td>99%</td></tr>
<tr><td>ETTh2</td><td>0.387±0.003</td><td>0.392±0.003</td><td>99%</td><td>0.408±0.003</td><td>0.412±0.004</td><td>90%</td></tr>
<tr><td>ETTm1</td><td>0.378±0.002</td><td>0.386±0.003</td><td>99%</td><td>0.396±0.001</td><td>0.399±0.001</td><td>99%</td></tr>
<tr><td>ETTm2</td><td>0.278±0.001</td><td>0.278±0.001</td><td>-</td><td>0.324±0.001</td><td>0.325±0.001</td><td>90%</td></tr>
<tr><td>Weather</td><td>0.243±0.001</td><td>0.245±0.001</td><td>99%</td><td>0.273±0.001</td><td>0.276±0.001</td><td>99%</td></tr>
", + "image_path": "fcf89873993419466062669ffd52ede44eb6974539327d9184ab21e59164c34d.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 187, + 247, + 423, + 317 + ], + "blocks": [ + { + "bbox": [ + 113, + 234, + 496, + 246 + ], + "lines": [ + { + "bbox": [ + 113, + 234, + 496, + 246 + ], + "spans": [ + { + "bbox": [ + 113, + 234, + 496, + 246 + ], + "type": "text", + "content": "Table 8: Comparison on the Electricity dataset when the look back window is expanded to 512." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 187, + 247, + 423, + 317 + ], + "lines": [ + { + "bbox": [ + 187, + 247, + 423, + 317 + ], + "spans": [ + { + "bbox": [ + 187, + 247, + 423, + 317 + ], + "type": "table", + "html": "
<tr><td rowspan="2">Models</td><td colspan="2">96</td><td colspan="2">192</td><td colspan="2">336</td><td colspan="2">720</td></tr>
<tr><td>MSE</td><td>MAE</td><td>MSE</td><td>MAE</td><td>MSE</td><td>MAE</td><td>MSE</td><td>MAE</td></tr>
<tr><td>MOMENT</td><td>0.136</td><td>0.233</td><td>0.152</td><td>0.247</td><td>0.167</td><td>0.264</td><td>0.205</td><td>0.295</td></tr>
<tr><td>TimeMixer</td><td>0.135</td><td>0.231</td><td>0.149</td><td>0.245</td><td>0.172</td><td>0.268</td><td>0.203</td><td>0.295</td></tr>
<tr><td>TimeKAN</td><td>0.133</td><td>0.230</td><td>0.149</td><td>0.247</td><td>0.165</td><td>0.261</td><td>0.203</td><td>0.294</td></tr>
", + "image_path": "03656b0acd40951533c21c03f799d3d3cf850937be9909523840600027e3d014.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 353, + 342, + 364 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 353, + 342, + 364 + ], + "spans": [ + { + "bbox": [ + 105, + 353, + 342, + 364 + ], + "type": "text", + "content": "A.4 FREQUENCY LEARNING WITH LONGER WINDOW" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 380, + 506, + 513 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 380, + 506, + 513 + ], + "spans": [ + { + "bbox": [ + 104, + 380, + 506, + 513 + ], + "type": "text", + "content": "In Table 1, TimeKAN performs relatively poorly on the Electricity dataset. We infer that its poor performance on the electricity dataset is due to the overly short look-back window (" + }, + { + "bbox": [ + 104, + 380, + 506, + 513 + ], + "type": "inline_equation", + "content": "T = 96" + }, + { + "bbox": [ + 104, + 380, + 506, + 513 + ], + "type": "text", + "content": "), which cannot provide sufficient frequency information. To verify this, we compare the average number of effective frequency components under a specific look-back window. Specifically, we randomly select a sequence of length " + }, + { + "bbox": [ + 104, + 380, + 506, + 513 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 380, + 506, + 513 + ], + "type": "text", + "content": " from the electricity dataset and transform it into the frequency domain using FFT. We define effective frequencies as those with amplitudes greater than 0.1 times the maximum amplitude. Then, we take the average number of effective frequencies obtained across all variables to reflect the amount of effective frequency information provided by the sequence. When " + }, + { + "bbox": [ + 104, + 380, + 506, + 513 + ], + "type": "inline_equation", + "content": "T = 96" + }, + { + "bbox": [ + 104, + 380, + 506, + 513 + ], + "type": "text", + "content": " (the setting in this paper), the average number of effective frequencies is 10.69. When we extend the sequence length to 512, the average number of effective frequencies becomes 19.74. Therefore, the effective frequency information provided by 512 time steps is nearly twice that of 96 time steps. This indicates that " + }, + { + "bbox": [ + 104, + 380, + 506, + 513 + ], + "type": "inline_equation", + "content": "T = 96" + }, + { + "bbox": [ + 104, + 380, + 506, + 513 + ], + "type": "text", + "content": " loses a substantial amount of effective information." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 517, + 506, + 596 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 517, + 506, + 596 + ], + "spans": [ + { + "bbox": [ + 104, + 517, + 506, + 596 + ], + "type": "text", + "content": "To validate whether using " + }, + { + "bbox": [ + 104, + 517, + 506, + 596 + ], + "type": "inline_equation", + "content": "T = 512" + }, + { + "bbox": [ + 104, + 517, + 506, + 596 + ], + "type": "text", + "content": " allows us to leverage more frequency information, we extend the look-back window of TimeKAN to 512 on the electricity dataset and compare it with the state-of-the-art methods TimeMixer and time series foundation model MOMENT (Goswami et al., 2024). The results are shown in Table 8. 
Although TimeKAN performs significantly worse than TimeMixer when " + }, + { + "bbox": [ + 104, + 517, + 506, + 596 + ], + "type": "inline_equation", + "content": "T = 96" + }, + { + "bbox": [ + 104, + 517, + 506, + 596 + ], + "type": "text", + "content": ", it achieves the best performance on the electricity dataset when the look-back window is extended to 512. This also demonstrates that TimeKAN can benefit significantly from richer frequency information." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 627, + 324, + 639 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 627, + 324, + 639 + ], + "spans": [ + { + "bbox": [ + 105, + 627, + 324, + 639 + ], + "type": "text", + "content": "A.5 IMPACT OF NUMBER OF FREQUENCY BANDS" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "text", + "content": "To explore the impact of the number of frequency bands on performance, we set the number of frequency bands to 2, 3, 4, and 5. The effects of different frequency band divisions on performance are shown in the Table 9. As we can see, in most cases, dividing the frequency bands into 3 or 4 layers yields the best performance. This aligns with our prior intuition: dividing into two bands results in excessive frequency overlap, while dividing into five bands leads to too little information within each band, making it difficult to accurately model the information within that frequency range." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 188, + 102, + 423, + 175 + ], + "blocks": [ + { + "bbox": [ + 105, + 89, + 504, + 101 + ], + "lines": [ + { + "bbox": [ + 105, + 89, + 504, + 101 + ], + "spans": [ + { + "bbox": [ + 105, + 89, + 504, + 101 + ], + "type": "text", + "content": "Table 9: Impact of number of frequency bands on performance under the 96-to-96 prediction setting." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 188, + 102, + 423, + 175 + ], + "lines": [ + { + "bbox": [ + 188, + 102, + 423, + 175 + ], + "spans": [ + { + "bbox": [ + 188, + 102, + 423, + 175 + ], + "type": "table", + "html": "
<tr><td rowspan="2">Number of Frequency Bands</td><td colspan="2">ETTh2</td><td colspan="2">Weather</td><td colspan="2">Electricity</td></tr>
<tr><td>MSE</td><td>MAE</td><td>MSE</td><td>MAE</td><td>MSE</td><td>MAE</td></tr>
<tr><td>2</td><td>0.292</td><td>0.340</td><td>0.164</td><td>0.209</td><td>0.183</td><td>0.270</td></tr>
<tr><td>3</td><td>0.290</td><td>0.339</td><td>0.163</td><td>0.209</td><td>0.177</td><td>0.268</td></tr>
<tr><td>4</td><td>0.290</td><td>0.340</td><td>0.162</td><td>0.208</td><td>0.174</td><td>0.266</td></tr>
<tr><td>5</td><td>0.295</td><td>0.346</td><td>0.164</td><td>0.211</td><td>0.177</td><td>0.273</td></tr>
", + "image_path": "0073a41c665bce2da16e409a0311de07c167ee109ec36819ab56b2a80eb2e03d.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 191, + 261, + 204 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 191, + 261, + 204 + ], + "spans": [ + { + "bbox": [ + 105, + 191, + 261, + 204 + ], + "type": "text", + "content": "B MATHEMATICAL DETAILS" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 216, + 284, + 227 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 216, + 284, + 227 + ], + "spans": [ + { + "bbox": [ + 105, + 216, + 284, + 227 + ], + "type": "text", + "content": "B.1 KOLMOGOROV-ARNOLD NETWORK" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 236, + 504, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 236, + 504, + 270 + ], + "spans": [ + { + "bbox": [ + 104, + 236, + 504, + 270 + ], + "type": "text", + "content": "Kolmogorov-Arnold representation theorem states that any multivariate continuous function can be expressed as a combination of univariate functions and addition operations. More specifically, a multivariate continuous function " + }, + { + "bbox": [ + 104, + 236, + 504, + 270 + ], + "type": "inline_equation", + "content": "g:[0,1]^n\\Rightarrow \\mathbb{R}" + }, + { + "bbox": [ + 104, + 236, + 504, + 270 + ], + "type": "text", + "content": " can be defined as:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 205, + 274, + 503, + 306 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 205, + 274, + 503, + 306 + ], + "spans": [ + { + "bbox": [ + 205, + 274, + 503, + 306 + ], + "type": "interline_equation", + "content": "g (x) = g \\left(x _ {1}, \\dots , x _ {n}\\right) = \\sum_ {i = 1} ^ {2 n + 1} \\Phi_ {i} \\left(\\sum_ {j = 1} ^ {n} \\phi_ {i j} \\left(x _ {j}\\right)\\right) \\tag {13}", + "image_path": "fd3b15de0a49f27ad4744857cd8ee6dc46d5564ef41bf6cd501b71ef3fbf7ff5.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 309, + 504, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 309, + 504, + 365 + ], + "spans": [ + { + "bbox": [ + 104, + 309, + 504, + 365 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 309, + 504, + 365 + ], + "type": "inline_equation", + "content": "\\phi_{ij}" + }, + { + "bbox": [ + 104, + 309, + 504, + 365 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 309, + 504, + 365 + ], + "type": "inline_equation", + "content": "\\Phi_i" + }, + { + "bbox": [ + 104, + 309, + 504, + 365 + ], + "type": "text", + "content": " are univariate functions. Following the pattern of MLP, Kolmogorov-Arnold Network (KAN) (Liu et al., 2024c) extends the Kolmogorov-Arnoldtheorem to deep representations, i.e., stacked multilayer Kolmogorov-Arnold representations. 
Assume that KAN is composed of " + }, + { + "bbox": [ + 104, + 309, + 504, + 365 + ], + "type": "inline_equation", + "content": "L + 1" + }, + { + "bbox": [ + 104, + 309, + 504, + 365 + ], + "type": "text", + "content": " layer neurons and the number of neurons in layer " + }, + { + "bbox": [ + 104, + 309, + 504, + 365 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 104, + 309, + 504, + 365 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 104, + 309, + 504, + 365 + ], + "type": "inline_equation", + "content": "n_l" + }, + { + "bbox": [ + 104, + 309, + 504, + 365 + ], + "type": "text", + "content": ". The transmission relationship between the " + }, + { + "bbox": [ + 104, + 309, + 504, + 365 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 104, + 309, + 504, + 365 + ], + "type": "text", + "content": "-th neuron in layer " + }, + { + "bbox": [ + 104, + 309, + 504, + 365 + ], + "type": "inline_equation", + "content": "l + 1" + }, + { + "bbox": [ + 104, + 309, + 504, + 365 + ], + "type": "text", + "content": " and all neurons in layer " + }, + { + "bbox": [ + 104, + 309, + 504, + 365 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 104, + 309, + 504, + 365 + ], + "type": "text", + "content": " can be expressed as:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 254, + 368, + 504, + 399 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 254, + 368, + 504, + 399 + ], + "spans": [ + { + "bbox": [ + 254, + 368, + 504, + 399 + ], + "type": "interline_equation", + "content": "x _ {l + 1, j} = \\sum_ {i = 1} ^ {n _ {l}} \\phi_ {l, j, i} \\left(x _ {l, i}\\right) \\tag {14}", + "image_path": "cf833f85a1e4412ea43a95b78896e0af0d5e3201b33ef57867c40b0474768d20.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 401, + 504, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 401, + 504, + 445 + ], + "spans": [ + { + "bbox": [ + 104, + 401, + 504, + 445 + ], + "type": "text", + "content": "We can simply understand that each neuron is connected to other neurons in the previous layer through a univariate function " + }, + { + "bbox": [ + 104, + 401, + 504, + 445 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 104, + 401, + 504, + 445 + ], + "type": "text", + "content": ". Similar to MLP, the computation of all neurons at layer " + }, + { + "bbox": [ + 104, + 401, + 504, + 445 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 104, + 401, + 504, + 445 + ], + "type": "text", + "content": " can be reorganized as a function matrix multiplication " + }, + { + "bbox": [ + 104, + 401, + 504, + 445 + ], + "type": "inline_equation", + "content": "\\Phi_{l-1}" + }, + { + "bbox": [ + 104, + 401, + 504, + 445 + ], + "type": "text", + "content": ". 
Therefore, given a input vector " + }, + { + "bbox": [ + 104, + 401, + 504, + 445 + ], + "type": "inline_equation", + "content": "x \\in \\mathbb{R}^{n_0}" + }, + { + "bbox": [ + 104, + 401, + 504, + 445 + ], + "type": "text", + "content": ", the final output of KAN network is:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 226, + 449, + 504, + 461 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 226, + 449, + 504, + 461 + ], + "spans": [ + { + "bbox": [ + 226, + 449, + 504, + 461 + ], + "type": "interline_equation", + "content": "\\mathrm {K A N} (x) = \\left(\\Phi_ {L - 1} \\circ \\dots \\circ \\Phi_ {1} \\circ \\Phi_ {0}\\right) x \\tag {15}", + "image_path": "d39122deb6907064365f177a48d9afdc4cb727d48b6626f831930b6c474ba50f.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 464, + 504, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 464, + 504, + 509 + ], + "spans": [ + { + "bbox": [ + 104, + 464, + 504, + 509 + ], + "type": "text", + "content": "In vanilla KAN (Liu et al., 2024c), the univariate function " + }, + { + "bbox": [ + 104, + 464, + 504, + 509 + ], + "type": "inline_equation", + "content": "\\phi_{l,j,i}" + }, + { + "bbox": [ + 104, + 464, + 504, + 509 + ], + "type": "text", + "content": " is parametrized using B-splines, which is a class of smooth curves constructed via segmented polynomial basis functions. To ensure the stability and enhance the representational capacity, KAN overlays the spline function on a fixed basis function " + }, + { + "bbox": [ + 104, + 464, + 504, + 509 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 104, + 464, + 504, + 509 + ], + "type": "text", + "content": ", which is typically the SiLU function:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 241, + 511, + 504, + 525 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 241, + 511, + 504, + 525 + ], + "spans": [ + { + "bbox": [ + 241, + 511, + 504, + 525 + ], + "type": "interline_equation", + "content": "\\phi (x) = w _ {b} b (x) + w _ {s} \\operatorname {s p l i n e} (\\mathrm {x}) \\tag {16}", + "image_path": "98b243c0291bf0d8f7e0b06cabc2b151003ce6e18e424f0c6b6d71d80361a734.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 253, + 526, + 504, + 551 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 253, + 526, + 504, + 551 + ], + "spans": [ + { + "bbox": [ + 253, + 526, + 504, + 551 + ], + "type": "interline_equation", + "content": "\\operatorname {s p l i n e} (x) = \\sum_ {i} c _ {i} B _ {i} (x) \\tag {17}", + "image_path": "8b4fe5c256ff960f72e2b384f4d21e6ac1e60fae094426f1b841b8ffcea88622.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 552, + 504, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 552, + 504, + 609 + ], + "spans": [ + { + "bbox": [ + 104, + 552, + 504, + 609 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 552, + 504, + 609 + ], + "type": "inline_equation", + "content": "w_{b}" + }, + { + "bbox": [ + 104, + 552, + 504, + 609 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 552, + 504, + 609 + ], + "type": "inline_equation", + "content": "w_{s}" + }, + { + "bbox": [ + 104, + 552, + 504, + 609 + ], + "type": "text", + "content": " are learnable weights and " + }, + { + "bbox": [ + 104, + 552, + 504, + 609 + ], + "type": "inline_equation", + "content": "\\mathrm{spline(x)}" + }, + { + "bbox": [ 
+ 104, + 552, + 504, + 609 + ], + "type": "text", + "content": " is the spline function constructed from the linear combination of B-spline basis functions " + }, + { + "bbox": [ + 104, + 552, + 504, + 609 + ], + "type": "inline_equation", + "content": "B_{i}" + }, + { + "bbox": [ + 104, + 552, + 504, + 609 + ], + "type": "text", + "content": ". However, the complex recursive computation process of high-order B-spline functions hinders the efficiency of KAN. Therefore, in this work, we adopt the simpler Chebyshev polynomial as the univariate function to replace the B-spline function (SS, 2024). The univariate function defined by the Chebyshev polynomial is given as follows:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 249, + 611, + 504, + 624 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 249, + 611, + 504, + 624 + ], + "spans": [ + { + "bbox": [ + 249, + 611, + 504, + 624 + ], + "type": "interline_equation", + "content": "T _ {k} (x) = \\cos (k \\operatorname {a r c c o s} (x)) \\tag {18}", + "image_path": "bd1f2ef49b65b28e5f7754627f08007a02e7205ad7347199be43acc9c779c5a6.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 626, + 504, + 650 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 626, + 504, + 650 + ], + "spans": [ + { + "bbox": [ + 104, + 626, + 504, + 650 + ], + "type": "text", + "content": "Here, " + }, + { + "bbox": [ + 104, + 626, + 504, + 650 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 626, + 504, + 650 + ], + "type": "text", + "content": " represents the order of the polynomial. Then, we consider the univariate function " + }, + { + "bbox": [ + 104, + 626, + 504, + 650 + ], + "type": "inline_equation", + "content": "\\Phi" + }, + { + "bbox": [ + 104, + 626, + 504, + 650 + ], + "type": "text", + "content": " as a linear combination of Chebyshev polynomials with different orders:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 193, + 653, + 504, + 685 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 193, + 653, + 504, + 685 + ], + "spans": [ + { + "bbox": [ + 193, + 653, + 504, + 685 + ], + "type": "interline_equation", + "content": "x _ {l + 1, j} = \\sum_ {i = 1} ^ {n _ {l}} \\phi_ {l, j, i} \\left(x _ {l, i}\\right) = \\sum_ {i = 1} ^ {n _ {l}} \\sum_ {k = 0} ^ {K} \\Theta_ {i, k} T _ {k} \\left(\\tanh \\left(x _ {l, i}\\right)\\right) \\tag {19}", + "image_path": "4faf65406b7f744048b39b92984a901a858352401d50c3371e780ac98573bb16.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "content": "Where " + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\Theta_{i,k}" + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "content": " is the coefficients of " + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "content": "-th order Chebyshev polynomials acting on the " + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "inline_equation", + "content": "x_{l,i}" + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": 
"inline_equation", + "content": "\\tanh" + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "content": " is the tanh activation function used to normalize the inputs to between -1 and 1. By adjusting the highest order of the Chebyshev polynomial " + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "content": ", we can control the fitting capability of KAN. This also inspires tour design of the Multi-order KAN to dynamically represent different frequencies." + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 230, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 230, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 230, + 94 + ], + "type": "text", + "content": "B.2 FOURIER TRANSFORM" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 102, + 506, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 102, + 506, + 180 + ], + "spans": [ + { + "bbox": [ + 104, + 102, + 506, + 180 + ], + "type": "text", + "content": "Time series are often composed of multiple frequency components superimposed on each other, and it is difficult to observe these individual frequency components directly in the time domain. Therefore, transforming a time series from the time domain to the frequency domain for analysis is often necessary. The Discrete Fourier Transform (DFT) is a commonly used domain transformation algorithm that converts a discrete-time signal from the time domain to the complex frequency domain. 
Mathematically, given a sequence of real numbers " + }, + { + "bbox": [ + 104, + 102, + 506, + 180 + ], + "type": "inline_equation", + "content": "x[n]" + }, + { + "bbox": [ + 104, + 102, + 506, + 180 + ], + "type": "text", + "content": " in time domain, where " + }, + { + "bbox": [ + 104, + 102, + 506, + 180 + ], + "type": "inline_equation", + "content": "n = 0,1,\\dots ,N - 1" + }, + { + "bbox": [ + 104, + 102, + 506, + 180 + ], + "type": "text", + "content": " the DFT process can be described as:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 186, + 504, + 228 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 186, + 504, + 228 + ], + "spans": [ + { + "bbox": [ + 105, + 186, + 504, + 228 + ], + "type": "interline_equation", + "content": "X [ k ] = \\sum_ {n = 0} ^ {N - 1} x [ n ] \\cdot e ^ {- i \\frac {2 \\pi}{N} k n} = \\sum_ {n = 0} ^ {N - 1} x [ n ] \\left(\\cos \\left(\\frac {2 \\pi}{N} k n\\right) - i \\sin \\left(\\frac {2 \\pi}{N} k n\\right)\\right), \\quad k = 0, 1, \\dots , N - 1 \\tag {20}", + "image_path": "f53c20767ed9a0b7ba0219fa5ae387386bc99ae1b0c037ac1fb6c9fb7c638eb0.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 228, + 504, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 228, + 504, + 262 + ], + "spans": [ + { + "bbox": [ + 104, + 228, + 504, + 262 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 228, + 504, + 262 + ], + "type": "inline_equation", + "content": "X[k]" + }, + { + "bbox": [ + 104, + 228, + 504, + 262 + ], + "type": "text", + "content": " is the " + }, + { + "bbox": [ + 104, + 228, + 504, + 262 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 228, + 504, + 262 + ], + "type": "text", + "content": "-th frequency component of frequency domain signal and " + }, + { + "bbox": [ + 104, + 228, + 504, + 262 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 228, + 504, + 262 + ], + "type": "text", + "content": " is the imaginary unit. Similarly, we can use Inverse DFT (iDFT) to convert a frequency domain signal back to the time domain." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 140, + 266, + 505, + 299 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 266, + 505, + 299 + ], + "spans": [ + { + "bbox": [ + 140, + 266, + 505, + 299 + ], + "type": "interline_equation", + "content": "x [ n ] = \\frac {1}{N} \\sum_ {k = 0} ^ {N - 1} X [ k ] \\cdot e ^ {i \\frac {2 \\pi}{N} k n} = \\frac {1}{N} \\sum_ {k = 0} ^ {N - 1} X [ k ] \\left(\\cos \\left(\\frac {2 \\pi}{N} k n\\right) + i \\sin \\left(\\frac {2 \\pi}{N} k n\\right)\\right) \\tag {21}", + "image_path": "e26464d7766d0ae78567e26537d0f92f5a4a7cc653c7b49ce625e232b5f014f1.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 305, + 506, + 362 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 305, + 506, + 362 + ], + "spans": [ + { + "bbox": [ + 104, + 305, + 506, + 362 + ], + "type": "text", + "content": "The computational complexity of the DFT is typically " + }, + { + "bbox": [ + 104, + 305, + 506, + 362 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(N^2)" + }, + { + "bbox": [ + 104, + 305, + 506, + 362 + ], + "type": "text", + "content": " (Zhou et al., 2022b). 
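As a small numerical illustration (ours, not from the paper), the naive O(N^2) DFT of Eq. (20) can be checked against NumPy's FFT, and rFFT on a real sequence indeed returns the N/2 + 1 components mentioned just below.

```python
# Naive O(N^2) DFT versus numpy's O(N log N) FFT on a random sequence.
import numpy as np

def naive_dft(x: np.ndarray) -> np.ndarray:
    N = len(x)
    n = np.arange(N)
    W = np.exp(-2j * np.pi * np.outer(n, n) / N)   # e^{-i 2*pi*k*n / N}
    return W @ x.astype(complex)

x = np.random.default_rng(0).standard_normal(96)
assert np.allclose(naive_dft(x), np.fft.fft(x))    # same spectrum
print(len(np.fft.rfft(x)))                         # 49 == 96 // 2 + 1
```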
In practice, we use the Fast Fourier Transform (FFT) to efficiently compute the Discrete Fourier Transform (DFT) of complex sequences, which reduces the computational complexity to " + }, + { + "bbox": [ + 104, + 305, + 506, + 362 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(N\\log N)" + }, + { + "bbox": [ + 104, + 305, + 506, + 362 + ], + "type": "text", + "content": ". Additionally, by employing the Real FFT (rFFT), we can compress an input sequence of " + }, + { + "bbox": [ + 104, + 305, + 506, + 362 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 305, + 506, + 362 + ], + "type": "text", + "content": " real numbers into a signal sequence in the complex frequency domain containing " + }, + { + "bbox": [ + 104, + 305, + 506, + 362 + ], + "type": "inline_equation", + "content": "N / 2 + 1" + }, + { + "bbox": [ + 104, + 305, + 506, + 362 + ], + "type": "text", + "content": " frequency components." + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2025/TimeSuite_ Improving MLLMs for Long Video Understanding via Grounded Tuning/f48bb6a8-358b-46f9-aa7b-783937ea3be0_content_list.json b/2025/TimeSuite_ Improving MLLMs for Long Video Understanding via Grounded Tuning/f48bb6a8-358b-46f9-aa7b-783937ea3be0_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..0ca68e3ab4b3a76f4bbb97d5074c53c1eb226da8 --- /dev/null +++ b/2025/TimeSuite_ Improving MLLMs for Long Video Understanding via Grounded Tuning/f48bb6a8-358b-46f9-aa7b-783937ea3be0_content_list.json @@ -0,0 +1,2849 @@ +[ + { + "type": "text", + "text": "TIMESUITE: IMPROVING MLLMS FOR LONG VIDEO UNDERSTANDING VIA GROUNDED TUNING", + "text_level": 1, + "bbox": [ + 171, + 99, + 823, + 146 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Xiangyu Zeng $^{1,2}$ Kunchang Li $^{3,2}$ Chenting Wang $^{6,2}$ Xinhao Li $^{1,2}$ Tianxiang Jiang $^{5,2}$", + "bbox": [ + 179, + 154, + 790, + 170 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Ziang Yan $^{4,2}$ Songze Li $^{7,2}$ Yansong Shi $^{5,2}$ Zhengrong Yue $^{6,2}$ Yi Wang $^{2,8}$", + "bbox": [ + 181, + 170, + 712, + 185 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yali Wang $^{3,2}$ Yu Qiao $^{2}$ Limin Wang $^{1,2,\\dagger}$", + "bbox": [ + 183, + 185, + 472, + 200 + ], + "page_idx": 0 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "$^{1}$ Nanjing University $^{2}$ Shanghai AI Laboratory $^{3}$ SIAT, Chinese Academy of Sciences $^{4}$ Zhejiang University", + "$^{5}$ University of Science and Technology of China $^{6}$ Shanghai Jiao Tong University $^{7}$ Fudan University", + "8 Shanghai Innovation Institute" + ], + "bbox": [ + 183, + 202, + 844, + 244 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": 
"XiangyuZeng2001@outlook.com lmwang@nju.edu.cn", + "bbox": [ + 181, + 248, + 630, + 263 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/ad602c480f2c6ee6f7e3e6d73c36fcef9e7c8e2a42909b47fdf01801f9b7c3d1.jpg", + "image_caption": [ + "Figure 1: VideoChat-T demonstrates high performance for both long-form video question answering and temporal grounding. Our TimeSuite presents a collection of new designs to enhance the long video understanding capability of MLLMs. It will implicitly endow the MLLM with ability of correctly attending the visual segments when generating answers, thus relieving the hallucinations." + ], + "image_footnote": [], + "bbox": [ + 173, + 280, + 444, + 440 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/25a3ed0e3f3a3d2c5da756a7ce2b57e59174fb96aafb85ec3a7bc17191ca7251.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 467, + 295, + 820, + 436 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ABSTRACT", + "text_level": 1, + "bbox": [ + 450, + 512, + 545, + 526 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Multimodal Large Language Models (MLLMs) have demonstrated impressive performance in short video understanding. However, understanding long-form videos still remains challenging for MLLMs. This paper proposes TimeSuite, a collection of new designs to adapt the existing short-form video MLLMs for long video understanding, including a simple yet efficient framework to process long video sequence, a high-quality video dataset for grounded tuning of MLLMs, and a carefully-designed instruction tuning task to explicitly incorporate the grounding supervision in the traditional QA format. Specifically, based on VideoChat, we propose our long-video MLLM, coined as VideoChat-T, by implementing a token shuffling to compress long video tokens and introducing Temporal Adaptive Position Encoding (TAPE) to enhance the temporal awareness of visual representation. Meanwhile, we introduce the TimePro, a comprehensive grounding-centric instruction tuning dataset composed of 9 tasks and 349k high-quality grounded annotations. Notably, we design a new instruction tuning task type, called Temporal Grounded Caption, to perform detailed video descriptions with the corresponding timestamps prediction. This explicit temporal location prediction will guide MLLM to correctly attend on the visual content when generating description, and thus reduce the hallucination risk caused by the LLMs. Experimental results demonstrate that our TimeSuite provides a successful solution to enhance the long video understanding capability of short-form MLLM, achieving improvement of $5.6\\%$ and $6.8\\%$ on the benchmarks of Egoschema and VideoMME, respectively. In addition, VideoChat-T exhibits robust zero-shot temporal grounding capabilities, significantly outperforming the existing state-of-the-art MLLMs. After fine-tuning, it performs on par with the traditional supervised expert models. 
Our code and dataset are available at https://github.com/OpenGVLab/TimeSuite.", + "bbox": [ + 228, + 541, + 767, + 902 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "$^\\dagger$ denotes the corresponding author.", + "bbox": [ + 197, + 909, + 410, + 922 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 INTRODUCTION", + "text_level": 1, + "bbox": [ + 173, + 102, + 336, + 118 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Multimodal Large Language Models (MLLMs) have demonstrated impressive video understanding performance by following the general human instructions to interpret the visual content (Li et al., 2023b; Zhang et al., 2023; Lin et al., 2023a; Jin et al., 2024; Wang et al., 2024e). However, these MLLMs still struggle in long video understanding, as a long video sequence may contain various dynamic actions and complex temporal relationships, making it difficult for MLLMs to effectively locate the key segments related to questions. When humans watch long videos, their attention is consciously focused on prominent segments, which may occur within a few seconds. NExT-GQA (Xiao et al., 2024) has also verified the relevance of temporal grounding for accurately answering video QA tasks. Therefore, a natural question arises: Can we enhance long video understanding by using temporal grounding as a auxiliary task?", + "bbox": [ + 169, + 138, + 826, + 280 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Previously, some works have made progress in temporal grounding task by using general MLLMs. They often enhance the temporal grounding capability of video MLLMs by designing specialized modules and perform specific supervised fine-tuning (Ren et al., 2024; Huang et al., 2024a,b). However, these overly specialized designs significantly impair the general QA capabilities of video MLLMs, resulting in great performance drop on the video QA task (as illustrated by TimeChat in Figure 1). Meanwhile, current research on long video understanding primarily focuses on architecture design, such as long-context LLMs (Liu et al., 2024a) and token compression (Song et al., 2024a). They can only capture holistic semantics in videos without the ability of localizing fine-grained information, leading to poor performance in temporal grounding tasks (as illustrated by MovieChat in Figure 1). So far, it is still challenging to build a video MLLM that is good at both tasks of temporal grounding and long video QA. We argue long video understanding could be assisted by explicitly performing temporal grounding, as grounding supervision enables MLLM to establish the detailed correspondence between the visual segments and fine-grained semantics. This fine-grained alignment would guide the MLLM to attend correctly video segments when generating answers and thus relieve the hallucination risk caused by the LLM.", + "bbox": [ + 169, + 285, + 826, + 494 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Based on the above analysis, in this paper, we propose TimeSuite, a collection of new designs to improve the long video understanding capability of the existing short-form MLLMs, with a focus on incorporating grounding supervision in instruction tuning process. 
First, to address the high computational cost caused by the excessive number of visual tokens in long videos, we propose a simple Token Shuffle scheme to compress visual tokens, allowing the LLM to process more frame inputs. We also propose TAPE to generate adaptive position encodings, enhancing the temporal awareness of visual representations. The proposed structure does not introduce overly complex proprietary designs, which could be efficiently initialized with the parameters of short video MLLMs, without damaging the original performance of pre-trained MLLM. Second, to naturally incorporate the grounding ability into our MLLMs and yet still to preserve its original general QA capability, we design a new instruction tuning task, called Temporal Grounded Caption. This new task requires generating detailed segment-level description with corresponding timestamp prediction. Tuning on this new task will not only endow the MLLM with the extra grounding ability but also enhance its original long video QA performance, thanks to the requirement of building correspondence between grounded segments and detailed captions. Finally, we collect a comprehensive grounding-centric instruction tuning dataset for post-training our designed MLLMs, which is composed of 349K high-quality annotations covering 9 tasks. Based on this new dataset, we are able to perform grounded tuning with detailed captions on our proposed MLLMs (coined as VideoChat-T).", + "bbox": [ + 169, + 500, + 826, + 751 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We verify the effectiveness of TimeSuite design through extensive experiments on the tasks of long video understanding and temporal grounding. VideoChat-T demonstrates a significant improvement in accuracy over baseline for long video understanding, with a $5.6\\%$ increase on Egoschema (Mangalam et al., 2023) and a $6.8\\%$ increase on VideoMME (Fu et al., 2024). Additionally, VideoChat-T exhibits robust zero-shot temporal localization capabilities on Charades-STA (Gao et al., 2017) and QVHighlights (Lei et al., 2021a). Our VideoChat-T outperforms the state-of-the-art temporal grounding MLLM of TimeChat from $50\\%$ to $100\\%$ for different metrics. After fine-tuning on the training set of temporal grounding benchmarks, the performance of VideoChat-T is on par with the state-of-the-art supervised expert models. The experiments demonstrate that our VideoChat-T is the first end-to-end MLLM that is able to perform well on both temporal grounding and general video QA. In particular, we show that grounded tuning with explicit location prediction can facilitate the long video understanding and relieve the hallucination risk.", + "bbox": [ + 169, + 757, + 826, + 925 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 946, + 504, + 959 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 RELATED WORK", + "text_level": 1, + "bbox": [ + 171, + 102, + 346, + 118 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Video MLLMs. With the advancement of open-sourced LLMs (Chiang et al., 2023; Touvron et al., 2023; Jiang et al., 2023), video MLLMs have emerged by utilizing projection bridges to link vision foundation models with LLMs (Li et al., 2023b; 2024b; Zhang et al., 2023; Li et al., 2024a). 
Limited by the training context length, thought these methods perform well with a small number of frame inputs, they meet significant challenges when processing long videos. The longer video length usually implies longer temporal relationships and more redundancies, resulting in the difficulty of extracting key clues (Zhou et al., 2024). Recently, several methods for long video handling have been proposed, such as exploiting long context LLM (Liu et al., 2024a; Zhang et al., 2024b; Xue et al., 2024; Wang et al., 2024d) and token compression (Li et al., 2023d; Song et al., 2024a; Zhang et al., 2024a) for enabling more visual inputs and agents for task decomposition or retrieval (Fan et al., 2024; Wang et al., 2024c;h). MovieChat (Song et al., 2024a) supports more frames by applying short-term and long-term memory to merge similar visual tokens. Yet, studies in learning objectives for long videos are less explored, making it difficult to alleviate the frequent hallucination of LLMs in long context reasoning. Our proposed TimeSuite leverages temporally-centric tasks to unlock the temporal perception potential of MLLMs, anchoring responses to the most relevant video segments.", + "bbox": [ + 169, + 141, + 826, + 349 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Temporal Grounding. Temporal grounding is a fundamental capability in video understanding, associating semantics to specific clips with corresponding timestamps. Typical expert models (Lei et al., 2021b; Moon et al., 2023a;b; Lin et al., 2023b; Zeng et al., 2024) have been developed by formulating it into a timestamp regression from visual inputs and user queries. Most existing video MLLMs fail to address it compared with expert models, while some remedy its temporal grounding by specifically designed architectures and data (Huang et al., 2024a; Wang et al., 2024f; Li et al., 2024c; Wang et al., 2024g; Huang et al., 2024b; Qu et al., 2024). Timechat (Ren et al., 2024) binds visual features of images with timestamps and uses a sliding window to handle variable token length. From the perspective of training data, an instruction-tuning dataset TimeIT is constructed. Despite impressive improvements in temporal performance, these MLLMs still lag behind expert models and compromise general video dialogue capabilities. In this paper, we explore how to enhance the temporal grounding of MLLMs while preserving their original capabilities.", + "bbox": [ + 169, + 378, + 826, + 546 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 METHOD", + "text_level": 1, + "bbox": [ + 171, + 579, + 284, + 593 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this section, we detail the proposed TimeSuite, a new collection of designs for improving short video MLLMs. Specifically, our TimeSuite includes a long video modeling framework, a high-quality video dataset for grounded tuning, and a carefully-designed instruction tuning task. With this new TimeSuite design, we are able to adapt the short-form video MLLM, obtaining significant performance improvements on two types of long video understanding tasks: traditional long video QA and temporal video grounding.", + "bbox": [ + 169, + 618, + 823, + 703 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1 VIDEOCHAT-T", + "text_level": 1, + "bbox": [ + 171, + 732, + 318, + 744 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We first describe the architecture of our proposed long video modeling framework. 
Specifically, built upon VideoChat2 (Li et al., 2024b), we devise long-video version of VideoChat-T. Our VideoChat-T is composed of a video backbone for extracting visual representations, a visual-language connector to compress visual tokens and bridge the visual and languages modalities, a LLM to follow human instructions to interpret the video content.", + "bbox": [ + 169, + 763, + 823, + 834 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The architecture of VideoChat-T is illustrated in Figure 2. Its workflow has three stages. In the first stage, long videos are evenly segmented into clips and the clips are embedded by the Video Encoder and Q-Former (Li et al., 2023a). Then, for compressing visual token number and highlighting crucial ones, token shuffling is employed to merge adjacent tokens, and TAPE is used to add temporal adaptive positional encodings. Finally, the compressed video token sequence is fed to the LLM to generate accurate responses that adhere to user requirements.", + "bbox": [ + 169, + 839, + 826, + 925 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/076f20a316ea038edc9dee59b2e5486f4caff907815706c4663eda300ce62196.jpg", + "image_caption": [ + "Figure 2: Overall Architecture of VideoChat-T. First, long videos are segmented into clips, which are then transformed into feature embeddings by video encoder and time-aware Qformer. Next, all visual tokens undergo Token Shuffle to compress overly long tokens, and generate adaptive positional encodings through TAPE. Finally, the long video tokens are concatenated with the user query, serving as the input of LLM, thereby generating appropriate responses." + ], + "image_footnote": [], + "bbox": [ + 173, + 99, + 826, + 300 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1.1 BACKBONE DESIGN", + "text_level": 1, + "bbox": [ + 171, + 409, + 366, + 422 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Video clip encoding. For the given long video, we perform uniform sampling (Wang et al., 2019) to obtain $K \\times T$ frames. We divide these frames into $K$ video segments in chronological order, and sample $T$ frames from each segment. Next, we use the video encoder and its visual-linguistic connector (Q-Former here) to encode each segment into $N$ tokens. After the aforementioned processing, the entire video is encoded into a sequence of visual tokens, denoted by $\\mathbf{V}_q \\in \\mathbb{R}^{L \\times C_q}$ , where $C_q$ is the dimension of output token by the Q-Former and $L = K \\times N$ is the total number of tokens for the entire video.", + "bbox": [ + 169, + 431, + 823, + 530 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Large Language Model. According to previous research, images and visual cues are projected into the same feature space of the LLM. The LLM acts as an interaction interface in the MLLMs, being used to process multimodal inputs, parse user instructions, and generate appropriate responses. 
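For intuition, a shape-level schematic of the clip-wise encoding in Section 3.1.1 might look as follows; this is our sketch, with the video encoder and time-aware Q-Former stubbed out by random tensors and all sizes hypothetical.

```python
# Schematic: K*T sampled frames -> K clips of T frames -> N tokens per
# clip -> token sequence V_q of shape (L, C_q) with L = K * N.
import torch

K, T, N, C_q = 8, 4, 16, 768                 # hypothetical sizes
frames = torch.randn(K * T, 3, 224, 224)     # uniformly sampled frames
clips = frames.view(K, T, 3, 224, 224)       # K chronological segments

def encode_clip(clip: torch.Tensor) -> torch.Tensor:
    """Stand-in for the video encoder + time-aware Q-Former."""
    return torch.randn(N, C_q)

V_q = torch.cat([encode_clip(c) for c in clips], dim=0)
assert V_q.shape == (K * N, C_q)             # the token sequence V_q
```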
To afford the processing of long video sequence, we need to design an efficient compression module between the visual encoder and LLMs.", + "bbox": [ + 169, + 537, + 823, + 606 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1.2 VL-CONNECTOR:TOKEN SHUFFLE", + "text_level": 1, + "bbox": [ + 171, + 622, + 472, + 636 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The increased number of sampled frames in long videos leads to a larger number of encoded visual tokens, causing a significant rise in the computational complexity and memory consumption of LLMs. Therefore, it is crucial to keep the number of visual tokens within an acceptable range. Some works have proposed various token compression schemes, such as clustering (Jin et al., 2024) and pooling (Huang et al., 2024b). However, clustering methods often struggle to maintain the temporal consistency, and pooling methods usually result in a certain loss of overall performance.", + "bbox": [ + 169, + 646, + 823, + 731 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To address this, we propose a simple token shuffling compression scheme that ensures the temporal consistency of video tokens before and after compression while avoiding excessive performance loss. Previous methods often used a projector to achieve dimensional conversion. However, projecting visual encoding vectors from low to high dimensions does not increase information density. Therefore, we propose to rearrange multiple visual tokens along the channel dimension. Specifically, for the long video $\\mathbf{V}_q = [v_q^1,v_q^2,\\dots,v_q^L ]\\in \\mathbb{R}^{L\\times C_q}$ , we concatenate $m$ adjacent tokens along the channel dimension to obtain the reshaped visual feature $\\mathbf{V}_m = [v_m^1,v_m^2,\\dots,v_m^{\\frac{L}{m}}]\\in \\mathbb{R}_{\\frac{L}{m}}^{\\times mC_q}$ where each merged token $v_{m}^{i}$ is represented as:", + "bbox": [ + 169, + 736, + 823, + 857 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nv _ {m} ^ {i} = \\operatorname {C o n c a t} \\left(v _ {q} ^ {(i - 1) * m + 1}, v _ {q} ^ {(i - 1) * m + 2}, \\dots , v _ {q} ^ {i * m}\\right) \\quad \\forall i = 1, 2, \\dots , \\frac {L}{m}.\n$$\n", + "text_format": "latex", + "bbox": [ + 269, + 861, + 723, + 883 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Next, a linear projection layer is applied to the merged visual feature $\\mathbf{V}_m$ , generating the visual token sequences $\\mathbf{V}_l \\in \\mathbb{R}^{\\frac{L}{m} \\times C_l}$ as input into the LLM, where $C_l$ represents the token channel di", + "bbox": [ + 169, + 893, + 823, + 925 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "mension of the LLM. This scheme effectively reuses the projector of base model by replicating the original linear layer parameters $m$ times along the channel dimension, achieving an initialization equivalent to mean pooling with a window length of $m$ . This design avoids introducing additional randomly initialized parameters that might disturb the original model, thus preserving the its original capabilities. 
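A minimal sketch of this Token Shuffle step and its mean-pooling-equivalent initialization follows; this is our reading of the equation above, with hypothetical sizes, and we scale the tiled weights by 1/m so that the pooling equivalence is exact.

```python
# Token Shuffle sketch: merge m adjacent tokens along the channel axis,
# then linearly project to the LLM width C_l. Tiling the base projector's
# weight m times (scaled by 1/m) initializes it to exact mean pooling.
import torch
import torch.nn as nn

L, C_q, C_l, m = 128, 768, 4096, 4           # hypothetical sizes
V_q = torch.randn(L, C_q)                     # Q-Former output tokens

V_m = V_q.reshape(L // m, m * C_q)            # concat m neighbors channel-wise

proj = nn.Linear(m * C_q, C_l, bias=False)
base_weight = torch.randn(C_l, C_q)           # stand-in for base projector
with torch.no_grad():
    proj.weight.copy_(base_weight.repeat(1, m) / m)

V_l = proj(V_m)                               # (L/m, C_l) tokens for the LLM
mean_pooled = V_q.reshape(L // m, m, C_q).mean(dim=1) @ base_weight.T
assert torch.allclose(V_l, mean_pooled, atol=1e-3)
```

After this initialization, training can move the tiled weights away from exact pooling, which is the extra flexibility the next sentence refers to.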
Additionally, compared to directly using pooling, this method offers higher flexibility for fine-tuning to achieve better results (see ablation study, Table 4).", + "bbox": [ + 169, + 103, + 823, + 188 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.1.3 TEMPORAL ADAPTIVE POSITION ENCODING", + "text_level": 1, + "bbox": [ + 171, + 203, + 539, + 217 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To bind temporal positional information to visual tokens, we propose an adapter called Temporal Adaptive Position Encoding (TAPE). Inspired by CPVT (Chu et al., 2021), our TAPE uses zero padding at both ends of the convolution as anchors, and gradually transmits relative positional encoding information. Without the need to add any special time tokens, TAPE can automatically perceive the relative temporal positions of the token sequence and generate temporal embeddings.", + "bbox": [ + 169, + 226, + 823, + 297 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Specifically, the long video token sequence $\\mathbf{V}_q$ is first compressed in the channel dimension by a linear layer and further compressed in sequence length by a pooling layer. Next, we use a U-Net-like structure composed of one-dimensional depthwise separable convolutions to progressively down-sample the sequence, obtaining three one-dimensional temporal feature sequences with different resolutions. Subsequently, a convolution with a sufficiently long window is applied to the shortest temporal feature sequence, using zero padding at both ends as anchors to encode the relative temporal position of each token in the sequence (Chu et al., 2021). Then, we progressively upsample and restore the temporal feature sequences from short to long, using residual connections to retain temporal features at different scales. Finally, the temporal feature sequences are restored to the same length as $\\mathbf{V}_l$ and aligned in the channel dimension by a linear layer, thereby obtaining the temporal features $\\mathbf{V}_t$ output by the TAPE. For detailed implementation of TAPE, please refer to Appendix A.", + "bbox": [ + 169, + 303, + 826, + 457 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Our proposed TAPE offers a plug-and-play module, which could be easily integrated into the network structure via residual connections, adding temporal position information to video tokens without disrupting the distribution of other trainable parameters. With appropriate training strategies, TAPE effectively preserves the model's generalization capabilities and enhances its temporal sensitivity (see ablation study, Table 3), which is important for temporal grounding task.", + "bbox": [ + 169, + 463, + 823, + 532 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.2 TIMEPRO:TEMPORAL GROUNDED INSTRUCTION DATA", + "text_level": 1, + "bbox": [ + 171, + 550, + 596, + 564 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Traditional temporal grounding datasets only contain monotonous ground truth, i.e., the start and end times of the target period. This data format performs well in training the classic expert models, but is difficult to unleash the potential of LLMs. Although several temporal grounding-centric datasets have been released for MLLM fine-tuning (Ren et al., 2024; Huang et al., 2024b), they still have deficiencies in data quantity, data quality, and task diversity. 
Thus, it is necessary to build a more comprehensive temporal dataset designed for the tuning of MLLMs.", + "bbox": [ + 169, + 575, + 823, + 660 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Based on the criteria of diversity, length, and difficulty, we collect and clean several existing high-quality grounding-centric datasets (Ren et al., 2024; Huang et al., 2024a,b) and create two new datasets, resulting in TimePro. Compared to previous temporal grounding-centric datasets, TimePro offers a larger volume of data, a broader distribution, and higher task diversity, facilitating the learning of more generalizable temporal representations for MLLMs.", + "bbox": [ + 169, + 666, + 823, + 737 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "As shown in Figure 3(a), TimePro contains 9 task types from 15 datasets that are highly relevant to temporal grounding, comprising approximately 349K high-quality temporal grounding annotations. The 9 tasks are specified as follows. Temporal Video Grounding involves identifying the start and end times of video content based on a natural language query (Anne Hendricks et al., 2017; Oncescu et al., 2021; Zala et al., 2023). Dense Video Captioning requires detecting events within a video and providing corresponding timestamps and descriptions (Krishna et al., 2017; Huang et al., 2020; Zhou et al., 2018). Video Summarization focuses on determining key frames or clips in the form of timestamps rather than semantic summaries (Song et al., 2015; Gygli et al., 2014). Step Localization aims to segment and describe important steps in a long video (Tang et al., 2019; Zala et al., 2023). Transcribed Speech Generation predicts speech content and its timestamps from visual signals (Zellers et al., 2022). Reasoning Temporal Localization combines timestamps with explanatory answers (Huang et al., 2024b). Multi-format Temporal Grounding includes single-turn and multi-turn dialogues with diverse question types (Huang et al., 2024a). Highlight", + "bbox": [ + 169, + 743, + 826, + 925 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/083cd70a090e43a2861582ab50537e9775960f025a9ff9e923b85e63a3e03146.jpg", + "image_caption": [ + "(a) Tasks of TimePro", + "Figure 3: (a) The proposed temporal-centric instruction-tuning dataset, TimePro. This dataset contains approximately 349K high-quality and strongly temporally correlated samples. (b) The proposed Temporal Grounded Caption fine-tuning data paradigm. It effectively reduces the occurrence of hallucinations. We employ a 4-stage processing pipeline to ensure the quality of the generated data." + ], + "image_footnote": [], + "bbox": [ + 204, + 99, + 486, + 273 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/aface00e068ff3e7c38e20a5aa09cc30b2c5cf813a2898118d5b09daed21e814.jpg", + "image_caption": [ + "(b) Details of Temporal Grounded Caption" + ], + "image_footnote": [], + "bbox": [ + 490, + 101, + 790, + 273 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Detection identifies the most significant moments in a video based on a query (Lei et al., 2021a). Temporal Grounded Caption uses a brief scene title to output both the time period and a fine-grained description for the scene.", + "bbox": [ + 169, + 386, + 823, + 455 + ], + "page_idx": 5 + },
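+ { + "type": "text", + "text": "For illustration, a single Temporal Grounded Caption instance could look like the following (field names and wording are hypothetical; the exact TimePro schema may differ):", + "page_idx": 5 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "
# Hypothetical Temporal Grounded Caption training instance (illustrative only;
# not taken from the released dataset).
tgc_instance = {
    'video': 'example_video.mp4',
    'query': "Scene title: 'whisking eggs in a glass bowl'. Give the time span "
             'of this scene and describe it in detail.',
    'response': 'The scene occurs from 62.0s to 88.5s. The chef cracks two eggs '
                'into a glass bowl, adds a pinch of salt, and whisks briskly '
                'until the mixture turns pale yellow.',
}
", + "guess_lang": "python", + "page_idx": 5 + },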
+ { + "type": "text", + "text": "More detailed information about TimePro is available in Appendix B. It should be noted that Temporal Grounded Caption is our newly designed task, which helps our model establish fine-grained correspondence between visual segments and linguistic descriptions.", + "bbox": [ + 169, + 386, + 823, + 455 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.3 TEMPORAL GROUNDED CAPTION TASK", + "text_level": 1, + "bbox": [ + 171, + 479, + 488, + 492 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Some studies have shown that MLLMs often exhibit severe hallucinations when dealing with fine-grained perception tasks (Ji et al., 2023; Huang et al., 2023; Golkar et al., 2023). Since our VideoChat-T directly regresses the timestamps corresponding to the text queries using MLLMs, it is more susceptible to hallucinations compared to methods that use external expert models as decoders (Wu et al., 2024). By forcing the video MLLM to predict the event occurrence time and simultaneously describe the visual content as evidence, we attempt to anchor these queries to the relevant time segments within the video, rather than generating hallucinations originating from the LLM itself. Based on this analysis, we design the Temporal Grounded Caption task.", + "bbox": [ + 169, + 507, + 823, + 619 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The top of Figure 3(b) illustrates the definition of Temporal Grounded Caption. We use a brief scene title of the video segment as the query, requiring the model to simultaneously respond with the precise start and end times of the video segment and provide a detailed description of that segment. While the content in the scene title may leak into the detailed caption response, most of the missing detailed information must be correctly described by attending to the corresponding segment. Moreover, temporal grounding and detailed captioning can serve as regularization tasks for each other, preventing the caption model from hallucinating from unrelated visual or linguistic contexts and helping the grounding model regress the timestamps more accurately.", + "bbox": [ + 169, + 625, + 826, + 737 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The process for collecting our Temporal Grounded Caption data is described at the bottom of Figure 3(b). In the first stage, we use a detailed caption dataset with timestamps as our data source. We remove data with target grounding time intervals that are too short or too long and ensure that the scenes in the video are as diverse as possible. In the second stage, we use an LLM to summarize scene titles. To prevent excessive semantics of video segments from being leaked from the query to the MLLM, we try to retain only the minimal subset of key features that suffices to distinguish the video segments. In the third stage, to avoid overly similar or identical content appearing at different temporal intervals in the video, we perform similarity filtering on the data annotations. Based on the scene titles and video features, we calculate the similarity between different segments of the same video and remove data with excessively high similarity. In the fourth stage, we randomly sample the generated data and manually assess its quality. Based on human feedback, we refine the threshold parameters for data filtering used in the first three stages to yield the final Temporal Grounded Caption dataset.", + "bbox": [ + 169, + 743, + 826, + 924 + ], + "page_idx": 5 + },
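+ { + "type": "text", + "text": "A minimal sketch of the third-stage similarity filter follows (the embedding source, the greedy strategy, and the default threshold are our assumptions; the paper does not specify them, and the threshold is refined from human feedback in the fourth stage):", + "page_idx": 5 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "
import torch
import torch.nn.functional as F

def filter_similar_segments(seg_embeds: torch.Tensor, threshold: float = 0.9) -> list:
    # seg_embeds: (N, D) embeddings of the N annotated segments of one video,
    # e.g., fused scene-title text features and video features. Greedily keep a
    # segment only if its cosine similarity to every already-kept segment stays
    # below the threshold; otherwise drop it as near-duplicate content.
    sims = F.cosine_similarity(seg_embeds.unsqueeze(1), seg_embeds.unsqueeze(0), dim=-1)
    keep = []
    for i in range(seg_embeds.size(0)):
        if all(sims[i, j].item() < threshold for j in keep):
            keep.append(i)
    return keep
", + "guess_lang": "python", + "page_idx": 5 + },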
+ { + "type": "text", + "text": "This new dataset plays an important role in our grounded tuning.", + "bbox": [ + 169, + 743, + 826, + 924 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/45a634c3aab292d6b2dce1bcf29a6c0d2dcf2721b8a03c4fa9445fe0fd035b40.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Method | LLM Size | Charades-STA R@1 (IOU=0.3) | Charades-STA R@1 (IOU=0.5) | Charades-STA R@1 (IOU=0.7) | QVHighlights mAP | QVHighlights HIT@1
MovieChat (Song et al., 2024a) | 7B | 8.8 | 2.9 | 1.3 | 11.7 | 16.1
GroundingGPT (Li et al., 2024c) | 7B | - | 29.6 | 11.9 | - | -
VTimeLLM (Huang et al., 2024a) | 7B | 51.0 | 27.5 | 11.4 | - | -
HawkEye (Wang et al., 2024f) | 7B | 50.6 | 31.4 | 14.5 | - | -
TimeChat (Ren et al., 2024) | 7B | - | 32.2 | 13.4 | 14.5 | 23.9
ChatVTG (Qu et al., 2024) | 7B | 52.7 | 33.0 | 15.9 | - | -
VideoChat2 (Li et al., 2024b) | 7B | 9.6 | 3.4 | 1.4 | 13.4 | 18.6
VideoChat-T | 7B | 69.9 (+60.3) | 48.7 (+45.3) | 24.0 (+22.6) | 26.5 (+13.1) | 54.1 (+35.5)
QD-DETR※ (FT) (Moon et al., 2023b) | - | - | 57.3 | 32.6 | 38.9 | 64.2
UnLoc-L※ (FT) (Yan et al., 2023) | - | - | 60.8 | 38.4 | - | -
HawkEye (FT) (Wang et al., 2024f) | 7B | 72.5 | 58.3 | 28.8 | - | -
TimeChat (FT) (Ren et al., 2024) | 7B | - | 46.7 | 23.7 | 21.7 | 37.9
VideoChat-T (FT) | 7B | 79.4 | 67.1 | 43.0 | 27.0 | 55.3
", + "bbox": [ + 174, + 101, + 823, + 286 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 1: Performance of VideoChat-T on temporal grounding and highlight detection tasks. (FT) indicates the model fine-tuned on training set of the evaluation benchmark, with the respective text marked in gray. Classic supervised expert models are marked with $\\text{※}$ .", + "bbox": [ + 169, + 296, + 823, + 339 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4 EXPERIMENTS", + "text_level": 1, + "bbox": [ + 171, + 349, + 328, + 364 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.1 IMPLEMENTATION DETAILS", + "text_level": 1, + "bbox": [ + 171, + 382, + 408, + 395 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Built upon VideoChat2, we use UMT-L (Li et al., 2023c) and Mistral-7B (Jiang et al., 2023) as the video encoder and LLM, respectively. Except for the TAPE, all components are initialized from the pre-trained model of VideoChat2-Mistral. For the TAPE, we use random initialization, set the initial values of the final linear layer to zero, and freeze it during the first epoch of training. We set the frame count $T$ for each clip to 8, so the number of clips $K$ for a long video is equal to the total frame count divided by $T$ . We fine-tune the model for 3 epochs using the TimePro with 349K instances and a general QA task dataset with 82K instances. To ensure the stability of model training, we use 192-frame input for the first epoch. In the second and third epochs, we unfreeze the TAPE and adjust the model input to 128 frames. All experiments are conducted on 16 A100 GPUs.", + "bbox": [ + 169, + 407, + 826, + 532 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2 PERFORMANCE ON TEMPORAL GROUNDING", + "text_level": 1, + "bbox": [ + 171, + 550, + 519, + 564 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We evaluate our method using two commonly used temporal localization tasks, i.e., Temporal Grounding and Highlight Detection. The performance comparison between VideoChat-T and other models is shown in Table 1. Our method's zero-shot performance surpasses all previous LLM-based methods and after fine-tuning, VideoChat-T even exceeds some classic expert models on the temporal grounding task.", + "bbox": [ + 169, + 575, + 823, + 647 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Temporal Grounding. This task aims to identify the start and end timestamps of the video content described by the query sentence, using Charades-STA as the evaluation benchmark. VideoChat-T achieves an accuracy of 48.7 in the R@1 (IOU=0.5) metric, significantly surpassing the previous state-of-the-art MLLM method, namely TimeChat, by 16.5 points. Additionally, it outperforms the fine-tuned version of TimeChat on the training set of the evaluation benchmark by $2.0\\%$ . Furthermore, the performance of VideoChat-T fine-tuned on the evaluation benchmark training set reaches 67.1 R@1 at IoU=0.5, surpassing most state-of-the-art classic supervised expert models.", + "bbox": [ + 169, + 652, + 826, + 751 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Highlight Detection. We use QVHighlights as the evaluation benchmark. For a given query, this task requires outputting all timestamps of highlight moments and their corresponding saliency scores. Since there could be many sparse highlight moments in a video, this task requires finer-grained video understanding at the frame level. 
VideoChat-T achieves an mAP of 26.5, significantly surpassing the previous MLLM method TimeChat by 13.0 points, and also outperforms its fine-tuned version by 4.8 points. We observe that after fine-tuning on the corresponding training set, VideoChat-T shows almost no performance improvement. This may be due to the bottleneck in the language representation of LLMs. The Highlight Detection task requires outputting a (timestamp, saliency score) pair for each highlight moment, and a video may contain dozens of discrete highlight moments, making it challenging for the model to correctly respond with dozens to hundreds of numbers in a language format. Precise numerical saliency score output is very difficult for LLMs, and VideoChat-T can only respond well to queries with fewer highlight moments. Due to the", + "bbox": [ + 169, + 757, + 826, + 925 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/c15c9827e65003c39960bd0839ef3e857461f6bf0c544794dda96bdb0e2f4a9b.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Method | LLM Size | Egoschema Subset | Egoschema Full | VideoMME w/o subs | VideoMME w/o subs (Long) | MVBench Avg
VideoAgent (Wang et al., 2024c) | GPT-4 | 60.2 | 54.1 | - | - | -
VideoAgent (Fan et al., 2024) | GPT-4 | 62.8 | - | - | - | -
TimeChat (Ren et al., 2024) | 7B | - | 33.0 | 30.2 | 26.1 | 38.5
LLAMA-Vid (Li et al., 2023d) | 7B | - | 38.5 | - | - | 41.9
MovieChat (Song et al., 2024a) | 7B | - | 53.5 | 38.2 | 33.4 | 55.1
MovieChat+ (Song et al., 2024b) | 7B | - | 56.4 | - | - | -
Chat-UniVi (Jin et al., 2024) | 7B | - | - | 40.6 | 35.8 | -
VideoChat2 (Li et al., 2024b) | 7B | 63.6 | 54.4 | 39.5 | 33.2 | 60.4
VideoChat-T | 7B | 68.4 (+4.8) | 60.0 (+5.6) | 46.3 (+6.8) | 41.9 (+8.7) | 59.9 (-0.5)
", + "bbox": [ + 174, + 101, + 823, + 258 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 2: Performance of VideoChat-T and other methods on video question answering tasks. By upgrading VideoChat2 with TimeSuite, VideoChat-T demonstrates significant improvements across multiple long video benchmarks.", + "bbox": [ + 169, + 268, + 823, + 311 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "specific architectural design, classic supervised expert models have a natural advantage in handling such tasks, and VideoChat-T still has a performance gap compared to expert models.", + "bbox": [ + 169, + 321, + 823, + 352 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.3 PERFORMANCE ON GENERAL VIDEO QA", + "text_level": 1, + "bbox": [ + 171, + 368, + 501, + 383 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In addition to test the grounding ability of our VideoChat-T, we also want to verify its general video question answering performance. According to mainstream evaluation standards, we use both long video and short video QA to assess the general video understanding capability of VideoChat-T. Table 2 shows the performance of VideoChat-T on the video QA evaluation benchmarks.", + "bbox": [ + 169, + 393, + 823, + 450 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Long Video QA. We use Egoschema (Mangalam et al., 2023) and VideoMME (Fu et al., 2024) to evaluate the long video capabilities of VideoChat-T. In conjunction with our proposed architectural improvements, we incremental fine-tune VideoChat2 using only 432K data points. VideoChat-T demonstrates outstanding performance on the Egoschema, achieving an accuracy of $68.4\\%$ on the test subset and $60.0\\%$ on the entire test set. Compared to VideoChat2, VideoChat-T obtains improvements of $4.8\\%$ and $5.6\\%$ on the subset and the full test set, respectively. Additionally, for the VideoMME benchmark, VideoChat-T achieves an accuracy of $46.3\\%$ by solely analyzing the visual content without using subtitles, representing a $6.8\\%$ improvement over VideoChat2. On the long video data division of VideoMME, VideoChat-T achieves an accuracy of $41.9\\%$ , which is an $8.7\\%$ improvement compared to VideoChat2. The upgraded VideoChat-T demonstrated significant performance improvements on long video QA benchmarks. This indicates the potential of leveraging grounding-centric video tasks to enhance the temporal awareness of MLLMs, thereby further improving long video understanding capabilities.", + "bbox": [ + 169, + 455, + 826, + 638 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Short Video QA. We use MVBench (Li et al., 2024b) to evaluate the general short video understanding capabilities of VideoChat-T. VideoChat-T achieves an overall average accuracy of $59.9\\%$ on MVBench, which is a $0.5\\%$ decrease compared to VideoChat2. It is important to note that achieving minimal performance loss is a challenging task. According to previous experiences in the field of incremental learning (Van de Ven et al., 2022), models inevitably forget old knowledge while learning new knowledge. VideoChat2 is fine-tuned with 2M data, whereas VideoChat-T is fine-tuned with only 432K data, where 349K annotations are temporal grounding centric, resulting in only a $0.5\\%$ accuracy loss. Previous temporal MLLMs like TimeChat (Ren et al., 2024), although achieving strong temporal localization capabilities, yield much weaker general video QA capability, with an accuracy of only $38.5\\%$ on MVBench. 
This demonstrates that the design of our TimeSuite enhances new capabilities for the model while still preserving the original general video understanding capabilities. For a detailed analysis of the performance degradation of MVBench, please refer to Appendix F.2.", + "bbox": [ + 169, + 643, + 826, + 825 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.4 QUALITATIVE ANALYSIS", + "text_level": 1, + "bbox": [ + 171, + 842, + 383, + 857 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Figure 4 presents a qualitative comparison between our model and other methods. In the example on the left, VideoChat-T is capable of answering more complex long video reasoning questions. Our model accurately identifies the temporal location of the \"light a cigarette\" event and determines the correct key clue \"the person in a white coat\" based on the video content. This leads to the inference", + "bbox": [ + 169, + 868, + 823, + 925 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/35e92a20134204d271d52add199b9668212da945d3ff96706a6ebda7d2bd99cc.jpg", + "image_caption": [ + "Figure 4: Qualitative comparison between VideoChat-T and other methods. VideoChat-T not only possesses temporal fine-grained perception capabilities but also can perform accurate long video reasoning. Green text indicates correct answers, while red text indicates inappropriate answers." + ], + "image_footnote": [], + "bbox": [ + 173, + 101, + 488, + 268 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/97a395318cd819ea31c6f8a8f40d2462eee8825dbbf4fe4859d6dca452f7a506.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 101, + 823, + 258 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/94d9776459cc5e5d0fff9a197b234bec613225a8b63a126dd7bc6e694db72f32.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Model | Egoschema Full | VideoMME w/o subs | Charades-STA R@1 (IOU=0.5) | QVHighlights Hit@1
VideoChat-T (Ours) | 60.0 | 46.3 | 48.7 | 54.1
w/o TAPE | 59.1 | 45.9 | 47.1 | 50.4
w/o frz | 59.0 | 45.2 | 52.4 | 53.7
", + "bbox": [ + 173, + 328, + 486, + 378 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/1ef628e7b61d7ac79750d1dde1f1c939db03d2b5d66922bc048e68475aa032ae.jpg", + "table_caption": [ + "Table 3: Performance results of the ablation study on the TAPE. Here, w/o adapter refers to removing our proposed TAPE, and w/o frz refers to not using the training method where the TAPE is frozen during the first epoch." + ], + "table_footnote": [], + "table_body": "
Model | Egoschema Full | VideoMME w/o subs | Charades-STA R@1 (IOU=0.5) | QVHighlights Hit@1
VideoChat-T (Ours) | 60.0 | 46.3 | 48.7 | 54.1
r/w pooling | 59.8 | 44.8 | 40.3 | 47.3
r/w clustering | 59.5 | 45.0 | 39.8 | 40.1
w/o init | 57.4 | 43.4 | 42.0 | 53.9
", + "bbox": [ + 509, + 328, + 821, + 388 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Table 4: Performance results of the ablation study on the Token Shuffle. Here, r/w refers to replacing Token Shuffle with the other component, and w/o init refers to removing the efficient initialization.", + "bbox": [ + 504, + 393, + 823, + 463 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "that \"playing the piano very fast and pressing the keys very hard\" are the true reasons. The example on the right demonstrates our model's fine-grained perception ability. The appearance of \"money in the briefcase\" is very brief, and most models easily overlook this detail. Thanks to its strong fine-grained perception ability, our model precisely captures this visual content.", + "bbox": [ + 169, + 476, + 823, + 532 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.5 ABLATION STUDY", + "text_level": 1, + "bbox": [ + 171, + 550, + 341, + 564 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Role of TAPE. To verify the performance improvement brought by TAPE, ablation experiments were conducted. Table 3 lists the performance results of the conducted adapter-related ablation experiments. It can be observed that when the TAPE is removed, the model's performance on long video understanding and temporal grounding benchmarks decreases. TAPE can adaptively embed positional encodings into video tokens, and the absence of TAPE leads to a certain loss in temporal awareness capability. When we unfroze the TAPE in the first epoch, the performance improved on the temporal grounding task but declined on the long video QA task. This is because the TAPE is highly suited for tasks with strong temporal dependencies. If unfrozen too early, the model may become biased towards fitting temporal grounding tasks. Freezing the TAPE during the first epoch allows the model to first optimize and learn a relatively generalized feature representation, thereby balancing the performance across different tasks.", + "bbox": [ + 169, + 575, + 823, + 729 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Effectiveness of Token Shuffle. To verify the effectiveness of token shuffle, we conducted ablation experiments. Table 4 presents the results of these ablation experiments. We compared token shuffle with conventional methods such as pooling and clustering, and also observed the results after removing efficient initialization. When we replaced token shuffle with pooling or clustering methods, the model's performance declined. This is because the efficient initialization of the linear layer in token shuffle makes the initial values of the module equivalent to average pooling, which gradually optimizes better solutions during training. Therefore, our method is inherently superior to pooling. On the other hand, clustering often fails to maintain the spatial/temporal consistency of the video, leading to temporal confusion. When we removed the efficient initialization of the linear layer, the negative impact of random initialization severely damaged the model's original performance.", + "bbox": [ + 169, + 734, + 823, + 875 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Effect of TimePro. We conducted ablation studies to evaluate the effectiveness of the TimePro data components. As shown in Table 5, by gradually adding subsets of TimePro, we observed the model's performance changes across various temporal grounding-centric instruction-tuning data. 
As we pro", + "bbox": [ + 169, + 881, + 823, + 925 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/3cbe39df81cbc638085740dff8291a6f2871a0f25d1273b3eb5e76126c5d65ad.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Normal | TimeIT | TGC | HD | MTG | RTL | Egoschema Full | VideoMME w/o subs | Charades-STA R@1 (IOU=0.5) | QVHighlights Hit@1
✓ |   |   |   |   |   | 56.6 | 42.6 | 8.0 | 24.4
✓ | ✓ |   |   |   |   | 57.8 | 43.6 | 32.2 | 25.2
✓ | ✓ | ✓ |   |   |   | 58.3 | 44.0 | 39.1 | 33.9
✓ | ✓ | ✓ | ✓ |   |   | 59.8 | 44.9 | 41.9 | 43.8
✓ | ✓ | ✓ | ✓ | ✓ |   | 60.0 | 45.1 | 45.8 | 48.3
✓ | ✓ | ✓ | ✓ | ✓ | ✓ | 60.0 | 46.3 | 48.7 | 54.1
", + "bbox": [ + 207, + 99, + 787, + 196 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Table 5: Performance results of the ablation study on different components of TimePro. We use 82K normal training data as the baseline. TimeIT refers to the training data with five task types from Ren et al. (2024), TGC refers to Temporal Grounded Caption, HD refers to Highlight Detection, MTG refers to Multi-format Temporal Grounding, and RTL refers to Reasoning Temporal Localization.", + "bbox": [ + 169, + 205, + 823, + 263 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/21867962bd38420a9c54f8c16c40cde0165a85826b166be4e57f7fc2c727e0f1.jpg", + "image_caption": [ + "Figure 5: Performance of VideoChat-T with varying input frame numbers. As the number of input frames increases, the performance of VideoChat-T shows an upward trend in both long video QA and temporal grounding tasks. Due to the over low temporal grounding performance of VideoChat2, its curve is omitted." + ], + "image_footnote": [], + "bbox": [ + 205, + 273, + 397, + 378 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/312cd59dc4ad40f3e5a575aef589070bd88564e1070e8538a70ffe6a79829bd9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 403, + 272, + 594, + 378 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/af3929d838db2730e5cd5fd741a457c44f1e3db2810c9a3c5d45fa025cdb22aa.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 601, + 273, + 790, + 378 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "gressively added subsets of TimePro, not only did the model's performance on temporal grounding tasks show a stable and significant improvement, but we also observed a noticeable upward trend in performance on long video benchmarks. This to some extent corroborates that temporal grounding centric tasks have a positive impact on long video understanding.", + "bbox": [ + 169, + 458, + 823, + 513 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Impact of frames. To investigate the impact of input frame count on model performance, we conducted an ablation study. Figure 5 illustrates the scalability of our model's performance with respect to input frame count. VideoChat-T demonstrates good stability as the input frame count varies, and its performance in long video QA and temporal grounding tasks improves with an increase in frame count. In contrast, the baseline model, VideoChat2, exhibited catastrophic performance degradation when the frame count was significantly increased. As the input frame count increases, the number of visual encoding tokens grows linearly. Excessive visual token input imposes an additional computational burden on the temporal modeling of the LLM. TimeSuite mitigates this by employing Token Shuffle to reduce the number of tokens, ensuring the stable operation of the model.", + "bbox": [ + 169, + 521, + 826, + 647 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "5 CONCLUSION", + "text_level": 1, + "bbox": [ + 171, + 666, + 320, + 681 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "In this paper, we have introduced TimeSuite, a collection of new designs from perspectives of efficient architecture, high-quality data, and new instruction tuning task, to achieve long video understanding by fine-tuning short video MLLMs with temporal grounding-centric data. We address the computational challenges of processing long videos by introducing token shuffle to compress visual tokens. 
We also propose the TAPE for adaptive position encoding, enhancing the temporal awareness of visual representation. Additionally, our designed Temporal Grounded Caption training task ensures MLLMs to build correspondence between grounded segments and detailed caption, while the TimePro dataset provides comprehensive instruction tuning data for learning more effective temporal perception capability. Experimental results demonstrate that VideoChat-T significantly improves long video understanding, with notable performance gains on Egoschema and VideoMME. Furthermore, VideoChat-T exhibits strong zero-shot temporal grounding capabilities, significantly outperforming the previous MLLMs on temporal grounding. Overall, our TimeSuite provides effective designs for short MLLMs to enhance their performance on temporal grounding and long video QA. We hope our TimeSuite could yield some insights on designing long video MLLMs.", + "bbox": [ + 169, + 696, + 826, + 893 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 946, + 509, + 959 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "ACKNOWLEDGEMENT", + "text_level": 1, + "bbox": [ + 171, + 102, + 361, + 118 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "This work is supported by the National Key R&D Program of China (No. 2022ZD0160900), the Fundamental Research Funds for the Central Universities (No. 020214380119), Jiangsu Frontier Technology Research and Development Program (No. BF2024076), and the Collaborative Innovation Center of Novel Software Technology and Industrialization.", + "bbox": [ + 171, + 132, + 826, + 189 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 173, + 208, + 287, + 223 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Lisa Anne Hendricks, Oliver Wang, Eli Shechtman, Josef Sivic, Trevor Darrell, and Bryan Russell. Localizing moments in video with natural language. In Proceedings of the IEEE international conference on computer vision, pp. 5803-5812, 2017.", + "Jinze Bai, Shuai Bai, Shusheng Yang, Shijie Wang, Sinan Tan, Peng Wang, Junyang Lin, Chang Zhou, and Jingren Zhou. Qwen-vl: A frontier large vision-language model with versatile abilities. arXiv preprint arXiv:2308.12966, 2023.", + "Lin Chen, Xin Wei, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Bin Lin, Zhenyu Tang, et al. Sharegpt4video: Improving video understanding and generation with better captions. arXiv preprint arXiv:2406.04325, 2024.", + "Wei-Lin Chiang, Zhuohan Li, Zi Lin, Ying Sheng, Zhanghao Wu, Hao Zhang, Lianmin Zheng, Siyuan Zhuang, Yonghao Zhuang, Joseph E Gonzalez, et al. Vicuna: An open-source chatbot impressing gpt-4 with $90\\%$ chatgpt quality. See https://vicuna.lmsys.org (accessed 14 April 2023), 2(3):6, 2023.", + "Xiangxiang Chu, Zhi Tian, Bo Zhang, Xinlong Wang, and Chunhua Shen. Conditional positional encodings for vision transformers. arXiv preprint arXiv:2102.10882, 2021.", + "Yue Fan, Xiaojian Ma, Rujie Wu, Yuntao Du, Jiaqi Li, Zhi Gao, and Qing Li. Videoagent: A memory-augmented multimodal agent for video understanding. arXiv preprint arXiv:2403.11481, 2024.", + "Chaoyou Fu, Yuhan Dai, Yondong Luo, Lei Li, Shuhuai Ren, Renrui Zhang, Zihan Wang, Chenyu Zhou, Yunhang Shen, Mengdan Zhang, et al. 
Video-mme: The first-ever comprehensive evaluation benchmark of multi-modal llms in video analysis. arXiv preprint arXiv:2405.21075, 2024.", + "Jiyang Gao, Chen Sun, Zhenheng Yang, and Ram Nevatia. Tall: Temporal activity localization via language query. In Proceedings of the IEEE international conference on computer vision, pp. 5267-5275, 2017.", + "Siavash Golkar, Mariel Pettee, Michael Eickenberg, Alberto Bietti, Miles Cranmer, Geraud Krawezik, Francois Lanusse, Michael McCabe, Ruben Ohana, Liam Parker, et al. xval: A continuous number encoding for large language models. arXiv preprint arXiv:2310.02989, 2023.", + "Michael Gygli, Helmut Grabner, Hayko Riemenschneider, and Luc Van Gool. Creating summaries from user videos. In Computer Vision-ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part VII 13, pp. 505-520. Springer, 2014.", + "Bin Huang, Xin Wang, Hong Chen, Zihan Song, and Wenwu Zhu. Vtimellm: Empower llm to grasp video moments. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 14271-14280, 2024a.", + "De-An Huang, Shijia Liao, Subhashree Radhakrishnan, Hongxu Yin, Pavlo Molchanov, Zhiding Yu, and Jan Kautz. Lita: Language instructed temporal-localization assistant. arXiv preprint arXiv:2403.19046, 2024b.", + "Gabriel Huang, Bo Pang, Zhenhai Zhu, Clara Rivera, and Radu Soricut. Multimodal pretraining for dense video captioning. arXiv preprint arXiv:2011.11760, 2020.", + "Lei Huang, Weijiang Yu, Weitao Ma, Weihong Zhong, Zhangyin Feng, Haotian Wang, Qianglong Chen, Weihua Peng, Xiaocheng Feng, Bing Qin, et al. A survey on hallucination in large language models: Principles, taxonomy, challenges, and open questions. arXiv preprint arXiv:2311.05232, 2023." + ], + "bbox": [ + 171, + 231, + 825, + 922 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 948, + 506, + 959 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Ziwei Ji, Nayeon Lee, Rita Frieske, Tiezheng Yu, Dan Su, Yan Xu, Etsuko Ishii, Ye Jin Bang, Andrea Madotto, and Pascale Fung. Survey of hallucination in natural language generation. ACM Computing Surveys, 55(12):1-38, 2023.", + "Albert Q Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lucile Saulnier, et al. Mistral 7b. arXiv preprint arXiv:2310.06825, 2023.", + "Peng Jin, Ryuichi Takanobu, Wancai Zhang, Xiaochun Cao, and Li Yuan. Chat-univi: Unified visual representation empowers large language models with image and video understanding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 13700-13710, 2024.", + "Ranjay Krishna, Kenji Hata, Frederic Ren, Li Fei-Fei, and Juan Carlos Niebles. Dense-captioning events in videos. In Proceedings of the IEEE international conference on computer vision, pp. 706-715, 2017.", + "Jie Lei, Tamara L Berg, and Mohit Bansal. Detecting moments and highlights in videos via natural language queries. Advances in Neural Information Processing Systems, 34:11846-11858, 2021a.", + "Jie Lei, Tamara L Berg, and Mohit Bansal. Detecting moments and highlights in videos via natural language queries. 
Advances in Neural Information Processing Systems, 34:11846-11858, 2021b.", + "Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Peiyuan Zhang, Yanwei Li, Ziwei Liu, et al. Llava-onevision: Easy visual task transfer. arXiv preprint arXiv:2408.03326, 2024a.", + "Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. In International conference on machine learning, pp. 19730–19742. PMLR, 2023a.", + "KunChang Li, Yinan He, Yi Wang, Yizhuo Li, Wenhai Wang, Ping Luo, Yali Wang, Limin Wang, and Yu Qiao. Videochat: Chat-centric video understanding. arXiv preprint arXiv:2305.06355, 2023b.", + "Kunchang Li, Yali Wang, Yizhuo Li, Yi Wang, Yinan He, Limin Wang, and Yu Qiao. Unmasked teacher: Towards training-efficient video foundation models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 19948-19960, 2023c.", + "Kunchang Li, Yali Wang, Yinan He, Yizhuo Li, Yi Wang, Yi Liu, Zun Wang, Jilan Xu, Guo Chen, Ping Luo, et al. Mvbench: A comprehensive multi-modal video understanding benchmark. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 22195-22206, 2024b.", + "Yanwei Li, Chengyao Wang, and Jiaya Jia. Llama-vid: An image is worth 2 tokens in large language models. arXiv preprint arXiv:2311.17043, 2023d.", + "Zhaowei Li, Qi Xu, Dong Zhang, Hang Song, Yiqing Cai, Qi Qi, Ran Zhou, Junting Pan, Zefeng Li, Van Tu Vu, et al. Groundinggpt: Language enhanced multi-modal grounding model. CoRR, 2024c.", + "Bin Lin, Bin Zhu, Yang Ye, Munan Ning, Peng Jin, and Li Yuan. Video-llava: Learning united visual representation by alignment before projection. arXiv preprint arXiv:2311.10122, 2023a.", + "Kevin Qinghong Lin, Pengchuan Zhang, Joya Chen, Shraman Pramanick, Difei Gao, Alex Jinpeng Wang, Rui Yan, and Mike Zheng Shou. Univtg: Towards unified video-language temporal grounding. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 2794-2804, 2023b.", + "Hao Liu, Wilson Yan, Matei Zaharia, and Pieter Abbeel. World model on million-length video and language with ringattention. arXiv preprint arXiv:2402.08268, 2024a.", + "Ruyang Liu, Chen Li, Haoran Tang, Yixiao Ge, Ying Shan, and Ge Li. St-llm: Large language models are effective temporal learners. arXiv preprint arXiv:2404.00308, 2024b." + ], + "bbox": [ + 171, + 102, + 825, + 924 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Muhammad Maaz, Hanoona Rasheed, Salman Khan, and Fahad Shahbaz Khan. Video-chatgpt: Towards detailed video understanding via large vision and language models. arXiv preprint arXiv:2306.05424, 2023.", + "Karttikeya Mangalam, Raiymbek Akshulakov, and Jitendra Malik. Egoschema: A diagnostic benchmark for very long-form video language understanding. Advances in Neural Information Processing Systems, 36, 2023.", + "WonJun Moon, Sangeek Hyun, SuBeen Lee, and Jae-Pil Heo. Correlation-guided query-dependency calibration in video representation learning for temporal grounding. arXiv preprint arXiv:2311.08835, 2023a.", + "WonJun Moon, Sangeek Hyun, SangUk Park, Dongchan Park, and Jae-Pil Heo. 
Query-dependent video representation for moment retrieval and highlight detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 23023-23033, 2023b.", + "Andreea-Maria Oncescu, Joao F Henriques, Yang Liu, Andrew Zisserman, and Samuel Albanie. Queryd: A video dataset with high-quality text and audio narrations. In ICASSP 2021-2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 2265-2269. IEEE, 2021.", + "Mengxue Qu, Xiaodong Chen, Wu Liu, Alicia Li, and Yao Zhao. Chatvtg: Video temporal grounding via chat with video dialogue large language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 1847-1856, 2024.", + "Shuhuai Ren, Linli Yao, Shicheng Li, Xu Sun, and Lu Hou. Timechat: A time-sensitive multimodal large language model for long video understanding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 14313-14323, 2024.", + "Share. Sharegemini: Scaling up video caption data for multimodal large language models, June 2024. URL https://github.com/Share14/ShareGemini.", + "Enxin Song, Wenhao Chai, Guanhong Wang, Yucheng Zhang, Haoyang Zhou, Feiyang Wu, Haozhe Chi, Xun Guo, Tian Ye, Yanting Zhang, et al. Moviechat: From dense token to sparse memory for long video understanding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 18221-18232, 2024a.", + "Enxin Song, Wenhao Chai, Tian Ye, Jenq-Neng Hwang, Xi Li, and Gaoang Wang. Moviechat+: Question-aware sparse memory for long video question answering. arXiv preprint arXiv:2404.17176, 2024b.", + "Yale Song, Jordi Vallmitjana, Amanda Stent, and Alejandro Jaime. Tvsum: Summarizing web videos using titles. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 5179-5187, 2015.", + "Yansong Tang, Dajun Ding, Yongming Rao, Yu Zheng, Danyang Zhang, Lili Zhao, Jiwen Lu, and Jie Zhou. Coin: A large-scale dataset for comprehensive instructional video analysis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 1207-1216, 2019.", + "Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, et al. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971, 2023.", + "Gido M Van de Ven, Tinne Tuytelaars, and Andreas S Tolias. Three types of incremental learning. Nature Machine Intelligence, 4(12):1185-1197, 2022.", + "Limin Wang, Yuanjun Xiong, Zhe Wang, Yu Qiao, Dahua Lin, Xiaou Tang, and Luc Van Gool. Temporal segment networks for action recognition in videos. IEEE Trans. Pattern Anal. Mach. Intell., 41(11):2740-2755, 2019.", + "Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024a." 
+ ], + "bbox": [ + 171, + 102, + 825, + 924 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Wenhai Wang, Zhe Chen, Xiaokang Chen, Jiannan Wu, Xizhou Zhu, Gang Zeng, Ping Luo, Tong Lu, Jie Zhou, Yu Qiao, et al. Visionllm: Large language model is also an open-ended decoder for vision-centric tasks. Advances in Neural Information Processing Systems, 36, 2024b.", + "Xiaohan Wang, Yuhui Zhang, Orr Zohar, and Serena Yeung-Levy. Videoagent: Long-form video understanding with large language model as agent. arXiv preprint arXiv:2403.10517, 2024c.", + "Xidong Wang, Dingjie Song, Shunian Chen, Chen Zhang, and Benyou Wang. Longllava: Scaling multi-modal llms to 1000 images efficiently via hybrid architecture. arXiv preprint arXiv:2409.02889, 2024d.", + "Yi Wang, Kunchang Li, Xinhao Li, Jiashuo Yu, Yinan He, Guo Chen, Baoqi Pei, Rongkun Zheng, Jilan Xu, Zun Wang, et al. Internvideo2: Scaling video foundation models for multimodal video understanding. arXiv preprint arXiv:2403.15377, 2024e.", + "Yueqian Wang, Xiaojun Meng, Jianxin Liang, Yuxuan Wang, Qun Liu, and Dongyan Zhao. Hawkeye: Training video-text llms for grounding text in videos. arXiv preprint arXiv:2403.10228, 2024f.", + "Yuxuan Wang, Yueqian Wang, Pengfei Wu, Jianxin Liang, Dongyan Zhao, Yang Liu, and Zilong Zheng. Efficient temporal extrapolation of multimodal large language models with temporal grounding bridge. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pp. 9972-9987, 2024g.", + "Ziyang Wang, Shoubin Yu, Elias Stengel-Eskin, Jaehong Yoon, Feng Cheng, Gedas Bertasius, and Mohit Bansal. Videotree: Adaptive tree-based video representation for llm reasoning on long videos. arXiv preprint arXiv:2405.19209, 2024h.", + "Jiannan Wu, Muyan Zhong, Sen Xing, Zeqiang Lai, Zhaoyang Liu, Wenhai Wang, Zhe Chen, Xizhou Zhu, Lewei Lu, Tong Lu, et al. Visionllm v2: An end-to-end generalist multimodal large language model for hundreds of vision-language tasks. arXiv preprint arXiv:2406.08394, 2024.", + "Junbin Xiao, Angela Yao, Yicong Li, and Tat-Seng Chua. Can i trust your answer? visually grounded video question answering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 13204-13214, 2024.", + "Fuzhao Xue, Yukang Chen, Dacheng Li, Qinghao Hu, Ligeng Zhu, Xiuyu Li, Yunhao Fang, Haotian Tang, Shang Yang, Zhijian Liu, et al. Longvila: Scaling long-context visual language models for long videos. arXiv preprint arXiv:2408.10188, 2024.", + "Shen Yan, Xuehan Xiong, Arsha Nagrani, Anurag Arnab, Zhonghao Wang, Weina Ge, David Ross, and Cordelia Schmid. Unloc: A unified framework for video localization tasks. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 13623-13633, 2023.", + "En Yu, Liang Zhao, Yana Wei, Jinrong Yang, Dongming Wu, Lingyu Kong, Haoran Wei, Tiancai Wang, Zheng Ge, Xiangyu Zhang, et al. Merlin: Empowering multimodal llms with foresight minds. arXiv preprint arXiv:2312.00589, 2023.", + "Abhay Zala, Jaemin Cho, Satwik Kottur, Xilun Chen, Barlas Oguz, Yashar Mehdad, and Mohit Bansal. Hierarchical video-moment retrieval and step-captioning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 
23056-23065, 2023.", + "Rowan Zellers, Jiasen Lu, Ximing Lu, Youngjae Yu, Yanpeng Zhao, Mohammadreza Salehi, Aditya Kusupati, Jack Hessel, Ali Farhadi, and Yejin Choi. Merlot reserve: Neural script knowledge through vision and language and sound. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 16375-16387, 2022.", + "Xiangyu Zeng, Mingzhu Xu, Yijun Hu, Haoyu Tang, Yupeng Hu, and Liqiang Nie. Adaptive edge-aware semantic interaction network for salient object detection in optical remote sensing images. IEEE Transactions on Geoscience and Remote Sensing, 2023.", + "Yingsen Zeng, Yujie Zhong, Chengjian Feng, and Lin Ma. Unimd: Towards unifying moment retrieval and temporal action detection. arXiv preprint arXiv:2404.04933, 2024." + ], + "bbox": [ + 171, + 102, + 825, + 924 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 490, + 948, + 508, + 959 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Hang Zhang, Xin Li, and Lidong Bing. Video-llama: An instruction-tuned audio-visual language model for video understanding. arXiv preprint arXiv:2306.02858, 2023.", + "bbox": [ + 171, + 103, + 823, + 133 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Haoji Zhang, Yiqin Wang, Yansong Tang, Yong Liu, Jiashi Feng, Jifeng Dai, and Xiaojie Jin. Flash-vstream: Memory-based real-time understanding for long video streams. arXiv preprint arXiv:2406.08085, 2024a.", + "bbox": [ + 171, + 140, + 823, + 184 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Peiyuan Zhang, Kaichen Zhang, Bo Li, Guangtao Zeng, Jingkang Yang, Yuanhan Zhang, Ziyue Wang, Haoran Tan, Chunyuan Li, and Ziwei Liu. Long context transfer from language to vision. arXiv preprint arXiv:2406.16852, 2024b.", + "bbox": [ + 171, + 190, + 823, + 233 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Junjie Zhou, Yan Shu, Bo Zhao, Boya Wu, Shitao Xiao, Xi Yang, Yongping Xiong, Bo Zhang, Tiejun Huang, and Zheng Liu. Mlvu: A comprehensive benchmark for multi-task long video understanding. arXiv preprint arXiv:2406.04264, 2024.", + "bbox": [ + 171, + 241, + 823, + 284 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Luowei Zhou, Chenliang Xu, and Jason Corso. Towards automatic learning of procedures from web instructional videos. 
In Proceedings of the AAAI Conference on Artificial Intelligence, volume 32, 2018.", + "bbox": [ + 171, + 291, + 823, + 333 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "A IMPLEMENTATION OF TAPE", + "text_level": 1, + "bbox": [ + 171, + 359, + 444, + 375 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Algorithm 1 PyTorch snippet of TAPE.", + "text_level": 1, + "bbox": [ + 171, + 395, + 434, + 410 + ], + "page_idx": 14 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "
# Initialize related packages
import torch
import torch.nn as nn

# Helper assumed by the snippet: LayerNorm over the channel dim of (B, C, L) tensors.
class TransposeLayerNorm(nn.Module):
    def __init__(self, dim):
        super().__init__()
        self.norm = nn.LayerNorm(dim)

    def forward(self, x):
        return self.norm(x.transpose(1, 2)).transpose(1, 2)

class TemporalAdapter(nn.Module):
    def __init__(self, merge_len, clip_num, input_dim, mid_dim, output_dim, sample_rate):
        super().__init__()
        self.AvgPool = nn.AvgPool1d(merge_len, stride=merge_len)
        self.upsample = nn.Upsample(scale_factor=sample_rate)
        self.linear_input = nn.Linear(input_dim, mid_dim)
        self.linear_output = nn.Linear(mid_dim, output_dim)
        # Zero-init the output projection so TAPE starts as an identity residual.
        nn.init.constant_(self.linear_output.weight, 0)
        nn.init.constant_(self.linear_output.bias, 0)
        self.Downsample_Depthwise_Separable_Conv1 = nn.Sequential(
            nn.Conv1d(mid_dim, mid_dim, merge_len * 2 + 1, stride=sample_rate,
                      padding=merge_len, groups=mid_dim),
            nn.Conv1d(mid_dim, mid_dim, 1),
            TransposeLayerNorm(mid_dim),
            nn.GELU(),
        )
        self.Downsample_Depthwise_Separable_Conv2 = nn.Sequential(
            nn.Conv1d(mid_dim, mid_dim, merge_len * 2 + 1, stride=sample_rate,
                      padding=merge_len, groups=mid_dim),
            nn.Conv1d(mid_dim, mid_dim, 1),
            TransposeLayerNorm(mid_dim),
            nn.GELU(),
        )
        # Long-window convolution on the shortest sequence; its zero padding at
        # both ends acts as the positional anchors (assumes clip_num is even).
        self.fc = nn.Sequential(
            nn.Conv1d(mid_dim, mid_dim, clip_num + 1, stride=1, padding=clip_num // 2),
            TransposeLayerNorm(mid_dim),
            nn.GELU(),
        )
        self.Conv2 = nn.Sequential(
            nn.Conv1d(mid_dim, mid_dim, merge_len + 1, stride=1,
                      padding=merge_len // 2, groups=mid_dim),
            nn.Conv1d(mid_dim, mid_dim, 1),
            TransposeLayerNorm(mid_dim),
            nn.GELU(),
        )
        self.Conv1 = nn.Sequential(
            nn.Conv1d(mid_dim, mid_dim, merge_len + 1, stride=1,
                      padding=merge_len // 2, groups=mid_dim),
            nn.Conv1d(mid_dim, mid_dim, 1),
            TransposeLayerNorm(mid_dim),
            nn.GELU(),
        )

    def forward(self, input_tokens):
        # (B, L, input_dim) -> (B, mid_dim, L); L assumed divisible by the
        # pooling window and the downsampling strides.
        time_ad = self.linear_input(input_tokens).transpose(1, 2)
        # Three temporal resolutions: L/merge_len, then two strided downsamplings.
        time_ad1 = self.AvgPool(time_ad)
        time_ad2 = self.Downsample_Depthwise_Separable_Conv1(time_ad1)
        time_ad3 = self.Downsample_Depthwise_Separable_Conv2(time_ad2)
        time_ad3 = self.fc(time_ad3)
        # U-Net-style upsampling with residual connections across scales.
        time_ad2 = self.upsample(time_ad3) + time_ad2
        time_ad2 = self.Conv2(time_ad2)
        time_ad1 = self.upsample(time_ad2) + time_ad1
        time_ad1 = self.Conv1(time_ad1)
        # (B, mid_dim, L/merge_len) -> (B, L/merge_len, output_dim)
        time_ad_out = self.linear_output(time_ad1.transpose(1, 2))
        return time_ad_out
", + "guess_lang": "python", + "bbox": [ + 199, + 435, + 795, + 825 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Algorithm 1 details the implementation process of TAPE in code form. Specifically, the long video token sequence input_tokens is first compressed in the channel dimension by a linear layer to obtain time_ad, and the sequence length is compressed through a pooling layer.", + "bbox": [ + 169, + 854, + 823, + 925 + ], + "page_idx": 14 + },
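+ { + "type": "text", + "text": "(As an aside, a quick shape check of the adapter above; the hyperparameter values here are illustrative assumptions, not the paper's configuration.)", + "page_idx": 14 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "
# Illustrative shape check only; reuses the TemporalAdapter defined above.
adapter = TemporalAdapter(merge_len=4, clip_num=16, input_dim=1024,
                          mid_dim=256, output_dim=4096, sample_rate=2)
input_tokens = torch.randn(2, 1024, 1024)  # (batch, sequence length L, C_q)
out = adapter(input_tokens)
print(out.shape)  # torch.Size([2, 256, 4096]): L/merge_len tokens at LLM width
", + "guess_lang": "python", + "page_idx": 14 + },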
+ { + "type": "text", + "text": "Next, we use a U-Net-like structure composed of one-dimensional depthwise separable convolutions to progressively down-sample the sequence, obtaining three one-dimensional temporal feature sequences with different", + "bbox": [ + 169, + 854, + 823, + 925 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "time resolutions, namely time_ad1, time_ad2, and time_ad3. Subsequently, a convolution with a sufficiently long window is applied to the shortest temporal feature sequence time_ad3, using zero padding at both ends as anchors to encode the relative temporal position of each token in the sequence. Then, we progressively upsample the temporal feature sequences from short to long, using residual connections to preserve temporal features at different scales. Finally, the temporal feature sequence time_ad_out is restored to the same length as the video features after token shuffling and aligned in the channel dimension through a linear layer.", + "bbox": [ + 169, + 103, + 826, + 203 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "B INSTRUCTION-TUNING DATA", + "text_level": 1, + "bbox": [ + 171, + 224, + 447, + 239 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "We fine-tuned VideoChat-T using 432K instances, which include 349K instances from TimePro and 82K instances of normal data. All videos were sampled from existing open-source video datasets, with specific information about the relevant data provided in Table 6.", + "bbox": [ + 169, + 256, + 823, + 299 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/1b9e6ef83ab9afc63694e00f5519b32bd5204559a5ab4e39d51403d8be6c638c.jpg", + "table_caption": [], + "table_footnote": [ + "Table 6: The complete instruction fine-tuning data used for training. We utilized a total of approximately 432K data points, which can be divided into 349K instances of TimePro and 82K instances of regular video data, covering 13 tasks across 21 datasets." + ], + "table_body": "
Set | Task | Source | Instance Num
TimePro | Temporal Video Grounding | DiDeMo | 32,944
 |  | QuerYD | 14,602
 |  | HiREST-grounding | 459
 | Dense Video Captioning | ActivityNet-Captions | 10,009
 |  | ViTT | 5,086
 |  | YouCook2 | 8,700
 | Video Summarization | TVSum | 50
 |  | SumMe | 25
 | Step Localization and Captioning | COIN | 9,026
 |  | HiREST-step | 459
 | Transcribed Speech Generation | YT-Temporal | 31,190
 | Reasoning Temporal Localization | ActivityNet-RTL | 33,557
 | Multi-format Temporal Grounding | InternVid-VTime | 100,000
 | Highlight Detection | ActivityNet-HL | 10,340
 | Temporal Grounded Caption | CosMo-TGC | 93,118
Normal | Conversation | VideoChatGPT | 13,303
 |  | VideoChat | 13,884
 | Video QA | EgoQA | 7,813
 |  | MovieChat-QA | 808
 | Reasoning | STAR | 45,731
 | Caption | MovieChat-Caption | 808
", + "bbox": [ + 241, + 314, + 758, + 657 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "We evaluate the quality of the data from three perspectives: diversity, length, and difficulty. We strive to include different datasets for various tasks, and the distribution of videos in the datasets is as broad as possible. The length of the videos should be controlled within an appropriate range, as excessively long or short videos may pose challenges for training. Each query should clearly describe the video content of the target time segment and avoid corresponding to multiple time segments in the video. Based on these principles, we have screened and integrated existing high-quality datasets, which significantly contribute to enhancing the model's temporal awareness capabilities.", + "bbox": [ + 169, + 728, + 823, + 827 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "TimePro encompasses a series of open-source temporal grounding datasets that we have integrated, cleaned, and refined, such as TimeIT (Ren et al., 2024), ANet-RTL (Huang et al., 2024b), and InternVid-VTime (Huang et al., 2024a). These high-quality open-source datasets have been experimentally validated by us. We also added two new self-made datasets, ANet-HL and CosMo-TGC.", + "bbox": [ + 169, + 832, + 823, + 888 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Temporal Video Grounding. This task involves providing a natural language query and requires outputting the corresponding video's start and end times. The datasets include DiDeMo (Anne Hen", + "bbox": [ + 169, + 895, + 823, + 925 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 490, + 948, + 508, + 959 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "dricks et al., 2017), QuerYD (Oncescu et al., 2021), and HiREST-grounding (Zala et al., 2023), aiming to achieve precise temporal localization during user interaction with natural language.", + "Dense Video Captioning. This task requires the model to detect a series of events occurring in a given video and output the corresponding timestamps and coarse-grained descriptions. The datasets for this part include ActivityNet-Caption (Krishna et al., 2017), ViTT Huang et al. (2020), and YouCook2 (Zhou et al., 2018), which help the model learn the temporal relationships between different events within the video.", + "Video Summarization. The goal of this task is not to summarize at the semantic level of natural language, but to determine a set of compressed frames or clips in the form of timestamps, representing the most informative content in a given video. Our datasets include TVSum (Song et al., 2015) and SumMe (Gygli et al., 2014), which effectively combine the model's temporal perception capabilities with its semantic content inference abilities.", + "Step Localization and Captioning. This task differs from dense video captioning as it is designed to segment and describe the important steps within a long video. We have integrated two datasets, COIN (Tang et al., 2019) and HiREST-step (Zala et al., 2023), which can help the model learn the procedural temporal logic relationships of different steps within a single event.", + "Transcribed Speech Generation. 
The purpose of this task is to predict speech content and its corresponding start and end timestamps based on visual signals in the video. This task includes the YT-Temporal (Zellers et al., 2022) dataset and can be viewed as a weakly supervised event localization and description task.", + "Reasoning Temporal Localization. The answers to the questions in this task include both timestamps and explanations. We used the ANet-RTL (Huang et al., 2024b) dataset as training data for this task. By combining temporal localization and reasoning, we can more specifically enhance the model's temporal perception capabilities.", + "Multi-format Temporal Grounding. This task includes both single-turn and multi-turn dialogues, with a variety of question types. We use the InternVid-VTime (Huang et al., 2024a) dataset for training this task. The broader range of task types and more diverse output formats can effectively enhance the model's temporal generalization capabilities.", + "Highlight Detection. Unlike video summarization, this task identifies only the most salient moments of a video in response to a natural language query, without covering the entire scope of the original video (Lei et al., 2021a). We used a custom dataset, ANet-HL, derived from temporal localization data. We extract video segments between the start and end times of the target's appearance and use CLIP to calculate the similarity between each frame's scene and the target. This is converted into discrete saliency levels ranging from 1 to 5, at intervals of 0.5. This task effectively enhances the model's temporal perception capabilities for specific events.", + "Temporal Grounded Caption. This task involves using scene titles as queries, requiring the model to output both the time segments when the scenes appear and the fine-grained subtitles for those segments. We used our custom dataset, CosMo-TGC. This task format, which combines temporal localization and semantic understanding, can effectively prevent large language models from focusing on irrelevant video segments, thereby improving the quality of the model's responses to questions.", + "We also used normal data comprising four tasks and six different data sources. These general data help prevent the model from overfitting to temporal grounding-related tasks during training, thereby preserving the model's original capabilities." + ], + "bbox": [ + 169, + 103, + 826, + 782 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "C COMPUTATIONAL EFFICIENCY", + "text_level": 1, + "bbox": [ + 171, + 806, + 460, + 821 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "By applying Token Shuffle, we further reduced the computational cost of VideoChat-T, giving it a significant computational advantage over high-performance models like LLaVA-OneVision (Li et al., 2024a) and Qwen2-VL (Wang et al., 2024a). Under the same settings, VideoChat-T uses only 3 tokens per frame, with FLOPs consumption at just $5.1\\%$ of LLaVA-OneVision.
Its inference time on a single A100 is only 0.63 seconds, reaching real-time response levels, making it highly suitable for applications requiring rapid response, such as online video understanding.", + "bbox": [ + 169, + 839, + 826, + 926 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 16 + }, + { + "type": "table", + "img_path": "images/caee47541cc17bb9a6d7f948345ff67c964770d537438e21fa3a86a3d156a5c2.jpg", + "table_caption": [ + "Table 7: Comparison of the computational efficiency and performance of VideoChat-T with other methods. Our approach achieves relatively impressive performance with extremely low computational cost." + ], + "table_footnote": [], + "table_body": "
Method | Token num per frame | FLOPs (128 frames) | Inference Time (128f, single A100 GPU) | Charades-STA IOU=0.5 | QVHighlight mAP | MVBench Avg | Egoschema Full | VideoMME Vision
Qwen2-VL (Wang et al., 2024a) | 138 | 929.8 T | Out Of Memory | 15.0 | 13.0 | 67.0 | 66.7 | 63.3
LLaVA-OneVision (Li et al., 2024a) | 196 | 693.7 T | 4.95 s | 7.3 | 14.98 | 56.7 | 60.1 | 58.2
VideoChat-T (Ours) | 3 | 35.5 T | 0.63 s | 48.7 | 26.5 | 59.9 | 60.0 | 46.3
", + "bbox": [ + 174, + 99, + 823, + 150 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "In terms of performance, VideoChat-T significantly outperforms LLaVA-OneVision in temporal grounding tasks. It has a slight advantage on MVBench; both perform comparably on Egoschema; but VideoChat-T performs worse on VideoMME. Given the substantial savings in computational resources with VideoChat-T, we consider the disadvantages on some datasets to be acceptable.", + "bbox": [ + 169, + 229, + 823, + 287 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Moreover, our model's ability to maintain reasonable performance under high compression ratios suggests that the token embedding spaces of contemporary models may be characterized by considerable feature redundancy. This observation presents a promising avenue for future research, as efficient techniques for compressing or discarding redundant features could substantially reduce computational costs without sacrificing model performance, enabling longer context reasoning.", + "bbox": [ + 169, + 292, + 826, + 364 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "D DETAILS OF HYPERPARAMETERS", + "text_level": 1, + "bbox": [ + 171, + 383, + 486, + 398 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/ca3cd1656aee9afc53e2bd99a4c8c3260116905a553d6e5a926e02ff61453318.jpg", + "table_caption": [ + "Table 7: Comparison of the computational efficiency and performance of VideoChat-T with other methods. Our approach achieves relatively impressive performance with extremely low computational cost." + ], + "table_footnote": [], + "table_body": "
config | epoch 1 | epoch 2&3
input frame | 192 | 128
max text length | 1536 | 1024
freeze TAPE | True | False
learning rate | 2e-5 | 1.5e-5
input resolution | 224
clip frame | 8
merge length | 4
QFormer token (per clip) | 96
lora rank | 16
lora alpha | 32
lora dropout | 0.1
batch size (per GPU) | 2
optimizer | AdamW
optimizer momentum | 0.9, 0.999
weight decay | 0.02
learning rate schedule | cosine decay
", + "bbox": [ + 338, + 419, + 660, + 647 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Table 8 lists the hyperparameters used during different epochs of the training process. In the first epoch, we used a larger number of input frames and froze the TAPE. At the beginning of the second epoch, we unfroze the TAPE and fixed the model's input frames to 128. Following the settings of VideoChat2, we integrated the lora module into the LLM and applied flash attention to accelerate the training process.", + "bbox": [ + 169, + 690, + 823, + 760 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "E FULL PERFORMANCES", + "text_level": 1, + "bbox": [ + 171, + 781, + 393, + 796 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/a3c57aa34b9a7662f0cafe3a8bd4673ab5a234ba215d29502ec0f8057f1d24ee.jpg", + "table_caption": [ + "Table 8: Hyper-parameter Settings During the Training Process of VideoChat-T." + ], + "table_footnote": [], + "table_body": "
Model | LLM | Avg | AS | AP | AA | FA | UA | OE | OI | OS | MD | AL | ST | AC | MC | MA | SC | FP | CO | EN | ER | CI
VideoChatGPT (Maaz et al., 2023) | 7B | 32.7 | 23.5 | 26.0 | 62.0 | 22.5 | 26.5 | 54.0 | 28.0 | 40.0 | 23.0 | 20.0 | 31.0 | 30.5 | 25.5 | 39.5 | 48.5 | 29.0 | 33.0 | 29.5 | 26.0 | 35.5
VideoLLaMA (Zhang et al., 2023) | 7B | 34.1 | 27.5 | 25.5 | 51.0 | 29.0 | 39.0 | 48.0 | 40.5 | 38.0 | 22.5 | 22.5 | 43.0 | 34.0 | 22.5 | 32.5 | 45.5 | 32.5 | 40.0 | 30.0 | 21.0 | 37.0
VideoChat (Li et al., 2023b) | 7B | 35.5 | 33.5 | 26.5 | 56.0 | 33.5 | 40.5 | 53.0 | 40.5 | 30.0 | 25.5 | 27.0 | 48.5 | 35.0 | 20.5 | 42.5 | 46.0 | 26.5 | 41.0 | 23.5 | 23.5 | 36.0
ST-LLM (Liu et al., 2024b) | 7B | 54.9 | 66.0 | 53.5 | 84.0 | 44.0 | 58.5 | 80.5 | 73.5 | 38.5 | 42.5 | 31.0 | 86.5 | 36.5 | 56.5 | 78.5 | 43.0 | 44.5 | 46.5 | 34.5 | 41.5 | 58.5
VideoChat2 (Li et al., 2024b) | 7B | 60.4 | 75.5 | 58.0 | 83.5 | 50.5 | 60.5 | 87.5 | 74.5 | 45.0 | 47.5 | 44.0 | 82.5 | 37.0 | 64.5 | 87.5 | 51.0 | 66.5 | 47.0 | 35.0 | 37.0 | 72.5
VideoChat-T | 7B | 59.9 | 83.5 | 68.5 | 80.5 | 44.0 | 61.0 | 71.0 | 84.0 | 35.5 | 48.0 | 56.5 | 87.0 | 46.0 | 56.5 | 78.0 | 49.5 | 59.0 | 46.0 | 37.0 | 40.0 | 66.5
", + "bbox": [ + 174, + 816, + 823, + 901 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Table 9: The full performance of VideoChat-T on MVBench. VideoChat-T still demonstrates strong performance, effectively prevents catastrophic forgetting caused by incremental fine-tuning.", + "bbox": [ + 169, + 902, + 823, + 931 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "The performance of VideoChat-T on MVBench is shown in Table 9. Compared to VideoChat2, VideoChat-T only experienced a $0.5\\%$ accuracy loss. This indicates that our method effectively preserves the capabilities of the base model, preventing catastrophic forgetting caused by incremental fine-tuning. For a detailed analysis of the performance degradation of MVBench, please refer to Appendix F.2. For the Action Localization (AL) task, which requires the model to determine the coarse-grained temporal position of events, the test accuracy improved from $44.0\\%$ to $56.5\\%$ . This indirectly confirms that our method significantly enhances the model's temporal awareness capabilities.", + "bbox": [ + 169, + 103, + 826, + 213 + ], + "page_idx": 18 + }, + { + "type": "table", + "img_path": "images/9fbfb9620bae9079a0ba61060047fd8e6d129aaef5b0c2bd83deda49e64a4c6e.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Model | LLM size | Overall (%) | Short Video (%) | Medium Video (%) | Long Video (%)
 | | w/o subs | w subs | w/o subs | w subs | w/o subs | w subs | w/o subs | w subs
ST-LLM (Liu et al., 2024b) | 7B | 37.9 | 42.3 | 45.7 | 48.4 | 36.8 | 41.4 | 31.3 | 36.9
Video-LLaVA (Lin et al., 2023a) | 7B | 39.9 | 41.6 | 45.3 | 46.1 | 38.0 | 40.7 | 36.2 | 38.1
ShareGPT4Video (Chen et al., 2024) | 8B | 39.9 | 43.6 | 48.3 | 53.6 | 36.3 | 39.3 | 35.0 | 37.9
Chat-UniVi-v1.5 (Jin et al., 2024) | 7B | 40.6 | 45.9 | 45.7 | 51.2 | 40.3 | 44.6 | 35.8 | 41.8
Qwen-VL-Chat (Bai et al., 2023) | 7B | 41.1 | 41.9 | 46.9 | 47.3 | 38.7 | 40.4 | 37.8 | 37.9
ShareGemini (Share, 2024) | 7B | 43.2 | 47.9 | 49.1 | 52.8 | 41.3 | 47.3 | 39.1 | 43.4
VideoChat2 (Li et al., 2024b) | 7B | 39.5 | 43.8 | 48.3 | 52.8 | 37.0 | 39.4 | 33.2 | 39.2
VideoChat-T | 7B | 46.3 | 55.8 | 53.3 | 59.9 | 43.8 | 54.0 | 41.9 | 53.4
", + "bbox": [ + 173, + 224, + 823, + 356 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Table 10: The full performance of VideoChat-T on VideoMME. VideoChat-T achieved significant performance improvements, particularly in the long video subset.", + "bbox": [ + 169, + 357, + 823, + 386 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "The overall performance of our model on VideoMME is presented in Table 10. VideoChat-T achieved significant improvements on both evaluation benchmarks of VideoMME, which include watching videos only and videos with subtitles. The improvements are particularly notable in the long video subset.", + "bbox": [ + 169, + 393, + 823, + 450 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "F EXTRA ABLATION", + "text_level": 1, + "bbox": [ + 171, + 470, + 357, + 486 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "F.1 DOMAIN CORRELATION OF DATA", + "text_level": 1, + "bbox": [ + 171, + 503, + 442, + 517 + ], + "page_idx": 18 + }, + { + "type": "table", + "img_path": "images/d52dbcd29db220724ab1b19d9d0dfe3b2034156c2c9f9efc3f8c3682565afc72.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Model | Charades-STA (R@1 IOU=0.5) | MVBench (avg)
VideoChat-T | 48.7 | 59.9
w/o STAR | 47.5 (-1.2) | 59.4 (-0.5)
", + "bbox": [ + 305, + 532, + 692, + 579 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Table 11: The performance changes of the model after removing STAR. Although the video sources of STAR may have some domain correlation with those of Charades-STA and MVBench, the performance of our model is minimally affected by STAR.", + "bbox": [ + 169, + 588, + 823, + 633 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "We found that the video sources in the STAR dataset might have some domain correlation with the video sources in MVBench and Charades-STA. Therefore, we removed STAR from the training set while keeping other training settings consistent with the original. The performance on benchmarks where the video sources might have domain correlation is shown in Table 11. The model's accuracy on Charades-STA (R@1 IOU=0.5) decreased by $1.2\\%$ , and the average accuracy on MVBench decreased by $0.5\\%$ . This indicates that the domain correlation of video sources did not significantly impact performance for our model. Notably, after removing STAR, our normal data volume was reduced to approximately 36K. This implies that, with sufficiently parameter-efficient initialization and appropriate training strategies, using only a small amount of high-quality normal data is sufficient to retain the model's original capabilities.", + "bbox": [ + 169, + 648, + 826, + 790 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "F.2 DEeper INVESTIGATION OF THE PERFORMANCE DROP ON MVBENCH", + "text_level": 1, + "bbox": [ + 171, + 806, + 694, + 821 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "We conducted a deeper investigation into the performance decline on MVBench. Through additional ablation experiments (as shown in Tabel 12), we identified two main factors contributing to the performance drop.", + "bbox": [ + 169, + 832, + 823, + 875 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Architectural Discrepancy: The original VideoChat2 model was designed to process only 16 frames, leading to a mismatch in the learned feature distribution compared to the architecture of VideoChatT. As shown in the first two rows of the table, increasing the input frame number for VideoChat2", + "bbox": [ + 169, + 881, + 823, + 925 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 18 + }, + { + "type": "table", + "img_path": "images/e70e31bf2b744396da75b43e21554740075609040d00ca2f8013334baf836e66.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Method | post ft data | data size | frame num | token num (per frame) | MVBench (AVG)
VideoChat2 | - | - | 16 | 12 | 60.4
VideoChat2 | - | - | 128 | 12 | 42.1
VideoChat-T (Common_Init) | - | - | 128 | 3 | 25.3
VideoChat-T (Ours) | - | - | 128 | 3 | 48.6
VideoChat-T (Ours) | TimePro+Normal (Ours) | 0.43M | 128 | 3 | 59.9
VideoChat-T (Ours) | TimePro+FullVideoChat2 | 2M | 128 | 3 | 62.9
", + "bbox": [ + 176, + 99, + 823, + 202 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Table 12: Performance of VideoChat2 and VideoChat-T on MVBench under different settings.", + "bbox": [ + 186, + 210, + 808, + 226 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "resulted in a significant performance drop (from 60.4 to 42.1). When initializing VideoChat-T with VideoChat2, performance was close to random (25.3) due to the newly introduced randomly initialized layers. By applying efficient initialization to these new layers, we partially recovered the original capabilities of the model, bringing the MVBench performance of the un-trained VideoChat-T back to 48.6, representing an improvement of 6.5 compared to the 128-frame VideoChat2. After further fine-tuning, the short-video processing capability of VideoChat-T improved significantly, reaching 59.9.", + "bbox": [ + 169, + 251, + 826, + 349 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Fine-tuning Data Discrepancy: We fine-tuned VideoChat-T using only 432K data, significantly less than the 2M non-grounded regular data used for training VideoChat2. The fine-tuning data for VideoChat2 primarily consisted of short videos of around ten seconds, which closely matched the length distribution of the MVBench evaluation videos, playing a crucial role in improving MVBench performance. To validate our hypothesis, we conducted additional experiments by training our VideoChat-T model using the TimePro and full VideoChat2 training data. It can be observed that VideoChat-T showed a slight improvement in performance on the MVBench dataset, achieving an accuracy of 62.9, which is an increase of 2.5 compared to the original VideoChat2.", + "bbox": [ + 169, + 356, + 823, + 467 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Based on the above, we can conclude the fundamental reasons affecting the model's foundational generalization capabilities. When a model undergoes adjustments, the learned original distribution may not perfectly match the new architecture, making the efficient initialization of new layers crucial. The features learned from the original dataset might be forgotten due to changes in various parameters. Utilizing a more comprehensive and diverse dataset for fine-tuning can restore and even further enhance performance.", + "bbox": [ + 169, + 474, + 823, + 559 + ], + "page_idx": 19 + }, + { + "type": "table", + "img_path": "images/2050e3f990134bcd5ee5f66999a9af1feccafb67a8f70c10c65785883179abb3.jpg", + "table_caption": [ + "F.3 ASSOCIATION BETWEEN PERFORMANCE AND MODEL DESIGN" + ], + "table_footnote": [], + "table_body": "
Method | FT Data | Charades-STA IOU=0.5 | QVHighlight mAP | MVBench Avg | Egoschema Full | VideoMME w/o subs
TimeChat | TimeIT+Valley | 32.2 | 14.5 | 38.5 | 33.0 | 30.2
TimeChat | TimePro+Normal | 34.2 | 16.3 | 41.6 | 38.9 | 33.4
VideoChat-T | TimePro+Normal | 48.7 | 26.5 | 59.9 | 60.0 | 46.3
", + "bbox": [ + 207, + 603, + 790, + 672 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Table 13: Comparison of other model architectures trained on our dataset with our method, demonstrating the impact of the overall model structure design.", + "bbox": [ + 169, + 681, + 823, + 710 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "To eliminate the influence of training data and auxiliary tasks, and to more clearly evaluate the association between performance and model design, we fine-tuned TimeChat using the full set of fine-tuning data and auxiliary tasks from VideoChat-T. Table 13 presents the performance of TimeChat, fine-tuned with our data, across five datasets. It can be observed that TimeChat, fine-tuned with our data, shows improvements across all benchmarks. However, its performance still lags significantly behind VideoChat-T. This indicates that an efficient fine-tuning architecture design and high-quality, diverse datasets are both essential and complementary.", + "bbox": [ + 169, + 728, + 826, + 825 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "F.4 VALIDATION OF TRANSFERABILITY", + "text_level": 1, + "bbox": [ + 171, + 842, + 460, + 856 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "To verify the robustness of our TimeSuite for other MLLMs, we transferred our method to Llava-OneVision (Li et al., 2024a). Table 14 shows the performance changes of Llava-OneVision after applying our TimeSuite. It can be seen that when we apply the full set of methods in TimeSuite to Llava-OneVision, the model's performance on two different long-video evaluation benchmarks", + "bbox": [ + 169, + 867, + 823, + 925 + ], + "page_idx": 19 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 19 + }, + { + "type": "table", + "img_path": "images/13ffe7f9c75f9998a7d1704ae8920b30bf5392370e2081b05de584158f20f245.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Method | Charades-STA IOU=0.5 | QVHighlight mAP | VideoMME w/o subs | MLVU Avg | MVBench Avg
Llava-OneVision (baseline) | 7.3 | 15.0 | 58.2 | 64.7 | 56.7
Llava-OneVision-T (Ours) | 42.5 | 21.7 | 61.4 | 69.4 | 56.1
", + "bbox": [ + 207, + 99, + 789, + 161 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "improves (+3.2 on VideoMME and +4.7 on MLVU), effectively demonstrating the robustness of our TimeSuite for different MLLMs.", + "bbox": [ + 169, + 241, + 823, + 268 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "F.5 EXPLORATIONS OF DATA CONFIGRATIONS OF TIMEPRO", + "text_level": 1, + "bbox": [ + 171, + 287, + 599, + 301 + ], + "page_idx": 20 + }, + { + "type": "table", + "img_path": "images/3790862ec03db5b7017f9227c34d426e2e1ad7cafeb723dbfdc0a261b24da217.jpg", + "table_caption": [ + "Table 14: Performance comparison of TimeSuite migration to other MLLMs. The application of our method shows a certain improvement in long video comprehension, demonstrating the transferability of our approach." + ], + "table_footnote": [], + "table_body": "
Method | MVBench Avg | Egoschema Full | VideoMME w/o subs | Charades-STA IOU=0.5 | QVHighlight mAP
TimePro 615K + Normal 82K (old version) | 60.0 | 61.0 | 46.3 | 45.4 | 25.7
TimePro 349K + Normal 82K (Ours) | 59.9 | 60.0 | 46.3 | 48.7 | 26.5
", + "bbox": [ + 207, + 316, + 789, + 369 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Table 15: Comparison of different versions of our proposed TimePro. More data does not necessarily lead to higher overall performance, highlighting the importance of data quality.", + "bbox": [ + 169, + 378, + 823, + 409 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "In the early version of TimePro, we employed datasets comprising 309K Multi-format Temporal Grounding instances, 150K Temporal Grounded Caption instances and other data. Through extensive experimentation (as shown in Tabel 15), we discovered that removing low-quality data while retaining high-quality instances could significantly reduce training time without compromising performance. Consequently, we pruned these two part datasets to 100K and 93K instances, respectively. The data distribution presented in the paper represents the optimized and relatively balanced configuration we arrived at.", + "bbox": [ + 169, + 426, + 823, + 525 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "G DISCUSSION", + "text_level": 1, + "bbox": [ + 171, + 545, + 316, + 560 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "G.1 CAN THE OVERALL PERFORMANCE OF MLLMS BE ENHANCED BY CONTINUOUSLY INTEGRATING EXPERT TASKS?", + "text_level": 1, + "bbox": [ + 171, + 577, + 790, + 604 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "By appropriately fine-tuning the Multimodal Large Language Model (MLLM), we have developed a general MLLM with powerful zero-shot temporal grounding capabilities. Its performance, after fine-tuning on the training set of evaluation benchmarks, can rival the current state-of-the-art supervised expert models. Based on these results, we can boldly speculate whether it is possible to internalize the capabilities of expert models such as spatial grounding, tracking and detection (Zeng et al., 2023) into the MLLM itself, without using any external expert decoders, to enhance the comprehensive understanding performance of the MLLM and achieve a unified generalist MLLM for multiple tasks.", + "bbox": [ + 169, + 617, + 823, + 715 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Merlin (Yu et al., 2023) and VisionLLM (Wang et al., 2024b) have already attempted something similar, but its performance is limited by the reasoning capabilities and language representation bottlenecks of the LLM. There is still a significant gap between its performance and that of expert models for various tasks. We observed similar phenomena in our experiments. The temporal grounding task only requires outputting two timestamps, and the task format is relatively simple, so our model achieved good results. However, the highlight detection task requires outputting multiple discrete timestamps and their corresponding saliency scores. The model needs to accurately predict dozens of numbers in language form to answer the question correctly. Our model performed well only on data with fewer timestamps. Therefore, how to simplify the complex output format of expert tasks into the language representation of LLMs, or to design special processing procedures to simplify complex expert tasks, is a question worth exploring.", + "bbox": [ + 169, + 720, + 823, + 876 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Moreover, designing diverse data formats is also crucial for enhancing the expert capabilities of MLLMs. 
Compared to classic expert models, MLLMs have a natural advantage in task type diversity and can enhance their performance through diverse task variants of a single capability.", + "bbox": [ + 169, + 881, + 823, + 925 + ], + "page_idx": 20 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "For temporal grounding tasks, we found that enhancing task diversity has a significant effect on improving the model's temporal perception generalization ability. We can boldly speculate that if there are sufficiently diverse training data task types, most tasks with relatively simple output formats can achieve results comparable to expert models through appropriate instruction fine-tuning.", + "bbox": [ + 169, + 103, + 823, + 161 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Through the integration of diverse expert tasks and the optimization of language representations, MLLMs can achieve substantial improvements in their overall capabilities. This allows them to effectively comprehend and address complex tasks, rivaling or even exceeding the performance of specialized expert models within specific domains. Looking ahead, MLLMs have the potential to evolve into highly versatile AI models, transcending traditional conversational and QA capabilities. They will be equipped to handle a wide range of complex expert tasks across various domains, such as vision, language, and reasoning.", + "bbox": [ + 169, + 166, + 826, + 265 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "G.2 WHY DOES TEMPORAL GROUNDING DATA LEAD TO ACCURACY LOSS IN SHORT-TERM VIDEOS?", + "text_level": 1, + "bbox": [ + 169, + 297, + 808, + 327 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "We conducted ablation experiments using different combinations of temporal grounding data and regular data. The accuracy of VideoChat-T on MVBench after fine-tuning with various data combinations is shown in Table 16.", + "bbox": [ + 169, + 344, + 823, + 387 + ], + "page_idx": 21 + }, + { + "type": "table", + "img_path": "images/de6e6aaab81cb54eefcf31bcb060a69ffd1354b82ac2e2170d6377afdfec2cf8.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
FT Data | MVBench (AVG)
TimeIT | 54.7
TimeIT+Normal | 55.3
Normal | 56.1
TimePro | 57.4
TimePro+Normal (Ours) | 59.9
", + "bbox": [ + 338, + 412, + 658, + 518 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Table 16: Performance VideoChat-T on MVBench under different fine-tuning data settings.", + "bbox": [ + 196, + 527, + 797, + 542 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "The diversity of grounding data formats in the past has often been limited, which can lead to overfitting on Temporal Grounding tasks and cause the model to lose its general question-answering capability. We compared the TimeIT dataset proposed in TimeChat (Ren et al., 2024) with our TimePro dataset on MVBench. As shown in the Table 16, fine-tuning with only TimeIT resulted in the lowest accuracy, and the combined use of TimeIT+Normal also performed slightly worse than using Normal alone. This indicates that monotonous grounding data indeed damages the model's original performance (as shown in Figure 1 at the beginning of the paper, TimeChat loses some of its general question-answering capability after fine-tuning, where it outputs localization times for general questions).", + "bbox": [ + 169, + 575, + 823, + 702 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "In contrast, our TimePro dataset includes diverse data, encompassing 9 different task types from 15 datasets, which helps mitigate the generalization loss caused by homogeneous grounding data types. Additionally, our dataset integrates Grounding with various general tasks. For instance, Grounded Caption requires detailed descriptions of corresponding video segments, while Reasoning Temporal Localization demands the model to reason about questions. This approach significantly enhances the model's generalization ability and minimizes the impact on its original capability (e.g., short video accuracy). As demonstrated in the Table 16, the performance of using only TimePro exceeds that of using Normal alone, and the combined use of TimePro and Normal far surpasses all other combinations. This also confirms that our TimePro effectively preserves the model's original performance.", + "bbox": [ + 169, + 708, + 826, + 847 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Overall, using a single type of expert task training data can easily lead to model overfitting, resulting in significant loss of the model's original capabilities. To preserve the model's foundational generalization abilities, it is essential to use diversified training data. Additionally, incorporating data of various types and distributions, such as text, images, and videos, can further enhance the model's generalization capabilities.", + "bbox": [ + 169, + 854, + 826, + 925 + ], + "page_idx": 21 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "G.3 COULD TRAINING THE MODEL ON BOTH TEMPORAL AND NON-TEMPORAL GROUNDING DATA MITIGATE PERFORMANCE LOSS IN SHORT-TERM VIDEOS?", + "text_level": 1, + "bbox": [ + 169, + 103, + 821, + 132 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "To address this question, we conducted additional ablation experiments. By training VideoChat-T with different combinations of temporal and non-temporal grounding data, we were able to clearly observe the effects of both types of data on the model's performance. 
The results of the experiments are shown in Table 17.", + "bbox": [ + 169, + 143, + 823, + 200 + ], + "page_idx": 22 + }, + { + "type": "table", + "img_path": "images/23e060406cc7ca5dd024e0fd494c5e7c33a333e7c1b025f0cab54e825a31606b.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
FT Data | MVBench Avg | VideoMME w/o subs | Charades-STA R@1 IOU=0.5
Normal | 56.1 | 42.6 | 8.0
TimePro | 57.4 | 46.0 | 45.6
TimePro+Normal (Ours) | 59.9 | 46.3 | 48.7
", + "bbox": [ + 241, + 210, + 754, + 303 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Table 17: Performance comparison of VideoChat-T using different combinations of temporal grounding and non-temporal grounding data.", + "bbox": [ + 169, + 311, + 823, + 342 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "It can be observed that the combined use of TimePro+Normal for VideoChat-T achieves the highest performance in short video QA, long video QA, and temporal grounding tasks. This not only demonstrates that using both temporal grounding and non-temporal grounding data can reduce performance loss in short videos, but also reveals that the effects of temporal and non-temporal grounding data are complementary across various tasks. The distinct differences between temporal grounding and non-temporal grounding tasks can respectively compensate for the model's shortcomings in different task perspectives and feature distributions. The simultaneous use of both types of data can effectively enhance the model's overall capabilities.", + "bbox": [ + 169, + 359, + 826, + 472 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "H CASE STUDY", + "text_level": 1, + "bbox": [ + 171, + 492, + 318, + 508 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "H.1 MORE QUALITATIVE ANALYSIS", + "text_level": 1, + "bbox": [ + 171, + 523, + 429, + 539 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "To further qualitatively analyze our model, we supplemented it with three types of examples. These examples are about long video QA, short video QA, and captioning tasks, all of which include temporal grounding.", + "bbox": [ + 169, + 550, + 823, + 594 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "More qualitative comparisons about long video QA are shown in Figure 6. VideoChat-T effectively handles various questions across different domains. By better perceiving the temporal relationships of different events occurring in long videos, it can more accurately and deeply understand the detailed content of the entire video.", + "bbox": [ + 169, + 599, + 823, + 656 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "More qualitative comparisons about short video QA are shown in Figure 7. VideoChat-T effectively retains the original capabilities of the base model. Through parameter-efficient initialization methods and appropriate training strategies, we minimize the damage to the base model's capabilities caused by new architectures and data.", + "bbox": [ + 169, + 662, + 823, + 720 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "More qualitative comparisons about captioning are shown in Figure 8. Although VideoChat2 describes more local details in some scenarios compared to VideoChat-T, VideoChat-T focuses more on a series of temporal events, which aligns better with how humans typically describe videos.", + "bbox": [ + 169, + 726, + 823, + 768 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "H.2 SHORTCOMINGS", + "text_level": 1, + "bbox": [ + 171, + 785, + 333, + 800 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "We also conducted a qualitative analysis of the shortcomings of VideoChat-T through examples. As shown in Figure 9, VideoChat-T performs poorly on examples with complex logic. 
In the left example, although VideoChat-T accurately identified the timing of the event, it failed to fully explain the motivation behind the man opening the isolation door, which was \"to fight the hijackers of the space elevator, seize the controller, and thus save the people in the entire space elevator.\" In the right example, VideoChat-T correctly identified the event where Mr. Bean reached out to touch his desk mate's table, but it incorrectly explained the true reason for this action, which was \"to cover up the fact that he was copying his desk mate's exam by pretending to wipe dust off the desk.\"", + "bbox": [ + 169, + 811, + 825, + 925 + ], + "page_idx": 22 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/e52d5f38eabd8a9e955e523265302051c3b150ffd1cb3d8bde4b9df1f3a4805f.jpg", + "image_caption": [ + "Figure 6: More qualitative comparisons in temporal grounding & long video QA." + ], + "image_footnote": [], + "bbox": [ + 173, + 99, + 488, + 426 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/7a0c7dcb93ea05e6d55e7cd110f7f6731f43f222eef388781bf8d351bc78fe07.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 99, + 823, + 425 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/1c8ccced94116db04aa8157061d63ee3b4fc32ba71d140812c5ca8a41962a7a9.jpg", + "image_caption": [ + "Figure 7: More qualitative comparisons in temporal grounding & short video QA." + ], + "image_footnote": [], + "bbox": [ + 173, + 464, + 490, + 617 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/6ce4c7c9de45e48dbb9ea0a84be1a3868ed6615bd5840777217d3668c4e9131e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 464, + 823, + 618 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Due to the preponderance of single-turn, perceptual questions in our training data and the lack of multi-step reasoning data with complex logic, our model struggles to handle more challenging scenarios that demand intricate logical reasoning. To address this limitation, we propose constructing data in a chain-of-thought format to guide the model through multi-step reasoning, enabling it to delve deeper into the underlying motivations and causal relationships within a video.", + "bbox": [ + 169, + 670, + 826, + 742 + ], + "page_idx": 23 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 488, + 946, + 509, + 959 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/80a2a36824b3311ed40441b8c36bf0418f18e8c27ac646dea3885618de83ec4b.jpg", + "image_caption": [ + "Figure 8: More qualitative comparisons in temporal grounding & captioning." 
+ ], + "image_footnote": [], + "bbox": [ + 173, + 204, + 488, + 393 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/2778c174efb9e72fc415a2a83c66f0215f739af772831010d5f229d7adb68871.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 204, + 823, + 393 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/623ccbd51596cb694021ccac483f15bdda05f6f38d7caa766cd8ea5fdc15ce4a.jpg", + "image_caption": [ + "Figure 9: Examples of poor performance by VideoChat-T. While it accurately identifies the time of events, it struggles to answer questions that involve more complex logic." + ], + "image_footnote": [], + "bbox": [ + 173, + 637, + 478, + 777 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/d038dda1b056cb1d2be4292749a2aebf0529a81ae05fae500e01321ca9708227.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 514, + 637, + 821, + 775 + ], + "page_idx": 24 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 24 + } +] \ No newline at end of file diff --git a/2025/TimeSuite_ Improving MLLMs for Long Video Understanding via Grounded Tuning/f48bb6a8-358b-46f9-aa7b-783937ea3be0_model.json b/2025/TimeSuite_ Improving MLLMs for Long Video Understanding via Grounded Tuning/f48bb6a8-358b-46f9-aa7b-783937ea3be0_model.json new file mode 100644 index 0000000000000000000000000000000000000000..7b0524c38dd638194b4b977b8d44e292e645481f --- /dev/null +++ b/2025/TimeSuite_ Improving MLLMs for Long Video Understanding via Grounded Tuning/f48bb6a8-358b-46f9-aa7b-783937ea3be0_model.json @@ -0,0 +1,3671 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.1, + 0.825, + 0.147 + ], + "angle": 0, + "content": "TIMESUITE: IMPROVING MLLMS FOR LONG VIDEO UNDERSTANDING VIA GROUNDED TUNING" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.155, + 0.792, + 0.171 + ], + "angle": 0, + "content": "Xiangyu Zeng\\(^{1,2}\\) Kunchang Li\\(^{3,2}\\) Chenting Wang\\(^{6,2}\\) Xinhao Li\\(^{1,2}\\) Tianxiang Jiang\\(^{5,2}\\)" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.171, + 0.714, + 0.186 + ], + "angle": 0, + "content": "Ziang Yan\\(^{4,2}\\) Songze Li\\(^{7,2}\\) Yansong Shi\\(^{5,2}\\) Zhengrong Yue\\(^{6,2}\\) Yi Wang\\(^{2,8}\\)" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.186, + 0.473, + 0.201 + ], + "angle": 0, + "content": "Yali Wang\\(^{3,2}\\) Yu Qiao\\(^{2}\\) Limin Wang\\(^{1,2,\\dagger}\\)" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.203, + 0.845, + 0.218 + ], + "angle": 0, + "content": "\\(^{1}\\)Nanjing University \\(^{2}\\)Shanghai AI Laboratory \\(^{3}\\)SIAT, Chinese Academy of Sciences \\(^{4}\\)Zhejiang University" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.218, + 0.797, + 0.232 + ], + "angle": 0, + "content": "\\(^{5}\\)University of Science and Technology of China \\(^{6}\\)Shanghai Jiao Tong University \\(^{7}\\)Fudan University" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.232, + 0.367, + 0.245 + ], + "angle": 0, + "content": "8 Shanghai Innovation Institute" + }, + { + "type": "list", + "bbox": [ + 0.184, + 0.203, + 0.845, + 0.245 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.249, 
+ 0.632, + 0.264 + ], + "angle": 0, + "content": "XiangyuZeng2001@outlook.com lmwang@nju.edu.cn" + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.281, + 0.446, + 0.441 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.468, + 0.296, + 0.821, + 0.437 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.441, + 0.825, + 0.497 + ], + "angle": 0, + "content": "Figure 1: VideoChat-T demonstrates high performance for both long-form video question answering and temporal grounding. Our TimeSuite presents a collection of new designs to enhance the long video understanding capability of MLLMs. It will implicitly endow the MLLM with ability of correctly attending the visual segments when generating answers, thus relieving the hallucinations." + }, + { + "type": "title", + "bbox": [ + 0.451, + 0.513, + 0.547, + 0.527 + ], + "angle": 0, + "content": "ABSTRACT" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.542, + 0.768, + 0.903 + ], + "angle": 0, + "content": "Multimodal Large Language Models (MLLMs) have demonstrated impressive performance in short video understanding. However, understanding long-form videos still remains challenging for MLLMs. This paper proposes TimeSuite, a collection of new designs to adapt the existing short-form video MLLMs for long video understanding, including a simple yet efficient framework to process long video sequence, a high-quality video dataset for grounded tuning of MLLMs, and a carefully-designed instruction tuning task to explicitly incorporate the grounding supervision in the traditional QA format. Specifically, based on VideoChat, we propose our long-video MLLM, coined as VideoChat-T, by implementing a token shuffling to compress long video tokens and introducing Temporal Adaptive Position Encoding (TAPE) to enhance the temporal awareness of visual representation. Meanwhile, we introduce the TimePro, a comprehensive grounding-centric instruction tuning dataset composed of 9 tasks and 349k high-quality grounded annotations. Notably, we design a new instruction tuning task type, called Temporal Grounded Caption, to perform detailed video descriptions with the corresponding timestamps prediction. This explicit temporal location prediction will guide MLLM to correctly attend on the visual content when generating description, and thus reduce the hallucination risk caused by the LLMs. Experimental results demonstrate that our TimeSuite provides a successful solution to enhance the long video understanding capability of short-form MLLM, achieving improvement of \\(5.6\\%\\) and \\(6.8\\%\\) on the benchmarks of Egoschema and VideoMME, respectively. In addition, VideoChat-T exhibits robust zero-shot temporal grounding capabilities, significantly outperforming the existing state-of-the-art MLLMs. After fine-tuning, it performs on par with the traditional supervised expert models. Our code and dataset are available at https://github.com/OpenGVLab/TimeSuite." + }, + { + "type": "page_footnote", + "bbox": [ + 0.199, + 0.91, + 0.411, + 0.924 + ], + "angle": 0, + "content": "\\(^\\dagger\\) denotes the corresponding author." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.495, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.103, + 0.338, + 0.119 + ], + "angle": 0, + "content": "1 INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.14, + 0.827, + 0.281 + ], + "angle": 0, + "content": "Multimodal Large Language Models (MLLMs) have demonstrated impressive video understanding performance by following the general human instructions to interpret the visual content (Li et al., 2023b; Zhang et al., 2023; Lin et al., 2023a; Jin et al., 2024; Wang et al., 2024e). However, these MLLMs still struggle in long video understanding, as a long video sequence may contain various dynamic actions and complex temporal relationships, making it difficult for MLLMs to effectively locate the key segments related to questions. When humans watch long videos, their attention is consciously focused on prominent segments, which may occur within a few seconds. NExT-GQA (Xiao et al., 2024) has also verified the relevance of temporal grounding for accurately answering video QA tasks. Therefore, a natural question arises: Can we enhance long video understanding by using temporal grounding as a auxiliary task?" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.286, + 0.828, + 0.496 + ], + "angle": 0, + "content": "Previously, some works have made progress in temporal grounding task by using general MLLMs. They often enhance the temporal grounding capability of video MLLMs by designing specialized modules and perform specific supervised fine-tuning (Ren et al., 2024; Huang et al., 2024a,b). However, these overly specialized designs significantly impair the general QA capabilities of video MLLMs, resulting in great performance drop on the video QA task (as illustrated by TimeChat in Figure 1). Meanwhile, current research on long video understanding primarily focuses on architecture design, such as long-context LLMs (Liu et al., 2024a) and token compression (Song et al., 2024a). They can only capture holistic semantics in videos without the ability of localizing fine-grained information, leading to poor performance in temporal grounding tasks (as illustrated by MovieChat in Figure 1). So far, it is still challenging to build a video MLLM that is good at both tasks of temporal grounding and long video QA. We argue long video understanding could be assisted by explicitly performing temporal grounding, as grounding supervision enables MLLM to establish the detailed correspondence between the visual segments and fine-grained semantics. This fine-grained alignment would guide the MLLM to attend correctly video segments when generating answers and thus relieve the hallucination risk caused by the LLM." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.501, + 0.827, + 0.752 + ], + "angle": 0, + "content": "Based on the above analysis, in this paper, we propose TimeSuite, a collection of new designs to improve the long video understanding capability of the existing short-form MLLMs, with a focus on incorporating grounding supervision in instruction tuning process. First, to address the high computational cost caused by the excessive number of visual tokens in long videos, we propose a simple Token Shuffle scheme to compress visual tokens, allowing the LLM to process more frame inputs. 
We also propose TAPE to generate adaptive position encodings, enhancing the temporal awareness of visual representations. The proposed structure does not introduce overly complex proprietary designs, which could be efficiently initialized with the parameters of short video MLLMs, without damaging the original performance of pre-trained MLLM. Second, to naturally incorporate the grounding ability into our MLLMs and yet still to preserve its original general QA capability, we design a new instruction tuning task, called Temporal Grounded Caption. This new task requires generating detailed segment-level description with corresponding timestamp prediction. Tuning on this new task will not only endow the MLLM with the extra grounding ability but also enhance its original long video QA performance, thanks to the requirement of building correspondence between grounded segments and detailed captions. Finally, we collect a comprehensive grounding-centric instruction tuning dataset for post-training our designed MLLMs, which is composed of 349K high-quality annotations covering 9 tasks. Based on this new dataset, we are able to perform grounded tuning with detailed captions on our proposed MLLMs (coined as VideoChat-T)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.758, + 0.828, + 0.926 + ], + "angle": 0, + "content": "We verify the effectiveness of TimeSuite design through extensive experiments on the tasks of long video understanding and temporal grounding. VideoChat-T demonstrates a significant improvement in accuracy over baseline for long video understanding, with a \\(5.6\\%\\) increase on Egoschema (Mangalam et al., 2023) and a \\(6.8\\%\\) increase on VideoMME (Fu et al., 2024). Additionally, VideoChat-T exhibits robust zero-shot temporal localization capabilities on Charades-STA (Gao et al., 2017) and QVHighlights (Lei et al., 2021a). Our VideoChat-T outperforms the state-of-the-art temporal grounding MLLM of TimeChat from \\(50\\%\\) to \\(100\\%\\) for different metrics. After fine-tuning on the training set of temporal grounding benchmarks, the performance of VideoChat-T is on par with the state-of-the-art supervised expert models. The experiments demonstrate that our VideoChat-T is the first end-to-end MLLM that is able to perform well on both temporal grounding and general video QA. In particular, we show that grounded tuning with explicit location prediction can facilitate the long video understanding and relieve the hallucination risk." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.948, + 0.506, + 0.96 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.103, + 0.347, + 0.119 + ], + "angle": 0, + "content": "2 RELATED WORK" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.142, + 0.827, + 0.351 + ], + "angle": 0, + "content": "Video MLLMs. With the advancement of open-sourced LLMs (Chiang et al., 2023; Touvron et al., 2023; Jiang et al., 2023), video MLLMs have emerged by utilizing projection bridges to link vision foundation models with LLMs (Li et al., 2023b; 2024b; Zhang et al., 2023; Li et al., 2024a). Limited by the training context length, thought these methods perform well with a small number of frame inputs, they meet significant challenges when processing long videos. 
The longer video length usually implies longer temporal relationships and more redundancies, resulting in the difficulty of extracting key clues (Zhou et al., 2024). Recently, several methods for long video handling have been proposed, such as exploiting long context LLM (Liu et al., 2024a; Zhang et al., 2024b; Xue et al., 2024; Wang et al., 2024d) and token compression (Li et al., 2023d; Song et al., 2024a; Zhang et al., 2024a) for enabling more visual inputs and agents for task decomposition or retrieval (Fan et al., 2024; Wang et al., 2024c;h). MovieChat (Song et al., 2024a) supports more frames by applying short-term and long-term memory to merge similar visual tokens. Yet, studies in learning objectives for long videos are less explored, making it difficult to alleviate the frequent hallucination of LLMs in long context reasoning. Our proposed TimeSuite leverages temporally-centric tasks to unlock the temporal perception potential of MLLMs, anchoring responses to the most relevant video segments." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.379, + 0.828, + 0.547 + ], + "angle": 0, + "content": "Temporal Grounding. Temporal grounding is a fundamental capability in video understanding, associating semantics to specific clips with corresponding timestamps. Typical expert models (Lei et al., 2021b; Moon et al., 2023a;b; Lin et al., 2023b; Zeng et al., 2024) have been developed by formulating it into a timestamp regression from visual inputs and user queries. Most existing video MLLMs fail to address it compared with expert models, while some remedy its temporal grounding by specifically designed architectures and data (Huang et al., 2024a; Wang et al., 2024f; Li et al., 2024c; Wang et al., 2024g; Huang et al., 2024b; Qu et al., 2024). Timechat (Ren et al., 2024) binds visual features of images with timestamps and uses a sliding window to handle variable token length. From the perspective of training data, an instruction-tuning dataset TimeIT is constructed. Despite impressive improvements in temporal performance, these MLLMs still lag behind expert models and compromise general video dialogue capabilities. In this paper, we explore how to enhance the temporal grounding of MLLMs while preserving their original capabilities." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.58, + 0.285, + 0.594 + ], + "angle": 0, + "content": "3 METHOD" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.619, + 0.825, + 0.704 + ], + "angle": 0, + "content": "In this section, we detail the proposed TimeSuite, a new collection of designs for improving short video MLLMs. Specifically, our TimeSuite includes a long video modeling framework, a high-quality video dataset for grounded tuning, and a carefully-designed instruction tuning task. With this new TimeSuite design, we are able to adapt the short-form video MLLM, obtaining significant performance improvements on two types of long video understanding tasks: traditional long video QA and temporal video grounding." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.733, + 0.319, + 0.746 + ], + "angle": 0, + "content": "3.1 VIDEOCHAT-T" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.764, + 0.825, + 0.835 + ], + "angle": 0, + "content": "We first describe the architecture of our proposed long video modeling framework. Specifically, built upon VideoChat2 (Li et al., 2024b), we devise long-video version of VideoChat-T. 
Our VideoChat-T is composed of a video backbone for extracting visual representations, a visual-language connector to compress visual tokens and bridge the visual and languages modalities, a LLM to follow human instructions to interpret the video content." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.84, + 0.827, + 0.926 + ], + "angle": 0, + "content": "The architecture of VideoChat-T is illustrated in Figure 2. Its workflow has three stages. In the first stage, long videos are evenly segmented into clips and the clips are embedded by the Video Encoder and Q-Former (Li et al., 2023a). Then, for compressing visual token number and highlighting crucial ones, token shuffling is employed to merge adjacent tokens, and TAPE is used to add temporal adaptive positional encodings. Finally, the compressed video token sequence is fed to the LLM to generate accurate responses that adhere to user requirements." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.101, + 0.827, + 0.301 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.313, + 0.825, + 0.385 + ], + "angle": 0, + "content": "Figure 2: Overall Architecture of VideoChat-T. First, long videos are segmented into clips, which are then transformed into feature embeddings by video encoder and time-aware Qformer. Next, all visual tokens undergo Token Shuffle to compress overly long tokens, and generate adaptive positional encodings through TAPE. Finally, the long video tokens are concatenated with the user query, serving as the input of LLM, thereby generating appropriate responses." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.41, + 0.367, + 0.423 + ], + "angle": 0, + "content": "3.1.1 BACKBONE DESIGN" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.433, + 0.825, + 0.531 + ], + "angle": 0, + "content": "Video clip encoding. For the given long video, we perform uniform sampling (Wang et al., 2019) to obtain \\( K \\times T \\) frames. We divide these frames into \\( K \\) video segments in chronological order, and sample \\( T \\) frames from each segment. Next, we use the video encoder and its visual-linguistic connector (Q-Former here) to encode each segment into \\( N \\) tokens. After the aforementioned processing, the entire video is encoded into a sequence of visual tokens, denoted by \\( \\mathbf{V}_q \\in \\mathbb{R}^{L \\times C_q} \\), where \\( C_q \\) is the dimension of output token by the Q-Former and \\( L = K \\times N \\) is the total number of tokens for the entire video." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.538, + 0.825, + 0.607 + ], + "angle": 0, + "content": "Large Language Model. According to previous research, images and visual cues are projected into the same feature space of the LLM. The LLM acts as an interaction interface in the MLLMs, being used to process multimodal inputs, parse user instructions, and generate appropriate responses. To afford the processing of long video sequence, we need to design an efficient compression module between the visual encoder and LLMs." 
+ {
+ "type": "title",
+ "bbox": [0.172, 0.623, 0.473, 0.637],
+ "angle": 0,
+ "content": "3.1.2 VL-CONNECTOR: TOKEN SHUFFLE"
+ }, + {
+ "type": "text",
+ "bbox": [0.171, 0.647, 0.825, 0.732],
+ "angle": 0,
+ "content": "The increased number of sampled frames in long videos leads to a larger number of encoded visual tokens, causing a significant rise in the computational complexity and memory consumption of LLMs. Therefore, it is crucial to keep the number of visual tokens within an acceptable range. Some works have proposed various token compression schemes, such as clustering (Jin et al., 2024) and pooling (Huang et al., 2024b). However, clustering methods often struggle to maintain temporal consistency, and pooling methods usually result in a certain loss of overall performance."
+ }, + {
+ "type": "text",
+ "bbox": [0.171, 0.737, 0.825, 0.858],
+ "angle": 0,
+ "content": "To address this, we propose a simple token shuffling compression scheme that ensures the temporal consistency of video tokens before and after compression while avoiding excessive performance loss. Previous methods often used a projector to achieve dimensional conversion. However, projecting visual encoding vectors from low to high dimensions does not increase information density. Therefore, we propose to rearrange multiple visual tokens along the channel dimension. Specifically, for the long video \\(\\mathbf{V}_q = [v_q^1, v_q^2, \\dots, v_q^L] \\in \\mathbb{R}^{L \\times C_q}\\), we concatenate \\(m\\) adjacent tokens along the channel dimension to obtain the reshaped visual feature \\(\\mathbf{V}_m = [v_m^1, v_m^2, \\dots, v_m^{L/m}] \\in \\mathbb{R}^{\\frac{L}{m} \\times mC_q}\\), where each merged token \\(v_m^i\\) is represented as:"
+ }, + {
+ "type": "equation",
+ "bbox": [0.27, 0.862, 0.724, 0.884],
+ "angle": 0,
+ "content": "\\[\nv_m^i = \\operatorname{Concat}\\left(v_q^{(i-1)m+1}, v_q^{(i-1)m+2}, \\dots, v_q^{im}\\right), \\quad \\forall i = 1, 2, \\dots, \\frac{L}{m}.\n\\]"
+ }, + {
+ "type": "text",
+ "bbox": [0.171, 0.894, 0.825, 0.926],
+ "angle": 0,
+ "content": "Next, a linear projection layer is applied to the merged visual feature \\(\\mathbf{V}_m\\), generating the visual token sequence \\(\\mathbf{V}_l \\in \\mathbb{R}^{\\frac{L}{m} \\times C_l}\\) as the input to the LLM, where \\(C_l\\) represents the token channel di"
+ }, + {
+ "type": "page_number",
+ "bbox": [0.494, 0.949, 0.504, 0.96],
+ "angle": 0,
+ "content": "4"
+ }
+ ],
+ [
+ {
+ "type": "header",
+ "bbox": [0.173, 0.033, 0.48, 0.049],
+ "angle": 0,
+ "content": "Published as a conference paper at ICLR 2025"
+ }, + {
+ "type": "text",
+ "bbox": [0.171, 0.104, 0.825, 0.189],
+ "angle": 0,
+ "content": "mension of the LLM. This scheme effectively reuses the projector of the base model by replicating the original linear layer parameters \\( m \\) times along the channel dimension, achieving an initialization equivalent to mean pooling with a window length of \\( m \\). This design avoids introducing additional randomly initialized parameters that might disturb the original model, thus preserving its original capabilities. Additionally, compared to directly using pooling, this method offers higher flexibility for fine-tuning to achieve better results (see ablation study, Table 4)."
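+ }, + {
+ "type": "text",
+ "content": "A minimal sketch of this Token Shuffle connector is given below, assuming the base model's projector is a single linear layer (all sizes are illustrative). It reshapes m adjacent tokens into one channel-concatenated token and initializes the new projector by tiling the original weights scaled by 1/m, which reproduces mean pooling followed by the original projection at initialization."
+ }, + {
+ "type": "code",
+ "content": "
import torch
import torch.nn as nn

L, C_q, C_l, m = 512, 768, 4096, 4   # tokens, Q-Former dim, LLM dim, merge factor

base_proj = nn.Linear(C_q, C_l)      # stand-in for the pretrained base projector

# New projector over m concatenated tokens, initialized so that it equals
# mean pooling (window m) followed by the original projection.
shuffle_proj = nn.Linear(m * C_q, C_l)
with torch.no_grad():
    shuffle_proj.weight.copy_(base_proj.weight.repeat(1, m) / m)  # (C_l, m*C_q)
    shuffle_proj.bias.copy_(base_proj.bias)

V_q = torch.randn(L, C_q)            # Q-Former tokens for the whole video
V_m = V_q.reshape(L // m, m * C_q)   # concat m adjacent tokens along channels
V_l = shuffle_proj(V_m)              # (L/m, C_l) tokens fed to the LLM

# Sanity check: at initialization this equals mean pooling + base projection.
ref = base_proj(V_q.reshape(L // m, m, C_q).mean(dim=1))
assert torch.allclose(V_l, ref, atol=1e-4)
"
+ },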
+ {
+ "type": "title",
+ "bbox": [0.172, 0.204, 0.54, 0.218],
+ "angle": 0,
+ "content": "3.1.3 TEMPORAL ADAPTIVE POSITION ENCODING"
+ }, + {
+ "type": "text",
+ "bbox": [0.171, 0.227, 0.825, 0.299],
+ "angle": 0,
+ "content": "To bind temporal positional information to visual tokens, we propose an adapter called Temporal Adaptive Position Encoding (TAPE). Inspired by CPVT (Chu et al., 2021), our TAPE uses zero padding at both ends of the convolution as anchors, and gradually transmits relative positional encoding information. Without the need to add any special time tokens, TAPE can automatically perceive the relative temporal positions of the token sequence and generate temporal embeddings."
+ }, + {
+ "type": "text",
+ "bbox": [0.171, 0.304, 0.827, 0.458],
+ "angle": 0,
+ "content": "Specifically, the long video token sequence \\(\\mathbf{V}_q\\) is first compressed in the channel dimension by a linear layer and further compressed in sequence length by a pooling layer. Next, we use a U-Net-like structure composed of one-dimensional depthwise separable convolutions to progressively down-sample the sequence, obtaining three one-dimensional temporal feature sequences with different resolutions. Subsequently, a convolution with a sufficiently long window is applied to the shortest temporal feature sequence, using zero padding at both ends as anchors to encode the relative temporal position of each token in the sequence (Chu et al., 2021). Then, we progressively upsample and restore the temporal feature sequences from short to long, using residual connections to retain temporal features at different scales. Finally, the temporal feature sequences are restored to the same length as \\(\\mathbf{V}_l\\) and aligned in the channel dimension by a linear layer, thereby obtaining the temporal features \\(\\mathbf{V}_t\\) output by the TAPE. For the detailed implementation of TAPE, please refer to Appendix A."
+ }, + {
+ "type": "text",
+ "bbox": [0.171, 0.464, 0.825, 0.534],
+ "angle": 0,
+ "content": "Our proposed TAPE is a plug-and-play module that can be easily integrated into the network structure via residual connections, adding temporal position information to video tokens without disrupting the distribution of other trainable parameters. With appropriate training strategies, TAPE effectively preserves the model's generalization capabilities and enhances its temporal sensitivity (see ablation study, Table 3), which is important for the temporal grounding task."
+ }, + {
+ "type": "title",
+ "bbox": [0.172, 0.551, 0.598, 0.565],
+ "angle": 0,
+ "content": "3.2 TIMEPRO: TEMPORAL GROUNDED INSTRUCTION DATA"
+ }, + {
+ "type": "text",
+ "bbox": [0.171, 0.577, 0.825, 0.661],
+ "angle": 0,
+ "content": "Traditional temporal grounding datasets only contain monotonous ground truth, i.e., the start and end times of the target period. This data format performs well in training classic expert models, but can hardly unleash the potential of LLMs. Although several temporal grounding-centric datasets have been released for MLLM fine-tuning (Ren et al., 2024; Huang et al., 2024b), they still have deficiencies in data quantity, data quality, and task diversity. Thus, it is necessary to build a more comprehensive temporal dataset designed for the tuning of MLLMs."
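+ }, + {
+ "type": "text",
+ "content": "To make the contrast concrete, the sketch below shows a traditional grounding annotation next to an instruction-style sample of the kind such tuning requires; the field names and wording are illustrative assumptions, not the released TimePro schema."
+ }, + {
+ "type": "code",
+ "content": "
# Hypothetical data layouts (illustrative only; not the released schema).

# Traditional temporal grounding ground truth: a query and its time span.
classic_annotation = {
    'video': 'example.mp4',
    'query': 'a person lights a cigarette',
    'span': [12.4, 18.9],  # start / end time in seconds
}

# Instruction-tuning style sample: the same supervision phrased as a dialogue,
# so an MLLM learns to answer with timestamps plus a supporting description.
instruction_sample = {
    'video': 'example.mp4',
    'instruction': 'Find when the scene of lighting a cigarette occurs, '
                   'then describe that segment in detail.',
    'response': 'From 12.4s to 18.9s: a person in a white coat picks up a '
                'lighter and lights a cigarette.',
}

print(instruction_sample['response'])
"
+ },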
+ {
+ "type": "text",
+ "bbox": [0.171, 0.667, 0.825, 0.738],
+ "angle": 0,
+ "content": "Based on the criteria of diversity, length, and difficulty, we collect and clean several existing high-quality grounding-centric datasets (Ren et al., 2024; Huang et al., 2024a,b), and create two new datasets, resulting in TimePro. Compared to previous temporal grounding-centric datasets, TimePro offers a larger volume of data, a broader distribution, and a higher task diversity, facilitating the learning of more generalizable temporal representations for MLLMs."
+ }, + {
+ "type": "text",
+ "bbox": [0.171, 0.744, 0.827, 0.926],
+ "angle": 0,
+ "content": "As shown in Figure 3(a), TimePro contains 9 task types from 15 datasets that are highly relevant to temporal grounding, comprising approximately 349K high-quality temporal grounding annotations. The 9 tasks are specified as follows. Temporal Video Grounding involves identifying the start and end times of video content based on a natural language query (Anne Hendricks et al., 2017; Oncescu et al., 2021; Zala et al., 2023). Dense Video Captioning requires detecting events within a video and providing corresponding timestamps and descriptions (Krishna et al., 2017; Huang et al., 2020; Zhou et al., 2018). Video Summarization focuses on determining key frames or clips in the form of timestamps rather than semantic summaries (Song et al., 2015; Gygli et al., 2014). Step Localization aims to segment and describe important steps in a long video (Tang et al., 2019; Zala et al., 2023). Transcribed Speech Generation predicts speech content and its timestamps from visual signals (Zellers et al., 2022). Reasoning Temporal Localization combines timestamps with explanatory answers (Huang et al., 2024b). Multi-format Temporal Grounding includes single-turn and multi-turn dialogues with diverse question types (Huang et al., 2024a). Highlight"
+ }, + {
+ "type": "page_number",
+ "bbox": [0.494, 0.949, 0.506, 0.96],
+ "angle": 0,
+ "content": "5"
+ }
+ ],
+ [
+ {
+ "type": "header",
+ "bbox": [0.174, 0.033, 0.48, 0.049],
+ "angle": 0,
+ "content": "Published as a conference paper at ICLR 2025"
+ }, + {
+ "type": "image",
+ "bbox": [0.205, 0.101, 0.488, 0.274],
+ "angle": 0,
+ "content": null
+ }, + {
+ "type": "image_caption",
+ "bbox": [0.273, 0.275, 0.382, 0.286],
+ "angle": 0,
+ "content": "(a) Tasks of TimePro"
+ }, + {
+ "type": "image",
+ "bbox": [0.491, 0.102, 0.792, 0.274],
+ "angle": 0,
+ "content": null
+ }, + {
+ "type": "image_caption",
+ "bbox": [0.532, 0.275, 0.744, 0.287],
+ "angle": 0,
+ "content": "(b) Details of Temporal Grounded Caption"
+ }, + {
+ "type": "image_caption",
+ "bbox": [0.171, 0.298, 0.825, 0.355],
+ "angle": 0,
+ "content": "Figure 3: (a) The proposed temporal-centric instruction-tuning dataset, TimePro. This dataset contains approximately 349K high-quality and strongly temporally correlated data. (b) The proposed Temporal Grounded Caption fine-tuning data paradigm. It effectively reduces the occurrence of hallucinations. We employ a 4-stage processing pipeline to ensure the quality of the generated data."
+ }, + {
+ "type": "text",
+ "bbox": [0.171, 0.387, 0.825, 0.457],
+ "angle": 0,
+ "content": "Detection identifies the most significant moments in a video based on a query (Lei et al., 2021a). 
Temporal Grounded Caption uses a brief scene title as the query and outputs both the time period and a fine-grained description of the scene. More detailed information about TimePro is available in Appendix B. It should be noted that Temporal Grounded Caption is our newly-designed task that can help our model establish fine-grained correspondence between visual segments and linguistic descriptions."
+ }, + {
+ "type": "title",
+ "bbox": [0.173, 0.48, 0.49, 0.493],
+ "angle": 0,
+ "content": "3.3 TEMPORAL GROUNDED CAPTION TASK"
+ }, + {
+ "type": "text",
+ "bbox": [0.171, 0.508, 0.825, 0.62],
+ "angle": 0,
+ "content": "Some studies have shown that MLLMs often exhibit severe hallucinations when dealing with fine-grained perception tasks (Ji et al., 2023; Huang et al., 2023; Golkar et al., 2023). Since our VideoChat-T directly regresses the timestamps corresponding to the text queries using MLLMs, it is more susceptible to hallucinations compared to methods that use external expert models as decoders (Wu et al., 2024). By forcing the video MLLM to predict the event occurrence time and simultaneously describe the visual content as evidence, we attempt to anchor these queries to the relevant time segments within the video, rather than generating hallucinations originating from the LLM itself. Based on this analysis, we design the Temporal Grounded Caption task."
+ }, + {
+ "type": "text",
+ "bbox": [0.171, 0.626, 0.827, 0.738],
+ "angle": 0,
+ "content": "The top of Figure 3(b) illustrates the definition of Temporal Grounded Caption. We use a brief scene title of the video segment as the query, requiring the model to simultaneously respond with the precise start and end times of the video segment and provide a detailed description of that segment. While the content in the scene title may leak into the detailed caption response, most of the missing detailed information must be correctly described by attending to the corresponding segment. Moreover, temporal grounding and detailed captioning can serve as regularization tasks for each other, preventing the captioning model from hallucinating based on unrelated visual or linguistic contexts and helping the grounding model regress the timestamps more accurately."
+ }, + {
+ "type": "text",
+ "bbox": [0.171, 0.744, 0.827, 0.925],
+ "angle": 0,
+ "content": "The process for collecting our Temporal Grounded Caption data is described at the bottom of Figure 3(b). In the first stage, we use a detailed caption dataset with timestamps as our data source. We remove data with target grounding time intervals that are too short or too long and ensure that the scenes in the video are as diverse as possible. In the second stage, we use an LLM to summarize scene titles. To prevent excessive semantics of video segments from being leaked from the query to the MLLM, we try to retain the minimal subset of key features that are sufficient to distinguish the video segments. In the third stage, to avoid overly similar or identical content appearing at different temporal intervals in the video, we perform similarity filtering on the data annotations. Based on the scene titles and video features, we calculate the similarity between different segments of the same video and remove data with excessively high similarity. In the fourth stage, we randomly sample the generated data and manually assess its quality. 
Based on human feedback, we refine the threshold parameters for data filtering used in the first three stages to yield the final Temporal Grounded Caption dataset. This new dataset plays an important role in our grounded tuning." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "table", + "bbox": [ + 0.175, + 0.102, + 0.825, + 0.287 + ], + "angle": 0, + "content": "
| Method | LLM Size | Charades-STA R@1 (IoU=0.3) | Charades-STA R@1 (IoU=0.5) | Charades-STA R@1 (IoU=0.7) | QVHighlight mAP | QVHighlight HIT@1 |
| --- | --- | --- | --- | --- | --- | --- |
| MovieChat (Song et al., 2024a) | 7B | 8.8 | 2.9 | 1.3 | 11.7 | 16.1 |
| GroundingGPT (Li et al., 2024c) | 7B | - | 29.6 | 11.9 | - | - |
| VTimeLLM (Huang et al., 2024a) | 7B | 51.0 | 27.5 | 11.4 | - | - |
| HawkEye (Wang et al., 2024f) | 7B | 50.6 | 31.4 | 14.5 | - | - |
| TimeChat (Ren et al., 2024) | 7B | - | 32.2 | 13.4 | 14.5 | 23.9 |
| ChatVTG (Qu et al., 2024) | 7B | 52.7 | 33.0 | 15.9 | - | - |
| VideoChat2 (Li et al., 2024b) | 7B | 9.6 | 3.4 | 1.4 | 13.4 | 18.6 |
| VideoChat-T | 7B | 69.9 (+60.3) | 48.7 (+45.3) | 24.0 (+22.6) | 26.5 (+13.1) | 54.1 (+35.5) |
| QD-DETR※ (FT) (Moon et al., 2023b) | - | - | 57.3 | 32.6 | 38.9 | 64.2 |
| UnLoc-L※ (FT) (Yan et al., 2023) | - | - | 60.8 | 38.4 | - | - |
| HawkEye (FT) (Wang et al., 2024f) | 7B | 72.5 | 58.3 | 28.8 | - | - |
| TimeChat (FT) (Ren et al., 2024) | 7B | - | 46.7 | 23.7 | 21.7 | 37.9 |
| VideoChat-T (FT) | 7B | 79.4 | 67.1 | 43.0 | 27.0 | 55.3 |
"
+ }, + {
+ "type": "table_caption",
+ "bbox": [0.171, 0.297, 0.825, 0.34],
+ "angle": 0,
+ "content": "Table 1: Performance of VideoChat-T on temporal grounding and highlight detection tasks. (FT) indicates the model fine-tuned on the training set of the evaluation benchmark, with the respective text marked in gray. Classic supervised expert models are marked with \(\text{※}\)."
+ }, + {
+ "type": "title",
+ "bbox": [0.172, 0.351, 0.329, 0.366],
+ "angle": 0,
+ "content": "4 EXPERIMENTS"
+ }, + {
+ "type": "title",
+ "bbox": [0.172, 0.383, 0.409, 0.396],
+ "angle": 0,
+ "content": "4.1 IMPLEMENTATION DETAILS"
+ }, + {
+ "type": "text",
+ "bbox": [0.171, 0.408, 0.827, 0.534],
+ "angle": 0,
+ "content": "Built upon VideoChat2, we use UMT-L (Li et al., 2023c) and Mistral-7B (Jiang et al., 2023) as the video encoder and LLM, respectively. Except for the TAPE, all components are initialized from the pre-trained model of VideoChat2-Mistral. For the TAPE, we use random initialization, set the initial values of the final linear layer to zero, and freeze it during the first epoch of training. We set the frame count \( T \) for each clip to 8, so the number of clips \( K \) for a long video is equal to the total frame count divided by \( T \). We fine-tune the model for 3 epochs using TimePro with 349K instances and a general QA task dataset with 82K instances. To ensure the stability of model training, we use 192-frame input for the first epoch. In the second and third epochs, we unfreeze the TAPE and adjust the model input to 128 frames. All experiments are conducted on 16 A100 GPUs."
+ }, + {
+ "type": "title",
+ "bbox": [0.172, 0.551, 0.521, 0.565],
+ "angle": 0,
+ "content": "4.2 PERFORMANCE ON TEMPORAL GROUNDING"
+ }, + {
+ "type": "text",
+ "bbox": [0.171, 0.577, 0.825, 0.648],
+ "angle": 0,
+ "content": "We evaluate our method using two commonly used temporal localization tasks, i.e., Temporal Grounding and Highlight Detection. The performance comparison between VideoChat-T and other models is shown in Table 1. Our method's zero-shot performance surpasses that of all previous LLM-based methods, and after fine-tuning, VideoChat-T even exceeds some classic expert models on the temporal grounding task."
+ }, + {
+ "type": "text",
+ "bbox": [0.171, 0.653, 0.827, 0.752],
+ "angle": 0,
+ "content": "Temporal Grounding. This task aims to identify the start and end timestamps of the video content described by the query sentence, using Charades-STA as the evaluation benchmark. VideoChat-T achieves an accuracy of 48.7 on the R@1 (IoU=0.5) metric, significantly surpassing the previous state-of-the-art MLLM method, TimeChat, by 16.5 points. Additionally, it outperforms even the version of TimeChat fine-tuned on the training set of the evaluation benchmark, by 2.0 points. Furthermore, the performance of VideoChat-T fine-tuned on the evaluation benchmark's training set reaches 67.1 R@1 at IoU=0.5, surpassing most state-of-the-art classic supervised expert models."
+ }, + {
+ "type": "text",
+ "bbox": [0.171, 0.758, 0.827, 0.926],
+ "angle": 0,
+ "content": "Highlight Detection. We use QVHighlights as the evaluation benchmark. For a given query, this task requires outputting all timestamps of highlight moments and their corresponding saliency scores. Since there can be many sparse highlight moments in a video, this task requires finer-grained video understanding at the frame level. 
VideoChat-T achieves an mAP of 26.5, significantly surpassing the previous MLLM method TimeChat by 13.0 points, and also outperforming its fine-tuned version by 4.8 points. We observe that after fine-tuning on the corresponding training set, VideoChat-T shows almost no performance improvement. This may be due to a bottleneck in the language representation of LLMs. The Highlight Detection task requires outputting a (timestamp, saliency score) pair for each highlight moment, and a video may contain dozens of discrete highlight moments, making it challenging for the model to correctly respond with dozens to hundreds of numbers in a language format. Outputting precise numerical saliency scores is very difficult for LLMs, and VideoChat-T can only respond well to queries with fewer highlight moments. Due to the"
+ }, + {
+ "type": "page_number",
+ "bbox": [0.494, 0.949, 0.506, 0.96],
+ "angle": 0,
+ "content": "7"
+ }
+ ],
+ [
+ {
+ "type": "header",
+ "bbox": [0.174, 0.033, 0.48, 0.049],
+ "angle": 0,
+ "content": "Published as a conference paper at ICLR 2025"
+ }, + {
+ "type": "table",
+ "bbox": [0.175, 0.102, 0.825, 0.26],
+ "angle": 0,
+ "content": "
| Method | LLM Size | Egoschema Subset | Egoschema Full | VideoMME w/o subs | VideoMME w/o subs (Long) | MVBench Avg |
| --- | --- | --- | --- | --- | --- | --- |
| VideoAgent (Wang et al., 2024c) | GPT-4 | 60.2 | 54.1 | - | - | - |
| VideoAgent (Fan et al., 2024) | GPT-4 | 62.8 | - | - | - | - |
| TimeChat (Ren et al., 2024) | 7B | - | 33.0 | 30.2 | 26.1 | 38.5 |
| LLAMA-Vid (Li et al., 2023d) | 7B | - | 38.5 | - | - | 41.9 |
| MovieChat (Song et al., 2024a) | 7B | - | 53.5 | 38.2 | 33.4 | 55.1 |
| MovieChat+ (Song et al., 2024b) | 7B | - | 56.4 | - | - | - |
| Chat-UniVi (Jin et al., 2024) | 7B | - | - | 40.6 | 35.8 | - |
| VideoChat2 (Li et al., 2024b) | 7B | 63.6 | 54.4 | 39.5 | 33.2 | 60.4 |
| VideoChat-T | 7B | 68.4 (+4.8) | 60.0 (+5.6) | 46.3 (+6.8) | 41.9 (+8.7) | 59.9 (-0.5) |
"
+ }, + {
+ "type": "table_caption",
+ "bbox": [0.171, 0.269, 0.825, 0.312],
+ "angle": 0,
+ "content": "Table 2: Performance of VideoChat-T and other methods on video question answering tasks. By upgrading VideoChat2 with TimeSuite, VideoChat-T demonstrates significant improvements across multiple long video benchmarks."
+ }, + {
+ "type": "text",
+ "bbox": [0.171, 0.323, 0.825, 0.353],
+ "angle": 0,
+ "content": "specific architectural design, classic supervised expert models have a natural advantage in handling such tasks, and VideoChat-T still has a performance gap compared to expert models."
+ }, + {
+ "type": "title",
+ "bbox": [0.172, 0.369, 0.502, 0.384],
+ "angle": 0,
+ "content": "4.3 PERFORMANCE ON GENERAL VIDEO QA"
+ }, + {
+ "type": "text",
+ "bbox": [0.171, 0.395, 0.825, 0.452],
+ "angle": 0,
+ "content": "In addition to testing the grounding ability of our VideoChat-T, we also verify its general video question answering performance. Following mainstream evaluation standards, we use both long video and short video QA to assess the general video understanding capability of VideoChat-T. Table 2 shows the performance of VideoChat-T on the video QA evaluation benchmarks."
+ }, + {
+ "type": "text",
+ "bbox": [0.171, 0.457, 0.827, 0.64],
+ "angle": 0,
+ "content": "Long Video QA. We use Egoschema (Mangalam et al., 2023) and VideoMME (Fu et al., 2024) to evaluate the long video capabilities of VideoChat-T. In conjunction with our proposed architectural improvements, we incrementally fine-tune VideoChat2 using only 432K data points. VideoChat-T demonstrates outstanding performance on Egoschema, achieving an accuracy of \(68.4\%\) on the test subset and \(60.0\%\) on the entire test set. Compared to VideoChat2, VideoChat-T obtains improvements of \(4.8\%\) and \(5.6\%\) on the subset and the full test set, respectively. Additionally, for the VideoMME benchmark, VideoChat-T achieves an accuracy of \(46.3\%\) by solely analyzing the visual content without using subtitles, representing a \(6.8\%\) improvement over VideoChat2. On the long video division of VideoMME, VideoChat-T achieves an accuracy of \(41.9\%\), an \(8.7\%\) improvement compared to VideoChat2. The upgraded VideoChat-T demonstrates significant performance improvements on long video QA benchmarks. This indicates the potential of leveraging grounding-centric video tasks to enhance the temporal awareness of MLLMs, thereby further improving long video understanding capabilities."
+ }, + {
+ "type": "text",
+ "bbox": [0.171, 0.645, 0.827, 0.827],
+ "angle": 0,
+ "content": "Short Video QA. We use MVBench (Li et al., 2024b) to evaluate the general short video understanding capabilities of VideoChat-T. VideoChat-T achieves an overall average accuracy of \(59.9\%\) on MVBench, a \(0.5\%\) decrease compared to VideoChat2. It is important to note that achieving such minimal performance loss is challenging. According to previous experience in the field of incremental learning (Van de Ven et al., 2022), models inevitably forget old knowledge while learning new knowledge. VideoChat2 is fine-tuned with 2M samples, whereas VideoChat-T is fine-tuned with only 432K samples, of which 349K annotations are temporal grounding-centric, resulting in only a \(0.5\%\) accuracy loss. 
Previous temporal MLLMs like TimeChat (Ren et al., 2024), although achieving strong temporal localization capabilities, yield much weaker general video QA capability, with an accuracy of only \(38.5\%\) on MVBench. This demonstrates that the design of our TimeSuite endows the model with new capabilities while still preserving its original general video understanding capabilities. For a detailed analysis of the performance degradation on MVBench, please refer to Appendix F.2."
+ }, + {
+ "type": "title",
+ "bbox": [0.172, 0.843, 0.385, 0.858],
+ "angle": 0,
+ "content": "4.4 QUALITATIVE ANALYSIS"
+ }, + {
+ "type": "text",
+ "bbox": [0.171, 0.869, 0.825, 0.926],
+ "angle": 0,
+ "content": "Figure 4 presents a qualitative comparison between our model and other methods. In the example on the left, VideoChat-T is capable of answering more complex long video reasoning questions. Our model accurately identifies the temporal location of the \"light a cigarette\" event and determines the correct key clue \"the person in a white coat\" based on the video content. This leads to the inference"
+ }, + {
+ "type": "page_number",
+ "bbox": [0.494, 0.949, 0.505, 0.96],
+ "angle": 0,
+ "content": "8"
+ }
+ ],
+ [
+ {
+ "type": "header",
+ "bbox": [0.174, 0.033, 0.48, 0.049],
+ "angle": 0,
+ "content": "Published as a conference paper at ICLR 2025"
+ }, + {
+ "type": "image",
+ "bbox": [0.174, 0.102, 0.49, 0.27],
+ "angle": 0,
+ "content": null
+ }, + {
+ "type": "image",
+ "bbox": [0.507, 0.102, 0.824, 0.259],
+ "angle": 0,
+ "content": null
+ }, + {
+ "type": "image_caption",
+ "bbox": [0.171, 0.279, 0.825, 0.323],
+ "angle": 0,
+ "content": "Figure 4: Qualitative comparison between VideoChat-T and other methods. VideoChat-T not only possesses temporal fine-grained perception capabilities but can also perform accurate long video reasoning. Green text indicates correct answers, while red text indicates inappropriate answers."
+ }, + {
+ "type": "table",
+ "bbox": [0.174, 0.329, 0.487, 0.38],
+ "angle": 0,
+ "content": "
| Model | Egoschema Full | VideoMME w/o subs | Charades-STA R@1 (IoU=0.5) | QVHighlights Hit@1 |
| --- | --- | --- | --- | --- |
| VideoChat-T (Ours) | 60.0 | 46.3 | 48.7 | 54.1 |
| w/o TAPE | 59.1 | 45.9 | 47.1 | 50.4 |
| w/o frz | 59.0 | 45.2 | 52.4 | 53.7 |
"
+ }, + {
+ "type": "table_caption",
+ "bbox": [0.171, 0.385, 0.489, 0.456],
+ "angle": 0,
+ "content": "Table 3: Performance results of the ablation study on the TAPE. Here, w/o TAPE refers to removing our proposed TAPE, and w/o frz refers to not using the training method where the TAPE is frozen during the first epoch."
+ }, + {
+ "type": "table",
+ "bbox": [0.51, 0.329, 0.822, 0.389],
+ "angle": 0,
+ "content": "
| Model | Egoschema Full | VideoMME w/o subs | Charades-STA R@1 (IoU=0.5) | QVHighlights Hit@1 |
| --- | --- | --- | --- | --- |
| VideoChat-T (Ours) | 60.0 | 46.3 | 48.7 | 54.1 |
| r/w pooling | 59.8 | 44.8 | 40.3 | 47.3 |
| r/w clustering | 59.5 | 45.0 | 39.8 | 40.1 |
| w/o init | 57.4 | 43.4 | 42.0 | 53.9 |
"
+ }, + {
+ "type": "table_caption",
+ "bbox": [0.506, 0.394, 0.825, 0.464],
+ "angle": 0,
+ "content": "Table 4: Performance results of the ablation study on the Token Shuffle. Here, r/w refers to replacing Token Shuffle with another component, and w/o init refers to removing the efficient initialization."
+ }, + {
+ "type": "text",
+ "bbox": [0.171, 0.477, 0.825, 0.534],
+ "angle": 0,
+ "content": "that \"playing the piano very fast and pressing the keys very hard\" are the true reasons. The example on the right demonstrates our model's fine-grained perception ability. The appearance of \"money in the briefcase\" is very brief, and most models easily overlook this detail. Thanks to its strong fine-grained perception ability, our model precisely captures this visual content."
+ }, + {
+ "type": "title",
+ "bbox": [0.172, 0.551, 0.343, 0.565],
+ "angle": 0,
+ "content": "4.5 ABLATION STUDY"
+ }, + {
+ "type": "text",
+ "bbox": [0.17, 0.576, 0.825, 0.73],
+ "angle": 0,
+ "content": "Role of TAPE. To verify the performance improvement brought by TAPE, we conducted ablation experiments; Table 3 lists the results. It can be observed that when the TAPE is removed, the model's performance on long video understanding and temporal grounding benchmarks decreases. TAPE can adaptively embed positional encodings into video tokens, and its absence leads to a certain loss of temporal awareness. When we unfroze the TAPE in the first epoch, performance improved on the temporal grounding task but declined on the long video QA task. This is because the TAPE is highly suited to tasks with strong temporal dependencies: if unfrozen too early, the model may become biased towards fitting temporal grounding tasks. Freezing the TAPE during the first epoch allows the model to first learn a relatively generalized feature representation, thereby balancing performance across different tasks."
+ }, + {
+ "type": "text",
+ "bbox": [0.17, 0.736, 0.825, 0.876],
+ "angle": 0,
+ "content": "Effectiveness of Token Shuffle. To verify the effectiveness of Token Shuffle, we conducted ablation experiments, with results in Table 4. We compared Token Shuffle with conventional methods such as pooling and clustering, and also observed the results after removing the efficient initialization. When we replaced Token Shuffle with pooling or clustering, the model's performance declined. This is because the efficient initialization of the linear layer in Token Shuffle makes the initial values of the module equivalent to average pooling, from which training can gradually optimize towards better solutions; our method therefore starts no worse than pooling and can improve beyond it. On the other hand, clustering often fails to maintain the spatial and temporal consistency of the video, leading to temporal confusion. When we removed the efficient initialization of the linear layer, the negative impact of random initialization severely damaged the model's original performance."
+ }, + {
+ "type": "text",
+ "bbox": [0.171, 0.882, 0.825, 0.926],
+ "angle": 0,
+ "content": "Effect of TimePro. We conducted ablation studies to evaluate the effectiveness of the TimePro data components. As shown in Table 5, by gradually adding subsets of TimePro, we observed the model's performance changes across various temporal grounding-centric instruction-tuning data. 
As we pro" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "table", + "bbox": [ + 0.209, + 0.101, + 0.789, + 0.198 + ], + "angle": 0, + "content": "
| Normal | TimeIT | TGC | HD | MTG | RTL | Egoschema Full | VideoMME w/o subs | Charades-STA R@1 (IoU=0.5) | QVHighlights Hit@1 |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| ✓ |  |  |  |  |  | 56.6 | 42.6 | 8.0 | 24.4 |
| ✓ | ✓ |  |  |  |  | 57.8 | 43.6 | 32.2 | 25.2 |
| ✓ | ✓ | ✓ |  |  |  | 58.3 | 44.0 | 39.1 | 33.9 |
| ✓ | ✓ | ✓ | ✓ |  |  | 59.8 | 44.9 | 41.9 | 43.8 |
| ✓ | ✓ | ✓ | ✓ | ✓ |  | 60.0 | 45.1 | 45.8 | 48.3 |
| ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | 60.0 | 46.3 | 48.7 | 54.1 |
"
+ }, + {
+ "type": "table_caption",
+ "bbox": [0.171, 0.207, 0.825, 0.264],
+ "angle": 0,
+ "content": "Table 5: Performance results of the ablation study on different components of TimePro. We use 82K normal training data as the baseline. TimeIT refers to the training data with five task types from Ren et al. (2024), TGC refers to Temporal Grounded Caption, HD refers to Highlight Detection, MTG refers to Multi-format Temporal Grounding, and RTL refers to Reasoning Temporal Localization."
+ }, + {
+ "type": "image",
+ "bbox": [0.207, 0.274, 0.398, 0.38],
+ "angle": 0,
+ "content": null
+ }, + {
+ "type": "image",
+ "bbox": [0.404, 0.273, 0.596, 0.38],
+ "angle": 0,
+ "content": null
+ }, + {
+ "type": "image",
+ "bbox": [0.602, 0.274, 0.792, 0.38],
+ "angle": 0,
+ "content": null
+ }, + {
+ "type": "image_caption",
+ "bbox": [0.171, 0.39, 0.825, 0.445],
+ "angle": 0,
+ "content": "Figure 5: Performance of VideoChat-T with varying input frame numbers. As the number of input frames increases, the performance of VideoChat-T shows an upward trend in both long video QA and temporal grounding tasks. Due to the overly low temporal grounding performance of VideoChat2, its curve is omitted."
+ }, + {
+ "type": "text",
+ "bbox": [0.171, 0.459, 0.825, 0.515],
+ "angle": 0,
+ "content": "gressively added subsets of TimePro, not only did the model's performance on temporal grounding tasks show a stable and significant improvement, but we also observed a noticeable upward trend in performance on long video benchmarks. This corroborates, to some extent, that temporal grounding-centric tasks have a positive impact on long video understanding."
+ }, + {
+ "type": "text",
+ "bbox": [0.171, 0.522, 0.827, 0.648],
+ "angle": 0,
+ "content": "Impact of frames. To investigate the impact of input frame count on model performance, we conducted an ablation study. Figure 5 illustrates the scalability of our model's performance with respect to input frame count. VideoChat-T demonstrates good stability as the input frame count varies, and its performance in long video QA and temporal grounding tasks improves with an increase in frame count. In contrast, the baseline model, VideoChat2, exhibits catastrophic performance degradation when the frame count is significantly increased. As the input frame count increases, the number of visual encoding tokens grows linearly, and excessive visual token input imposes an additional computational burden on the temporal modeling of the LLM. TimeSuite mitigates this by employing Token Shuffle to reduce the number of tokens, ensuring the stable operation of the model."
+ }, + {
+ "type": "title",
+ "bbox": [0.173, 0.667, 0.321, 0.683],
+ "angle": 0,
+ "content": "5 CONCLUSION"
+ }, + {
+ "type": "text",
+ "bbox": [0.171, 0.698, 0.827, 0.895],
+ "angle": 0,
+ "content": "In this paper, we have introduced TimeSuite, a collection of new designs from the perspectives of efficient architecture, high-quality data, and a new instruction-tuning task, to achieve long video understanding by fine-tuning short video MLLMs with temporal grounding-centric data. We address the computational challenges of processing long videos by introducing Token Shuffle to compress visual tokens. We also propose TAPE for adaptive position encoding, enhancing the temporal awareness of visual representations. 
Additionally, our designed Temporal Grounded Caption training task enables MLLMs to build correspondences between grounded segments and detailed captions, while the TimePro dataset provides comprehensive instruction tuning data for learning more effective temporal perception capabilities. Experimental results demonstrate that VideoChat-T significantly improves long video understanding, with notable performance gains on Egoschema and VideoMME. Furthermore, VideoChat-T exhibits strong zero-shot temporal grounding capabilities, significantly outperforming previous MLLMs on temporal grounding. Overall, our TimeSuite provides effective designs for short video MLLMs to enhance their performance on temporal grounding and long video QA. We hope our TimeSuite can offer some insights into designing long video MLLMs."
+ }, + {
+ "type": "page_number",
+ "bbox": [0.491, 0.948, 0.51, 0.96],
+ "angle": 0,
+ "content": "10"
+ }
+ ],
+ [
+ {
+ "type": "header",
+ "bbox": [0.173, 0.033, 0.48, 0.049],
+ "angle": 0,
+ "content": "Published as a conference paper at ICLR 2025"
+ }, + {
+ "type": "title",
+ "bbox": [0.173, 0.103, 0.362, 0.119],
+ "angle": 0,
+ "content": "ACKNOWLEDGEMENT"
+ }, + {
+ "type": "text",
+ "bbox": [0.172, 0.133, 0.827, 0.19],
+ "angle": 0,
+ "content": "This work is supported by the National Key R&D Program of China (No. 2022ZD0160900), the Fundamental Research Funds for the Central Universities (No. 020214380119), Jiangsu Frontier Technology Research and Development Program (No. BF2024076), and the Collaborative Innovation Center of Novel Software Technology and Industrialization."
+ }, + {
+ "type": "title",
+ "bbox": [0.174, 0.209, 0.289, 0.224],
+ "angle": 0,
+ "content": "REFERENCES"
+ }, + {
+ "type": "ref_text",
+ "bbox": [0.173, 0.232, 0.826, 0.276],
+ "angle": 0,
+ "content": "Lisa Anne Hendricks, Oliver Wang, Eli Shechtman, Josef Sivic, Trevor Darrell, and Bryan Russell. Localizing moments in video with natural language. In Proceedings of the IEEE international conference on computer vision, pp. 5803-5812, 2017."
+ }, + {
+ "type": "ref_text",
+ "bbox": [0.175, 0.283, 0.825, 0.325],
+ "angle": 0,
+ "content": "Jinze Bai, Shuai Bai, Shusheng Yang, Shijie Wang, Sinan Tan, Peng Wang, Junyang Lin, Chang Zhou, and Jingren Zhou. Qwen-vl: A frontier large vision-language model with versatile abilities. arXiv preprint arXiv:2308.12966, 2023."
+ }, + {
+ "type": "ref_text",
+ "bbox": [0.173, 0.332, 0.825, 0.375],
+ "angle": 0,
+ "content": "Lin Chen, Xin Wei, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Bin Lin, Zhenyu Tang, et al. Sharegpt4video: Improving video understanding and generation with better captions. arXiv preprint arXiv:2406.04325, 2024."
+ }, + {
+ "type": "ref_text",
+ "bbox": [0.173, 0.381, 0.825, 0.439],
+ "angle": 0,
+ "content": "Wei-Lin Chiang, Zhuohan Li, Zi Lin, Ying Sheng, Zhanghao Wu, Hao Zhang, Lianmin Zheng, Siyuan Zhuang, Yonghao Zhuang, Joseph E Gonzalez, et al. Vicuna: An open-source chatbot impressing gpt-4 with \(90\%\) chatgpt quality. See https://vicuna.lmsys.org (accessed 14 April 2023), 2(3):6, 2023."
+ }, + {
+ "type": "ref_text",
+ "bbox": [0.175, 0.446, 0.825, 0.476],
+ "angle": 0,
+ "content": "Xiangxiang Chu, Zhi Tian, Bo Zhang, Xinlong Wang, and Chunhua Shen. Conditional positional encodings for vision transformers. arXiv preprint arXiv:2102.10882, 2021."
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.482, + 0.825, + 0.525 + ], + "angle": 0, + "content": "Yue Fan, Xiaojian Ma, Rujie Wu, Yuntao Du, Jiaqi Li, Zhi Gao, and Qing Li. Videoagent: A memory-augmented multimodal agent for video understanding. arXiv preprint arXiv:2403.11481, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.532, + 0.825, + 0.576 + ], + "angle": 0, + "content": "Chaoyou Fu, Yuhan Dai, Yondong Luo, Lei Li, Shuhuai Ren, Renrui Zhang, Zihan Wang, Chenyu Zhou, Yunhang Shen, Mengdan Zhang, et al. Video-mme: The first-ever comprehensive evaluation benchmark of multi-modal llms in video analysis. arXiv preprint arXiv:2405.21075, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.582, + 0.825, + 0.625 + ], + "angle": 0, + "content": "Jiyang Gao, Chen Sun, Zhenheng Yang, and Ram Nevatia. Tall: Temporal activity localization via language query. In Proceedings of the IEEE international conference on computer vision, pp. 5267-5275, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.632, + 0.825, + 0.676 + ], + "angle": 0, + "content": "Siavash Golkar, Mariel Pettee, Michael Eickenberg, Alberto Bietti, Miles Cranmer, Geraud Krawezik, Francois Lanusse, Michael McCabe, Ruben Ohana, Liam Parker, et al. xval: A continuous number encoding for large language models. arXiv preprint arXiv:2310.02989, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.682, + 0.825, + 0.725 + ], + "angle": 0, + "content": "Michael Gygli, Helmut Grabner, Hayko Riemenschneider, and Luc Van Gool. Creating summaries from user videos. In Computer Vision-ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part VII 13, pp. 505-520. Springer, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.732, + 0.825, + 0.775 + ], + "angle": 0, + "content": "Bin Huang, Xin Wang, Hong Chen, Zihan Song, and Wenwu Zhu. Vtimellm: Empower llm to grasp video moments. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 14271-14280, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.782, + 0.825, + 0.825 + ], + "angle": 0, + "content": "De-An Huang, Shijia Liao, Subhashree Radhakrishnan, Hongxu Yin, Pavlo Molchanov, Zhiding Yu, and Jan Kautz. Lita: Language instructed temporal-localization assistant. arXiv preprint arXiv:2403.19046, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.832, + 0.825, + 0.862 + ], + "angle": 0, + "content": "Gabriel Huang, Bo Pang, Zhenhai Zhu, Clara Rivera, and Radu Soricut. Multimodal pretraining for dense video captioning. arXiv preprint arXiv:2011.11760, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.868, + 0.825, + 0.924 + ], + "angle": 0, + "content": "Lei Huang, Weijiang Yu, Weitao Ma, Weihong Zhong, Zhangyin Feng, Haotian Wang, Qianglong Chen, Weihua Peng, Xiaocheng Feng, Bing Qin, et al. A survey on hallucination in large language models: Principles, taxonomy, challenges, and open questions. arXiv preprint arXiv:2311.05232, 2023." 
+ }, + { + "type": "list", + "bbox": [ + 0.173, + 0.232, + 0.826, + 0.924 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.147 + ], + "angle": 0, + "content": "Ziwei Ji, Nayeon Lee, Rita Frieske, Tiezheng Yu, Dan Su, Yan Xu, Etsuko Ishii, Ye Jin Bang, Andrea Madotto, and Pascale Fung. Survey of hallucination in natural language generation. ACM Computing Surveys, 55(12):1-38, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.155, + 0.826, + 0.199 + ], + "angle": 0, + "content": "Albert Q Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lucile Saulnier, et al. Mistral 7b. arXiv preprint arXiv:2310.06825, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.206, + 0.826, + 0.263 + ], + "angle": 0, + "content": "Peng Jin, Ryuichi Takanobu, Wancai Zhang, Xiaochun Cao, and Li Yuan. Chat-univi: Unified visual representation empowers large language models with image and video understanding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 13700-13710, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.271, + 0.826, + 0.314 + ], + "angle": 0, + "content": "Ranjay Krishna, Kenji Hata, Frederic Ren, Li Fei-Fei, and Juan Carlos Niebles. Dense-captioning events in videos. In Proceedings of the IEEE international conference on computer vision, pp. 706-715, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.322, + 0.826, + 0.353 + ], + "angle": 0, + "content": "Jie Lei, Tamara L Berg, and Mohit Bansal. Detecting moments and highlights in videos via natural language queries. Advances in Neural Information Processing Systems, 34:11846-11858, 2021a." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.36, + 0.826, + 0.39 + ], + "angle": 0, + "content": "Jie Lei, Tamara L Berg, and Mohit Bansal. Detecting moments and highlights in videos via natural language queries. Advances in Neural Information Processing Systems, 34:11846-11858, 2021b." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.397, + 0.826, + 0.44 + ], + "angle": 0, + "content": "Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Peiyuan Zhang, Yanwei Li, Ziwei Liu, et al. Llava-onevision: Easy visual task transfer. arXiv preprint arXiv:2408.03326, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.448, + 0.826, + 0.492 + ], + "angle": 0, + "content": "Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. In International conference on machine learning, pp. 19730–19742. PMLR, 2023a." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.499, + 0.826, + 0.542 + ], + "angle": 0, + "content": "KunChang Li, Yinan He, Yi Wang, Yizhuo Li, Wenhai Wang, Ping Luo, Yali Wang, Limin Wang, and Yu Qiao. Videochat: Chat-centric video understanding. arXiv preprint arXiv:2305.06355, 2023b." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.551, + 0.826, + 0.595 + ], + "angle": 0, + "content": "Kunchang Li, Yali Wang, Yizhuo Li, Yi Wang, Yinan He, Limin Wang, and Yu Qiao. 
Unmasked teacher: Towards training-efficient video foundation models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 19948-19960, 2023c." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.602, + 0.826, + 0.658 + ], + "angle": 0, + "content": "Kunchang Li, Yali Wang, Yinan He, Yizhuo Li, Yi Wang, Yi Liu, Zun Wang, Jilan Xu, Guo Chen, Ping Luo, et al. Mvbench: A comprehensive multi-modal video understanding benchmark. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 22195-22206, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.667, + 0.826, + 0.697 + ], + "angle": 0, + "content": "Yanwei Li, Chengyao Wang, and Jiaya Jia. Llama-vid: An image is worth 2 tokens in large language models. arXiv preprint arXiv:2311.17043, 2023d." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.704, + 0.826, + 0.747 + ], + "angle": 0, + "content": "Zhaowei Li, Qi Xu, Dong Zhang, Hang Song, Yiqing Cai, Qi Qi, Ran Zhou, Junting Pan, Zefeng Li, Van Tu Vu, et al. Groundinggpt: Language enhanced multi-modal grounding model. CoRR, 2024c." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.755, + 0.826, + 0.786 + ], + "angle": 0, + "content": "Bin Lin, Bin Zhu, Yang Ye, Munan Ning, Peng Jin, and Li Yuan. Video-llava: Learning united visual representation by alignment before projection. arXiv preprint arXiv:2311.10122, 2023a." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.793, + 0.826, + 0.849 + ], + "angle": 0, + "content": "Kevin Qinghong Lin, Pengchuan Zhang, Joya Chen, Shraman Pramanick, Difei Gao, Alex Jinpeng Wang, Rui Yan, and Mike Zheng Shou. Univtg: Towards unified video-language temporal grounding. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 2794-2804, 2023b." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.858, + 0.826, + 0.888 + ], + "angle": 0, + "content": "Hao Liu, Wilson Yan, Matei Zaharia, and Pieter Abbeel. World model on million-length video and language with ringattention. arXiv preprint arXiv:2402.08268, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.895, + 0.826, + 0.925 + ], + "angle": 0, + "content": "Ruyang Liu, Chen Li, Haoran Tang, Yixiao Ge, Ying Shan, and Ge Li. St-llm: Large language models are effective temporal learners. arXiv preprint arXiv:2404.00308, 2024b." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.826, + 0.147 + ], + "angle": 0, + "content": "Muhammad Maaz, Hanoona Rasheed, Salman Khan, and Fahad Shahbaz Khan. Video-chatgpt: Towards detailed video understanding via large vision and language models. arXiv preprint arXiv:2306.05424, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.155, + 0.825, + 0.199 + ], + "angle": 0, + "content": "Karttikeya Mangalam, Raiymbek Akshulakov, and Jitendra Malik. Egoschema: A diagnostic benchmark for very long-form video language understanding. Advances in Neural Information Processing Systems, 36, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.207, + 0.825, + 0.251 + ], + "angle": 0, + "content": "WonJun Moon, Sangeek Hyun, SuBeen Lee, and Jae-Pil Heo. Correlation-guided query-dependency calibration in video representation learning for temporal grounding. arXiv preprint arXiv:2311.08835, 2023a." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.259, + 0.825, + 0.303 + ], + "angle": 0, + "content": "WonJun Moon, Sangeek Hyun, SangUk Park, Dongchan Park, and Jae-Pil Heo. Query-dependent video representation for moment retrieval and highlight detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 23023-23033, 2023b." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.311, + 0.825, + 0.368 + ], + "angle": 0, + "content": "Andreea-Maria Oncescu, Joao F Henriques, Yang Liu, Andrew Zisserman, and Samuel Albanie. Queryd: A video dataset with high-quality text and audio narrations. In ICASSP 2021-2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 2265-2269. IEEE, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.377, + 0.825, + 0.421 + ], + "angle": 0, + "content": "Mengxue Qu, Xiaodong Chen, Wu Liu, Alicia Li, and Yao Zhao. Chatvtg: Video temporal grounding via chat with video dialogue large language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 1847-1856, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.429, + 0.825, + 0.473 + ], + "angle": 0, + "content": "Shuhuai Ren, Linli Yao, Shicheng Li, Xu Sun, and Lu Hou. Timechat: A time-sensitive multimodal large language model for long video understanding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 14313-14323, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.481, + 0.825, + 0.511 + ], + "angle": 0, + "content": "Share. Sharegemini: Scaling up video caption data for multimodal large language models, June 2024. URL https://github.com/Share14/ShareGemini." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.518, + 0.825, + 0.576 + ], + "angle": 0, + "content": "Enxin Song, Wenhao Chai, Guanhong Wang, Yucheng Zhang, Haoyang Zhou, Feiyang Wu, Haozhe Chi, Xun Guo, Tian Ye, Yanting Zhang, et al. Moviechat: From dense token to sparse memory for long video understanding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 18221-18232, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.584, + 0.825, + 0.627 + ], + "angle": 0, + "content": "Enxin Song, Wenhao Chai, Tian Ye, Jenq-Neng Hwang, Xi Li, and Gaoang Wang. Moviechat+: Question-aware sparse memory for long video question answering. arXiv preprint arXiv:2404.17176, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.636, + 0.825, + 0.68 + ], + "angle": 0, + "content": "Yale Song, Jordi Vallmitjana, Amanda Stent, and Alejandro Jaime. Tvsum: Summarizing web videos using titles. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 5179-5187, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.688, + 0.825, + 0.732 + ], + "angle": 0, + "content": "Yansong Tang, Dajun Ding, Yongming Rao, Yu Zheng, Danyang Zhang, Lili Zhao, Jiwen Lu, and Jie Zhou. Coin: A large-scale dataset for comprehensive instructional video analysis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 1207-1216, 2019." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.74, + 0.825, + 0.784 + ], + "angle": 0, + "content": "Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, et al. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.792, + 0.825, + 0.821 + ], + "angle": 0, + "content": "Gido M Van de Ven, Tinne Tuytelaars, and Andreas S Tolias. Three types of incremental learning. Nature Machine Intelligence, 4(12):1185-1197, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.829, + 0.825, + 0.872 + ], + "angle": 0, + "content": "Limin Wang, Yuanjun Xiong, Zhe Wang, Yu Qiao, Dahua Lin, Xiaou Tang, and Luc Van Gool. Temporal segment networks for action recognition in videos. IEEE Trans. Pattern Anal. Mach. Intell., 41(11):2740-2755, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.882, + 0.825, + 0.925 + ], + "angle": 0, + "content": "Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024a." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.826, + 0.147 + ], + "angle": 0, + "content": "Wenhai Wang, Zhe Chen, Xiaokang Chen, Jiannan Wu, Xizhou Zhu, Gang Zeng, Ping Luo, Tong Lu, Jie Zhou, Yu Qiao, et al. Visionllm: Large language model is also an open-ended decoder for vision-centric tasks. Advances in Neural Information Processing Systems, 36, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.155, + 0.825, + 0.185 + ], + "angle": 0, + "content": "Xiaohan Wang, Yuhui Zhang, Orr Zohar, and Serena Yeung-Levy. Videoagent: Long-form video understanding with large language model as agent. arXiv preprint arXiv:2403.10517, 2024c." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.192, + 0.825, + 0.234 + ], + "angle": 0, + "content": "Xidong Wang, Dingjie Song, Shunian Chen, Chen Zhang, and Benyou Wang. Longllava: Scaling multi-modal llms to 1000 images efficiently via hybrid architecture. arXiv preprint arXiv:2409.02889, 2024d." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.243, + 0.825, + 0.285 + ], + "angle": 0, + "content": "Yi Wang, Kunchang Li, Xinhao Li, Jiashuo Yu, Yinan He, Guo Chen, Baoqi Pei, Rongkun Zheng, Jilan Xu, Zun Wang, et al. Internvideo2: Scaling video foundation models for multimodal video understanding. arXiv preprint arXiv:2403.15377, 2024e." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.293, + 0.825, + 0.336 + ], + "angle": 0, + "content": "Yueqian Wang, Xiaojun Meng, Jianxin Liang, Yuxuan Wang, Qun Liu, and Dongyan Zhao. Hawkeye: Training video-text llms for grounding text in videos. arXiv preprint arXiv:2403.10228, 2024f." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.344, + 0.825, + 0.402 + ], + "angle": 0, + "content": "Yuxuan Wang, Yueqian Wang, Pengfei Wu, Jianxin Liang, Dongyan Zhao, Yang Liu, and Zilong Zheng. 
Efficient temporal extrapolation of multimodal large language models with temporal grounding bridge. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pp. 9972-9987, 2024g." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.409, + 0.825, + 0.452 + ], + "angle": 0, + "content": "Ziyang Wang, Shoubin Yu, Elias Stengel-Eskin, Jaehong Yoon, Feng Cheng, Gedas Bertasius, and Mohit Bansal. Videotree: Adaptive tree-based video representation for llm reasoning on long videos. arXiv preprint arXiv:2405.19209, 2024h." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.46, + 0.825, + 0.516 + ], + "angle": 0, + "content": "Jiannan Wu, Muyan Zhong, Sen Xing, Zeqiang Lai, Zhaoyang Liu, Wenhai Wang, Zhe Chen, Xizhou Zhu, Lewei Lu, Tong Lu, et al. Visionllm v2: An end-to-end generalist multimodal large language model for hundreds of vision-language tasks. arXiv preprint arXiv:2406.08394, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.525, + 0.825, + 0.568 + ], + "angle": 0, + "content": "Junbin Xiao, Angela Yao, Yicong Li, and Tat-Seng Chua. Can i trust your answer? visually grounded video question answering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 13204-13214, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.576, + 0.825, + 0.619 + ], + "angle": 0, + "content": "Fuzhao Xue, Yukang Chen, Dacheng Li, Qinghao Hu, Ligeng Zhu, Xiuyu Li, Yunhao Fang, Haotian Tang, Shang Yang, Zhijian Liu, et al. Longvila: Scaling long-context visual language models for long videos. arXiv preprint arXiv:2408.10188, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.627, + 0.825, + 0.671 + ], + "angle": 0, + "content": "Shen Yan, Xuehan Xiong, Arsha Nagrani, Anurag Arnab, Zhonghao Wang, Weina Ge, David Ross, and Cordelia Schmid. Unloc: A unified framework for video localization tasks. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 13623-13633, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.678, + 0.825, + 0.721 + ], + "angle": 0, + "content": "En Yu, Liang Zhao, Yana Wei, Jinrong Yang, Dongming Wu, Lingyu Kong, Haoran Wei, Tiancai Wang, Zheng Ge, Xiangyu Zhang, et al. Merlin: Empowering multimodal llms with foresight minds. arXiv preprint arXiv:2312.00589, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.728, + 0.825, + 0.772 + ], + "angle": 0, + "content": "Abhay Zala, Jaemin Cho, Satwik Kottur, Xilun Chen, Barlas Oguz, Yashar Mehdad, and Mohit Bansal. Hierarchical video-moment retrieval and step-captioning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 23056-23065, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.779, + 0.825, + 0.837 + ], + "angle": 0, + "content": "Rowan Zellers, Jiasen Lu, Ximing Lu, Youngjae Yu, Yanpeng Zhao, Mohammadreza Salehi, Aditya Kusupati, Jack Hessel, Ali Farhadi, and Yejin Choi. Merlot reserve: Neural script knowledge through vision and language and sound. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 16375-16387, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.845, + 0.825, + 0.888 + ], + "angle": 0, + "content": "Xiangyu Zeng, Mingzhu Xu, Yijun Hu, Haoyu Tang, Yupeng Hu, and Liqiang Nie. Adaptive edge-aware semantic interaction network for salient object detection in optical remote sensing images. IEEE Transactions on Geoscience and Remote Sensing, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.895, + 0.825, + 0.925 + ], + "angle": 0, + "content": "Yingsen Zeng, Yujie Zhong, Chengjian Feng, and Lin Ma. Unimd: Towards unifying moment retrieval and temporal action detection. arXiv preprint arXiv:2404.04933, 2024." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.104, + 0.825, + 0.134 + ], + "angle": 0, + "content": "Hang Zhang, Xin Li, and Lidong Bing. Video-llama: An instruction-tuned audio-visual language model for video understanding. arXiv preprint arXiv:2306.02858, 2023." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.141, + 0.825, + 0.185 + ], + "angle": 0, + "content": "Haoji Zhang, Yiqin Wang, Yansong Tang, Yong Liu, Jiashi Feng, Jifeng Dai, and Xiaojie Jin. Flash-vstream: Memory-based real-time understanding for long video streams. arXiv preprint arXiv:2406.08085, 2024a." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.191, + 0.825, + 0.234 + ], + "angle": 0, + "content": "Peiyuan Zhang, Kaichen Zhang, Bo Li, Guangtao Zeng, Jingkang Yang, Yuanhan Zhang, Ziyue Wang, Haoran Tan, Chunyuan Li, and Ziwei Liu. Long context transfer from language to vision. arXiv preprint arXiv:2406.16852, 2024b." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.242, + 0.825, + 0.285 + ], + "angle": 0, + "content": "Junjie Zhou, Yan Shu, Bo Zhao, Boya Wu, Shitao Xiao, Xi Yang, Yongping Xiong, Bo Zhang, Tiejun Huang, and Zheng Liu. Mlvu: A comprehensive benchmark for multi-task long video understanding. arXiv preprint arXiv:2406.04264, 2024." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.292, + 0.825, + 0.334 + ], + "angle": 0, + "content": "Luowei Zhou, Chenliang Xu, and Jason Corso. Towards automatic learning of procedures from web instructional videos. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 32, 2018." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.36, + 0.446, + 0.375 + ], + "angle": 0, + "content": "A IMPLEMENTATION OF TAPE" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.396, + 0.436, + 0.411 + ], + "angle": 0, + "content": "Algorithm 1 PyTorch snippet of TAPE." 
+ }, + { + "type": "title", + "bbox": [ + 0.201, + 0.415, + 0.369, + 0.431 + ], + "angle": 0, + "content": "Initialize related packages" + }, + { + "type": "code", + "bbox": [ + 0.2, + 0.436, + 0.797, + 0.827 + ], + "angle": 0, + "content": "import torch.nn as nn\n\n# TransposeLayerNorm (LayerNorm applied to (B, C, T) tensors) is assumed to be defined elsewhere.\nclass TemporalAdapter(nn.Module):\n    def __init__(self, merge_len, clip_num, input_dim, mid_dim, output_dim, sample_rate):\n        super().__init__()\n        # Compress the sequence length by merge_len\n        self.AvgPool = nn.AvgPool1d(merge_len, stride=merge_len)\n        self.upsample = nn.Upsample(scale_factor=sample_rate)\n        # Compress / restore the channel dimension\n        self.linear_input = nn.Linear(input_dim, mid_dim)\n        self.linear_output = nn.Linear(mid_dim, output_dim)\n        # Zero-initialize the output projection\n        nn.init.constant_(self.linear_output.weight, 0)\n        nn.init.constant_(self.linear_output.bias, 0)\n        self.Downsample_Depthwise_Separable_Conv1 = nn.Sequential(\n            nn.Conv1d(mid_dim, mid_dim, merge_len*2+1, stride=sample_rate, padding=merge_len, groups=mid_dim),\n            nn.Conv1d(mid_dim, mid_dim, 1),\n            TransposeLayerNorm(mid_dim),\n            nn.GELU(),\n        )\n        self.Downsample_Depthwise_Separable_Conv2 = nn.Sequential(\n            nn.Conv1d(mid_dim, mid_dim, merge_len*2+1, stride=sample_rate, padding=merge_len, groups=mid_dim),\n            nn.Conv1d(mid_dim, mid_dim, 1),\n            TransposeLayerNorm(mid_dim),\n            nn.GELU(),\n        )\n        # Long-window convolution on the shortest sequence; zero padding serves as temporal anchors\n        self.fc = nn.Sequential(\n            nn.Conv1d(mid_dim, mid_dim, clip_num+1, stride=1, padding=clip_num//2),\n            TransposeLayerNorm(mid_dim),\n            nn.GELU(),\n        )\n        self.Conv2 = nn.Sequential(\n            nn.Conv1d(mid_dim, mid_dim, merge_len+1, stride=1, padding=merge_len//2, groups=mid_dim),\n            nn.Conv1d(mid_dim, mid_dim, 1),\n            TransposeLayerNorm(mid_dim),\n            nn.GELU(),\n        )\n        self.Conv1 = nn.Sequential(\n            nn.Conv1d(mid_dim, mid_dim, merge_len+1, stride=1, padding=merge_len//2, groups=mid_dim),\n            nn.Conv1d(mid_dim, mid_dim, 1),\n            TransposeLayerNorm(mid_dim),\n            nn.GELU(),\n        )\n\n    def forward(self, input_tokens):\n        # (B, L, C) -> (B, mid_dim, L), then pool the sequence length\n        time_ad = self.linear_input(input_tokens).transpose(1, 2)\n        time_ad1 = self.AvgPool(time_ad)\n        # U-Net-like downsampling to three temporal resolutions\n        time_ad2 = self.Downsample_Depthwise_Separable_Conv1(time_ad1)\n        time_ad3 = self.Downsample_Depthwise_Separable_Conv2(time_ad2)\n        time_ad3 = self.fc(time_ad3)\n        # Progressive upsampling with residual connections across scales\n        time_ad2 = self.upsample(time_ad3) + time_ad2\n        time_ad2 = self.Conv2(time_ad2)\n        time_ad1 = self.upsample(time_ad2) + time_ad1\n        time_ad1 = self.Conv1(time_ad1)\n        # Align the channel dimension with the shuffled video tokens\n        time_ad_out = self.linear_output(time_ad1.transpose(1, 2))\n        return time_ad_out" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.855, + 0.825, + 0.926 + ], + "angle": 0, + "content": "Algorithm 1 details the implementation process of TAPE in code form. Specifically, the long video token sequence input_tokens is first compressed in the channel dimension by a linear layer to obtain time_ad, and the sequence length is compressed through a pooling layer. Next, we use a U-Net-like structure composed of one-dimensional depthwise separable convolutions to progressively down-sample the sequence, obtaining three one-dimensional temporal feature sequences with different" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.204 + ], + "angle": 0, + "content": "time resolutions, namely time_ad1, time_ad2, and time_ad3. Subsequently, a convolution with a sufficiently long window is applied to the shortest temporal feature sequence time_ad3, using zero padding at both ends as anchors to encode the relative temporal position of each token in the sequence.
Then, we progressively upsample the temporal feature sequences from short to long, using residual connections to preserve temporal features at different scales. Finally, the temporal feature sequence time_ad_out is restored to the same length as the video features after token shuffling and aligned in the channel dimension through a linear layer." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.225, + 0.449, + 0.24 + ], + "angle": 0, + "content": "B INSTRUCTION-TUNING DATA" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.257, + 0.825, + 0.3 + ], + "angle": 0, + "content": "We fine-tuned VideoChat-T using 432K data, which includes 349K instances from TimePro and 82K instances from normal data. All videos were sampled from existing open-source video datasets, with specific information about the relevant data provided in Table 6." + }, + { + "type": "table", + "bbox": [ + 0.242, + 0.315, + 0.759, + 0.658 + ], + "angle": 0, + "content": "
Set | Task | Source | Instance Num
TimePro | Temporal Video Grounding | DiDeMo | 32,944
QuerYD | 14,602
HiREST-grounding | 459
Dense Video Captioning | ActivityNet-Captions | 10,009
ViTT | 5,086
YouCook2 | 8,700
Video Summarization | TVSum | 50
SumMe | 25
Step Localization and Captioning | COIN | 9,026
HiREST-step | 459
Transcribed Speech Generation | YT-Temporal | 31,190
Reasoning Temporal Localization | ActivityNet-RTL | 33,557
Multi-format Temporal Grounding | InternVid-VTime | 100,000
Highlight Detection | ActivityNet-HL | 10,340
Temporal Grounded Caption | CosMo-TGC | 93,118
Normal | Conversation | VideoChatGPT | 13,303
VideoChat | 13,884
Video QA | EgoQA | 7,813
MovieChat-QA | 808
Reasoning | STAR | 45,731
Caption | MovieChat-Caption | 808
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.17, + 0.668, + 0.825, + 0.711 + ], + "angle": 0, + "content": "Table 6: The complete instruction fine-tuning data used for training. We utilized a total of approximately 432K data points, which can be divided into 349K instances of TimePro and 82K instances of regular video data, covering 13 tasks across 21 datasets." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.729, + 0.825, + 0.828 + ], + "angle": 0, + "content": "We evaluate the quality of the data from three perspectives: diversity, length, and difficulty. We strive to include different datasets for various tasks, and the distribution of videos in the datasets is as broad as possible. The length of the videos should be controlled within an appropriate range, as excessively long or short videos may pose challenges for training. Each query should clearly describe the video content of the target time segment and avoid corresponding to multiple time segments in the video. Based on these principles, we have screened and integrated existing high-quality datasets, which significantly contribute to enhancing the model's temporal awareness capabilities." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.833, + 0.825, + 0.89 + ], + "angle": 0, + "content": "TimePro encompasses a series of open-source temporal grounding datasets that we have integrated, cleaned, and refined, such as TimeIT (Ren et al., 2024), ANet-RTL (Huang et al., 2024b), and InternVid-VTime (Huang et al., 2024a). These high-quality open-source datasets have been experimentally validated by us. We also added two new self-made datasets, ANet-HL and CosMo-TGC." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.825, + 0.926 + ], + "angle": 0, + "content": "Temporal Video Grounding. This task involves providing a natural language query and requires outputting the corresponding video's start and end times. The datasets include DiDeMo (Anne Hen" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "ref_text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.135 + ], + "angle": 0, + "content": "dricks et al., 2017), QuerYD (Oncescu et al., 2021), and HiREST-grounding (Zala et al., 2023), aiming to achieve precise temporal localization during user interaction with natural language." + }, + { + "type": "ref_text", + "bbox": [ + 0.171, + 0.14, + 0.827, + 0.21 + ], + "angle": 0, + "content": "Dense Video Captioning. This task requires the model to detect a series of events occurring in a given video and output the corresponding timestamps and coarse-grained descriptions. The datasets for this part include ActivityNet-Caption (Krishna et al., 2017), ViTT Huang et al. (2020), and YouCook2 (Zhou et al., 2018), which help the model learn the temporal relationships between different events within the video." + }, + { + "type": "ref_text", + "bbox": [ + 0.171, + 0.216, + 0.828, + 0.288 + ], + "angle": 0, + "content": "Video Summarization. The goal of this task is not to summarize at the semantic level of natural language, but to determine a set of compressed frames or clips in the form of timestamps, representing the most informative content in a given video. 
Our datasets include TVSum (Song et al., 2015) and SumMe (Gygli et al., 2014), which effectively combine the model's temporal perception capabilities with its semantic content inference abilities." + }, + { + "type": "ref_text", + "bbox": [ + 0.171, + 0.293, + 0.828, + 0.351 + ], + "angle": 0, + "content": "Step Localization and Captioning. This task differs from dense video captioning as it is designed to segment and describe the important steps within a long video. We have integrated two datasets, COIN (Tang et al., 2019) and HiREST-step (Zala et al., 2023), which can help the model learn the procedural temporal logic relationships of different steps within a single event." + }, + { + "type": "ref_text", + "bbox": [ + 0.171, + 0.356, + 0.828, + 0.412 + ], + "angle": 0, + "content": "Transcribed Speech Generation. The purpose of this task is to predict speech content and its corresponding start and end timestamps based on visual signals in the video. Including the YT-Temporal (Zellers et al., 2022) dataset, this task can be viewed as a weakly supervised event localization and description task." + }, + { + "type": "ref_text", + "bbox": [ + 0.171, + 0.419, + 0.827, + 0.476 + ], + "angle": 0, + "content": "Reasoning Temporal Localization. The answers to the questions in this task include both timestamps and explanations. We used the ANet-RTL (Huang et al., 2024b) dataset as training data for this task. By combining temporal localization and reasoning, we can more specifically enhance the model's temporal perception capabilities." + }, + { + "type": "ref_text", + "bbox": [ + 0.171, + 0.481, + 0.827, + 0.54 + ], + "angle": 0, + "content": "Multi-format Temporal Grounding. This task includes both single-turn and multi-turn dialogues, with a variety of question types. We use the InternVid-VTime (Huang et al., 2024a) dataset for training this task. The broader range of task types and more diverse output formats can effectively enhance the model's temporal generalization capabilities." + }, + { + "type": "ref_text", + "bbox": [ + 0.171, + 0.544, + 0.828, + 0.643 + ], + "angle": 0, + "content": "Highlight Detection. Unlike video summarization, this task identifies only the most salient moments of a video in response to a natural language query, without covering the entire scope of the original video (Lei et al., 2021a). We used a custom dataset, ANet-HL, derived from temporal localization data. We extract video segments between the start and end times of the target's appearance and use CLIP to calculate the similarity between each frame's scene and the target. This is converted into discrete saliency levels ranging from 1 to 5, at intervals of 0.5. This task effectively enhances the model's temporal perception capabilities for specific events." + }, + { + "type": "ref_text", + "bbox": [ + 0.171, + 0.649, + 0.828, + 0.735 + ], + "angle": 0, + "content": "Temporal Grounded Caption. This task involves using scene titles as queries, requiring the model to output both the time segments when the scenes appear and the fine-grained subtitles for those segments. We used our custom dataset, CosMo-TGC. This task format, which combines temporal localization and semantic understanding, can effectively prevent large language models from focusing on irrelevant video segments, thereby improving the quality of the model's responses to questions." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.171, + 0.74, + 0.825, + 0.784 + ], + "angle": 0, + "content": "We also used normal data comprising four tasks and six different data sources. These general data help prevent the model from overfitting to temporal grounding-related tasks during training, thereby preserving the model's original capabilities." + }, + { + "type": "list", + "bbox": [ + 0.171, + 0.104, + 0.828, + 0.784 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.807, + 0.462, + 0.823 + ], + "angle": 0, + "content": "C COMPUTATIONAL EFFICIENCY" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.84, + 0.828, + 0.927 + ], + "angle": 0, + "content": "By applying Token Shuffle, we further reduced the computational cost of VideoChat-T, giving it a significant computational advantage over high-performance models like LLaVA-OneVision (Li et al., 2024a) and Qwen2-VL (Wang et al., 2024a). Under the same settings, VideoChat-T uses only 3 tokens per frame, with FLOPs consumption at just \\(5.1\\%\\) of LLaVA-OneVision's. Its inference time on a single A100 is only 0.63 seconds, reaching real-time response levels, making it highly suitable for applications requiring rapid response, such as online video understanding." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "table", + "bbox": [ + 0.175, + 0.101, + 0.825, + 0.151 + ], + "angle": 0, + "content": "
Method | Token num per frame | FLOPs (128 frames) | Inference Time (128f, single A100 GPU) | Charades-STA IOU0.5 | QVHighlight mAP | MVBench Avg | Egoschema Full | VideoMME Vision
Qwen2-VL (Wang et al., 2024a) | 138 | 929.8 T | Out Of Memory | 15.0 | 13.0 | 67.0 | 66.7 | 63.3
LLaVA-OneVision (Li et al., 2024a) | 196 | 693.7 T | 4.95 s | 7.3 | 14.98 | 56.7 | 60.1 | 58.2
VideoChat-T (Ours) | 3 | 35.5 T | 0.63 s | 48.7 | 26.5 | 59.9 | 60.0 | 46.3
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.16, + 0.825, + 0.203 + ], + "angle": 0, + "content": "Table 7: Comparison of the computational efficiency and performance of VideoChat-T with other methods. Our approach achieves relatively impressive performance with extremely low computational cost." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.231, + 0.825, + 0.288 + ], + "angle": 0, + "content": "In terms of performance, VideoChat-T significantly outperforms LLaVA-OneVision in temporal grounding tasks. It has a slight advantage on MVBench; both perform comparably on Egoschema; but VideoChat-T performs worse on VideoMME. Given the substantial savings in computational resources with VideoChat-T, we consider the disadvantages on some datasets to be acceptable." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.294, + 0.827, + 0.366 + ], + "angle": 0, + "content": "Moreover, our model's ability to maintain reasonable performance under high compression ratios suggests that the token embedding spaces of contemporary models may be characterized by considerable feature redundancy. This observation presents a promising avenue for future research, as efficient techniques for compressing or discarding redundant features could substantially reduce computational costs without sacrificing model performance, enabling longer context reasoning." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.385, + 0.488, + 0.4 + ], + "angle": 0, + "content": "D DETAILS OF HYPERPARAMETERS" + }, + { + "type": "table", + "bbox": [ + 0.339, + 0.42, + 0.661, + 0.648 + ], + "angle": 0, + "content": "
config | epoch1 | epoch2&3
input frame | 192 | 128
max text length | 1536 | 1024
freeze TAPE | True | False
learning rate | 2e-5 | 1.5e-5
input resolution | 224
clip frame | 8
merge length | 4
QFormer token (per clip) | 96
lora rank | 16
lora alpha | 32
lora dropout | 0.1
batch size (per GPU) | 2
optimizer | AdamW
optimizer momentum | 0.9, 0.999
weight decay | 0.02
learning rate schedule | cosine decay
" + }, + { + "type": "table_caption", + "bbox": [ + 0.235, + 0.657, + 0.761, + 0.673 + ], + "angle": 0, + "content": "Table 8: Hyper-parameter Settings During the Training Process of VideoChat-T." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.691, + 0.825, + 0.761 + ], + "angle": 0, + "content": "Table 8 lists the hyperparameters used during different epochs of the training process. In the first epoch, we used a larger number of input frames and froze the TAPE. At the beginning of the second epoch, we unfroze the TAPE and fixed the model's input frames to 128. Following the settings of VideoChat2, we integrated the lora module into the LLM and applied flash attention to accelerate the training process." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.782, + 0.395, + 0.797 + ], + "angle": 0, + "content": "E FULL PERFORMANCES" + }, + { + "type": "table", + "bbox": [ + 0.175, + 0.818, + 0.825, + 0.902 + ], + "angle": 0, + "content": "
Model | LLM | Avg | AS | AP | AA | FA | UA | OE | OI | OS | MD | AL | ST | AC | MC | MA | SC | FP | CO | EN | ER | CI
VideoChatGPT (Maaz et al., 2023) | 7B | 32.7 | 23.5 | 26.0 | 62.0 | 22.5 | 26.5 | 54.0 | 28.0 | 40.0 | 23.0 | 20.0 | 31.0 | 30.5 | 25.5 | 39.5 | 48.5 | 29.0 | 33.0 | 29.5 | 26.0 | 35.5
VideoLLaMA (Zhang et al., 2023) | 7B | 34.1 | 27.5 | 25.5 | 51.0 | 29.0 | 39.0 | 48.0 | 40.5 | 38.0 | 22.5 | 22.5 | 43.0 | 34.0 | 22.5 | 32.5 | 45.5 | 32.5 | 40.0 | 30.0 | 21.0 | 37.0
VideoChat (Li et al., 2023b) | 7B | 35.5 | 33.5 | 26.5 | 56.0 | 33.5 | 40.5 | 53.0 | 40.5 | 30.0 | 25.5 | 27.0 | 48.5 | 35.0 | 20.5 | 42.5 | 46.0 | 26.5 | 41.0 | 23.5 | 23.5 | 36.0
ST-LLM (Liu et al., 2024b) | 7B | 54.9 | 66.0 | 53.5 | 84.0 | 44.0 | 58.5 | 80.5 | 73.5 | 38.5 | 42.5 | 31.0 | 86.5 | 36.5 | 56.5 | 78.5 | 43.0 | 44.5 | 46.5 | 34.5 | 41.5 | 58.5
VideoChat2 (Li et al., 2024b) | 7B | 60.4 | 75.5 | 58.0 | 83.5 | 50.5 | 60.5 | 87.5 | 74.5 | 45.0 | 47.5 | 44.0 | 82.5 | 37.0 | 64.5 | 87.5 | 51.0 | 66.5 | 47.0 | 35.0 | 37.0 | 72.5
VideoChat-T | 7B | 59.9 | 83.5 | 68.5 | 80.5 | 44.0 | 61.0 | 71.0 | 84.0 | 35.5 | 48.0 | 56.5 | 87.0 | 46.0 | 56.5 | 78.0 | 49.5 | 59.0 | 46.0 | 37.0 | 40.0 | 66.5
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.903, + 0.825, + 0.933 + ], + "angle": 0, + "content": "Table 9: The full performance of VideoChat-T on MVBench. VideoChat-T still demonstrates strong performance, effectively prevents catastrophic forgetting caused by incremental fine-tuning." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.214 + ], + "angle": 0, + "content": "The performance of VideoChat-T on MVBench is shown in Table 9. Compared to VideoChat2, VideoChat-T only experienced a \\(0.5\\%\\) accuracy loss. This indicates that our method effectively preserves the capabilities of the base model, preventing catastrophic forgetting caused by incremental fine-tuning. For a detailed analysis of the performance degradation of MVBench, please refer to Appendix F.2. For the Action Localization (AL) task, which requires the model to determine the coarse-grained temporal position of events, the test accuracy improved from \\(44.0\\%\\) to \\(56.5\\%\\). This indirectly confirms that our method significantly enhances the model's temporal awareness capabilities." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.226, + 0.825, + 0.357 + ], + "angle": 0, + "content": "
Model | LLM size | Overall (%) | Short Video (%) | Medium Video (%) | Long Video (%)
w/o subs | w subs | w/o subs | w subs | w/o subs | w subs | w/o subs | w subs
ST-LLM (Liu et al., 2024b) | 7B | 37.9 | 42.3 | 45.7 | 48.4 | 36.8 | 41.4 | 31.3 | 36.9
Video-LLaVA (Lin et al., 2023a) | 7B | 39.9 | 41.6 | 45.3 | 46.1 | 38.0 | 40.7 | 36.2 | 38.1
ShareGPT4Video (Chen et al., 2024) | 8B | 39.9 | 43.6 | 48.3 | 53.6 | 36.3 | 39.3 | 35.0 | 37.9
Chat-UniVi-v1.5 (Jin et al., 2024) | 7B | 40.6 | 45.9 | 45.7 | 51.2 | 40.3 | 44.6 | 35.8 | 41.8
Qwen-VL-Chat (Bai et al., 2023) | 7B | 41.1 | 41.9 | 46.9 | 47.3 | 38.7 | 40.4 | 37.8 | 37.9
ShareGemini (Share, 2024) | 7B | 43.2 | 47.9 | 49.1 | 52.8 | 41.3 | 47.3 | 39.1 | 43.4
VideoChat2 (Li et al., 2024b) | 7B | 39.5 | 43.8 | 48.3 | 52.8 | 37.0 | 39.4 | 33.2 | 39.2
VideoChat-T | 7B | 46.3 | 55.8 | 53.3 | 59.9 | 43.8 | 54.0 | 41.9 | 53.4
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.358, + 0.825, + 0.387 + ], + "angle": 0, + "content": "Table 10: The full performance of VideoChat-T on VideoMME. VideoChat-T achieved significant performance improvements, particularly in the long video subset." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.395, + 0.825, + 0.452 + ], + "angle": 0, + "content": "The overall performance of our model on VideoMME is presented in Table 10. VideoChat-T achieved significant improvements on both evaluation benchmarks of VideoMME, which include watching videos only and videos with subtitles. The improvements are particularly notable in the long video subset." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.471, + 0.359, + 0.487 + ], + "angle": 0, + "content": "F EXTRA ABLATION" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.504, + 0.444, + 0.518 + ], + "angle": 0, + "content": "F.1 DOMAIN CORRELATION OF DATA" + }, + { + "type": "table", + "bbox": [ + 0.306, + 0.533, + 0.694, + 0.58 + ], + "angle": 0, + "content": "
Model | Charades-STA (R@1 IOU=0.5) | MVBench (avg)
VideoChat-T | 48.7 | 59.9
w/o STAR | 47.5 (-1.2) | 59.4 (-0.5)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.17, + 0.589, + 0.825, + 0.634 + ], + "angle": 0, + "content": "Table 11: The performance changes of the model after removing STAR. Although the video sources of STAR may have some domain correlation with those of Charades-STA and MVBench, the performance of our model is minimally affected by STAR." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.65, + 0.827, + 0.791 + ], + "angle": 0, + "content": "We found that the video sources in the STAR dataset might have some domain correlation with the video sources in MVBench and Charades-STA. Therefore, we removed STAR from the training set while keeping other training settings consistent with the original. The performance on benchmarks where the video sources might have domain correlation is shown in Table 11. The model's accuracy on Charades-STA (R@1 IOU=0.5) decreased by \\(1.2\\%\\), and the average accuracy on MVBench decreased by \\(0.5\\%\\). This indicates that the domain correlation of video sources did not significantly impact performance for our model. Notably, after removing STAR, our normal data volume was reduced to approximately 36K. This implies that, with sufficiently parameter-efficient initialization and appropriate training strategies, using only a small amount of high-quality normal data is sufficient to retain the model's original capabilities." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.807, + 0.696, + 0.822 + ], + "angle": 0, + "content": "F.2 DEeper INVESTIGATION OF THE PERFORMANCE DROP ON MVBENCH" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.833, + 0.825, + 0.876 + ], + "angle": 0, + "content": "We conducted a deeper investigation into the performance decline on MVBench. Through additional ablation experiments (as shown in Tabel 12), we identified two main factors contributing to the performance drop." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.882, + 0.825, + 0.926 + ], + "angle": 0, + "content": "Architectural Discrepancy: The original VideoChat2 model was designed to process only 16 frames, leading to a mismatch in the learned feature distribution compared to the architecture of VideoChatT. As shown in the first two rows of the table, increasing the input frame number for VideoChat2" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "table", + "bbox": [ + 0.177, + 0.101, + 0.825, + 0.203 + ], + "angle": 0, + "content": "
Method | post ft data | data size | frame num | token num (per frame) | MVBench (Avg)
VideoChat2 | - | - | 16 | 12 | 60.4
VideoChat2 | - | - | 128 | 12 | 42.1
VideoChat-T (Common_Init) | - | - | 128 | 3 | 25.3
VideoChat-T (Ours) | - | - | 128 | 3 | 48.6
VideoChat-T (Ours) | TimePro+Normal (Ours) | 0.43M | 128 | 3 | 59.9
VideoChat-T (Ours) | TimePro+FullVideoChat2 | 2M | 128 | 3 | 62.9
" + }, + { + "type": "table_caption", + "bbox": [ + 0.187, + 0.212, + 0.809, + 0.227 + ], + "angle": 0, + "content": "Table 12: Performance of VideoChat2 and VideoChat-T on MVBench under different settings." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.252, + 0.827, + 0.351 + ], + "angle": 0, + "content": "resulted in a significant performance drop (from 60.4 to 42.1). When initializing VideoChat-T with VideoChat2, performance was close to random (25.3) due to the newly introduced randomly initialized layers. By applying efficient initialization to these new layers, we partially recovered the original capabilities of the model, bringing the MVBench performance of the un-trained VideoChat-T back to 48.6, representing an improvement of 6.5 compared to the 128-frame VideoChat2. After further fine-tuning, the short-video processing capability of VideoChat-T improved significantly, reaching 59.9." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.357, + 0.825, + 0.468 + ], + "angle": 0, + "content": "Fine-tuning Data Discrepancy: We fine-tuned VideoChat-T using only 432K data, significantly less than the 2M non-grounded regular data used for training VideoChat2. The fine-tuning data for VideoChat2 primarily consisted of short videos of around ten seconds, which closely matched the length distribution of the MVBench evaluation videos, playing a crucial role in improving MVBench performance. To validate our hypothesis, we conducted additional experiments by training our VideoChat-T model using the TimePro and full VideoChat2 training data. It can be observed that VideoChat-T showed a slight improvement in performance on the MVBench dataset, achieving an accuracy of 62.9, which is an increase of 2.5 compared to the original VideoChat2." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.475, + 0.825, + 0.56 + ], + "angle": 0, + "content": "Based on the above, we can conclude the fundamental reasons affecting the model's foundational generalization capabilities. When a model undergoes adjustments, the learned original distribution may not perfectly match the new architecture, making the efficient initialization of new layers crucial. The features learned from the original dataset might be forgotten due to changes in various parameters. Utilizing a more comprehensive and diverse dataset for fine-tuning can restore and even further enhance performance." + }, + { + "type": "table_caption", + "bbox": [ + 0.172, + 0.576, + 0.646, + 0.59 + ], + "angle": 0, + "content": "F.3 ASSOCIATION BETWEEN PERFORMANCE AND MODEL DESIGN" + }, + { + "type": "table", + "bbox": [ + 0.208, + 0.604, + 0.791, + 0.673 + ], + "angle": 0, + "content": "
Method | FT Data | Charades-STA IOU0.5 | QVHighlight mAP | MVBench Avg | Egoschema Full | VideoMME w/o subs
TimeChat | TimeIT+Valley | 32.2 | 14.5 | 38.5 | 33.0 | 30.2
TimeChat | TimePro+Normal | 34.2 | 16.3 | 41.6 | 38.9 | 33.4
VideoChat-T | TimePro+Normal | 48.7 | 26.5 | 59.9 | 60.0 | 46.3
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.682, + 0.825, + 0.711 + ], + "angle": 0, + "content": "Table 13: Comparison of other model architectures trained on our dataset with our method, demonstrating the impact of the overall model structure design." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.729, + 0.827, + 0.827 + ], + "angle": 0, + "content": "To eliminate the influence of training data and auxiliary tasks, and to more clearly evaluate the association between performance and model design, we fine-tuned TimeChat using the full set of fine-tuning data and auxiliary tasks from VideoChat-T. Table 13 presents the performance of TimeChat, fine-tuned with our data, across five datasets. It can be observed that TimeChat, fine-tuned with our data, shows improvements across all benchmarks. However, its performance still lags significantly behind VideoChat-T. This indicates that an efficient fine-tuning architecture design and high-quality, diverse datasets are both essential and complementary." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.843, + 0.462, + 0.857 + ], + "angle": 0, + "content": "F.4 VALIDATION OF TRANSFERABILITY" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.868, + 0.825, + 0.926 + ], + "angle": 0, + "content": "To verify the robustness of our TimeSuite for other MLLMs, we transferred our method to Llava-OneVision (Li et al., 2024a). Table 14 shows the performance changes of Llava-OneVision after applying our TimeSuite. It can be seen that when we apply the full set of methods in TimeSuite to Llava-OneVision, the model's performance on two different long-video evaluation benchmarks" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "table", + "bbox": [ + 0.209, + 0.101, + 0.79, + 0.162 + ], + "angle": 0, + "content": "
Method | Charades-STA IOU0.5 | QVHighlight mAP | VideoMME w/o subs | MLVU Avg | MVBench Avg
Llava-OneVision (baseline) | 7.3 | 15.0 | 58.2 | 64.7 | 56.7
Llava-OneVision-T (Ours) | 42.5 | 21.7 | 61.4 | 69.4 | 56.1
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.172, + 0.825, + 0.215 + ], + "angle": 0, + "content": "Table 14: Performance comparison of TimeSuite migration to other MLLMs. The application of our method shows a certain improvement in long video comprehension, demonstrating the transferability of our approach." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.242, + 0.825, + 0.27 + ], + "angle": 0, + "content": "improves (+3.2 on VideoMME and +4.7 on MLVU), effectively demonstrating the robustness of our TimeSuite for different MLLMs." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.288, + 0.601, + 0.302 + ], + "angle": 0, + "content": "F.5 EXPLORATIONS OF DATA CONFIGRATIONS OF TIMEPRO" + }, + { + "type": "table", + "bbox": [ + 0.209, + 0.318, + 0.79, + 0.371 + ], + "angle": 0, + "content": "
Method | MVBench Avg | Egoschema Full | VideoMME w/o subs | Charades-STA IOU=0.5 | QVHighlight mAP
TimePro 615K + Normal 82K (old version) | 60.0 | 61.0 | 46.3 | 45.4 | 25.7
TimePro 349K + Normal 82K (Ours) | 59.9 | 60.0 | 46.3 | 48.7 | 26.5
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.38, + 0.825, + 0.41 + ], + "angle": 0, + "content": "Table 15: Comparison of different versions of our proposed TimePro. More data does not necessarily lead to higher overall performance, highlighting the importance of data quality." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.428, + 0.825, + 0.526 + ], + "angle": 0, + "content": "In the early version of TimePro, we employed datasets comprising 309K Multi-format Temporal Grounding instances, 150K Temporal Grounded Caption instances and other data. Through extensive experimentation (as shown in Tabel 15), we discovered that removing low-quality data while retaining high-quality instances could significantly reduce training time without compromising performance. Consequently, we pruned these two part datasets to 100K and 93K instances, respectively. The data distribution presented in the paper represents the optimized and relatively balanced configuration we arrived at." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.546, + 0.317, + 0.561 + ], + "angle": 0, + "content": "G DISCUSSION" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.578, + 0.791, + 0.606 + ], + "angle": 0, + "content": "G.1 CAN THE OVERALL PERFORMANCE OF MLLMS BE ENHANCED BY CONTINUOUSLY INTEGRATING EXPERT TASKS?" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.618, + 0.825, + 0.716 + ], + "angle": 0, + "content": "By appropriately fine-tuning the Multimodal Large Language Model (MLLM), we have developed a general MLLM with powerful zero-shot temporal grounding capabilities. Its performance, after fine-tuning on the training set of evaluation benchmarks, can rival the current state-of-the-art supervised expert models. Based on these results, we can boldly speculate whether it is possible to internalize the capabilities of expert models such as spatial grounding, tracking and detection (Zeng et al., 2023) into the MLLM itself, without using any external expert decoders, to enhance the comprehensive understanding performance of the MLLM and achieve a unified generalist MLLM for multiple tasks." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.722, + 0.825, + 0.877 + ], + "angle": 0, + "content": "Merlin (Yu et al., 2023) and VisionLLM (Wang et al., 2024b) have already attempted something similar, but its performance is limited by the reasoning capabilities and language representation bottlenecks of the LLM. There is still a significant gap between its performance and that of expert models for various tasks. We observed similar phenomena in our experiments. The temporal grounding task only requires outputting two timestamps, and the task format is relatively simple, so our model achieved good results. However, the highlight detection task requires outputting multiple discrete timestamps and their corresponding saliency scores. The model needs to accurately predict dozens of numbers in language form to answer the question correctly. Our model performed well only on data with fewer timestamps. Therefore, how to simplify the complex output format of expert tasks into the language representation of LLMs, or to design special processing procedures to simplify complex expert tasks, is a question worth exploring." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.882, + 0.825, + 0.926 + ], + "angle": 0, + "content": "Moreover, designing diverse data formats is also crucial for enhancing the expert capabilities of MLLMs. 
Compared to classic expert models, MLLMs have a natural advantage in task type diversity and can enhance their performance through various task variants of a single capability." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.825, + 0.162 + ], + "angle": 0, + "content": "For temporal grounding tasks, we found that enhancing task diversity has a significant effect on improving the model's temporal perception generalization ability. We can boldly speculate that, given sufficiently diverse training task types, most tasks with relatively simple output formats can achieve results comparable to expert models through appropriate instruction fine-tuning." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.167, + 0.827, + 0.266 + ], + "angle": 0, + "content": "Through the integration of diverse expert tasks and the optimization of language representations, MLLMs can achieve substantial improvements in their overall capabilities. This allows them to effectively comprehend and address complex tasks, rivaling or even exceeding the performance of specialized expert models within specific domains. Looking ahead, MLLMs have the potential to evolve into highly versatile AI models, transcending traditional conversational and QA capabilities. They will be equipped to handle a wide range of complex expert tasks across various domains, such as vision, language, and reasoning." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.299, + 0.809, + 0.328 + ], + "angle": 0, + "content": "G.2 WHY DOES TEMPORAL GROUNDING DATA LEAD TO ACCURACY LOSS IN SHORT-TERM VIDEOS?" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.345, + 0.825, + 0.388 + ], + "angle": 0, + "content": "We conducted ablation experiments using different combinations of temporal grounding data and regular data. The accuracy of VideoChat-T on MVBench after fine-tuning with various data combinations is shown in Table 16." + }, + { + "type": "table", + "bbox": [ + 0.339, + 0.413, + 0.66, + 0.519 + ], + "angle": 0, + "content": "
FT Data | MVBench (AVG)
TimeIT | 54.7
TimeIT+Normal | 55.3
Normal | 56.1
TimePro | 57.4
TimePro+Normal (Ours) | 59.9
" + }, + { + "type": "table_caption", + "bbox": [ + 0.197, + 0.528, + 0.799, + 0.544 + ], + "angle": 0, + "content": "Table 16: Performance VideoChat-T on MVBench under different fine-tuning data settings." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.576, + 0.825, + 0.703 + ], + "angle": 0, + "content": "The diversity of grounding data formats in the past has often been limited, which can lead to overfitting on Temporal Grounding tasks and cause the model to lose its general question-answering capability. We compared the TimeIT dataset proposed in TimeChat (Ren et al., 2024) with our TimePro dataset on MVBench. As shown in the Table 16, fine-tuning with only TimeIT resulted in the lowest accuracy, and the combined use of TimeIT+Normal also performed slightly worse than using Normal alone. This indicates that monotonous grounding data indeed damages the model's original performance (as shown in Figure 1 at the beginning of the paper, TimeChat loses some of its general question-answering capability after fine-tuning, where it outputs localization times for general questions)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.709, + 0.827, + 0.848 + ], + "angle": 0, + "content": "In contrast, our TimePro dataset includes diverse data, encompassing 9 different task types from 15 datasets, which helps mitigate the generalization loss caused by homogeneous grounding data types. Additionally, our dataset integrates Grounding with various general tasks. For instance, Grounded Caption requires detailed descriptions of corresponding video segments, while Reasoning Temporal Localization demands the model to reason about questions. This approach significantly enhances the model's generalization ability and minimizes the impact on its original capability (e.g., short video accuracy). As demonstrated in the Table 16, the performance of using only TimePro exceeds that of using Normal alone, and the combined use of TimePro and Normal far surpasses all other combinations. This also confirms that our TimePro effectively preserves the model's original performance." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.855, + 0.827, + 0.926 + ], + "angle": 0, + "content": "Overall, using a single type of expert task training data can easily lead to model overfitting, resulting in significant loss of the model's original capabilities. To preserve the model's foundational generalization abilities, it is essential to use diversified training data. Additionally, incorporating data of various types and distributions, such as text, images, and videos, can further enhance the model's generalization capabilities." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.104, + 0.822, + 0.133 + ], + "angle": 0, + "content": "G.3 COULD TRAINING THE MODEL ON BOTH TEMPORAL AND NON-TEMPORAL GROUNDING DATA MITIGATE PERFORMANCE LOSS IN SHORT-TERM VIDEOS?" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.144, + 0.825, + 0.201 + ], + "angle": 0, + "content": "To address this question, we conducted additional ablation experiments. By training VideoChat-T with different combinations of temporal and non-temporal grounding data, we were able to clearly observe the effects of both types of data on the model's performance. 
The results of the experiments are shown in Table 17." + }, + { + "type": "table", + "bbox": [ + 0.243, + 0.211, + 0.756, + 0.304 + ], + "angle": 0, + "content": "
FT Data | MVBench Avg | VideoMME w/o subs | Charades-STA R1@0.5
Normal | 56.1 | 42.6 | 8.0
TimePro | 57.4 | 46.0 | 45.6
TimePro+Normal (Ours) | 59.9 | 46.3 | 48.7
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.313, + 0.825, + 0.343 + ], + "angle": 0, + "content": "Table 17: Performance comparison of VideoChat-T using different combinations of temporal grounding and non-temporal grounding data." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.36, + 0.827, + 0.473 + ], + "angle": 0, + "content": "It can be observed that the combined use of TimePro+Normal for VideoChat-T achieves the highest performance in short video QA, long video QA, and temporal grounding tasks. This not only demonstrates that using both temporal grounding and non-temporal grounding data can reduce performance loss in short videos, but also reveals that the effects of temporal and non-temporal grounding data are complementary across various tasks. The distinct differences between temporal grounding and non-temporal grounding tasks can respectively compensate for the model's shortcomings in different task perspectives and feature distributions. The simultaneous use of both types of data can effectively enhance the model's overall capabilities." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.493, + 0.32, + 0.509 + ], + "angle": 0, + "content": "H CASE STUDY" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.525, + 0.431, + 0.54 + ], + "angle": 0, + "content": "H.1 MORE QUALITATIVE ANALYSIS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.551, + 0.825, + 0.595 + ], + "angle": 0, + "content": "To further qualitatively analyze our model, we supplemented it with three types of examples. These examples are about long video QA, short video QA, and captioning tasks, all of which include temporal grounding." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.6, + 0.825, + 0.657 + ], + "angle": 0, + "content": "More qualitative comparisons about long video QA are shown in Figure 6. VideoChat-T effectively handles various questions across different domains. By better perceiving the temporal relationships of different events occurring in long videos, it can more accurately and deeply understand the detailed content of the entire video." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.663, + 0.825, + 0.721 + ], + "angle": 0, + "content": "More qualitative comparisons about short video QA are shown in Figure 7. VideoChat-T effectively retains the original capabilities of the base model. Through parameter-efficient initialization methods and appropriate training strategies, we minimize the damage to the base model's capabilities caused by new architectures and data." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.727, + 0.825, + 0.77 + ], + "angle": 0, + "content": "More qualitative comparisons about captioning are shown in Figure 8. Although VideoChat2 describes more local details in some scenarios compared to VideoChat-T, VideoChat-T focuses more on a series of temporal events, which aligns better with how humans typically describe videos." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.786, + 0.334, + 0.801 + ], + "angle": 0, + "content": "H.2 SHORTCOMINGS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.813, + 0.826, + 0.926 + ], + "angle": 0, + "content": "We also conducted a qualitative analysis of the shortcomings of VideoChat-T through examples. As shown in Figure 9, VideoChat-T performs poorly on examples with complex logic. 
In the left example, although VideoChat-T accurately identified the timing of the event, it failed to fully explain the motivation behind the man opening the isolation door, which was \"to fight the hijackers of the space elevator, seize the controller, and thus save the people in the entire space elevator.\" In the right example, VideoChat-T correctly identified the event where Mr. Bean reached out to touch his desk mate's table, but it incorrectly explained the true reason for this action, which was \"to cover up the fact that he was copying his desk mate's exam by pretending to wipe dust off the desk.\"" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "23" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.101, + 0.49, + 0.427 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.507, + 0.101, + 0.824, + 0.426 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.23, + 0.436, + 0.766, + 0.452 + ], + "angle": 0, + "content": "Figure 6: More qualitative comparisons in temporal grounding & long video QA." + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.465, + 0.491, + 0.618 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.507, + 0.465, + 0.824, + 0.619 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.228, + 0.63, + 0.767, + 0.646 + ], + "angle": 0, + "content": "Figure 7: More qualitative comparisons in temporal grounding & short video QA." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.671, + 0.828, + 0.743 + ], + "angle": 0, + "content": "Due to the preponderance of single-turn, perceptual questions in our training data and the lack of multi-step reasoning data with complex logic, our model struggles to handle more challenging scenarios that demand intricate logical reasoning. To address this limitation, we propose constructing data in a chain-of-thought format to guide the model through multi-step reasoning, enabling it to delve deeper into the underlying motivations and causal relationships within a video." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "24" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.205, + 0.49, + 0.395 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.507, + 0.205, + 0.824, + 0.395 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.245, + 0.405, + 0.752, + 0.421 + ], + "angle": 0, + "content": "Figure 8: More qualitative comparisons in temporal grounding & captioning." + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.638, + 0.48, + 0.778 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.516, + 0.638, + 0.822, + 0.776 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.172, + 0.787, + 0.827, + 0.817 + ], + "angle": 0, + "content": "Figure 9: Examples of poor performance by VideoChat-T. While it accurately identifies the time of events, it struggles to answer questions that involve more complex logic." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "25" + } + ] +] \ No newline at end of file diff --git a/2025/TimeSuite_ Improving MLLMs for Long Video Understanding via Grounded Tuning/f48bb6a8-358b-46f9-aa7b-783937ea3be0_origin.pdf b/2025/TimeSuite_ Improving MLLMs for Long Video Understanding via Grounded Tuning/f48bb6a8-358b-46f9-aa7b-783937ea3be0_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..01b384a6ef8d4f133744111e6b1f4223c7f57560 --- /dev/null +++ b/2025/TimeSuite_ Improving MLLMs for Long Video Understanding via Grounded Tuning/f48bb6a8-358b-46f9-aa7b-783937ea3be0_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:729f39f07e09ff57a7f0878770743fcd3d64eaf43f02b1a20dbcaf97e1c97901 +size 2802554 diff --git a/2025/TimeSuite_ Improving MLLMs for Long Video Understanding via Grounded Tuning/full.md b/2025/TimeSuite_ Improving MLLMs for Long Video Understanding via Grounded Tuning/full.md new file mode 100644 index 0000000000000000000000000000000000000000..1840c0cb643e364b3aab9486d7b6af423577ad0e --- /dev/null +++ b/2025/TimeSuite_ Improving MLLMs for Long Video Understanding via Grounded Tuning/full.md @@ -0,0 +1,466 @@ +# TIMESUITE: IMPROVING MLLMS FOR LONG VIDEO UNDERSTANDING VIA GROUNDED TUNING + +Xiangyu Zeng $^{1,2}$ Kunchang Li $^{3,2}$ Chenting Wang $^{6,2}$ Xinhao Li $^{1,2}$ Tianxiang Jiang $^{5,2}$ + +Ziang Yan $^{4,2}$ Songze Li $^{7,2}$ Yansong Shi $^{5,2}$ Zhengrong Yue $^{6,2}$ Yi Wang $^{2,8}$ + +Yali Wang $^{3,2}$ Yu Qiao $^{2}$ Limin Wang $^{1,2,\dagger}$ + +$^{1}$ Nanjing University $^{2}$ Shanghai AI Laboratory $^{3}$ SIAT, Chinese Academy of Sciences $^{4}$ Zhejiang University +$^{5}$ University of Science and Technology of China $^{6}$ Shanghai Jiao Tong University $^{7}$ Fudan University +8 Shanghai Innovation Institute + +XiangyuZeng2001@outlook.com lmwang@nju.edu.cn + +![](images/ad602c480f2c6ee6f7e3e6d73c36fcef9e7c8e2a42909b47fdf01801f9b7c3d1.jpg) +Figure 1: VideoChat-T demonstrates high performance for both long-form video question answering and temporal grounding. Our TimeSuite presents a collection of new designs to enhance the long video understanding capability of MLLMs. It will implicitly endow the MLLM with ability of correctly attending the visual segments when generating answers, thus relieving the hallucinations. + +![](images/25a3ed0e3f3a3d2c5da756a7ce2b57e59174fb96aafb85ec3a7bc17191ca7251.jpg) + +# ABSTRACT + +Multimodal Large Language Models (MLLMs) have demonstrated impressive performance in short video understanding. However, understanding long-form videos still remains challenging for MLLMs. This paper proposes TimeSuite, a collection of new designs to adapt the existing short-form video MLLMs for long video understanding, including a simple yet efficient framework to process long video sequence, a high-quality video dataset for grounded tuning of MLLMs, and a carefully-designed instruction tuning task to explicitly incorporate the grounding supervision in the traditional QA format. Specifically, based on VideoChat, we propose our long-video MLLM, coined as VideoChat-T, by implementing a token shuffling to compress long video tokens and introducing Temporal Adaptive Position Encoding (TAPE) to enhance the temporal awareness of visual representation. Meanwhile, we introduce the TimePro, a comprehensive grounding-centric instruction tuning dataset composed of 9 tasks and 349k high-quality grounded annotations. 
Notably, we design a new instruction tuning task type, called Temporal Grounded Caption, to generate detailed video descriptions together with the corresponding timestamp predictions. This explicit temporal location prediction will guide the MLLM to correctly attend to the visual content when generating descriptions, and thus reduce the hallucination risk caused by the LLM. Experimental results demonstrate that our TimeSuite provides a successful solution to enhance the long video understanding capability of short-form MLLMs, achieving improvements of $5.6\%$ and $6.8\%$ on the benchmarks of Egoschema and VideoMME, respectively. In addition, VideoChat-T exhibits robust zero-shot temporal grounding capabilities, significantly outperforming the existing state-of-the-art MLLMs. After fine-tuning, it performs on par with the traditional supervised expert models. Our code and dataset are available at https://github.com/OpenGVLab/TimeSuite. + +# 1 INTRODUCTION + +Multimodal Large Language Models (MLLMs) have demonstrated impressive video understanding performance by following general human instructions to interpret the visual content (Li et al., 2023b; Zhang et al., 2023; Lin et al., 2023a; Jin et al., 2024; Wang et al., 2024e). However, these MLLMs still struggle with long video understanding, as a long video sequence may contain various dynamic actions and complex temporal relationships, making it difficult for MLLMs to effectively locate the key segments related to questions. When humans watch long videos, their attention is consciously focused on prominent segments, which may occur within a few seconds. NExT-GQA (Xiao et al., 2024) has also verified the relevance of temporal grounding for accurately answering video QA tasks. Therefore, a natural question arises: Can we enhance long video understanding by using temporal grounding as an auxiliary task? + +Previously, some works have made progress in the temporal grounding task by using general MLLMs. They often enhance the temporal grounding capability of video MLLMs by designing specialized modules and performing specific supervised fine-tuning (Ren et al., 2024; Huang et al., 2024a,b). However, these overly specialized designs significantly impair the general QA capabilities of video MLLMs, resulting in a great performance drop on the video QA task (as illustrated by TimeChat in Figure 1). Meanwhile, current research on long video understanding primarily focuses on architecture design, such as long-context LLMs (Liu et al., 2024a) and token compression (Song et al., 2024a). These methods can only capture holistic semantics in videos without the ability to localize fine-grained information, leading to poor performance on temporal grounding tasks (as illustrated by MovieChat in Figure 1). So far, it is still challenging to build a video MLLM that is good at both temporal grounding and long video QA. We argue that long video understanding could be assisted by explicitly performing temporal grounding, as grounding supervision enables the MLLM to establish a detailed correspondence between visual segments and fine-grained semantics. This fine-grained alignment would guide the MLLM to attend correctly to video segments when generating answers and thus relieve the hallucination risk caused by the LLM. + +Based on the above analysis, in this paper, we propose TimeSuite, a collection of new designs to improve the long video understanding capability of existing short-form MLLMs, with a focus on incorporating grounding supervision in the instruction tuning process.
First, to address the high computational cost caused by the excessive number of visual tokens in long videos, we propose a simple Token Shuffle scheme to compress visual tokens, allowing the LLM to process more frame inputs. We also propose TAPE to generate adaptive position encodings, enhancing the temporal awareness of visual representations. The proposed structure does not introduce overly complex proprietary designs, and it can be efficiently initialized with the parameters of short video MLLMs without damaging the original performance of the pre-trained MLLM. Second, to naturally incorporate the grounding ability into our MLLM while still preserving its original general QA capability, we design a new instruction tuning task, called Temporal Grounded Caption. This new task requires generating detailed segment-level descriptions with corresponding timestamp predictions. Tuning on this new task will not only endow the MLLM with the extra grounding ability but also enhance its original long video QA performance, thanks to the requirement of building correspondence between grounded segments and detailed captions. Finally, we collect a comprehensive grounding-centric instruction tuning dataset for post-training our designed MLLMs, which is composed of 349K high-quality annotations covering 9 tasks. Based on this new dataset, we are able to perform grounded tuning with detailed captions on our proposed MLLM (coined as VideoChat-T). + +We verify the effectiveness of the TimeSuite design through extensive experiments on the tasks of long video understanding and temporal grounding. VideoChat-T demonstrates a significant improvement in accuracy over the baseline for long video understanding, with a $5.6\%$ increase on Egoschema (Mangalam et al., 2023) and a $6.8\%$ increase on VideoMME (Fu et al., 2024). Additionally, VideoChat-T exhibits robust zero-shot temporal localization capabilities on Charades-STA (Gao et al., 2017) and QVHighlights (Lei et al., 2021a). Our VideoChat-T outperforms TimeChat, the state-of-the-art temporal grounding MLLM, by $50\%$ to $100\%$ across different metrics. After fine-tuning on the training sets of temporal grounding benchmarks, the performance of VideoChat-T is on par with the state-of-the-art supervised expert models. The experiments demonstrate that our VideoChat-T is the first end-to-end MLLM that is able to perform well on both temporal grounding and general video QA. In particular, we show that grounded tuning with explicit location prediction can facilitate long video understanding and relieve the hallucination risk. + +# 2 RELATED WORK + +Video MLLMs. With the advancement of open-sourced LLMs (Chiang et al., 2023; Touvron et al., 2023; Jiang et al., 2023), video MLLMs have emerged by utilizing projection bridges to link vision foundation models with LLMs (Li et al., 2023b; 2024b; Zhang et al., 2023; Li et al., 2024a). Although these methods perform well with a small number of frame inputs, they are limited by the training context length and meet significant challenges when processing long videos. Longer videos usually involve longer-range temporal relationships and more redundancy, making it difficult to extract key clues (Zhou et al., 2024).
Recently, several methods for handling long videos have been proposed, such as exploiting long-context LLMs (Liu et al., 2024a; Zhang et al., 2024b; Xue et al., 2024; Wang et al., 2024d) and token compression (Li et al., 2023d; Song et al., 2024a; Zhang et al., 2024a) to enable more visual inputs, as well as agents for task decomposition or retrieval (Fan et al., 2024; Wang et al., 2024c;h). MovieChat (Song et al., 2024a) supports more frames by applying short-term and long-term memory to merge similar visual tokens. Yet, learning objectives for long videos remain less explored, making it difficult to alleviate the frequent hallucination of LLMs in long-context reasoning. Our proposed TimeSuite leverages temporally-centric tasks to unlock the temporal perception potential of MLLMs, anchoring responses to the most relevant video segments.

Temporal Grounding. Temporal grounding is a fundamental capability in video understanding, associating semantics with specific clips via corresponding timestamps. Typical expert models (Lei et al., 2021b; Moon et al., 2023a;b; Lin et al., 2023b; Zeng et al., 2024) have been developed by formulating it as timestamp regression from visual inputs and user queries. Most existing video MLLMs fail to address it well compared with expert models, while some works remedy temporal grounding through specifically designed architectures and data (Huang et al., 2024a; Wang et al., 2024f; Li et al., 2024c; Wang et al., 2024g; Huang et al., 2024b; Qu et al., 2024). TimeChat (Ren et al., 2024) binds visual features of images with timestamps and uses a sliding window to handle variable token lengths; from the perspective of training data, it constructs an instruction-tuning dataset, TimeIT. Despite impressive improvements in temporal performance, these MLLMs still lag behind expert models and compromise general video dialogue capabilities. In this paper, we explore how to enhance the temporal grounding of MLLMs while preserving their original capabilities.

# 3 METHOD

In this section, we detail the proposed TimeSuite, a new collection of designs for improving short video MLLMs. Specifically, TimeSuite includes a long video modeling framework, a high-quality video dataset for grounded tuning, and a carefully-designed instruction tuning task. With this new design, we are able to adapt a short-form video MLLM and obtain significant performance improvements on two types of long video understanding tasks: traditional long video QA and temporal video grounding.

# 3.1 VIDEOCHAT-T

We first describe the architecture of our proposed long video modeling framework. Built upon VideoChat2 (Li et al., 2024b), we devise its long-video version, VideoChat-T. VideoChat-T is composed of a video backbone for extracting visual representations, a visual-language connector that compresses visual tokens and bridges the visual and language modalities, and an LLM that follows human instructions to interpret the video content.

The architecture of VideoChat-T is illustrated in Figure 2. Its workflow has three stages. In the first stage, long videos are evenly segmented into clips, and the clips are embedded by the Video Encoder and Q-Former (Li et al., 2023a). Then, to compress the visual token number and highlight crucial tokens, token shuffling is employed to merge adjacent tokens, and TAPE adds temporal adaptive positional encodings. Finally, the compressed video token sequence is fed to the LLM to generate accurate responses that adhere to user requirements. A schematic sketch of this workflow is given below.
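To make the three-stage data flow concrete, here is a minimal schematic sketch in PyTorch. Every submodule is a placeholder standing in for the actual VideoChat2-initialized component, and the composition (including the HF-style `inputs_embeds` call) is our own illustration under the notation of Section 3.1.1, not the released implementation.

```python
import torch
import torch.nn as nn

class VideoChatTPipeline(nn.Module):
    """Schematic three-stage workflow; all submodules are stand-ins."""
    def __init__(self, video_encoder, qformer, token_shuffle, tape, llm):
        super().__init__()
        self.video_encoder, self.qformer = video_encoder, qformer  # stage 1: clip embedding
        self.token_shuffle, self.tape = token_shuffle, tape        # stage 2: compression + TAPE
        self.llm = llm                                             # stage 3: response generation

    def forward(self, clips, query_embeds):
        v_q = self.qformer(self.video_encoder(clips))   # (B, L, C_q), L = K * N clip tokens
        v_l = self.token_shuffle(v_q)                   # (B, L/m, C_l) compressed tokens
        v_l = v_l + self.tape(v_q)                      # residual temporal position encoding
        llm_inputs = torch.cat([v_l, query_embeds], 1)  # video tokens concatenated with query
        return self.llm(inputs_embeds=llm_inputs)      # HF-style call, assumed interface
```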
![](images/076f20a316ea038edc9dee59b2e5486f4caff907815706c4663eda300ce62196.jpg)
Figure 2: Overall architecture of VideoChat-T. First, long videos are segmented into clips, which are transformed into feature embeddings by the video encoder and time-aware Q-Former. Next, all visual tokens undergo Token Shuffle to compress the overly long token sequence, and adaptive positional encodings are generated through TAPE. Finally, the long video tokens are concatenated with the user query and serve as the input of the LLM, thereby generating appropriate responses.

# 3.1.1 BACKBONE DESIGN

Video clip encoding. For a given long video, we perform uniform sampling (Wang et al., 2019) to obtain $K \times T$ frames. We divide these frames into $K$ video segments in chronological order, sampling $T$ frames from each segment. Next, we use the video encoder and its visual-linguistic connector (a Q-Former here) to encode each segment into $N$ tokens. After this processing, the entire video is encoded into a sequence of visual tokens, denoted by $\mathbf{V}_q \in \mathbb{R}^{L \times C_q}$, where $C_q$ is the dimension of the tokens output by the Q-Former and $L = K \times N$ is the total number of tokens for the entire video.

Large Language Model. Following previous research, visual cues are projected into the feature space of the LLM. The LLM acts as the interaction interface of the MLLM, processing multimodal inputs, parsing user instructions, and generating appropriate responses. To afford the processing of long video sequences, we need an efficient compression module between the visual encoder and the LLM.

# 3.1.2 VL-CONNECTOR: TOKEN SHUFFLE

The increased number of sampled frames in long videos leads to a larger number of encoded visual tokens, causing a significant rise in the computational complexity and memory consumption of LLMs. Therefore, it is crucial to keep the number of visual tokens within an acceptable range. Previous works have proposed various token compression schemes, such as clustering (Jin et al., 2024) and pooling (Huang et al., 2024b). However, clustering methods often struggle to maintain temporal consistency, and pooling methods usually incur a certain loss of overall performance.

To address this, we propose a simple token shuffling compression scheme that ensures the temporal consistency of video tokens before and after compression while avoiding excessive performance loss. Previous methods often used a projector to achieve dimensional conversion; however, projecting visual encoding vectors from a low to a high dimension does not increase information density. Therefore, we propose to rearrange multiple visual tokens along the channel dimension. Specifically, for the long video $\mathbf{V}_q = [v_q^1, v_q^2, \dots, v_q^L] \in \mathbb{R}^{L \times C_q}$, we concatenate $m$ adjacent tokens along the channel dimension to obtain the reshaped visual feature $\mathbf{V}_m = [v_m^1, v_m^2, \dots, v_m^{L/m}] \in \mathbb{R}^{\frac{L}{m} \times mC_q}$, where each merged token $v_m^i$ is represented as:

$$
v_m^i = \operatorname{Concat}\left(v_q^{(i-1)m+1}, v_q^{(i-1)m+2}, \dots, v_q^{im}\right), \quad \forall i = 1, 2, \dots, \frac{L}{m}.
$$

Next, a linear projection layer is applied to the merged visual feature $\mathbf{V}_m$, generating the visual token sequence $\mathbf{V}_l \in \mathbb{R}^{\frac{L}{m} \times C_l}$ fed into the LLM, where $C_l$ is the token channel dimension of the LLM. This scheme effectively reuses the projector of the base model by replicating the original linear layer parameters $m$ times along the channel dimension, achieving an initialization equivalent to mean pooling with a window length of $m$. This design avoids introducing additional randomly initialized parameters that might disturb the original model, thus preserving its original capabilities. Additionally, compared to directly using pooling, this method offers higher flexibility during fine-tuning to achieve better results (see the ablation study in Table 4). A minimal sketch of the scheme is shown below.
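The following is a minimal sketch of Token Shuffle and its mean-pooling-equivalent initialization under the notation above. The helper names, the explicit $1/m$ scaling (which makes the replicated weights reproduce the mean rather than the sum), and the stand-in dimensions are our own illustration; the paper describes the initialization only as replicating the base projector parameters $m$ times.

```python
import torch
import torch.nn as nn

def token_shuffle(V_q: torch.Tensor, m: int) -> torch.Tensor:
    """Concatenate m adjacent tokens along the channel dim: (B, L, C_q) -> (B, L/m, m*C_q)."""
    B, L, C_q = V_q.shape
    assert L % m == 0, "token count must be divisible by the merge length m"
    return V_q.reshape(B, L // m, m * C_q)

def init_shuffle_projector(base_proj: nn.Linear, m: int) -> nn.Linear:
    """Build the (m*C_q -> C_l) projector from the base model's (C_q -> C_l) projector.
    Tiling the weight m times with a 1/m factor makes the initial mapping equal to the
    base projector applied to the mean of the m merged tokens (i.e., mean pooling)."""
    C_l, C_q = base_proj.weight.shape
    proj = nn.Linear(m * C_q, C_l)
    with torch.no_grad():
        proj.weight.copy_(base_proj.weight.repeat(1, m) / m)
        proj.bias.copy_(base_proj.bias)
    return proj

# Usage with illustrative dimensions: merge m=4 adjacent tokens, project to the LLM space.
base = nn.Linear(768, 4096)              # stand-in for the pre-trained projector
proj = init_shuffle_projector(base, m=4)
V_q = torch.randn(1, 1536, 768)          # L = K*N visual tokens
V_l = proj(token_shuffle(V_q, m=4))      # (1, 384, 4096) tokens fed to the LLM
```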
# 3.1.3 TEMPORAL ADAPTIVE POSITION ENCODING

To bind temporal positional information to visual tokens, we propose an adapter called Temporal Adaptive Position Encoding (TAPE). Inspired by CPVT (Chu et al., 2021), TAPE uses the zero padding at both ends of a convolution as anchors and gradually propagates relative positional encoding information. Without adding any special time tokens, TAPE automatically perceives the relative temporal positions of the token sequence and generates temporal embeddings.

Specifically, the long video token sequence $\mathbf{V}_q$ is first compressed in the channel dimension by a linear layer and further compressed in sequence length by a pooling layer. Next, we use a U-Net-like structure composed of one-dimensional depthwise separable convolutions to progressively downsample the sequence, obtaining three one-dimensional temporal feature sequences with different resolutions. Subsequently, a convolution with a sufficiently long window is applied to the shortest temporal feature sequence, using the zero padding at both ends as anchors to encode the relative temporal position of each token in the sequence (Chu et al., 2021). Then, we progressively upsample and restore the temporal feature sequences from short to long, using residual connections to retain temporal features at different scales. Finally, the temporal feature sequence is restored to the same length as $\mathbf{V}_l$ and aligned in the channel dimension by a linear layer, yielding the temporal features $\mathbf{V}_t$ output by TAPE. For the detailed implementation of TAPE, please refer to Appendix A.

Our proposed TAPE is a plug-and-play module that can be easily integrated into the network via residual connections, adding temporal position information to video tokens without disrupting the distribution of other trainable parameters. With appropriate training strategies, TAPE effectively preserves the model's generalization capabilities and enhances its temporal sensitivity (see the ablation study in Table 3), which is important for the temporal grounding task.
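The anchoring effect of zero padding, which TAPE inherits from CPVT, can be seen in a small toy experiment (our own illustration, unrelated to the model code): a convolution over a sequence of identical tokens produces position-dependent outputs only near the padded borders, and stacked or long-window convolutions then propagate this positional signal inward.

```python
import torch
import torch.nn as nn

torch.manual_seed(0)
# 32 identical tokens: without padding there would be no positional signal at all.
conv = nn.Conv1d(in_channels=4, out_channels=4, kernel_size=9, padding=4)
tokens = torch.ones(1, 4, 32)
out = conv(tokens)

print(out[0, 0, :5])     # border outputs differ: the kernel overlaps the zero padding
print(out[0, 0, 14:19])  # interior outputs are identical (no position information yet)
```

Applying a window long enough to reach the borders, as TAPE does on its coarsest sequence, spreads the border-anchored signal across all positions.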
# 3.2 TIMEPRO: TEMPORAL GROUNDED INSTRUCTION DATA

Traditional temporal grounding datasets contain only monotonous ground truth, i.e., the start and end times of the target period. This data format works well for training classic expert models but can hardly unleash the potential of LLMs. Although several temporal grounding-centric datasets have been released for MLLM fine-tuning (Ren et al., 2024; Huang et al., 2024b), they still have deficiencies in data quantity, data quality, and task diversity. Thus, it is necessary to build a more comprehensive temporal dataset designed for the tuning of MLLMs.

Based on the criteria of diversity, length, and difficulty, we collect and clean several existing high-quality grounding-centric datasets (Ren et al., 2024; Huang et al., 2024a,b) and create two new datasets, resulting in TimePro. Compared to previous temporal grounding-centric datasets, TimePro offers a larger volume of data, a broader distribution, and higher task diversity, facilitating the learning of more generalizable temporal representations for MLLMs.

As shown in Figure 3(a), TimePro contains 9 task types from 15 datasets that are highly relevant to temporal grounding, comprising approximately 349K high-quality temporal grounding annotations. The 9 tasks are specified as follows. Temporal Video Grounding involves identifying the start and end times of video content based on a natural language query (Anne Hendricks et al., 2017; Oncescu et al., 2021; Zala et al., 2023). Dense Video Captioning requires detecting events within a video and providing corresponding timestamps and descriptions (Krishna et al., 2017; Huang et al., 2020; Zhou et al., 2018). Video Summarization focuses on determining key frames or clips in the form of timestamps rather than semantic summaries (Song et al., 2015; Gygli et al., 2014). Step Localization aims to segment and describe important steps in a long video (Tang et al., 2019; Zala et al., 2023). Transcribed Speech Generation predicts speech content and its timestamps from visual signals (Zellers et al., 2022). Reasoning Temporal Localization combines timestamps with explanatory answers (Huang et al., 2024b). Multi-format Temporal Grounding includes single-turn and multi-turn dialogues with diverse question types (Huang et al., 2024a). Highlight Detection identifies the most significant moments in a video based on a query (Lei et al., 2021a). Temporal Grounded Caption uses a brief scene title to output both the time period and a fine-grained description of the scene. More detailed information about TimePro is available in Appendix B. It should be noted that Temporal Grounded Caption is our newly-designed task that helps our model establish fine-grained correspondences between visual segments and linguistic descriptions.

![](images/083cd70a090e43a2861582ab50537e9775960f025a9ff9e923b85e63a3e03146.jpg)
(a) Tasks of TimePro

![](images/aface00e068ff3e7c38e20a5aa09cc30b2c5cf813a2898118d5b09daed21e814.jpg)
(b) Details of Temporal Grounded Caption

Figure 3: (a) The proposed temporal-centric instruction-tuning dataset, TimePro. This dataset contains approximately 349K high-quality and strongly temporally correlated data instances. (b) The proposed Temporal Grounded Caption fine-tuning data paradigm, which effectively reduces the occurrence of hallucinations. We employ a 4-stage processing pipeline to ensure the quality of the generated data.

# 3.3 TEMPORAL GROUNDED CAPTION TASK

Studies have shown that MLLMs often exhibit severe hallucinations when dealing with fine-grained perception tasks (Ji et al., 2023; Huang et al., 2023; Golkar et al., 2023). Since our VideoChat-T directly regresses the timestamps corresponding to text queries with the MLLM itself, it is more susceptible to hallucinations than methods that use external expert models as decoders (Wu et al., 2024).
By forcing the video MLLM to predict the event occurrence time and simultaneously describe the visual content as evidence, we attempt to anchor these queries to the relevant time segments within the video, rather than generating hallucinations originating from the LLM itself. Based on this analysis, we design the Temporal Grounded Caption task.

The top of Figure 3(b) illustrates the definition of Temporal Grounded Caption. We use a brief scene title of the video segment as the query, requiring the model to respond with the precise start and end times of the video segment and simultaneously provide a detailed description of that segment. While some content of the scene title may leak into the detailed caption response, most of the missing details must be described correctly by attending to the corresponding segment. Moreover, temporal grounding and detailed captioning serve as regularization tasks for each other, preventing the captioning model from hallucinating based on unrelated visual or linguistic contexts, and helping the grounding model regress the timestamps more accurately.

The process for collecting our Temporal Grounded Caption data is described at the bottom of Figure 3(b). In the first stage, we use a detailed caption dataset with timestamps as our data source. We remove data whose target grounding time intervals are too short or too long, and ensure that the scenes in the video are as diverse as possible. In the second stage, we use an LLM to summarize scene titles. To prevent excessive semantics of the video segments from leaking through the query to the MLLM, we try to retain the minimal subset of key features that suffices to distinguish the video segments. In the third stage, to avoid overly similar or identical content appearing at different temporal intervals in the video, we perform similarity filtering on the data annotations: based on the scene titles and video features, we calculate the similarity between different segments of the same video and remove data with excessively high similarity (a rough sketch of this step is given below). In the fourth stage, we randomly sample the generated data and manually assess its quality. Based on human feedback, we refine the threshold parameters for the data filtering used in the first three stages to yield the final Temporal Grounded Caption dataset. This new dataset plays an important role in our grounded tuning.
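As a rough sketch of the third-stage filtering, one can embed each annotated segment (e.g., by fusing its scene title and video features) and greedily drop segments that are too similar to a segment already kept. The embedding source, the greedy strategy, and the 0.9 threshold are our assumptions for illustration; the paper tunes its actual thresholds from human feedback in the fourth stage.

```python
import torch
import torch.nn.functional as F

def filter_similar_segments(seg_embs: torch.Tensor, threshold: float = 0.9) -> list:
    """Keep indices of segments from one video whose cosine similarity to every
    previously kept segment stays below `threshold`.

    seg_embs: (S, D) per-segment embeddings fusing scene titles and video features.
    """
    embs = F.normalize(seg_embs, dim=-1)
    kept = []
    for i in range(embs.shape[0]):
        if all(float(embs[i] @ embs[j]) < threshold for j in kept):
            kept.append(i)
    return kept

# e.g. drop near-duplicate scenes within a 12-segment video
kept = filter_similar_segments(torch.randn(12, 512))
```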
| Method | LLM Size | R@1 (IoU=0.3) | R@1 (IoU=0.5) | R@1 (IoU=0.7) | mAP | HIT@1 |
| --- | --- | --- | --- | --- | --- | --- |
| MovieChat (Song et al., 2024a) | 7B | 8.8 | 2.9 | 1.3 | 11.7 | 16.1 |
| GroundingGPT (Li et al., 2024c) | 7B | - | 29.6 | 11.9 | - | - |
| VTimeLLM (Huang et al., 2024a) | 7B | 51.0 | 27.5 | 11.4 | - | - |
| HawkEye (Wang et al., 2024f) | 7B | 50.6 | 31.4 | 14.5 | - | - |
| TimeChat (Ren et al., 2024) | 7B | - | 32.2 | 13.4 | 14.5 | 23.9 |
| ChatVTG (Qu et al., 2024) | 7B | 52.7 | 33.0 | 15.9 | - | - |
| VideoChat2 (Li et al., 2024b) | 7B | 9.6 | 3.4 | 1.4 | 13.4 | 18.6 |
| VideoChat-T | 7B | 69.9 (+60.3) | 48.7 (+45.3) | 24.0 (+22.6) | 26.5 (+13.1) | 54.1 (+35.5) |
| QD-DETR※ (FT) (Moon et al., 2023b) | - | - | 57.3 | 32.6 | 38.9 | 64.2 |
| UnLoc-L※ (FT) (Yan et al., 2023) | - | - | 60.8 | 38.4 | - | - |
| HawkEye (FT) (Wang et al., 2024f) | 7B | 72.5 | 58.3 | 28.8 | - | - |
| TimeChat (FT) (Ren et al., 2024) | 7B | - | 46.7 | 23.7 | 21.7 | 37.9 |
| VideoChat-T (FT) | 7B | 79.4 | 67.1 | 43.0 | 27.0 | 55.3 |
Table 1: Performance of VideoChat-T on temporal grounding and highlight detection tasks. The three R@1 columns report Charades-STA; the mAP and HIT@1 columns report QVHighlights. (FT) indicates a model fine-tuned on the training set of the evaluation benchmark. Classic supervised expert models are marked with ※.

# 4 EXPERIMENTS

# 4.1 IMPLEMENTATION DETAILS

Built upon VideoChat2, we use UMT-L (Li et al., 2023c) and Mistral-7B (Jiang et al., 2023) as the video encoder and LLM, respectively. Except for TAPE, all components are initialized from the pre-trained VideoChat2-Mistral model. For TAPE, we use random initialization, set the final linear layer to zero, and freeze the module during the first epoch of training. We set the frame count $T$ for each clip to 8, so the number of clips $K$ for a long video equals the total frame count divided by $T$. We fine-tune the model for 3 epochs using TimePro with 349K instances and a general QA task dataset with 82K instances. To ensure the stability of model training, we use 192-frame input for the first epoch. In the second and third epochs, we unfreeze TAPE and adjust the model input to 128 frames. All experiments are conducted on 16 A100 GPUs.

# 4.2 PERFORMANCE ON TEMPORAL GROUNDING

We evaluate our method on two commonly used temporal localization tasks, i.e., temporal grounding and highlight detection. The performance comparison between VideoChat-T and other models is shown in Table 1. Our method's zero-shot performance surpasses all previous LLM-based methods, and after fine-tuning, VideoChat-T even exceeds some classic expert models on the temporal grounding task.

Temporal Grounding. This task aims to identify the start and end timestamps of the video content described by a query sentence, using Charades-STA as the evaluation benchmark. VideoChat-T achieves an accuracy of 48.7 on the R@1 (IoU=0.5) metric, significantly surpassing the previous state-of-the-art MLLM, TimeChat, by 16.5 points; it even outperforms the version of TimeChat fine-tuned on the training set of the evaluation benchmark by $2.0\%$. Furthermore, after fine-tuning on the same training set, VideoChat-T reaches 67.1 R@1 at IoU=0.5, surpassing most state-of-the-art classic supervised expert models. (A reference definition of the temporal IoU behind these metrics is sketched at the end of this subsection.)

Highlight Detection. We use QVHighlights as the evaluation benchmark. For a given query, this task requires outputting all timestamps of highlight moments together with their corresponding saliency scores. Since there can be many sparse highlight moments in a video, this task requires finer-grained, frame-level video understanding. VideoChat-T achieves an mAP of 26.5, significantly surpassing the previous MLLM method TimeChat by 13.0 points, and also outperforms its fine-tuned version by 4.8 points. We observe that after fine-tuning on the corresponding training set, VideoChat-T shows almost no further improvement. This may be due to a bottleneck in the language representation of LLMs: the task requires outputting a (timestamp, saliency score) pair for each highlight moment, and a video may contain dozens of discrete highlight moments, making it challenging for the model to respond correctly with dozens to hundreds of numbers in language format. Precise numerical saliency score output is very difficult for LLMs, and VideoChat-T can only respond well to queries with fewer highlight moments. Due to their task-specific architectural designs, classic supervised expert models have a natural advantage in handling such tasks, and VideoChat-T still has a performance gap compared to expert models.
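For reference, temporal R@1 at an IoU threshold counts a top-1 prediction as correct when the intersection-over-union between the predicted and ground-truth intervals reaches the threshold. The following is a standard definition of temporal IoU, not the benchmarks' official evaluation code:

```python
def temporal_iou(pred, gt):
    """IoU between two (start, end) intervals, in seconds."""
    (p_s, p_e), (g_s, g_e) = pred, gt
    inter = max(0.0, min(p_e, g_e) - max(p_s, g_s))
    union = (p_e - p_s) + (g_e - g_s) - inter
    return inter / union if union > 0 else 0.0

# A prediction of [10s, 20s] against ground truth [15s, 25s] has IoU 1/3,
# so it counts under R@1 (IoU=0.3) but not under R@1 (IoU=0.5).
assert abs(temporal_iou((10.0, 20.0), (15.0, 25.0)) - 1.0 / 3.0) < 1e-9
```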
| Method | LLM Size | Egoschema Subset | Egoschema Full | VideoMME w/o subs | VideoMME w/o subs (Long) | MVBench Avg |
| --- | --- | --- | --- | --- | --- | --- |
| VideoAgent (Wang et al., 2024c) | GPT-4 | 60.2 | 54.1 | - | - | - |
| VideoAgent (Fan et al., 2024) | GPT-4 | 62.8 | - | - | - | - |
| TimeChat (Ren et al., 2024) | 7B | - | 33.0 | 30.2 | 26.1 | 38.5 |
| LLaMA-VID (Li et al., 2023d) | 7B | - | 38.5 | - | - | 41.9 |
| MovieChat (Song et al., 2024a) | 7B | - | 53.5 | 38.2 | 33.4 | 55.1 |
| MovieChat+ (Song et al., 2024b) | 7B | - | 56.4 | - | - | - |
| Chat-UniVi (Jin et al., 2024) | 7B | - | - | 40.6 | 35.8 | - |
| VideoChat2 (Li et al., 2024b) | 7B | 63.6 | 54.4 | 39.5 | 33.2 | 60.4 |
| VideoChat-T | 7B | 68.4 (+4.8) | 60.0 (+5.6) | 46.3 (+6.8) | 41.9 (+8.7) | 59.9 (-0.5) |
Table 2: Performance of VideoChat-T and other methods on video question answering tasks. Egoschema and VideoMME are long video benchmarks; MVBench is a short video benchmark. By upgrading VideoChat2 with TimeSuite, VideoChat-T demonstrates significant improvements across multiple long video benchmarks.

# 4.3 PERFORMANCE ON GENERAL VIDEO QA

In addition to testing the grounding ability of VideoChat-T, we also verify its general video question answering performance. Following mainstream evaluation standards, we use both long video and short video QA to assess the general video understanding capability of VideoChat-T. Table 2 shows the performance of VideoChat-T on the video QA evaluation benchmarks.

Long Video QA. We use Egoschema (Mangalam et al., 2023) and VideoMME (Fu et al., 2024) to evaluate the long video capabilities of VideoChat-T. In conjunction with our proposed architectural improvements, we incrementally fine-tune VideoChat2 using only 432K data points. VideoChat-T demonstrates outstanding performance on Egoschema, achieving an accuracy of $68.4\%$ on the test subset and $60.0\%$ on the entire test set, improvements of $4.8\%$ and $5.6\%$ over VideoChat2, respectively. Additionally, on the VideoMME benchmark, VideoChat-T achieves an accuracy of $46.3\%$ by analyzing the visual content alone without subtitles, a $6.8\%$ improvement over VideoChat2; on the long video division of VideoMME, it achieves an accuracy of $41.9\%$, an $8.7\%$ improvement. The upgraded VideoChat-T thus demonstrates significant performance gains on long video QA benchmarks, indicating the potential of leveraging grounding-centric video tasks to enhance the temporal awareness of MLLMs and thereby further improve long video understanding.

Short Video QA. We use MVBench (Li et al., 2024b) to evaluate the general short video understanding capabilities of VideoChat-T. VideoChat-T achieves an overall average accuracy of $59.9\%$ on MVBench, a $0.5\%$ decrease compared to VideoChat2. It is important to note that achieving such minimal performance loss is challenging: according to previous experience in incremental learning (Van de Ven et al., 2022), models inevitably forget old knowledge while learning new knowledge. VideoChat2 is fine-tuned with 2M data, whereas VideoChat-T is fine-tuned with only 432K data, of which 349K annotations are temporal grounding centric, yet it incurs only a $0.5\%$ accuracy loss. Previous temporal MLLMs like TimeChat (Ren et al., 2024), although achieving strong temporal localization capabilities, yield much weaker general video QA capability, with an accuracy of only $38.5\%$ on MVBench. This demonstrates that the TimeSuite design endows the model with new capabilities while still preserving its original general video understanding. For a detailed analysis of the performance degradation on MVBench, please refer to Appendix F.2.

# 4.4 QUALITATIVE ANALYSIS

Figure 4 presents a qualitative comparison between our model and other methods. In the example on the left, VideoChat-T is capable of answering more complex long video reasoning questions.
Our model accurately identifies the temporal location of the "light a cigarette" event and determines the correct key clue, "the person in a white coat", based on the video content. This leads to the inference that "playing the piano very fast and pressing the keys very hard" are the true reasons. The example on the right demonstrates our model's fine-grained perception ability. The appearance of "money in the briefcase" is very brief, and most models easily overlook this detail. Thanks to its strong fine-grained perception ability, our model precisely captures this visual content.

![](images/35e92a20134204d271d52add199b9668212da945d3ff96706a6ebda7d2bd99cc.jpg)
Figure 4: Qualitative comparison between VideoChat-T and other methods. VideoChat-T not only possesses temporally fine-grained perception capabilities but can also perform accurate long video reasoning. Green text indicates correct answers, while red text indicates inappropriate answers.

![](images/97a395318cd819ea31c6f8a8f40d2462eee8825dbbf4fe4859d6dca452f7a506.jpg)
| Model | Egoschema Full | VideoMME w/o subs | Charades-STA R@1 (IoU=0.5) | QVHighlights HIT@1 |
| --- | --- | --- | --- | --- |
| VideoChat-T (Ours) | 60.0 | 46.3 | 48.7 | 54.1 |
| w/o TAPE | 59.1 | 45.9 | 47.1 | 50.4 |
| w/o frz | 59.0 | 45.2 | 52.4 | 53.7 |
Table 3: Performance results of the ablation study on TAPE. Here, w/o TAPE refers to removing our proposed TAPE, and w/o frz refers to not using the training scheme in which TAPE is frozen during the first epoch.
| Model | Egoschema Full | VideoMME w/o subs | Charades-STA R@1 (IoU=0.5) | QVHighlights HIT@1 |
| --- | --- | --- | --- | --- |
| VideoChat-T (Ours) | 60.0 | 46.3 | 48.7 | 54.1 |
| r/w pooling | 59.8 | 44.8 | 40.3 | 47.3 |
| r/w clustering | 59.5 | 45.0 | 39.8 | 40.1 |
| w/o init | 57.4 | 43.4 | 42.0 | 53.9 |
Table 4: Performance results of the ablation study on Token Shuffle. Here, r/w refers to replacing Token Shuffle with the corresponding component, and w/o init refers to removing the efficient initialization.

# 4.5 ABLATION STUDY

Role of TAPE. To verify the performance improvement brought by TAPE, we conduct ablation experiments, with results listed in Table 3. When TAPE is removed, the model's performance on long video understanding and temporal grounding benchmarks decreases: TAPE adaptively embeds positional encodings into video tokens, and its absence leads to a certain loss of temporal awareness. When we unfreeze TAPE in the first epoch, performance improves on the temporal grounding task but declines on long video QA. This is because TAPE is highly suited to tasks with strong temporal dependencies; if unfrozen too early, the model may become biased towards fitting temporal grounding tasks. Freezing TAPE during the first epoch allows the model to first learn a relatively generalized feature representation, thereby balancing performance across different tasks.

Effectiveness of Token Shuffle. To verify the effectiveness of Token Shuffle, we conduct ablation experiments, presented in Table 4. We compare Token Shuffle with conventional methods such as pooling and clustering, and also observe the results after removing the efficient initialization. When we replace Token Shuffle with pooling or clustering, the model's performance declines. The efficient initialization of the linear layer in Token Shuffle makes the module initially equivalent to average pooling, from which training gradually optimizes better solutions; our method is therefore inherently at least as strong as pooling. Clustering, on the other hand, often fails to maintain the spatial/temporal consistency of the video, leading to temporal confusion. When we remove the efficient initialization of the linear layer, the negative impact of random initialization severely damages the model's original performance.

Effect of TimePro. We conduct ablation studies to evaluate the effectiveness of the TimePro data components. As shown in Table 5, we gradually add subsets of TimePro and observe the model's performance changes across various temporal grounding-centric instruction-tuning data.
| Normal | TimeIT | TGC | HD | MTG | RTL | Egoschema Full | VideoMME w/o subs | Charades-STA R@1 (IoU=0.5) | QVHighlights HIT@1 |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| ✓ | | | | | | 56.6 | 42.6 | 8.0 | 24.4 |
| ✓ | ✓ | | | | | 57.8 | 43.6 | 32.2 | 25.2 |
| ✓ | ✓ | ✓ | | | | 58.3 | 44.0 | 39.1 | 33.9 |
| ✓ | ✓ | ✓ | ✓ | | | 59.8 | 44.9 | 41.9 | 43.8 |
| ✓ | ✓ | ✓ | ✓ | ✓ | | 60.0 | 45.1 | 45.8 | 48.3 |
| ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | 60.0 | 46.3 | 48.7 | 54.1 |
Table 5: Performance results of the ablation study on different components of TimePro, added cumulatively. We use 82K normal training data as the baseline. TimeIT refers to the training data with five task types from Ren et al. (2024), TGC refers to Temporal Grounded Caption, HD refers to Highlight Detection, MTG refers to Multi-format Temporal Grounding, and RTL refers to Reasoning Temporal Localization.

![](images/21867962bd38420a9c54f8c16c40cde0165a85826b166be4e57f7fc2c727e0f1.jpg)

![](images/312cd59dc4ad40f3e5a575aef589070bd88564e1070e8538a70ffe6a79829bd9.jpg)

![](images/af3929d838db2730e5cd5fd741a457c44f1e3db2810c9a3c5d45fa025cdb22aa.jpg)
Figure 5: Performance of VideoChat-T with varying input frame numbers. As the number of input frames increases, the performance of VideoChat-T shows an upward trend on both long video QA and temporal grounding tasks. Due to the extremely low temporal grounding performance of VideoChat2, its curve is omitted.

As we progressively add subsets of TimePro, not only does the model's performance on temporal grounding tasks show a stable and significant improvement, but we also observe a noticeable upward trend on long video benchmarks. This corroborates, to some extent, that temporal grounding-centric tasks have a positive impact on long video understanding.

Impact of frames. To investigate the impact of the input frame count on model performance, we conduct an ablation study. Figure 5 illustrates the scalability of our model's performance with respect to the input frame count. VideoChat-T demonstrates good stability as the input frame count varies, and its performance on long video QA and temporal grounding tasks improves as the frame count increases. In contrast, the baseline model, VideoChat2, exhibits catastrophic performance degradation when the frame count is significantly increased. As the input frame count grows, the number of visual encoding tokens grows linearly, and excessive visual token input imposes an additional computational burden on the temporal modeling of the LLM. TimeSuite mitigates this by employing Token Shuffle to reduce the number of tokens, ensuring stable operation of the model.

# 5 CONCLUSION

In this paper, we have introduced TimeSuite, a collection of new designs, from the perspectives of efficient architecture, high-quality data, and a new instruction tuning task, to achieve long video understanding by fine-tuning short video MLLMs with temporal grounding-centric data. We address the computational challenge of processing long videos by introducing Token Shuffle to compress visual tokens. We also propose TAPE for adaptive position encoding, enhancing the temporal awareness of visual representations. Additionally, our Temporal Grounded Caption training task drives MLLMs to build correspondences between grounded segments and detailed captions, while the TimePro dataset provides comprehensive instruction tuning data for learning more effective temporal perception. Experimental results demonstrate that VideoChat-T significantly improves long video understanding, with notable performance gains on Egoschema and VideoMME. Furthermore, VideoChat-T exhibits strong zero-shot temporal grounding capabilities, significantly outperforming previous MLLMs on temporal grounding. Overall, TimeSuite provides effective designs for short-form MLLMs to enhance their performance on temporal grounding and long video QA. We hope TimeSuite can provide insights into the design of long video MLLMs.
+ +# ACKNOWLEDGEMENT + +This work is supported by the National Key R&D Program of China (No. 2022ZD0160900), the Fundamental Research Funds for the Central Universities (No. 020214380119), Jiangsu Frontier Technology Research and Development Program (No. BF2024076), and the Collaborative Innovation Center of Novel Software Technology and Industrialization. + +# REFERENCES + +Lisa Anne Hendricks, Oliver Wang, Eli Shechtman, Josef Sivic, Trevor Darrell, and Bryan Russell. Localizing moments in video with natural language. In Proceedings of the IEEE international conference on computer vision, pp. 5803-5812, 2017. +Jinze Bai, Shuai Bai, Shusheng Yang, Shijie Wang, Sinan Tan, Peng Wang, Junyang Lin, Chang Zhou, and Jingren Zhou. Qwen-vl: A frontier large vision-language model with versatile abilities. arXiv preprint arXiv:2308.12966, 2023. +Lin Chen, Xin Wei, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Bin Lin, Zhenyu Tang, et al. Sharegpt4video: Improving video understanding and generation with better captions. arXiv preprint arXiv:2406.04325, 2024. +Wei-Lin Chiang, Zhuohan Li, Zi Lin, Ying Sheng, Zhanghao Wu, Hao Zhang, Lianmin Zheng, Siyuan Zhuang, Yonghao Zhuang, Joseph E Gonzalez, et al. Vicuna: An open-source chatbot impressing gpt-4 with $90\%$ chatgpt quality. See https://vicuna.lmsys.org (accessed 14 April 2023), 2(3):6, 2023. +Xiangxiang Chu, Zhi Tian, Bo Zhang, Xinlong Wang, and Chunhua Shen. Conditional positional encodings for vision transformers. arXiv preprint arXiv:2102.10882, 2021. +Yue Fan, Xiaojian Ma, Rujie Wu, Yuntao Du, Jiaqi Li, Zhi Gao, and Qing Li. Videoagent: A memory-augmented multimodal agent for video understanding. arXiv preprint arXiv:2403.11481, 2024. +Chaoyou Fu, Yuhan Dai, Yondong Luo, Lei Li, Shuhuai Ren, Renrui Zhang, Zihan Wang, Chenyu Zhou, Yunhang Shen, Mengdan Zhang, et al. Video-mme: The first-ever comprehensive evaluation benchmark of multi-modal llms in video analysis. arXiv preprint arXiv:2405.21075, 2024. +Jiyang Gao, Chen Sun, Zhenheng Yang, and Ram Nevatia. Tall: Temporal activity localization via language query. In Proceedings of the IEEE international conference on computer vision, pp. 5267-5275, 2017. +Siavash Golkar, Mariel Pettee, Michael Eickenberg, Alberto Bietti, Miles Cranmer, Geraud Krawezik, Francois Lanusse, Michael McCabe, Ruben Ohana, Liam Parker, et al. xval: A continuous number encoding for large language models. arXiv preprint arXiv:2310.02989, 2023. +Michael Gygli, Helmut Grabner, Hayko Riemenschneider, and Luc Van Gool. Creating summaries from user videos. In Computer Vision-ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part VII 13, pp. 505-520. Springer, 2014. +Bin Huang, Xin Wang, Hong Chen, Zihan Song, and Wenwu Zhu. Vtimellm: Empower llm to grasp video moments. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 14271-14280, 2024a. +De-An Huang, Shijia Liao, Subhashree Radhakrishnan, Hongxu Yin, Pavlo Molchanov, Zhiding Yu, and Jan Kautz. Lita: Language instructed temporal-localization assistant. arXiv preprint arXiv:2403.19046, 2024b. +Gabriel Huang, Bo Pang, Zhenhai Zhu, Clara Rivera, and Radu Soricut. Multimodal pretraining for dense video captioning. arXiv preprint arXiv:2011.11760, 2020. +Lei Huang, Weijiang Yu, Weitao Ma, Weihong Zhong, Zhangyin Feng, Haotian Wang, Qianglong Chen, Weihua Peng, Xiaocheng Feng, Bing Qin, et al. 
A survey on hallucination in large language models: Principles, taxonomy, challenges, and open questions. arXiv preprint arXiv:2311.05232, 2023. + +Ziwei Ji, Nayeon Lee, Rita Frieske, Tiezheng Yu, Dan Su, Yan Xu, Etsuko Ishii, Ye Jin Bang, Andrea Madotto, and Pascale Fung. Survey of hallucination in natural language generation. ACM Computing Surveys, 55(12):1-38, 2023. +Albert Q Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lucile Saulnier, et al. Mistral 7b. arXiv preprint arXiv:2310.06825, 2023. +Peng Jin, Ryuichi Takanobu, Wancai Zhang, Xiaochun Cao, and Li Yuan. Chat-univi: Unified visual representation empowers large language models with image and video understanding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 13700-13710, 2024. +Ranjay Krishna, Kenji Hata, Frederic Ren, Li Fei-Fei, and Juan Carlos Niebles. Dense-captioning events in videos. In Proceedings of the IEEE international conference on computer vision, pp. 706-715, 2017. +Jie Lei, Tamara L Berg, and Mohit Bansal. Detecting moments and highlights in videos via natural language queries. Advances in Neural Information Processing Systems, 34:11846-11858, 2021a. +Jie Lei, Tamara L Berg, and Mohit Bansal. Detecting moments and highlights in videos via natural language queries. Advances in Neural Information Processing Systems, 34:11846-11858, 2021b. +Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Peiyuan Zhang, Yanwei Li, Ziwei Liu, et al. Llava-onevision: Easy visual task transfer. arXiv preprint arXiv:2408.03326, 2024a. +Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. In International conference on machine learning, pp. 19730–19742. PMLR, 2023a. +KunChang Li, Yinan He, Yi Wang, Yizhuo Li, Wenhai Wang, Ping Luo, Yali Wang, Limin Wang, and Yu Qiao. Videochat: Chat-centric video understanding. arXiv preprint arXiv:2305.06355, 2023b. +Kunchang Li, Yali Wang, Yizhuo Li, Yi Wang, Yinan He, Limin Wang, and Yu Qiao. Unmasked teacher: Towards training-efficient video foundation models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 19948-19960, 2023c. +Kunchang Li, Yali Wang, Yinan He, Yizhuo Li, Yi Wang, Yi Liu, Zun Wang, Jilan Xu, Guo Chen, Ping Luo, et al. Mvbench: A comprehensive multi-modal video understanding benchmark. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 22195-22206, 2024b. +Yanwei Li, Chengyao Wang, and Jiaya Jia. Llama-vid: An image is worth 2 tokens in large language models. arXiv preprint arXiv:2311.17043, 2023d. +Zhaowei Li, Qi Xu, Dong Zhang, Hang Song, Yiqing Cai, Qi Qi, Ran Zhou, Junting Pan, Zefeng Li, Van Tu Vu, et al. Groundinggpt: Language enhanced multi-modal grounding model. CoRR, 2024c. +Bin Lin, Bin Zhu, Yang Ye, Munan Ning, Peng Jin, and Li Yuan. Video-llava: Learning united visual representation by alignment before projection. arXiv preprint arXiv:2311.10122, 2023a. +Kevin Qinghong Lin, Pengchuan Zhang, Joya Chen, Shraman Pramanick, Difei Gao, Alex Jinpeng Wang, Rui Yan, and Mike Zheng Shou. Univtg: Towards unified video-language temporal grounding. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 2794-2804, 2023b. +Hao Liu, Wilson Yan, Matei Zaharia, and Pieter Abbeel. 
World model on million-length video and language with ringattention. arXiv preprint arXiv:2402.08268, 2024a. +Ruyang Liu, Chen Li, Haoran Tang, Yixiao Ge, Ying Shan, and Ge Li. St-llm: Large language models are effective temporal learners. arXiv preprint arXiv:2404.00308, 2024b. + +Muhammad Maaz, Hanoona Rasheed, Salman Khan, and Fahad Shahbaz Khan. Video-chatgpt: Towards detailed video understanding via large vision and language models. arXiv preprint arXiv:2306.05424, 2023. +Karttikeya Mangalam, Raiymbek Akshulakov, and Jitendra Malik. Egoschema: A diagnostic benchmark for very long-form video language understanding. Advances in Neural Information Processing Systems, 36, 2023. +WonJun Moon, Sangeek Hyun, SuBeen Lee, and Jae-Pil Heo. Correlation-guided query-dependency calibration in video representation learning for temporal grounding. arXiv preprint arXiv:2311.08835, 2023a. +WonJun Moon, Sangeek Hyun, SangUk Park, Dongchan Park, and Jae-Pil Heo. Query-dependent video representation for moment retrieval and highlight detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 23023-23033, 2023b. +Andreea-Maria Oncescu, Joao F Henriques, Yang Liu, Andrew Zisserman, and Samuel Albanie. Queryd: A video dataset with high-quality text and audio narrations. In ICASSP 2021-2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 2265-2269. IEEE, 2021. +Mengxue Qu, Xiaodong Chen, Wu Liu, Alicia Li, and Yao Zhao. Chatvtg: Video temporal grounding via chat with video dialogue large language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 1847-1856, 2024. +Shuhuai Ren, Linli Yao, Shicheng Li, Xu Sun, and Lu Hou. Timechat: A time-sensitive multimodal large language model for long video understanding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 14313-14323, 2024. +Share. Sharegemini: Scaling up video caption data for multimodal large language models, June 2024. URL https://github.com/Share14/ShareGemini. +Enxin Song, Wenhao Chai, Guanhong Wang, Yucheng Zhang, Haoyang Zhou, Feiyang Wu, Haozhe Chi, Xun Guo, Tian Ye, Yanting Zhang, et al. Moviechat: From dense token to sparse memory for long video understanding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 18221-18232, 2024a. +Enxin Song, Wenhao Chai, Tian Ye, Jenq-Neng Hwang, Xi Li, and Gaoang Wang. Moviechat+: Question-aware sparse memory for long video question answering. arXiv preprint arXiv:2404.17176, 2024b. +Yale Song, Jordi Vallmitjana, Amanda Stent, and Alejandro Jaime. Tvsum: Summarizing web videos using titles. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 5179-5187, 2015. +Yansong Tang, Dajun Ding, Yongming Rao, Yu Zheng, Danyang Zhang, Lili Zhao, Jiwen Lu, and Jie Zhou. Coin: A large-scale dataset for comprehensive instructional video analysis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 1207-1216, 2019. +Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, et al. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971, 2023. +Gido M Van de Ven, Tinne Tuytelaars, and Andreas S Tolias. Three types of incremental learning. Nature Machine Intelligence, 4(12):1185-1197, 2022. 
+Limin Wang, Yuanjun Xiong, Zhe Wang, Yu Qiao, Dahua Lin, Xiaou Tang, and Luc Van Gool. Temporal segment networks for action recognition in videos. IEEE Trans. Pattern Anal. Mach. Intell., 41(11):2740-2755, 2019. +Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024a. + +Wenhai Wang, Zhe Chen, Xiaokang Chen, Jiannan Wu, Xizhou Zhu, Gang Zeng, Ping Luo, Tong Lu, Jie Zhou, Yu Qiao, et al. Visionllm: Large language model is also an open-ended decoder for vision-centric tasks. Advances in Neural Information Processing Systems, 36, 2024b. +Xiaohan Wang, Yuhui Zhang, Orr Zohar, and Serena Yeung-Levy. Videoagent: Long-form video understanding with large language model as agent. arXiv preprint arXiv:2403.10517, 2024c. +Xidong Wang, Dingjie Song, Shunian Chen, Chen Zhang, and Benyou Wang. Longllava: Scaling multi-modal llms to 1000 images efficiently via hybrid architecture. arXiv preprint arXiv:2409.02889, 2024d. +Yi Wang, Kunchang Li, Xinhao Li, Jiashuo Yu, Yinan He, Guo Chen, Baoqi Pei, Rongkun Zheng, Jilan Xu, Zun Wang, et al. Internvideo2: Scaling video foundation models for multimodal video understanding. arXiv preprint arXiv:2403.15377, 2024e. +Yueqian Wang, Xiaojun Meng, Jianxin Liang, Yuxuan Wang, Qun Liu, and Dongyan Zhao. Hawkeye: Training video-text llms for grounding text in videos. arXiv preprint arXiv:2403.10228, 2024f. +Yuxuan Wang, Yueqian Wang, Pengfei Wu, Jianxin Liang, Dongyan Zhao, Yang Liu, and Zilong Zheng. Efficient temporal extrapolation of multimodal large language models with temporal grounding bridge. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pp. 9972-9987, 2024g. +Ziyang Wang, Shoubin Yu, Elias Stengel-Eskin, Jaehong Yoon, Feng Cheng, Gedas Bertasius, and Mohit Bansal. Videotree: Adaptive tree-based video representation for llm reasoning on long videos. arXiv preprint arXiv:2405.19209, 2024h. +Jiannan Wu, Muyan Zhong, Sen Xing, Zeqiang Lai, Zhaoyang Liu, Wenhai Wang, Zhe Chen, Xizhou Zhu, Lewei Lu, Tong Lu, et al. Visionllm v2: An end-to-end generalist multimodal large language model for hundreds of vision-language tasks. arXiv preprint arXiv:2406.08394, 2024. +Junbin Xiao, Angela Yao, Yicong Li, and Tat-Seng Chua. Can i trust your answer? visually grounded video question answering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 13204-13214, 2024. +Fuzhao Xue, Yukang Chen, Dacheng Li, Qinghao Hu, Ligeng Zhu, Xiuyu Li, Yunhao Fang, Haotian Tang, Shang Yang, Zhijian Liu, et al. Longvila: Scaling long-context visual language models for long videos. arXiv preprint arXiv:2408.10188, 2024. +Shen Yan, Xuehan Xiong, Arsha Nagrani, Anurag Arnab, Zhonghao Wang, Weina Ge, David Ross, and Cordelia Schmid. Unloc: A unified framework for video localization tasks. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 13623-13633, 2023. +En Yu, Liang Zhao, Yana Wei, Jinrong Yang, Dongming Wu, Lingyu Kong, Haoran Wei, Tiancai Wang, Zheng Ge, Xiangyu Zhang, et al. Merlin: Empowering multimodal llms with foresight minds. arXiv preprint arXiv:2312.00589, 2023. +Abhay Zala, Jaemin Cho, Satwik Kottur, Xilun Chen, Barlas Oguz, Yashar Mehdad, and Mohit Bansal. Hierarchical video-moment retrieval and step-captioning. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 23056-23065, 2023. +Rowan Zellers, Jiasen Lu, Ximing Lu, Youngjae Yu, Yanpeng Zhao, Mohammadreza Salehi, Aditya Kusupati, Jack Hessel, Ali Farhadi, and Yejin Choi. Merlot reserve: Neural script knowledge through vision and language and sound. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 16375-16387, 2022. +Xiangyu Zeng, Mingzhu Xu, Yijun Hu, Haoyu Tang, Yupeng Hu, and Liqiang Nie. Adaptive edge-aware semantic interaction network for salient object detection in optical remote sensing images. IEEE Transactions on Geoscience and Remote Sensing, 2023. +Yingsen Zeng, Yujie Zhong, Chengjian Feng, and Lin Ma. Unimd: Towards unifying moment retrieval and temporal action detection. arXiv preprint arXiv:2404.04933, 2024. + +Hang Zhang, Xin Li, and Lidong Bing. Video-llama: An instruction-tuned audio-visual language model for video understanding. arXiv preprint arXiv:2306.02858, 2023. + +Haoji Zhang, Yiqin Wang, Yansong Tang, Yong Liu, Jiashi Feng, Jifeng Dai, and Xiaojie Jin. Flash-vstream: Memory-based real-time understanding for long video streams. arXiv preprint arXiv:2406.08085, 2024a. + +Peiyuan Zhang, Kaichen Zhang, Bo Li, Guangtao Zeng, Jingkang Yang, Yuanhan Zhang, Ziyue Wang, Haoran Tan, Chunyuan Li, and Ziwei Liu. Long context transfer from language to vision. arXiv preprint arXiv:2406.16852, 2024b. + +Junjie Zhou, Yan Shu, Bo Zhao, Boya Wu, Shitao Xiao, Xi Yang, Yongping Xiong, Bo Zhang, Tiejun Huang, and Zheng Liu. Mlvu: A comprehensive benchmark for multi-task long video understanding. arXiv preprint arXiv:2406.04264, 2024. + +Luowei Zhou, Chenliang Xu, and Jason Corso. Towards automatic learning of procedures from web instructional videos. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 32, 2018. + +# A IMPLEMENTATION OF TAPE + +# Algorithm 1 PyTorch snippet of TAPE. 
```python
import torch
import torch.nn as nn


class TransposeLayerNorm(nn.Module):
    """LayerNorm over the channel dim of (B, C, L) feature maps.
    Referenced by the original snippet but not defined there; this minimal
    definition is included so the module is self-contained."""

    def __init__(self, dim):
        super().__init__()
        self.norm = nn.LayerNorm(dim)

    def forward(self, x):
        return self.norm(x.transpose(1, 2)).transpose(1, 2)


class TemporalAdapter(nn.Module):
    def __init__(self, merge_len, clip_num, input_dim, mid_dim, output_dim, sample_rate):
        super().__init__()
        self.AvgPool = nn.AvgPool1d(merge_len, stride=merge_len)
        self.upsample = nn.Upsample(scale_factor=sample_rate)
        self.linear_input = nn.Linear(input_dim, mid_dim)
        self.linear_output = nn.Linear(mid_dim, output_dim)
        # Zero-init the output layer so TAPE starts as an identity residual.
        nn.init.constant_(self.linear_output.weight, 0)
        nn.init.constant_(self.linear_output.bias, 0)
        self.Downsample_Depthwise_Separable_Conv1 = nn.Sequential(
            nn.Conv1d(mid_dim, mid_dim, merge_len * 2 + 1, stride=sample_rate,
                      padding=merge_len, groups=mid_dim),
            nn.Conv1d(mid_dim, mid_dim, 1),
            TransposeLayerNorm(mid_dim),
            nn.GELU(),
        )
        self.Downsample_Depthwise_Separable_Conv2 = nn.Sequential(
            nn.Conv1d(mid_dim, mid_dim, merge_len * 2 + 1, stride=sample_rate,
                      padding=merge_len, groups=mid_dim),
            nn.Conv1d(mid_dim, mid_dim, 1),
            TransposeLayerNorm(mid_dim),
            nn.GELU(),
        )
        # Long-window convolution on the coarsest sequence; its zero padding
        # provides the positional anchors at both ends.
        self.fc = nn.Sequential(
            nn.Conv1d(mid_dim, mid_dim, clip_num + 1, stride=1, padding=clip_num // 2),
            TransposeLayerNorm(mid_dim),
            nn.GELU(),
        )
        self.Conv2 = nn.Sequential(
            nn.Conv1d(mid_dim, mid_dim, merge_len + 1, stride=1,
                      padding=merge_len // 2, groups=mid_dim),
            nn.Conv1d(mid_dim, mid_dim, 1),
            TransposeLayerNorm(mid_dim),
            nn.GELU(),
        )
        self.Conv1 = nn.Sequential(
            nn.Conv1d(mid_dim, mid_dim, merge_len + 1, stride=1,
                      padding=merge_len // 2, groups=mid_dim),
            nn.Conv1d(mid_dim, mid_dim, 1),
            TransposeLayerNorm(mid_dim),
            nn.GELU(),
        )

    def forward(self, input_tokens):
        # (B, L, C) -> (B, mid_dim, L): compress channels, move them first for Conv1d
        time_ad = self.linear_input(input_tokens).transpose(1, 2)
        # Match the post-shuffle sequence length L / merge_len
        time_ad1 = self.AvgPool(time_ad)
        # U-Net-like downsampling to two coarser temporal resolutions
        time_ad2 = self.Downsample_Depthwise_Separable_Conv1(time_ad1)
        time_ad3 = self.Downsample_Depthwise_Separable_Conv2(time_ad2)
        # Encode relative positions on the shortest sequence via the anchored conv
        time_ad3 = self.fc(time_ad3)
        # Progressive upsampling with residual connections across scales
        time_ad2 = self.upsample(time_ad3) + time_ad2
        time_ad2 = self.Conv2(time_ad2)
        time_ad1 = self.upsample(time_ad2) + time_ad1
        time_ad1 = self.Conv1(time_ad1)
        # (B, mid_dim, L/merge_len) -> (B, L/merge_len, output_dim): align with the LLM tokens
        time_ad_out = self.linear_output(time_ad1.transpose(1, 2))
        return time_ad_out
```

Algorithm 1 details the implementation of TAPE in code form. Specifically, the long video token sequence input_tokens is first compressed in the channel dimension by a linear layer to obtain time_ad, and the sequence length is compressed through a pooling layer. Next, a U-Net-like structure composed of one-dimensional depthwise separable convolutions progressively downsamples the sequence, yielding three one-dimensional temporal feature sequences with different time resolutions, namely time_ad1, time_ad2, and time_ad3. Subsequently, a convolution with a sufficiently long window is applied to the shortest temporal feature sequence time_ad3, using the zero padding at both ends as anchors to encode the relative temporal position of each token in the sequence. Then, the temporal feature sequences are progressively upsampled from short to long, with residual connections preserving temporal features at different scales. Finally, the temporal feature sequence time_ad_out is restored to the same length as the video features after token shuffling and aligned in the channel dimension through a linear layer.
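A quick shape check of the adapter (our own example; the dimensions are illustrative stand-ins, with merge length 4 and 96 Q-Former tokens per clip as in Appendix D):

```python
# Hypothetical instantiation: 16 clips of 96 tokens each, C_q = 768, C_l = 4096.
tape = TemporalAdapter(merge_len=4, clip_num=16, input_dim=768,
                       mid_dim=256, output_dim=4096, sample_rate=4)
tokens = torch.randn(1, 16 * 96, 768)   # (B, L, C_q) long-video token sequence
pos = tape(tokens)                      # (B, L/4, 4096); added residually to V_l
print(pos.shape)                        # torch.Size([1, 384, 4096])
```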
# B INSTRUCTION-TUNING DATA

We fine-tuned VideoChat-T using 432K data instances, which include 349K instances from TimePro and 82K normal instances. All videos were sampled from existing open-source video datasets, with detailed information provided in Table 6.

| Set | Task | Source | Instance Num |
| --- | --- | --- | --- |
| TimePro | Temporal Video Grounding | DiDeMo | 32,944 |
| | | QuerYD | 14,602 |
| | | HiREST-grounding | 459 |
| | Dense Video Captioning | ActivityNet-Captions | 10,009 |
| | | ViTT | 5,086 |
| | | YouCook2 | 8,700 |
| | Video Summarization | TVSum | 50 |
| | | SumMe | 25 |
| | Step Localization and Captioning | COIN | 9,026 |
| | | HiREST-step | 459 |
| | Transcribed Speech Generation | YT-Temporal | 31,190 |
| | Reasoning Temporal Localization | ActivityNet-RTL | 33,557 |
| | Multi-format Temporal Grounding | InternVid-VTime | 100,000 |
| | Highlight Detection | ActivityNet-HL | 10,340 |
| | Temporal Grounded Caption | CosMo-TGC | 93,118 |
| Normal | Conversation | VideoChatGPT | 13,303 |
| | | VideoChat | 13,884 |
| | Video QA | EgoQA | 7,813 |
| | | MovieChat-QA | 808 |
| | Reasoning | STAR | 45,731 |
| | Caption | MovieChat-Caption | 808 |
Table 6: The complete instruction fine-tuning data used for training. We utilize approximately 432K data points in total, divided into 349K instances from TimePro and 82K instances of regular video data, covering 13 tasks across 21 datasets.

We evaluate the quality of the data from three perspectives: diversity, length, and difficulty. We strive to include different datasets for the various tasks, with the distribution of videos in the datasets as broad as possible. The length of the videos should be controlled within an appropriate range, as excessively long or short videos may pose challenges for training. Each query should clearly describe the video content of the target time segment and avoid corresponding to multiple time segments in the video. Based on these principles, we screen and integrate existing high-quality datasets, which significantly contributes to enhancing the model's temporal awareness capabilities.

TimePro encompasses a series of open-source temporal grounding datasets that we have integrated, cleaned, and refined, such as TimeIT (Ren et al., 2024), ANet-RTL (Huang et al., 2024b), and InternVid-VTime (Huang et al., 2024a). These high-quality open-source datasets have been experimentally validated by us. We also add two new self-made datasets, ANet-HL and CosMo-TGC.

Temporal Video Grounding. This task involves providing a natural language query and requires outputting the corresponding start and end times in the video. The datasets include DiDeMo (Anne Hendricks et al., 2017), QuerYD (Oncescu et al., 2021), and HiREST-grounding (Zala et al., 2023), aiming to achieve precise temporal localization during user interaction with natural language.

Dense Video Captioning. This task requires the model to detect a series of events occurring in a given video and output the corresponding timestamps and coarse-grained descriptions. The datasets for this part include ActivityNet-Captions (Krishna et al., 2017), ViTT (Huang et al., 2020), and YouCook2 (Zhou et al., 2018), which help the model learn the temporal relationships between different events within a video.

Video Summarization. The goal of this task is not to summarize at the semantic level of natural language, but to determine a set of compressed frames or clips in the form of timestamps, representing the most informative content in a given video. Our datasets include TVSum (Song et al., 2015) and SumMe (Gygli et al., 2014), which effectively combine the model's temporal perception capabilities with its semantic content inference abilities.

Step Localization and Captioning. This task differs from dense video captioning in that it is designed to segment and describe the important steps within a long video. We integrate two datasets, COIN (Tang et al., 2019) and HiREST-step (Zala et al., 2023), which help the model learn the procedural temporal logic of different steps within a single event.

Transcribed Speech Generation. The purpose of this task is to predict speech content and its corresponding start and end timestamps based on visual signals in the video. Built on the YT-Temporal (Zellers et al., 2022) dataset, this task can be viewed as a weakly supervised event localization and description task.

Reasoning Temporal Localization. The answers to the questions in this task include both timestamps and explanations. We use the ANet-RTL (Huang et al., 2024b) dataset as training data for this task.
By combining temporal localization and reasoning, we can more specifically enhance the model's temporal perception capabilities.

Multi-format Temporal Grounding. This task includes both single-turn and multi-turn dialogues with a variety of question types. We use the InternVid-VTime (Huang et al., 2024a) dataset for training this task. The broader range of task types and more diverse output formats effectively enhance the model's temporal generalization capabilities.

Highlight Detection. Unlike video summarization, this task identifies only the most salient moments of a video in response to a natural language query, without covering the entire scope of the original video (Lei et al., 2021a). We use a custom dataset, ANet-HL, derived from temporal localization data: we extract video segments between the start and end times of the target's appearance and use CLIP to calculate the similarity between each frame's scene and the target, which is converted into discrete saliency levels ranging from 1 to 5 at intervals of 0.5. This task effectively enhances the model's temporal perception of specific events.

Temporal Grounded Caption. This task uses scene titles as queries, requiring the model to output both the time segments in which the scenes appear and fine-grained captions for those segments. We use our custom dataset, CosMo-TGC. This task format, which combines temporal localization and semantic understanding, effectively prevents large language models from focusing on irrelevant video segments, thereby improving the quality of the model's responses.

We also use normal data comprising four tasks and six different data sources. These general data help prevent the model from overfitting to temporal grounding-related tasks during training, thereby preserving the model's original capabilities.

# C COMPUTATIONAL EFFICIENCY

By applying Token Shuffle, we further reduce the computational cost of VideoChat-T, giving it a significant computational advantage over high-performance models like LLaVA-OneVision (Li et al., 2024a) and Qwen2-VL (Wang et al., 2024a). Under the same settings, VideoChat-T uses only 3 tokens per frame, with FLOPs consumption at just $5.1\%$ of LLaVA-OneVision. Its inference time on a single A100 is only 0.63 seconds, reaching real-time response levels and making it highly suitable for applications requiring rapid response, such as online video understanding.
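As a back-of-the-envelope check of these numbers (our own arithmetic, using the per-frame token counts from Table 7):

```python
# Visual tokens handed to the LLM for a 128-frame input
frames = 128
ours = 3 * frames            # 384 tokens (Token Shuffle output)
llava_ov = 196 * frames      # 25,088 tokens
print(ours, llava_ov)
print(f"FLOPs ratio: {35.5 / 693.7:.1%}")  # ~5.1%, matching the figure quoted above
```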
| Method | Token num per frame | FLOPs (128 frames) | Inference time (128 frames, single A100 GPU) | Charades-STA IOU=0.5 | QVHighlight mAP | MVBench Avg | Egoschema Full | VideoMME Vision |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| Qwen2-VL (Wang et al., 2024a) | 138 | 929.8 T | Out of Memory | 15.0 | 13.0 | 67.0 | 66.7 | 63.3 |
| LLaVA-OneVision (Li et al., 2024a) | 196 | 693.7 T | 4.95 s | 7.3 | 14.98 | 56.7 | 60.1 | 58.2 |
| VideoChat-T (Ours) | 3 | 35.5 T | 0.63 s | 48.7 | 26.5 | 59.9 | 60.0 | 46.3 |
Table 7: Comparison of the computational efficiency and performance of VideoChat-T with other methods. Our approach achieves relatively impressive performance at extremely low computational cost.

In terms of performance, VideoChat-T significantly outperforms LLaVA-OneVision in temporal grounding tasks. It has a slight advantage on MVBench, performs comparably on Egoschema, but does worse on VideoMME. Given the substantial savings in computational resources with VideoChat-T, we consider the disadvantages on some datasets acceptable.

Moreover, our model's ability to maintain reasonable performance under high compression ratios suggests that the token embedding spaces of contemporary models may contain considerable feature redundancy. This observation presents a promising avenue for future research: efficient techniques for compressing or discarding redundant features could substantially reduce computational costs without sacrificing model performance, enabling longer-context reasoning.

# D DETAILS OF HYPERPARAMETERS
| config | epoch 1 | epochs 2 & 3 |
| --- | --- | --- |
| input frame | 192 | 128 |
| max text length | 1536 | 1024 |
| freeze TAPE | True | False |
| learning rate | 2e-5 | 1.5e-5 |
| input resolution | 224 | 224 |
| clip frame | 8 | 8 |
| merge length | 4 | 4 |
| QFormer token (per clip) | 96 | 96 |
| LoRA rank | 16 | 16 |
| LoRA alpha | 32 | 32 |
| LoRA dropout | 0.1 | 0.1 |
| batch size (per GPU) | 2 | 2 |
| optimizer | AdamW | AdamW |
| optimizer momentum | 0.9, 0.999 | 0.9, 0.999 |
| weight decay | 0.02 | 0.02 |
| learning rate schedule | cosine decay | cosine decay |
Table 8: Hyper-parameter settings during the training process of VideoChat-T.

Table 8 lists the hyperparameters used during the different epochs of training. In the first epoch, we used a larger number of input frames and froze the TAPE. At the beginning of the second epoch, we unfroze the TAPE and fixed the model's input to 128 frames. Following the settings of VideoChat2, we integrated LoRA modules into the LLM and applied FlashAttention to accelerate training.
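As a concrete illustration, the following sketch assembles the Table 8 optimization settings with Hugging Face PEFT. The `target_modules` list is an illustrative assumption, since the adapted projection layers depend on the base LLM's module names.

```python
import torch
from peft import LoraConfig  # Hugging Face PEFT

# LoRA settings from Table 8; which projection layers to adapt is an
# assumption here and depends on the base LLM.
lora_cfg = LoraConfig(r=16, lora_alpha=32, lora_dropout=0.1,
                      target_modules=["q_proj", "v_proj"])

def build_optimizer(model: torch.nn.Module, epoch: int, total_steps: int):
    """AdamW with the Table 8 settings: betas (0.9, 0.999), weight decay 0.02,
    cosine decay, and lr 2e-5 in epoch 1 vs. 1.5e-5 in epochs 2-3."""
    lr = 2e-5 if epoch == 1 else 1.5e-5
    opt = torch.optim.AdamW(
        (p for p in model.parameters() if p.requires_grad),
        lr=lr, betas=(0.9, 0.999), weight_decay=0.02,
    )
    sched = torch.optim.lr_scheduler.CosineAnnealingLR(opt, T_max=total_steps)
    return opt, sched
```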
# E FULL PERFORMANCES

| Model | LLM | Avg | AS | AP | AA | FA | UA | OE | OI | OS | MD | AL | ST | AC | MC | MA | SC | FP | CO | EN | ER | CI |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| VideoChatGPT (Maaz et al., 2023) | 7B | 32.7 | 23.5 | 26.0 | 62.0 | 22.5 | 26.5 | 54.0 | 28.0 | 40.0 | 23.0 | 20.0 | 31.0 | 30.5 | 25.5 | 39.5 | 48.5 | 29.0 | 33.0 | 29.5 | 26.0 | 35.5 |
| VideoLLaMA (Zhang et al., 2023) | 7B | 34.1 | 27.5 | 25.5 | 51.0 | 29.0 | 39.0 | 48.0 | 40.5 | 38.0 | 22.5 | 22.5 | 43.0 | 34.0 | 22.5 | 32.5 | 45.5 | 32.5 | 40.0 | 30.0 | 21.0 | 37.0 |
| VideoChat (Li et al., 2023b) | 7B | 35.5 | 33.5 | 26.5 | 56.0 | 33.5 | 40.5 | 53.0 | 40.5 | 30.0 | 25.5 | 27.0 | 48.5 | 35.0 | 20.5 | 42.5 | 46.0 | 26.5 | 41.0 | 23.5 | 23.5 | 36.0 |
| ST-LLM (Liu et al., 2024b) | 7B | 54.9 | 66.0 | 53.5 | 84.0 | 44.0 | 58.5 | 80.5 | 73.5 | 38.5 | 42.5 | 31.0 | 86.5 | 36.5 | 56.5 | 78.5 | 43.0 | 44.5 | 46.5 | 34.5 | 41.5 | 58.5 |
| VideoChat2 (Li et al., 2024b) | 7B | 60.4 | 75.5 | 58.0 | 83.5 | 50.5 | 60.5 | 87.5 | 74.5 | 45.0 | 47.5 | 44.0 | 82.5 | 37.0 | 64.5 | 87.5 | 51.0 | 66.5 | 47.0 | 35.0 | 37.0 | 72.5 |
| VideoChat-T | 7B | 59.9 | 83.5 | 68.5 | 80.5 | 44.0 | 61.0 | 71.0 | 84.0 | 35.5 | 48.0 | 56.5 | 87.0 | 46.0 | 56.5 | 78.0 | 49.5 | 59.0 | 46.0 | 37.0 | 40.0 | 66.5 |
Table 9: The full performance of VideoChat-T on MVBench. VideoChat-T still demonstrates strong performance, effectively preventing the catastrophic forgetting caused by incremental fine-tuning.

The performance of VideoChat-T on MVBench is shown in Table 9. Compared to VideoChat2, VideoChat-T incurs only a $0.5\%$ accuracy loss. This indicates that our method effectively preserves the capabilities of the base model, preventing catastrophic forgetting caused by incremental fine-tuning. For a detailed analysis of the performance degradation on MVBench, please refer to Appendix F.2. For the Action Localization (AL) task, which requires the model to determine the coarse-grained temporal position of events, test accuracy improved from $44.0\%$ to $56.5\%$. This indirectly confirms that our method significantly enhances the model's temporal awareness capabilities.
| Model | LLM size | Overall w/o subs (%) | Overall w subs (%) | Short w/o subs (%) | Short w subs (%) | Medium w/o subs (%) | Medium w subs (%) | Long w/o subs (%) | Long w subs (%) |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| ST-LLM (Liu et al., 2024b) | 7B | 37.9 | 42.3 | 45.7 | 48.4 | 36.8 | 41.4 | 31.3 | 36.9 |
| Video-LLaVA (Lin et al., 2023a) | 7B | 39.9 | 41.6 | 45.3 | 46.1 | 38.0 | 40.7 | 36.2 | 38.1 |
| ShareGPT4Video (Chen et al., 2024) | 8B | 39.9 | 43.6 | 48.3 | 53.6 | 36.3 | 39.3 | 35.0 | 37.9 |
| Chat-UniVi-v1.5 (Jin et al., 2024) | 7B | 40.6 | 45.9 | 45.7 | 51.2 | 40.3 | 44.6 | 35.8 | 41.8 |
| Qwen-VL-Chat (Bai et al., 2023) | 7B | 41.1 | 41.9 | 46.9 | 47.3 | 38.7 | 40.4 | 37.8 | 37.9 |
| ShareGemini (Share, 2024) | 7B | 43.2 | 47.9 | 49.1 | 52.8 | 41.3 | 47.3 | 39.1 | 43.4 |
| VideoChat2 (Li et al., 2024b) | 7B | 39.5 | 43.8 | 48.3 | 52.8 | 37.0 | 39.4 | 33.2 | 39.2 |
| VideoChat-T | 7B | 46.3 | 55.8 | 53.3 | 59.9 | 43.8 | 54.0 | 41.9 | 53.4 |
Table 10: The full performance of VideoChat-T on VideoMME. VideoChat-T achieved significant performance improvements, particularly on the long video subset.

The overall performance of our model on VideoMME is presented in Table 10. VideoChat-T achieved significant improvements under both evaluation settings of VideoMME, i.e., video only and video with subtitles. The improvements are particularly notable on the long video subset.

# F EXTRA ABLATION

# F.1 DOMAIN CORRELATION OF DATA
| Model | Charades-STA (R@1 IOU=0.5) | MVBench (avg) |
| --- | --- | --- |
| VideoChat-T | 48.7 | 59.9 |
| w/o STAR | 47.5 (-1.2) | 59.4 (-0.5) |
Table 11: The performance changes of the model after removing STAR. Although the video sources of STAR may have some domain correlation with those of Charades-STA and MVBench, the performance of our model is minimally affected by STAR.

We found that the video sources in the STAR dataset might have some domain correlation with the video sources in MVBench and Charades-STA. Therefore, we removed STAR from the training set while keeping the other training settings unchanged. The performance on the benchmarks whose video sources might be domain-correlated is shown in Table 11. The model's accuracy on Charades-STA (R@1 IOU=0.5) decreased by $1.2\%$, and the average accuracy on MVBench decreased by $0.5\%$. This indicates that the domain correlation of video sources did not significantly impact our model's performance. Notably, after removing STAR, our normal data volume was reduced to approximately 36K. This implies that, with sufficiently parameter-efficient initialization and appropriate training strategies, only a small amount of high-quality normal data is needed to retain the model's original capabilities.

# F.2 DEEPER INVESTIGATION OF THE PERFORMANCE DROP ON MVBENCH

We conducted a deeper investigation into the performance decline on MVBench. Through additional ablation experiments (shown in Table 12), we identified two main factors contributing to the performance drop.
| Method | post-FT data | data size | frame num | token num (per frame) | MVBench (Avg) |
| --- | --- | --- | --- | --- | --- |
| VideoChat2 | - | - | 16 | 12 | 60.4 |
| VideoChat2 | - | - | 128 | 12 | 42.1 |
| VideoChat-T (Common_Init) | - | - | 128 | 3 | 25.3 |
| VideoChat-T (Ours) | - | - | 128 | 3 | 48.6 |
| VideoChat-T (Ours) | TimePro+Normal (Ours) | 0.43M | 128 | 3 | 59.9 |
| VideoChat-T (Ours) | TimePro+FullVideoChat2 | 2M | 128 | 3 | 62.9 |
Table 12: Performance of VideoChat2 and VideoChat-T on MVBench under different settings.

Architectural Discrepancy: The original VideoChat2 model was designed to process only 16 frames, leading to a mismatch between the learned feature distribution and the architecture of VideoChat-T. As shown in the first two rows of Table 12, increasing the input frame number for VideoChat2 resulted in a significant performance drop (from 60.4 to 42.1). When initializing VideoChat-T directly from VideoChat2, performance was close to random (25.3) due to the newly introduced, randomly initialized layers. By applying efficient initialization to these new layers (sketched at the end of this subsection), we partially recovered the original capabilities of the model, bringing the MVBench performance of the untrained VideoChat-T back to 48.6, an improvement of 6.5 over the 128-frame VideoChat2. After further fine-tuning, the short-video processing capability of VideoChat-T improved significantly, reaching 59.9.

Fine-tuning Data Discrepancy: We fine-tuned VideoChat-T using only 432K data points, significantly fewer than the 2M non-grounded regular data used for training VideoChat2. The fine-tuning data for VideoChat2 primarily consisted of short videos of around ten seconds, which closely matched the length distribution of the MVBench evaluation videos and played a crucial role in its MVBench performance. To validate our hypothesis, we conducted additional experiments training our VideoChat-T model on TimePro plus the full VideoChat2 training data. VideoChat-T then showed a slight improvement on MVBench, achieving an accuracy of 62.9, an increase of 2.5 over the original VideoChat2.

Based on the above, we can identify the fundamental factors affecting a model's foundational generalization capabilities. When a model's architecture is adjusted, the learned original distribution may no longer match the new architecture, making efficient initialization of the new layers crucial. Features learned from the original dataset may be forgotten as parameters change, and fine-tuning on a more comprehensive and diverse dataset can restore and even further enhance performance.
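The efficient initialization referenced above reuses the base model's projector for the widened token-shuffle input: the original linear weights are replicated along the channel dimension so that, before any training, projecting m merged tokens is equivalent to projecting their mean. A minimal sketch, assuming a bias-free view of the projector (the 1/m scale is what makes the tiling mean-equivalent):

```python
import torch

def init_shuffle_projector(w_base: torch.Tensor, m: int) -> torch.Tensor:
    """Build the (D_llm, m*C) weight of the token-shuffle projector from the
    base model's (D_llm, C) projector weight.

    Tiling the weight m times along the input dimension and scaling by 1/m
    gives  W_new @ concat(x_1, ..., x_m) = W_base @ mean(x_1, ..., x_m),
    i.e. an initialization equivalent to mean pooling with window m, so no
    randomly initialized parameters disturb the pre-trained model.
    """
    return torch.cat([w_base / m] * m, dim=1)
```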
# F.3 ASSOCIATION BETWEEN PERFORMANCE AND MODEL DESIGN

| Method | FT Data | Charades-STA IOU=0.5 | QVHighlight mAP | MVBench Avg | Egoschema Full | VideoMME w/o subs |
| --- | --- | --- | --- | --- | --- | --- |
| TimeChat | TimeIT+Valley | 32.2 | 14.5 | 38.5 | 33.0 | 30.2 |
| TimeChat | TimePro+Normal | 34.2 | 16.3 | 41.6 | 38.9 | 33.4 |
| VideoChat-T | TimePro+Normal | 48.7 | 26.5 | 59.9 | 60.0 | 46.3 |
Table 13: Comparison of other model architectures trained on our dataset with our method, demonstrating the impact of overall model structure design.

To eliminate the influence of training data and auxiliary tasks, and to more clearly evaluate the association between performance and model design, we fine-tuned TimeChat using the full set of fine-tuning data and auxiliary tasks from VideoChat-T. Table 13 presents the resulting performance of TimeChat across five datasets. TimeChat fine-tuned with our data shows improvements across all benchmarks, yet its performance still lags significantly behind VideoChat-T. This indicates that an efficient fine-tuning architecture design and high-quality, diverse datasets are both essential and complementary.

# F.4 VALIDATION OF TRANSFERABILITY

To verify the robustness of our TimeSuite for other MLLMs, we transferred our method to Llava-OneVision (Li et al., 2024a). Table 14 shows the performance changes of Llava-OneVision after applying our TimeSuite.
| Method | Charades-STA IOU=0.5 | QVHighlight mAP | VideoMME w/o subs | MLVU Avg | MVBench Avg |
| --- | --- | --- | --- | --- | --- |
| Llava-OneVision (baseline) | 7.3 | 15.0 | 58.2 | 64.7 | 56.7 |
| Llava-OneVision-T (Ours) | 42.5 | 21.7 | 61.4 | 69.4 | 56.1 |
Table 14: Performance comparison of TimeSuite migrated to another MLLM. The application of our method shows a clear improvement in long video comprehension, demonstrating the transferability of our approach.

When we apply the full set of TimeSuite methods to Llava-OneVision, the model's performance on two different long-video evaluation benchmarks improves (+3.2 on VideoMME and +4.7 on MLVU), effectively demonstrating the robustness of our TimeSuite across different MLLMs.

# F.5 EXPLORATIONS OF DATA CONFIGURATIONS OF TIMEPRO
| Method | MVBench Avg | Egoschema Full | VideoMME w/o subs | Charades-STA IOU=0.5 | QVHighlight mAP |
| --- | --- | --- | --- | --- | --- |
| TimePro 615K + Normal 82K (old version) | 60.0 | 61.0 | 46.3 | 45.4 | 25.7 |
| TimePro 349K + Normal 82K (Ours) | 59.9 | 60.0 | 46.3 | 48.7 | 26.5 |
Table 15: Comparison of different versions of our proposed TimePro. More data does not necessarily lead to higher overall performance, highlighting the importance of data quality.

In the early version of TimePro, we employed datasets comprising 309K Multi-format Temporal Grounding instances, 150K Temporal Grounded Caption instances, and other data. Through extensive experimentation (see Table 15), we discovered that removing low-quality data while retaining high-quality instances could significantly reduce training time without compromising performance. Consequently, we pruned these two datasets to 100K and 93K instances, respectively. The data distribution presented in the paper represents the optimized and relatively balanced configuration we arrived at.

# G DISCUSSION

# G.1 CAN THE OVERALL PERFORMANCE OF MLLMS BE ENHANCED BY CONTINUOUSLY INTEGRATING EXPERT TASKS?

By appropriately fine-tuning the Multimodal Large Language Model (MLLM), we have developed a general MLLM with powerful zero-shot temporal grounding capabilities. After fine-tuning on the training sets of the evaluation benchmarks, its performance rivals current state-of-the-art supervised expert models. Based on these results, we can reasonably ask whether it is possible to internalize the capabilities of expert models, such as spatial grounding, tracking, and detection (Zeng et al., 2023), into the MLLM itself, without any external expert decoders, so as to enhance the comprehensive understanding performance of the MLLM and achieve a unified, generalist MLLM for multiple tasks.

Merlin (Yu et al., 2023) and VisionLLM (Wang et al., 2024b) have already attempted something similar, but their performance is limited by the reasoning capabilities and language-representation bottlenecks of the LLM, and a significant gap remains between them and task-specific expert models. We observed similar phenomena in our experiments. The temporal grounding task only requires outputting two timestamps, so its format is relatively simple and our model achieved good results. In contrast, the highlight detection task requires outputting multiple discrete timestamps and their corresponding saliency scores: the model must accurately predict dozens of numbers in language form to answer correctly, and our model performed well only on examples with fewer timestamps. How to simplify the complex output formats of expert tasks into the language representation of LLMs, or to design special processing procedures that simplify complex expert tasks, is therefore a question worth exploring.

Moreover, designing diverse data formats is also crucial for enhancing the expert capabilities of MLLMs. Compared to classic expert models, MLLMs have a natural advantage in task-type diversity and can improve a single capability through its many task variants.

For temporal grounding tasks, we found that enhancing task diversity significantly improves the model's temporal perception generalization. We can reasonably speculate that, given sufficiently diverse training task types, most tasks with relatively simple output formats can achieve results comparable to expert models through appropriate instruction fine-tuning.

Through the integration of diverse expert tasks and the optimization of language representations, MLLMs can achieve substantial improvements in their overall capabilities.
This allows them to effectively comprehend and address complex tasks, rivaling or even exceeding the performance of specialized expert models within specific domains. Looking ahead, MLLMs have the potential to evolve into highly versatile AI models, transcending traditional conversational and QA capabilities and handling a wide range of complex expert tasks across domains such as vision, language, and reasoning.

# G.2 WHY DOES TEMPORAL GROUNDING DATA LEAD TO ACCURACY LOSS IN SHORT-TERM VIDEOS?

We conducted ablation experiments using different combinations of temporal grounding data and regular data. The accuracy of VideoChat-T on MVBench after fine-tuning with various data combinations is shown in Table 16.
| FT Data | MVBench (Avg) |
| --- | --- |
| TimeIT | 54.7 |
| TimeIT+Normal | 55.3 |
| Normal | 56.1 |
| TimePro | 57.4 |
| TimePro+Normal (Ours) | 59.9 |
Table 16: Performance of VideoChat-T on MVBench under different fine-tuning data settings.

The diversity of grounding data formats has often been limited in the past, which can lead to overfitting on temporal grounding tasks and cause the model to lose its general question-answering capability. We compared the TimeIT dataset proposed in TimeChat (Ren et al., 2024) with our TimePro dataset on MVBench. As shown in Table 16, fine-tuning with only TimeIT resulted in the lowest accuracy, and the combined use of TimeIT+Normal also performed slightly worse than using Normal alone. This indicates that monotonous grounding data indeed damages the model's original performance (as shown in Figure 1 at the beginning of the paper, TimeChat loses some of its general question-answering capability after fine-tuning and outputs localization times for general questions).

In contrast, our TimePro dataset is diverse, encompassing 9 different task types from 15 datasets, which helps mitigate the generalization loss caused by homogeneous grounding data. Additionally, our dataset integrates grounding with various general tasks. For instance, Grounded Caption requires detailed descriptions of the corresponding video segments, while Reasoning Temporal Localization demands that the model reason about questions. This approach significantly enhances the model's generalization ability and minimizes the impact on its original capabilities (e.g., short-video accuracy). As demonstrated in Table 16, using only TimePro exceeds using Normal alone, and the combined use of TimePro and Normal far surpasses all other combinations. This also confirms that our TimePro effectively preserves the model's original performance.

Overall, using a single type of expert-task training data can easily lead to overfitting, resulting in a significant loss of the model's original capabilities. To preserve the model's foundational generalization abilities, it is essential to use diversified training data. Additionally, incorporating data of various types and distributions, such as text, images, and videos, can further enhance the model's generalization capabilities.

# G.3 COULD TRAINING THE MODEL ON BOTH TEMPORAL AND NON-TEMPORAL GROUNDING DATA MITIGATE PERFORMANCE LOSS IN SHORT-TERM VIDEOS?

To address this question, we conducted additional ablation experiments. By training VideoChat-T with different combinations of temporal and non-temporal grounding data, we can clearly observe the effects of both types of data on the model's performance. The results are shown in Table 17.
| FT Data | MVBench Avg | VideoMME w/o subs | Charades-STA R1@0.5 |
| --- | --- | --- | --- |
| Normal | 56.1 | 42.6 | 8.0 |
| TimePro | 57.4 | 46.0 | 45.6 |
| TimePro+Normal (Ours) | 59.9 | 46.3 | 48.7 |
Table 17: Performance comparison of VideoChat-T using different combinations of temporal grounding and non-temporal grounding data.

The combined use of TimePro+Normal gives VideoChat-T the highest performance in short video QA, long video QA, and temporal grounding. This not only demonstrates that using both temporal grounding and non-temporal grounding data can reduce performance loss on short videos, but also reveals that the two types of data are complementary across tasks: their distinct differences compensate for the model's shortcomings in different task perspectives and feature distributions, and using them together effectively enhances the model's overall capabilities.

# H CASE STUDY

# H.1 MORE QUALITATIVE ANALYSIS

To further analyze our model qualitatively, we provide three additional types of examples, covering long video QA, short video QA, and captioning tasks, all of which include temporal grounding.

More qualitative comparisons on long video QA are shown in Figure 6. VideoChat-T effectively handles various questions across different domains. By better perceiving the temporal relationships between events occurring in long videos, it can understand the detailed content of the entire video more accurately and deeply.

More qualitative comparisons on short video QA are shown in Figure 7. VideoChat-T effectively retains the original capabilities of the base model: through parameter-efficient initialization and appropriate training strategies, we minimize the damage to the base model's capabilities caused by the new architecture and data.

More qualitative comparisons on captioning are shown in Figure 8. Although VideoChat2 describes more local details than VideoChat-T in some scenarios, VideoChat-T focuses more on the series of temporal events, which aligns better with how humans typically describe videos.

# H.2 SHORTCOMINGS

We also conducted a qualitative analysis of the shortcomings of VideoChat-T through examples. As shown in Figure 9, VideoChat-T performs poorly on examples with complex logic. In the left example, although VideoChat-T accurately identified the timing of the event, it failed to fully explain the motivation behind the man opening the isolation door, which was "to fight the hijackers of the space elevator, seize the controller, and thus save the people in the entire space elevator." In the right example, VideoChat-T correctly identified the event in which Mr. Bean reached out to touch his desk mate's table, but it incorrectly explained the true reason for the action, which was "to cover up the fact that he was copying his desk mate's exam by pretending to wipe dust off the desk."

![](images/e52d5f38eabd8a9e955e523265302051c3b150ffd1cb3d8bde4b9df1f3a4805f.jpg)
Figure 6: More qualitative comparisons in temporal grounding & long video QA.

![](images/7a0c7dcb93ea05e6d55e7cd110f7f6731f43f222eef388781bf8d351bc78fe07.jpg)

![](images/1c8ccced94116db04aa8157061d63ee3b4fc32ba71d140812c5ca8a41962a7a9.jpg)
Figure 7: More qualitative comparisons in temporal grounding & short video QA.
+ +![](images/6ce4c7c9de45e48dbb9ea0a84be1a3868ed6615bd5840777217d3668c4e9131e.jpg) + +Due to the preponderance of single-turn, perceptual questions in our training data and the lack of multi-step reasoning data with complex logic, our model struggles to handle more challenging scenarios that demand intricate logical reasoning. To address this limitation, we propose constructing data in a chain-of-thought format to guide the model through multi-step reasoning, enabling it to delve deeper into the underlying motivations and causal relationships within a video. + +![](images/80a2a36824b3311ed40441b8c36bf0418f18e8c27ac646dea3885618de83ec4b.jpg) +Figure 8: More qualitative comparisons in temporal grounding & captioning. + +![](images/2778c174efb9e72fc415a2a83c66f0215f739af772831010d5f229d7adb68871.jpg) + +![](images/623ccbd51596cb694021ccac483f15bdda05f6f38d7caa766cd8ea5fdc15ce4a.jpg) +Figure 9: Examples of poor performance by VideoChat-T. While it accurately identifies the time of events, it struggles to answer questions that involve more complex logic. + +![](images/d038dda1b056cb1d2be4292749a2aebf0529a81ae05fae500e01321ca9708227.jpg) \ No newline at end of file diff --git a/2025/TimeSuite_ Improving MLLMs for Long Video Understanding via Grounded Tuning/images.zip b/2025/TimeSuite_ Improving MLLMs for Long Video Understanding via Grounded Tuning/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..58e0ceb83fbd12fb418e7d0586470aaa50bc3ec5 --- /dev/null +++ b/2025/TimeSuite_ Improving MLLMs for Long Video Understanding via Grounded Tuning/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b782f04dcd92cc31cd3f2b034c92e0c3cbedc5645deb51bc881d56598ccf1d85 +size 1168134 diff --git a/2025/TimeSuite_ Improving MLLMs for Long Video Understanding via Grounded Tuning/layout.json b/2025/TimeSuite_ Improving MLLMs for Long Video Understanding via Grounded Tuning/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..e1b006918e46dcf40290c49aacc4310dbbd52611 --- /dev/null +++ b/2025/TimeSuite_ Improving MLLMs for Long Video Understanding via Grounded Tuning/layout.json @@ -0,0 +1,12816 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 105, + 79, + 504, + 116 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 79, + 504, + 116 + ], + "spans": [ + { + "bbox": [ + 105, + 79, + 504, + 116 + ], + "type": "text", + "content": "TIMESUITE: IMPROVING MLLMS FOR LONG VIDEO UNDERSTANDING VIA GROUNDED TUNING" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 110, + 122, + 484, + 135 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 122, + 484, + 135 + ], + "spans": [ + { + "bbox": [ + 110, + 122, + 484, + 135 + ], + "type": "text", + "content": "Xiangyu Zeng" + }, + { + "bbox": [ + 110, + 122, + 484, + 135 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 110, + 122, + 484, + 135 + ], + "type": "text", + "content": " Kunchang Li" + }, + { + "bbox": [ + 110, + 122, + 484, + 135 + ], + "type": "inline_equation", + "content": "^{3,2}" + }, + { + "bbox": [ + 110, + 122, + 484, + 135 + ], + "type": "text", + "content": " Chenting Wang" + }, + { + "bbox": [ + 110, + 122, + 484, + 135 + ], + "type": "inline_equation", + "content": "^{6,2}" + }, + { + "bbox": [ + 110, + 122, + 484, + 135 + ], + "type": "text", + "content": " Xinhao Li" + }, + { + "bbox": [ + 110, + 122, + 484, + 135 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + 
"bbox": [ + 110, + 122, + 484, + 135 + ], + "type": "text", + "content": " Tianxiang Jiang" + }, + { + "bbox": [ + 110, + 122, + 484, + 135 + ], + "type": "inline_equation", + "content": "^{5,2}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 135, + 436, + 147 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 135, + 436, + 147 + ], + "spans": [ + { + "bbox": [ + 111, + 135, + 436, + 147 + ], + "type": "text", + "content": "Ziang Yan" + }, + { + "bbox": [ + 111, + 135, + 436, + 147 + ], + "type": "inline_equation", + "content": "^{4,2}" + }, + { + "bbox": [ + 111, + 135, + 436, + 147 + ], + "type": "text", + "content": " Songze Li" + }, + { + "bbox": [ + 111, + 135, + 436, + 147 + ], + "type": "inline_equation", + "content": "^{7,2}" + }, + { + "bbox": [ + 111, + 135, + 436, + 147 + ], + "type": "text", + "content": " Yansong Shi" + }, + { + "bbox": [ + 111, + 135, + 436, + 147 + ], + "type": "inline_equation", + "content": "^{5,2}" + }, + { + "bbox": [ + 111, + 135, + 436, + 147 + ], + "type": "text", + "content": " Zhengrong Yue" + }, + { + "bbox": [ + 111, + 135, + 436, + 147 + ], + "type": "inline_equation", + "content": "^{6,2}" + }, + { + "bbox": [ + 111, + 135, + 436, + 147 + ], + "type": "text", + "content": " Yi Wang" + }, + { + "bbox": [ + 111, + 135, + 436, + 147 + ], + "type": "inline_equation", + "content": "^{2,8}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 112, + 147, + 289, + 159 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 147, + 289, + 159 + ], + "spans": [ + { + "bbox": [ + 112, + 147, + 289, + 159 + ], + "type": "text", + "content": "Yali Wang" + }, + { + "bbox": [ + 112, + 147, + 289, + 159 + ], + "type": "inline_equation", + "content": "^{3,2}" + }, + { + "bbox": [ + 112, + 147, + 289, + 159 + ], + "type": "text", + "content": " Yu Qiao" + }, + { + "bbox": [ + 112, + 147, + 289, + 159 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 112, + 147, + 289, + 159 + ], + "type": "text", + "content": " Limin Wang" + }, + { + "bbox": [ + 112, + 147, + 289, + 159 + ], + "type": "inline_equation", + "content": "^{1,2,\\dagger}" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 112, + 160, + 517, + 194 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 112, + 160, + 517, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 160, + 517, + 172 + ], + "spans": [ + { + "bbox": [ + 112, + 160, + 517, + 172 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 112, + 160, + 517, + 172 + ], + "type": "text", + "content": "Nanjing University " + }, + { + "bbox": [ + 112, + 160, + 517, + 172 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 112, + 160, + 517, + 172 + ], + "type": "text", + "content": "Shanghai AI Laboratory " + }, + { + "bbox": [ + 112, + 160, + 517, + 172 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 112, + 160, + 517, + 172 + ], + "type": "text", + "content": "SIAT, Chinese Academy of Sciences " + }, + { + "bbox": [ + 112, + 160, + 517, + 172 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 112, + 160, + 517, + 172 + ], + "type": "text", + "content": "Zhejiang University" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 112, + 172, + 487, + 183 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 172, + 487, + 183 + ], + "spans": [ + { + "bbox": [ + 112, + 172, + 487, + 183 
+ ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 112, + 172, + 487, + 183 + ], + "type": "text", + "content": "University of Science and Technology of China " + }, + { + "bbox": [ + 112, + 172, + 487, + 183 + ], + "type": "inline_equation", + "content": "^{6}" + }, + { + "bbox": [ + 112, + 172, + 487, + 183 + ], + "type": "text", + "content": "Shanghai Jiao Tong University " + }, + { + "bbox": [ + 112, + 172, + 487, + 183 + ], + "type": "inline_equation", + "content": "^{7}" + }, + { + "bbox": [ + 112, + 172, + 487, + 183 + ], + "type": "text", + "content": "Fudan University" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 112, + 183, + 224, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 183, + 224, + 194 + ], + "spans": [ + { + "bbox": [ + 112, + 183, + 224, + 194 + ], + "type": "text", + "content": "8 Shanghai Innovation Institute" + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 111, + 197, + 386, + 209 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 197, + 386, + 209 + ], + "spans": [ + { + "bbox": [ + 111, + 197, + 386, + 209 + ], + "type": "text", + "content": "XiangyuZeng2001@outlook.com lmwang@nju.edu.cn" + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 106, + 222, + 272, + 349 + ], + "blocks": [ + { + "bbox": [ + 106, + 222, + 272, + 349 + ], + "lines": [ + { + "bbox": [ + 106, + 222, + 272, + 349 + ], + "spans": [ + { + "bbox": [ + 106, + 222, + 272, + 349 + ], + "type": "image", + "image_path": "ad602c480f2c6ee6f7e3e6d73c36fcef9e7c8e2a42909b47fdf01801f9b7c3d1.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 349, + 504, + 393 + ], + "lines": [ + { + "bbox": [ + 104, + 349, + 504, + 393 + ], + "spans": [ + { + "bbox": [ + 104, + 349, + 504, + 393 + ], + "type": "text", + "content": "Figure 1: VideoChat-T demonstrates high performance for both long-form video question answering and temporal grounding. Our TimeSuite presents a collection of new designs to enhance the long video understanding capability of MLLMs. It will implicitly endow the MLLM with ability of correctly attending the visual segments when generating answers, thus relieving the hallucinations." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 286, + 234, + 502, + 346 + ], + "blocks": [ + { + "bbox": [ + 286, + 234, + 502, + 346 + ], + "lines": [ + { + "bbox": [ + 286, + 234, + 502, + 346 + ], + "spans": [ + { + "bbox": [ + 286, + 234, + 502, + 346 + ], + "type": "image", + "image_path": "25a3ed0e3f3a3d2c5da756a7ce2b57e59174fb96aafb85ec3a7bc17191ca7251.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 276, + 406, + 334, + 417 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 406, + 334, + 417 + ], + "spans": [ + { + "bbox": [ + 276, + 406, + 334, + 417 + ], + "type": "text", + "content": "ABSTRACT" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 140, + 429, + 470, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 429, + 470, + 715 + ], + "spans": [ + { + "bbox": [ + 140, + 429, + 470, + 715 + ], + "type": "text", + "content": "Multimodal Large Language Models (MLLMs) have demonstrated impressive performance in short video understanding. 
However, understanding long-form videos still remains challenging for MLLMs. This paper proposes TimeSuite, a collection of new designs to adapt the existing short-form video MLLMs for long video understanding, including a simple yet efficient framework to process long video sequence, a high-quality video dataset for grounded tuning of MLLMs, and a carefully-designed instruction tuning task to explicitly incorporate the grounding supervision in the traditional QA format. Specifically, based on VideoChat, we propose our long-video MLLM, coined as VideoChat-T, by implementing a token shuffling to compress long video tokens and introducing Temporal Adaptive Position Encoding (TAPE) to enhance the temporal awareness of visual representation. Meanwhile, we introduce the TimePro, a comprehensive grounding-centric instruction tuning dataset composed of 9 tasks and 349k high-quality grounded annotations. Notably, we design a new instruction tuning task type, called Temporal Grounded Caption, to perform detailed video descriptions with the corresponding timestamps prediction. This explicit temporal location prediction will guide MLLM to correctly attend on the visual content when generating description, and thus reduce the hallucination risk caused by the LLMs. Experimental results demonstrate that our TimeSuite provides a successful solution to enhance the long video understanding capability of short-form MLLM, achieving improvement of " + }, + { + "bbox": [ + 140, + 429, + 470, + 715 + ], + "type": "inline_equation", + "content": "5.6\\%" + }, + { + "bbox": [ + 140, + 429, + 470, + 715 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 140, + 429, + 470, + 715 + ], + "type": "inline_equation", + "content": "6.8\\%" + }, + { + "bbox": [ + 140, + 429, + 470, + 715 + ], + "type": "text", + "content": " on the benchmarks of Egoschema and VideoMME, respectively. In addition, VideoChat-T exhibits robust zero-shot temporal grounding capabilities, significantly outperforming the existing state-of-the-art MLLMs. After fine-tuning, it performs on par with the traditional supervised expert models. Our code and dataset are available at https://github.com/OpenGVLab/TimeSuite." + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 121, + 720, + 251, + 731 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 720, + 251, + 731 + ], + "spans": [ + { + "bbox": [ + 121, + 720, + 251, + 731 + ], + "type": "inline_equation", + "content": "^\\dagger" + }, + { + "bbox": [ + 121, + 720, + 251, + 731 + ], + "type": "text", + "content": " denotes the corresponding author." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 81, + 206, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 81, + 206, + 94 + ], + "spans": [ + { + "bbox": [ + 106, + 81, + 206, + 94 + ], + "type": "text", + "content": "1 INTRODUCTION" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 110, + 506, + 222 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 110, + 506, + 222 + ], + "spans": [ + { + "bbox": [ + 104, + 110, + 506, + 222 + ], + "type": "text", + "content": "Multimodal Large Language Models (MLLMs) have demonstrated impressive video understanding performance by following the general human instructions to interpret the visual content (Li et al., 2023b; Zhang et al., 2023; Lin et al., 2023a; Jin et al., 2024; Wang et al., 2024e). However, these MLLMs still struggle in long video understanding, as a long video sequence may contain various dynamic actions and complex temporal relationships, making it difficult for MLLMs to effectively locate the key segments related to questions. When humans watch long videos, their attention is consciously focused on prominent segments, which may occur within a few seconds. NExT-GQA (Xiao et al., 2024) has also verified the relevance of temporal grounding for accurately answering video QA tasks. Therefore, a natural question arises: Can we enhance long video understanding by using temporal grounding as a auxiliary task?" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 226, + 506, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 226, + 506, + 392 + ], + "spans": [ + { + "bbox": [ + 104, + 226, + 506, + 392 + ], + "type": "text", + "content": "Previously, some works have made progress in temporal grounding task by using general MLLMs. They often enhance the temporal grounding capability of video MLLMs by designing specialized modules and perform specific supervised fine-tuning (Ren et al., 2024; Huang et al., 2024a,b). However, these overly specialized designs significantly impair the general QA capabilities of video MLLMs, resulting in great performance drop on the video QA task (as illustrated by TimeChat in Figure 1). Meanwhile, current research on long video understanding primarily focuses on architecture design, such as long-context LLMs (Liu et al., 2024a) and token compression (Song et al., 2024a). They can only capture holistic semantics in videos without the ability of localizing fine-grained information, leading to poor performance in temporal grounding tasks (as illustrated by MovieChat in Figure 1). So far, it is still challenging to build a video MLLM that is good at both tasks of temporal grounding and long video QA. We argue long video understanding could be assisted by explicitly performing temporal grounding, as grounding supervision enables MLLM to establish the detailed correspondence between the visual segments and fine-grained semantics. This fine-grained alignment would guide the MLLM to attend correctly video segments when generating answers and thus relieve the hallucination risk caused by the LLM." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 396, + 506, + 595 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 396, + 506, + 595 + ], + "spans": [ + { + "bbox": [ + 104, + 396, + 506, + 595 + ], + "type": "text", + "content": "Based on the above analysis, in this paper, we propose TimeSuite, a collection of new designs to improve the long video understanding capability of the existing short-form MLLMs, with a focus on incorporating grounding supervision in instruction tuning process. First, to address the high computational cost caused by the excessive number of visual tokens in long videos, we propose a simple Token Shuffle scheme to compress visual tokens, allowing the LLM to process more frame inputs. We also propose TAPE to generate adaptive position encodings, enhancing the temporal awareness of visual representations. The proposed structure does not introduce overly complex proprietary designs, which could be efficiently initialized with the parameters of short video MLLMs, without damaging the original performance of pre-trained MLLM. Second, to naturally incorporate the grounding ability into our MLLMs and yet still to preserve its original general QA capability, we design a new instruction tuning task, called Temporal Grounded Caption. This new task requires generating detailed segment-level description with corresponding timestamp prediction. Tuning on this new task will not only endow the MLLM with the extra grounding ability but also enhance its original long video QA performance, thanks to the requirement of building correspondence between grounded segments and detailed captions. Finally, we collect a comprehensive grounding-centric instruction tuning dataset for post-training our designed MLLMs, which is composed of 349K high-quality annotations covering 9 tasks. Based on this new dataset, we are able to perform grounded tuning with detailed captions on our proposed MLLMs (coined as VideoChat-T)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 600, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 600, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 600, + 506, + 733 + ], + "type": "text", + "content": "We verify the effectiveness of TimeSuite design through extensive experiments on the tasks of long video understanding and temporal grounding. VideoChat-T demonstrates a significant improvement in accuracy over baseline for long video understanding, with a " + }, + { + "bbox": [ + 104, + 600, + 506, + 733 + ], + "type": "inline_equation", + "content": "5.6\\%" + }, + { + "bbox": [ + 104, + 600, + 506, + 733 + ], + "type": "text", + "content": " increase on Egoschema (Mangalam et al., 2023) and a " + }, + { + "bbox": [ + 104, + 600, + 506, + 733 + ], + "type": "inline_equation", + "content": "6.8\\%" + }, + { + "bbox": [ + 104, + 600, + 506, + 733 + ], + "type": "text", + "content": " increase on VideoMME (Fu et al., 2024). Additionally, VideoChat-T exhibits robust zero-shot temporal localization capabilities on Charades-STA (Gao et al., 2017) and QVHighlights (Lei et al., 2021a). 
Our VideoChat-T outperforms the state-of-the-art temporal grounding MLLM of TimeChat from " + }, + { + "bbox": [ + 104, + 600, + 506, + 733 + ], + "type": "inline_equation", + "content": "50\\%" + }, + { + "bbox": [ + 104, + 600, + 506, + 733 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 600, + 506, + 733 + ], + "type": "inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 104, + 600, + 506, + 733 + ], + "type": "text", + "content": " for different metrics. After fine-tuning on the training set of temporal grounding benchmarks, the performance of VideoChat-T is on par with the state-of-the-art supervised expert models. The experiments demonstrate that our VideoChat-T is the first end-to-end MLLM that is able to perform well on both temporal grounding and general video QA. In particular, we show that grounded tuning with explicit location prediction can facilitate the long video understanding and relieve the hallucination risk." + } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 212, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 212, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 212, + 94 + ], + "type": "text", + "content": "2 RELATED WORK" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 112, + 506, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 112, + 506, + 277 + ], + "spans": [ + { + "bbox": [ + 104, + 112, + 506, + 277 + ], + "type": "text", + "content": "Video MLLMs. With the advancement of open-sourced LLMs (Chiang et al., 2023; Touvron et al., 2023; Jiang et al., 2023), video MLLMs have emerged by utilizing projection bridges to link vision foundation models with LLMs (Li et al., 2023b; 2024b; Zhang et al., 2023; Li et al., 2024a). Limited by the training context length, thought these methods perform well with a small number of frame inputs, they meet significant challenges when processing long videos. The longer video length usually implies longer temporal relationships and more redundancies, resulting in the difficulty of extracting key clues (Zhou et al., 2024). Recently, several methods for long video handling have been proposed, such as exploiting long context LLM (Liu et al., 2024a; Zhang et al., 2024b; Xue et al., 2024; Wang et al., 2024d) and token compression (Li et al., 2023d; Song et al., 2024a; Zhang et al., 2024a) for enabling more visual inputs and agents for task decomposition or retrieval (Fan et al., 2024; Wang et al., 2024c;h). MovieChat (Song et al., 2024a) supports more frames by applying short-term and long-term memory to merge similar visual tokens. Yet, studies in learning objectives for long videos are less explored, making it difficult to alleviate the frequent hallucination of LLMs in long context reasoning. 
Our proposed TimeSuite leverages temporally-centric tasks to unlock the temporal perception potential of MLLMs, anchoring responses to the most relevant video segments." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 300, + 506, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 300, + 506, + 433 + ], + "spans": [ + { + "bbox": [ + 104, + 300, + 506, + 433 + ], + "type": "text", + "content": "Temporal Grounding. Temporal grounding is a fundamental capability in video understanding, associating semantics to specific clips with corresponding timestamps. Typical expert models (Lei et al., 2021b; Moon et al., 2023a;b; Lin et al., 2023b; Zeng et al., 2024) have been developed by formulating it into a timestamp regression from visual inputs and user queries. Most existing video MLLMs fail to address it compared with expert models, while some remedy its temporal grounding by specifically designed architectures and data (Huang et al., 2024a; Wang et al., 2024f; Li et al., 2024c; Wang et al., 2024g; Huang et al., 2024b; Qu et al., 2024). Timechat (Ren et al., 2024) binds visual features of images with timestamps and uses a sliding window to handle variable token length. From the perspective of training data, an instruction-tuning dataset TimeIT is constructed. Despite impressive improvements in temporal performance, these MLLMs still lag behind expert models and compromise general video dialogue capabilities. In this paper, we explore how to enhance the temporal grounding of MLLMs while preserving their original capabilities." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 459, + 174, + 470 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 459, + 174, + 470 + ], + "spans": [ + { + "bbox": [ + 105, + 459, + 174, + 470 + ], + "type": "text", + "content": "3 METHOD" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 490, + 504, + 557 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 490, + 504, + 557 + ], + "spans": [ + { + "bbox": [ + 104, + 490, + 504, + 557 + ], + "type": "text", + "content": "In this section, we detail the proposed TimeSuite, a new collection of designs for improving short video MLLMs. Specifically, our TimeSuite includes a long video modeling framework, a high-quality video dataset for grounded tuning, and a carefully-designed instruction tuning task. With this new TimeSuite design, we are able to adapt the short-form video MLLM, obtaining significant performance improvements on two types of long video understanding tasks: traditional long video QA and temporal video grounding." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 580, + 195, + 590 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 580, + 195, + 590 + ], + "spans": [ + { + "bbox": [ + 105, + 580, + 195, + 590 + ], + "type": "text", + "content": "3.1 VIDEOCHAT-T" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 605, + 504, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 605, + 504, + 661 + ], + "spans": [ + { + "bbox": [ + 104, + 605, + 504, + 661 + ], + "type": "text", + "content": "We first describe the architecture of our proposed long video modeling framework. Specifically, built upon VideoChat2 (Li et al., 2024b), we devise long-video version of VideoChat-T. 
Our VideoChat-T is composed of a video backbone for extracting visual representations, a visual-language connector to compress visual tokens and bridge the visual and languages modalities, a LLM to follow human instructions to interpret the video content." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 665, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 665, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 665, + 506, + 733 + ], + "type": "text", + "content": "The architecture of VideoChat-T is illustrated in Figure 2. Its workflow has three stages. In the first stage, long videos are evenly segmented into clips and the clips are embedded by the Video Encoder and Q-Former (Li et al., 2023a). Then, for compressing visual token number and highlighting crucial ones, token shuffling is employed to merge adjacent tokens, and TAPE is used to add temporal adaptive positional encodings. Finally, the compressed video token sequence is fed to the LLM to generate accurate responses that adhere to user requirements." + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 106, + 79, + 506, + 238 + ], + "blocks": [ + { + "bbox": [ + 106, + 79, + 506, + 238 + ], + "lines": [ + { + "bbox": [ + 106, + 79, + 506, + 238 + ], + "spans": [ + { + "bbox": [ + 106, + 79, + 506, + 238 + ], + "type": "image", + "image_path": "076f20a316ea038edc9dee59b2e5486f4caff907815706c4663eda300ce62196.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 247, + 504, + 304 + ], + "lines": [ + { + "bbox": [ + 104, + 247, + 504, + 304 + ], + "spans": [ + { + "bbox": [ + 104, + 247, + 504, + 304 + ], + "type": "text", + "content": "Figure 2: Overall Architecture of VideoChat-T. First, long videos are segmented into clips, which are then transformed into feature embeddings by video encoder and time-aware Qformer. Next, all visual tokens undergo Token Shuffle to compress overly long tokens, and generate adaptive positional encodings through TAPE. Finally, the long video tokens are concatenated with the user query, serving as the input of LLM, thereby generating appropriate responses." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 324, + 224, + 335 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 324, + 224, + 335 + ], + "spans": [ + { + "bbox": [ + 105, + 324, + 224, + 335 + ], + "type": "text", + "content": "3.1.1 BACKBONE DESIGN" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 342, + 504, + 420 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 342, + 504, + 420 + ], + "spans": [ + { + "bbox": [ + 104, + 342, + 504, + 420 + ], + "type": "text", + "content": "Video clip encoding. 
For the given long video, we perform uniform sampling (Wang et al., 2019) to obtain " + }, + { + "bbox": [ + 104, + 342, + 504, + 420 + ], + "type": "inline_equation", + "content": "K \\times T" + }, + { + "bbox": [ + 104, + 342, + 504, + 420 + ], + "type": "text", + "content": " frames. We divide these frames into " + }, + { + "bbox": [ + 104, + 342, + 504, + 420 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 342, + 504, + 420 + ], + "type": "text", + "content": " video segments in chronological order, and sample " + }, + { + "bbox": [ + 104, + 342, + 504, + 420 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 342, + 504, + 420 + ], + "type": "text", + "content": " frames from each segment. Next, we use the video encoder and its visual-linguistic connector (Q-Former here) to encode each segment into " + }, + { + "bbox": [ + 104, + 342, + 504, + 420 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 342, + 504, + 420 + ], + "type": "text", + "content": " tokens. After the aforementioned processing, the entire video is encoded into a sequence of visual tokens, denoted by " + }, + { + "bbox": [ + 104, + 342, + 504, + 420 + ], + "type": "inline_equation", + "content": "\\mathbf{V}_q \\in \\mathbb{R}^{L \\times C_q}" + }, + { + "bbox": [ + 104, + 342, + 504, + 420 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 342, + 504, + 420 + ], + "type": "inline_equation", + "content": "C_q" + }, + { + "bbox": [ + 104, + 342, + 504, + 420 + ], + "type": "text", + "content": " is the dimension of output token by the Q-Former and " + }, + { + "bbox": [ + 104, + 342, + 504, + 420 + ], + "type": "inline_equation", + "content": "L = K \\times N" + }, + { + "bbox": [ + 104, + 342, + 504, + 420 + ], + "type": "text", + "content": " is the total number of tokens for the entire video." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 426, + 504, + 480 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 426, + 504, + 480 + ], + "spans": [ + { + "bbox": [ + 104, + 426, + 504, + 480 + ], + "type": "text", + "content": "Large Language Model. According to previous research, images and visual cues are projected into the same feature space of the LLM. The LLM acts as an interaction interface in the MLLMs, being used to process multimodal inputs, parse user instructions, and generate appropriate responses. To afford the processing of long video sequence, we need to design an efficient compression module between the visual encoder and LLMs." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 493, + 289, + 504 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 493, + 289, + 504 + ], + "spans": [ + { + "bbox": [ + 105, + 493, + 289, + 504 + ], + "type": "text", + "content": "3.1.2 VL-CONNECTOR:TOKEN SHUFFLE" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 512, + 504, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 512, + 504, + 579 + ], + "spans": [ + { + "bbox": [ + 104, + 512, + 504, + 579 + ], + "type": "text", + "content": "The increased number of sampled frames in long videos leads to a larger number of encoded visual tokens, causing a significant rise in the computational complexity and memory consumption of LLMs. Therefore, it is crucial to keep the number of visual tokens within an acceptable range. 
Some works have proposed various token compression schemes, such as clustering (Jin et al., 2024) and pooling (Huang et al., 2024b). However, clustering methods often struggle to maintain the temporal consistency, and pooling methods usually result in a certain loss of overall performance." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 583, + 504, + 679 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 583, + 504, + 679 + ], + "spans": [ + { + "bbox": [ + 104, + 583, + 504, + 679 + ], + "type": "text", + "content": "To address this, we propose a simple token shuffling compression scheme that ensures the temporal consistency of video tokens before and after compression while avoiding excessive performance loss. Previous methods often used a projector to achieve dimensional conversion. However, projecting visual encoding vectors from low to high dimensions does not increase information density. Therefore, we propose to rearrange multiple visual tokens along the channel dimension. Specifically, for the long video " + }, + { + "bbox": [ + 104, + 583, + 504, + 679 + ], + "type": "inline_equation", + "content": "\\mathbf{V}_q = [v_q^1,v_q^2,\\dots,v_q^L ]\\in \\mathbb{R}^{L\\times C_q}" + }, + { + "bbox": [ + 104, + 583, + 504, + 679 + ], + "type": "text", + "content": ", we concatenate " + }, + { + "bbox": [ + 104, + 583, + 504, + 679 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 104, + 583, + 504, + 679 + ], + "type": "text", + "content": " adjacent tokens along the channel dimension to obtain the reshaped visual feature " + }, + { + "bbox": [ + 104, + 583, + 504, + 679 + ], + "type": "inline_equation", + "content": "\\mathbf{V}_m = [v_m^1,v_m^2,\\dots,v_m^{\\frac{L}{m}}]\\in \\mathbb{R}_{\\frac{L}{m}}^{\\times mC_q}" + }, + { + "bbox": [ + 104, + 583, + 504, + 679 + ], + "type": "text", + "content": " where each merged token " + }, + { + "bbox": [ + 104, + 583, + 504, + 679 + ], + "type": "inline_equation", + "content": "v_{m}^{i}" + }, + { + "bbox": [ + 104, + 583, + 504, + 679 + ], + "type": "text", + "content": " is represented as:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 165, + 682, + 443, + 700 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 165, + 682, + 443, + 700 + ], + "spans": [ + { + "bbox": [ + 165, + 682, + 443, + 700 + ], + "type": "interline_equation", + "content": "v _ {m} ^ {i} = \\operatorname {C o n c a t} \\left(v _ {q} ^ {(i - 1) * m + 1}, v _ {q} ^ {(i - 1) * m + 2}, \\dots , v _ {q} ^ {i * m}\\right) \\quad \\forall i = 1, 2, \\dots , \\frac {L}{m}.", + "image_path": "512180577bf56b680c88a503c4d3ffa629f9bca06382efdc4638441eaf14b4c1.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 708, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 708, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 708, + 504, + 733 + ], + "type": "text", + "content": "Next, a linear projection layer is applied to the merged visual feature " + }, + { + "bbox": [ + 104, + 708, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\mathbf{V}_m" + }, + { + "bbox": [ + 104, + 708, + 504, + 733 + ], + "type": "text", + "content": ", generating the visual token sequences " + }, + { + "bbox": [ + 104, + 708, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\mathbf{V}_l \\in \\mathbb{R}^{\\frac{L}{m} \\times C_l}" + }, + { + "bbox": [ + 104, + 708, + 504, + 733 + ], + "type": "text", + "content": " as input into 
the LLM, where " + }, + { + "bbox": [ + 104, + 708, + 504, + 733 + ], + "type": "inline_equation", + "content": "C_l" + }, + { + "bbox": [ + 104, + 708, + 504, + 733 + ], + "type": "text", + "content": " represents the token channel dimension of the LLM." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 149 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 149 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 149 + ], + "type": "text", + "content": "This scheme effectively reuses the projector of the base model by replicating the original linear layer parameters " + }, + { + "bbox": [ + 104, + 82, + 504, + 149 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 104, + 82, + 504, + 149 + ], + "type": "text", + "content": " times along the channel dimension, achieving an initialization equivalent to mean pooling with a window length of " + }, + { + "bbox": [ + 104, + 82, + 504, + 149 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 104, + 82, + 504, + 149 + ], + "type": "text", + "content": ". This design avoids introducing additional randomly initialized parameters that might disturb the original model, thus preserving its original capabilities. Additionally, compared to directly using pooling, this method offers higher flexibility for fine-tuning to achieve better results (see ablation study, Table 4)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 161, + 330, + 172 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 161, + 330, + 172 + ], + "spans": [ + { + "bbox": [ + 105, + 161, + 330, + 172 + ], + "type": "text", + "content": "3.1.3 TEMPORAL ADAPTIVE POSITION ENCODING" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 179, + 504, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 179, + 504, + 236 + ], + "spans": [ + { + "bbox": [ + 104, + 179, + 504, + 236 + ], + "type": "text", + "content": "To bind temporal positional information to visual tokens, we propose an adapter called Temporal Adaptive Position Encoding (TAPE). Inspired by CPVT (Chu et al., 2021), our TAPE uses zero padding at both ends of the convolution as anchors, and gradually transmits relative positional encoding information. Without the need to add any special time tokens, TAPE can automatically perceive the relative temporal positions of the token sequence and generate temporal embeddings."
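Before the detailed multi-scale description that follows, a minimal single-scale sketch of the idea just stated may help: a depthwise separable 1D convolution over the token sequence whose zero padding at both ends acts as positional anchors, added back through a residual connection. This is an illustration in the spirit of CPVT, not the actual TAPE (which is U-Net-like and multi-scale; see below and Appendix A); the kernel size and the zero initialization of the pointwise layer are assumptions, the latter mirroring the zero-initialized final linear layer mentioned in Section 4.1:

```python
import torch
import torch.nn as nn

class ZeroPadTemporalPE(nn.Module):
    """Single-scale illustration: because the convolution's zero padding
    breaks translation invariance at both ends, each token's response
    depends on its distance to the sequence boundaries, which lets the
    module inject relative temporal position without special time tokens."""
    def __init__(self, channels: int, kernel_size: int = 65):
        super().__init__()
        # depthwise separable: per-channel conv followed by a pointwise mix
        self.depthwise = nn.Conv1d(channels, channels, kernel_size,
                                   padding=kernel_size // 2, groups=channels)
        self.pointwise = nn.Conv1d(channels, channels, 1)
        nn.init.zeros_(self.pointwise.weight)  # start as an identity residual
        nn.init.zeros_(self.pointwise.bias)

    def forward(self, tokens: torch.Tensor) -> torch.Tensor:
        x = tokens.t().unsqueeze(0)             # (L, C) -> (1, C, L)
        pe = self.pointwise(self.depthwise(x))  # temporal embeddings
        return tokens + pe.squeeze(0).t()       # plug-and-play residual add

tokens = torch.randn(512, 768)
print(ZeroPadTemporalPE(768)(tokens).shape)  # torch.Size([512, 768])
```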
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 240, + 506, + 362 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 240, + 506, + 362 + ], + "spans": [ + { + "bbox": [ + 104, + 240, + 506, + 362 + ], + "type": "text", + "content": "Specifically, the long video token sequence " + }, + { + "bbox": [ + 104, + 240, + 506, + 362 + ], + "type": "inline_equation", + "content": "\\mathbf{V}_q" + }, + { + "bbox": [ + 104, + 240, + 506, + 362 + ], + "type": "text", + "content": " is first compressed in the channel dimension by a linear layer and further compressed in sequence length by a pooling layer. Next, we use a U-Net-like structure composed of one-dimensional depthwise separable convolutions to progressively down-sample the sequence, obtaining three one-dimensional temporal feature sequences with different resolutions. Subsequently, a convolution with a sufficiently long window is applied to the shortest temporal feature sequence, using zero padding at both ends as anchors to encode the relative temporal position of each token in the sequence (Chu et al., 2021). Then, we progressively upsample and restore the temporal feature sequences from short to long, using residual connections to retain temporal features at different scales. Finally, the temporal feature sequences are restored to the same length as " + }, + { + "bbox": [ + 104, + 240, + 506, + 362 + ], + "type": "inline_equation", + "content": "\\mathbf{V}_l" + }, + { + "bbox": [ + 104, + 240, + 506, + 362 + ], + "type": "text", + "content": " and aligned in the channel dimension by a linear layer, thereby obtaining the temporal features " + }, + { + "bbox": [ + 104, + 240, + 506, + 362 + ], + "type": "inline_equation", + "content": "\\mathbf{V}_t" + }, + { + "bbox": [ + 104, + 240, + 506, + 362 + ], + "type": "text", + "content": " output by the TAPE. For detailed implementation of TAPE, please refer to Appendix A." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 367, + 504, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 367, + 504, + 422 + ], + "spans": [ + { + "bbox": [ + 104, + 367, + 504, + 422 + ], + "type": "text", + "content": "Our proposed TAPE offers a plug-and-play module, which could be easily integrated into the network structure via residual connections, adding temporal position information to video tokens without disrupting the distribution of other trainable parameters. With appropriate training strategies, TAPE effectively preserves the model's generalization capabilities and enhances its temporal sensitivity (see ablation study, Table 3), which is important for temporal grounding task." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 436, + 365, + 447 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 436, + 365, + 447 + ], + "spans": [ + { + "bbox": [ + 105, + 436, + 365, + 447 + ], + "type": "text", + "content": "3.2 TIMEPRO:TEMPORAL GROUNDED INSTRUCTION DATA" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 456, + 504, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 456, + 504, + 523 + ], + "spans": [ + { + "bbox": [ + 104, + 456, + 504, + 523 + ], + "type": "text", + "content": "Traditional temporal grounding datasets only contain monotonous ground truth, i.e., the start and end times of the target period. This data format performs well in training the classic expert models, but is difficult to unleash the potential of LLMs. 
Although several temporal grounding-centric datasets have been released for MLLM fine-tuning (Ren et al., 2024; Huang et al., 2024b), they still have deficiencies in data quantity, data quality, and task diversity. Thus, it is necessary to build a more comprehensive temporal dataset designed for the tuning of MLLMs." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 528, + 504, + 584 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 528, + 504, + 584 + ], + "spans": [ + { + "bbox": [ + 104, + 528, + 504, + 584 + ], + "type": "text", + "content": "Based on the criteria of diversity, length, and difficulty, we collect and clean several existing high-quality grounding-centric datasets (Ren et al., 2024; Huang et al., 2024a,b), and create two new datasets, resulting in the TimePro. Compared to previous temporal grounding-centric datasets, TimePro offers a larger volume of data, a broader distribution, and a higher task diversity, facilitating the learning of more generalizable temporal representations for MLLMs." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 589, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 589, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 589, + 506, + 733 + ], + "type": "text", + "content": "As shown in Figure 3(a), TimePro contains 9 task types from 15 datasets that are highly relevant to temporal grounding, containing approximately 349K high-quality temporal grounding annotations. The 9 tasks are specified as follows. Temporal Video Grounding involves identifying the start and end times of video content based on a natural language query (Anne Hendricks et al., 2017; Oncescu et al., 2021; Zala et al., 2023). Dense Video Captioning requires detecting events within a video and providing corresponding timestamps and descriptions (Krishna et al., 2017; Huang et al., 2020; Zhou et al., 2018). Video Summarization focuses on determining key frames or clips in the form of timestamps rather than semantic summaries (Song et al., 2015; Gygli et al., 2014). Step Localization aims to segment and describe important steps in a long video (Tang et al., 2019; Zala et al., 2023). Transcribed Speech Generation predicts speech content and its timestamps from visual signals (Zellers et al., 2022). Reasoning Temporal Localization combines timestamps with explanatory answers (Huang et al., 2024b). Multi-format Temporal Grounding includes single-turn and multi-turn dialogues with diverse question types (Huang et al., 2024a). 
Highlight" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 125, + 79, + 298, + 217 + ], + "blocks": [ + { + "bbox": [ + 125, + 79, + 298, + 217 + ], + "lines": [ + { + "bbox": [ + 125, + 79, + 298, + 217 + ], + "spans": [ + { + "bbox": [ + 125, + 79, + 298, + 217 + ], + "type": "image", + "image_path": "083cd70a090e43a2861582ab50537e9775960f025a9ff9e923b85e63a3e03146.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 167, + 217, + 233, + 226 + ], + "lines": [ + { + "bbox": [ + 167, + 217, + 233, + 226 + ], + "spans": [ + { + "bbox": [ + 167, + 217, + 233, + 226 + ], + "type": "text", + "content": "(a) Tasks of TimePro" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 236, + 504, + 281 + ], + "lines": [ + { + "bbox": [ + 104, + 236, + 504, + 281 + ], + "spans": [ + { + "bbox": [ + 104, + 236, + 504, + 281 + ], + "type": "text", + "content": "Figure 3: (a) The proposed temporal-centric instruction-tuning dataset, TimePro. This dataset contains approximately 349K high-quality and strongly temporally correlated samples. (b) The proposed Temporal Grounded Caption fine-tuning data paradigm. It effectively reduces the occurrence of hallucinations. We employ a 4-stage processing pipeline to ensure the quality of the generated data." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 300, + 80, + 484, + 217 + ], + "blocks": [ + { + "bbox": [ + 300, + 80, + 484, + 217 + ], + "lines": [ + { + "bbox": [ + 300, + 80, + 484, + 217 + ], + "spans": [ + { + "bbox": [ + 300, + 80, + 484, + 217 + ], + "type": "image", + "image_path": "aface00e068ff3e7c38e20a5aa09cc30b2c5cf813a2898118d5b09daed21e814.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 325, + 217, + 455, + 227 + ], + "lines": [ + { + "bbox": [ + 325, + 217, + 455, + 227 + ], + "spans": [ + { + "bbox": [ + 325, + 217, + 455, + 227 + ], + "type": "text", + "content": "(b) Details of Temporal Grounded Caption" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 306, + 504, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 306, + 504, + 361 + ], + "spans": [ + { + "bbox": [ + 104, + 306, + 504, + 361 + ], + "type": "text", + "content": "Detection identifies the most significant moments in a video based on a query (Lei et al., 2021a). Temporal Grounded Caption uses a brief scene title to output both the time period and a fine-grained description for the scene. More detailed information about TimePro is available in Appendix B.
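To make the new task concrete, the following is a hypothetical example of what a single Temporal Grounded Caption instance could look like; the field names, file name, and timestamp format are illustrative assumptions, not the released TimePro schema:

```python
# Hypothetical Temporal Grounded Caption (TGC) instance (illustrative only).
tgc_example = {
    "video": "cooking_demo_0412.mp4",
    "task": "temporal_grounded_caption",
    # The query is a brief scene title, kept to the minimal features needed
    # to distinguish the segment (cf. the pipeline in Section 3.3).
    "question": "Locate the scene 'whisking the eggs' and describe it in detail.",
    # The answer couples grounding with a detailed caption, so each sub-task
    # regularizes the other.
    "answer": ("From 41.0s to 58.5s. The cook cracks two eggs into a glass "
               "bowl, adds a pinch of salt, and whisks briskly until the "
               "mixture turns pale and frothy."),
}
```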
It should be noted that Temporal Grounded Caption is our newly-designed task that can help our model to establish fine-grained correspondence between visual segment and linguistic description." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 380, + 299, + 390 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 380, + 299, + 390 + ], + "spans": [ + { + "bbox": [ + 105, + 380, + 299, + 390 + ], + "type": "text", + "content": "3.3 TEMPORAL GROUNDED CAPTION TASK" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 402, + 504, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 402, + 504, + 491 + ], + "spans": [ + { + "bbox": [ + 104, + 402, + 504, + 491 + ], + "type": "text", + "content": "Some studies have shown that MLLMs often exhibit severe hallucinations when dealing with fine-grained perception tasks (Ji et al., 2023; Huang et al., 2023; Golkar et al., 2023). Since our VideoChat-T directly regresses the timestamps corresponding to the text queries using MLLMs, it is more susceptible to hallucinations compared to methods that use external expert models as decoders (Wu et al., 2024). By forcing the video MLLMs to predict the event occurrence time and simultaneously describe the visual content evidence, we attempt to anchor these queries to the relevant time segments within the video, rather than generating hallucinations originating from LLM itself. Based on this analysis, we design the Temporal Grounded Caption task." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 495, + 506, + 584 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 495, + 506, + 584 + ], + "spans": [ + { + "bbox": [ + 104, + 495, + 506, + 584 + ], + "type": "text", + "content": "The top of Figure 3(b) illustrates the definition of Temporal Grounded Caption. We use a brief scene title of the video segment as the query, requiring the model to simultaneously respond with the precise start and end times of the video segment and provide a detailed description of that segment. While the content in the scene title may leak into the detailed caption response, most of the missing detailed information must be correctly described by attending the corresponding segment. Moreover, temporal grounding and detailed captioning can serve as regularization task for each other, preventing caption model from hallucinations from unrelated visual or linguistic contexts and helping grounding model to regress the timestamp more accurately." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 589, + 506, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 589, + 506, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 589, + 506, + 732 + ], + "type": "text", + "content": "The process for collecting our Temporal Grounded Caption data is described at the bottom of Figure 3(b). In the first stage, we use a detailed caption dataset with timestamps as our data source. We remove data with target grounding time intervals that are too short or too long and ensure that the scenes in the video are as diverse as possible. In the second stage, we use a LLM to summarize scene titles. To prevent excessive semantics of video segments from being leaked from the query to the MLLM, we try to retain the minimal subset of key features that are sufficient to distinguish the video segments. 
In the third stage, to avoid overly similar or identical content appearing at different temporal intervals in the video, we perform similarity filtering on the data annotations. Based on the scene titles and video features, we calculate the similarity between different segments of the same video and remove data with excessively high similarity. In the fourth stage, we randomly sample the generated data and manually assess its quality. Based on human feedback, we refine the threshold parameters for data filtering used in the first three stages to yield the final Temporal Grounded Caption dataset. This new dataset plays an important role in our grounded tuning." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 107, + 80, + 504, + 227 + ], + "blocks": [ + { + "bbox": [ + 107, + 80, + 504, + 227 + ], + "lines": [ + { + "bbox": [ + 107, + 80, + 504, + 227 + ], + "spans": [ + { + "bbox": [ + 107, + 80, + 504, + 227 + ], + "type": "table", + "html": "
MethodLLM SizeCharades-STAQVHighlight
R@1(IOU=0.3)R@1(IOU=0.5)R@1(IOU=0.7)mAPHIT@1
MovieChat (Song et al., 2024a)7B8.82.91.311.716.1
GroundingGPT (Li et al., 2024c)7B-29.611.9--
VTimeLLM (Huang et al., 2024a)7B51.027.511.4--
HawkEye (Wang et al., 2024f)7B50.631.414.5--
TimeChat (Ren et al., 2024)7B-32.213.414.523.9
ChatVTG (Qu et al., 2024)7B52.733.015.9--
VideoChat2 (Li et al., 2024b)7B9.63.41.413.418.6
VideoChat-T7B69.9 (+60.3)48.7 (+45.3)24.0 (+22.6)26.5 (+13.1)54.1 (+35.5)
QD-DETR※ (FT) (Moon et al., 2023b)--57.332.638.964.2
UnLoc-L※ (FT) (Yan et al., 2023)--60.838.4--
HawkEye (FT) (Wang et al., 2024f)7B72.558.328.8--
TimeChat (FT) (Ren et al., 2024)7B-46.723.721.737.9
VideoChat-T (FT)7B79.467.143.027.055.3
", + "image_path": "45a634c3aab292d6b2dce1bcf29a6c0d2dcf2721b8a03c4fa9445fe0fd035b40.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 235, + 504, + 269 + ], + "lines": [ + { + "bbox": [ + 104, + 235, + 504, + 269 + ], + "spans": [ + { + "bbox": [ + 104, + 235, + 504, + 269 + ], + "type": "text", + "content": "Table 1: Performance of VideoChat-T on temporal grounding and highlight detection tasks. (FT) indicates the model fine-tuned on training set of the evaluation benchmark, with the respective text marked in gray. Classic supervised expert models are marked with " + }, + { + "bbox": [ + 104, + 235, + 504, + 269 + ], + "type": "inline_equation", + "content": "\\text{※}" + }, + { + "bbox": [ + 104, + 235, + 504, + 269 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 105, + 277, + 201, + 289 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 277, + 201, + 289 + ], + "spans": [ + { + "bbox": [ + 105, + 277, + 201, + 289 + ], + "type": "text", + "content": "4 EXPERIMENTS" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 303, + 250, + 313 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 303, + 250, + 313 + ], + "spans": [ + { + "bbox": [ + 105, + 303, + 250, + 313 + ], + "type": "text", + "content": "4.1 IMPLEMENTATION DETAILS" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 323, + 506, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 323, + 506, + 422 + ], + "spans": [ + { + "bbox": [ + 104, + 323, + 506, + 422 + ], + "type": "text", + "content": "Built upon VideoChat2, we use UMT-L (Li et al., 2023c) and Mistral-7B (Jiang et al., 2023) as the video encoder and LLM, respectively. Except for the TAPE, all components are initialized from the pre-trained model of VideoChat2-Mistral. For the TAPE, we use random initialization, set the initial values of the final linear layer to zero, and freeze it during the first epoch of training. We set the frame count " + }, + { + "bbox": [ + 104, + 323, + 506, + 422 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 323, + 506, + 422 + ], + "type": "text", + "content": " for each clip to 8, so the number of clips " + }, + { + "bbox": [ + 104, + 323, + 506, + 422 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 323, + 506, + 422 + ], + "type": "text", + "content": " for a long video is equal to the total frame count divided by " + }, + { + "bbox": [ + 104, + 323, + 506, + 422 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 323, + 506, + 422 + ], + "type": "text", + "content": ". We fine-tune the model for 3 epochs using the TimePro with 349K instances and a general QA task dataset with 82K instances. To ensure the stability of model training, we use 192-frame input for the first epoch. In the second and third epochs, we unfreeze the TAPE and adjust the model input to 128 frames. All experiments are conducted on 16 A100 GPUs." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 436, + 318, + 447 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 436, + 318, + 447 + ], + "spans": [ + { + "bbox": [ + 105, + 436, + 318, + 447 + ], + "type": "text", + "content": "4.2 PERFORMANCE ON TEMPORAL GROUNDING" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 456, + 504, + 513 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 456, + 504, + 513 + ], + "spans": [ + { + "bbox": [ + 104, + 456, + 504, + 513 + ], + "type": "text", + "content": "We evaluate our method using two commonly used temporal localization tasks, i.e., Temporal Grounding and Highlight Detection. The performance comparison between VideoChat-T and other models is shown in Table 1. Our method's zero-shot performance surpasses all previous LLM-based methods and after fine-tuning, VideoChat-T even exceeds some classic expert models on the temporal grounding task." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 517, + 506, + 595 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 517, + 506, + 595 + ], + "spans": [ + { + "bbox": [ + 104, + 517, + 506, + 595 + ], + "type": "text", + "content": "Temporal Grounding. This task aims to identify the start and end timestamps of the video content described by the query sentence, using Charades-STA as the evaluation benchmark. VideoChat-T achieves an accuracy of 48.7 in the R@1 (IOU=0.5) metric, significantly surpassing the previous state-of-the-art MLLM method, namely TimeChat, by 16.5 points. Additionally, it outperforms the fine-tuned version of TimeChat on the training set of the evaluation benchmark by " + }, + { + "bbox": [ + 104, + 517, + 506, + 595 + ], + "type": "inline_equation", + "content": "2.0\\%" + }, + { + "bbox": [ + 104, + 517, + 506, + 595 + ], + "type": "text", + "content": ". Furthermore, the performance of VideoChat-T fine-tuned on the evaluation benchmark training set reaches 67.1 R@1 at IoU=0.5, surpassing most state-of-the-art classic supervised expert models." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 600, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 600, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 600, + 506, + 733 + ], + "type": "text", + "content": "Highlight Detection. We use QVHighlights as the evaluation benchmark. For a given query, this task requires outputting all timestamps of highlight moments and their corresponding saliency scores. Since there could be many sparse highlight moments in a video, this task requires finer-grained video understanding at the frame level. VideoChat-T achieves mAP of 26.5, significantly surpassing the previous MLLM method of TimeChat by 13.0 points, and also outperforms its finetuned version by 4.8 points. We observe that after fine-tuning on the corresponding training set, VideoChat-T shows almost no performance improvement. This may be due to the bottleneck in language representation of LLMs. The Highlight Detection task requires outputting a (timestamp, saliency score) pair for each highlight moment, and a video may contain dozens of discrete highlight moments, making it challenging for the model to correctly respond with dozens to hundreds of numbers in a language format. The precise numerical salience score output is very difficult for LLMs, and VideoChat-T can only respond well to queries with fewer highlight moments. 
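For reference, the interval metrics reported above can be computed as in this minimal sketch: the standard temporal IoU between predicted and annotated (start, end) windows, and the R@1(IoU) recall used for Charades-STA; the prediction and ground-truth values below are hypothetical:

```python
def temporal_iou(pred: tuple, gt: tuple) -> float:
    """IoU of two time intervals given as (start, end) in seconds."""
    inter = max(0.0, min(pred[1], gt[1]) - max(pred[0], gt[0]))
    union = max(pred[1], gt[1]) - min(pred[0], gt[0])
    return inter / union if union > 0 else 0.0

def recall_at_1(preds: list, gts: list, thresh: float = 0.5) -> float:
    """R@1(IOU=thresh): percentage of queries whose top-1 predicted window
    overlaps the ground truth with IoU >= thresh."""
    hits = sum(temporal_iou(p, g) >= thresh for p, g in zip(preds, gts))
    return round(100.0 * hits / len(gts), 1)

# Hypothetical top-1 predictions vs. annotations for three queries:
preds = [(12.0, 25.0), (3.5, 9.0), (40.0, 55.0)]
gts   = [(10.0, 24.0), (6.0, 14.0), (41.0, 52.0)]
print(recall_at_1(preds, gts, 0.5))  # 66.7: two of three clear IoU >= 0.5
```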
Due to the" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 107, + 80, + 504, + 205 + ], + "blocks": [ + { + "bbox": [ + 107, + 80, + 504, + 205 + ], + "lines": [ + { + "bbox": [ + 107, + 80, + 504, + 205 + ], + "spans": [ + { + "bbox": [ + 107, + 80, + 504, + 205 + ], + "type": "table", + "html": "
MethodLLM SizeLong VideoShort Video
EgoschemaVideoMMEMVBench
SubsetFullw/o subsw/o subs (Long)Avg
VideoAgent (Wang et al., 2024c)GPT-460.254.1---
VideoAgent (Fan et al., 2024)GPT-462.8----
TimeChat (Ren et al., 2024)7B-33.030.226.138.5
LLaMA-VID (Li et al., 2023d)7B-38.5--41.9
MovieChat (Song et al., 2024a)7B-53.538.233.455.1
MovieChat+ (Song et al., 2024b)7B-56.4---
Chat-UniVi (Jin et al., 2024)7B--40.635.8-
VideoChat2 (Li et al., 2024b)7B63.654.439.533.260.4
VideoChat-T7B68.4 (+4.8)60.0 (+5.6)46.3 (+6.8)41.9 (+8.7)59.9 (-0.5)
", + "image_path": "c15c9827e65003c39960bd0839ef3e857461f6bf0c544794dda96bdb0e2f4a9b.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 213, + 504, + 247 + ], + "lines": [ + { + "bbox": [ + 104, + 213, + 504, + 247 + ], + "spans": [ + { + "bbox": [ + 104, + 213, + 504, + 247 + ], + "type": "text", + "content": "Table 2: Performance of VideoChat-T and other methods on video question answering tasks. By upgrading VideoChat2 with TimeSuite, VideoChat-T demonstrates significant improvements across multiple long video benchmarks." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 255, + 504, + 279 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 255, + 504, + 279 + ], + "spans": [ + { + "bbox": [ + 104, + 255, + 504, + 279 + ], + "type": "text", + "content": "specific architectural design, classic supervised expert models have a natural advantage in handling such tasks, and VideoChat-T still has a performance gap compared to expert models." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 292, + 307, + 304 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 292, + 307, + 304 + ], + "spans": [ + { + "bbox": [ + 105, + 292, + 307, + 304 + ], + "type": "text", + "content": "4.3 PERFORMANCE ON GENERAL VIDEO QA" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 312, + 504, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 312, + 504, + 357 + ], + "spans": [ + { + "bbox": [ + 104, + 312, + 504, + 357 + ], + "type": "text", + "content": "In addition to test the grounding ability of our VideoChat-T, we also want to verify its general video question answering performance. According to mainstream evaluation standards, we use both long video and short video QA to assess the general video understanding capability of VideoChat-T. Table 2 shows the performance of VideoChat-T on the video QA evaluation benchmarks." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 361, + 506, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 361, + 506, + 506 + ], + "spans": [ + { + "bbox": [ + 104, + 361, + 506, + 506 + ], + "type": "text", + "content": "Long Video QA. We use Egoschema (Mangalam et al., 2023) and VideoMME (Fu et al., 2024) to evaluate the long video capabilities of VideoChat-T. In conjunction with our proposed architectural improvements, we incremental fine-tune VideoChat2 using only 432K data points. VideoChat-T demonstrates outstanding performance on the Egoschema, achieving an accuracy of " + }, + { + "bbox": [ + 104, + 361, + 506, + 506 + ], + "type": "inline_equation", + "content": "68.4\\%" + }, + { + "bbox": [ + 104, + 361, + 506, + 506 + ], + "type": "text", + "content": " on the test subset and " + }, + { + "bbox": [ + 104, + 361, + 506, + 506 + ], + "type": "inline_equation", + "content": "60.0\\%" + }, + { + "bbox": [ + 104, + 361, + 506, + 506 + ], + "type": "text", + "content": " on the entire test set. 
Compared to VideoChat2, VideoChat-T obtains improvements of " + }, + { + "bbox": [ + 104, + 361, + 506, + 506 + ], + "type": "inline_equation", + "content": "4.8\\%" + }, + { + "bbox": [ + 104, + 361, + 506, + 506 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 361, + 506, + 506 + ], + "type": "inline_equation", + "content": "5.6\\%" + }, + { + "bbox": [ + 104, + 361, + 506, + 506 + ], + "type": "text", + "content": " on the subset and the full test set, respectively. Additionally, for the VideoMME benchmark, VideoChat-T achieves an accuracy of " + }, + { + "bbox": [ + 104, + 361, + 506, + 506 + ], + "type": "inline_equation", + "content": "46.3\\%" + }, + { + "bbox": [ + 104, + 361, + 506, + 506 + ], + "type": "text", + "content": " by solely analyzing the visual content without using subtitles, representing a " + }, + { + "bbox": [ + 104, + 361, + 506, + 506 + ], + "type": "inline_equation", + "content": "6.8\\%" + }, + { + "bbox": [ + 104, + 361, + 506, + 506 + ], + "type": "text", + "content": " improvement over VideoChat2. On the long video data division of VideoMME, VideoChat-T achieves an accuracy of " + }, + { + "bbox": [ + 104, + 361, + 506, + 506 + ], + "type": "inline_equation", + "content": "41.9\\%" + }, + { + "bbox": [ + 104, + 361, + 506, + 506 + ], + "type": "text", + "content": ", which is an " + }, + { + "bbox": [ + 104, + 361, + 506, + 506 + ], + "type": "inline_equation", + "content": "8.7\\%" + }, + { + "bbox": [ + 104, + 361, + 506, + 506 + ], + "type": "text", + "content": " improvement compared to VideoChat2. The upgraded VideoChat-T demonstrated significant performance improvements on long video QA benchmarks. This indicates the potential of leveraging grounding-centric video tasks to enhance the temporal awareness of MLLMs, thereby further improving long video understanding capabilities." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 510, + 506, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 510, + 506, + 654 + ], + "spans": [ + { + "bbox": [ + 104, + 510, + 506, + 654 + ], + "type": "text", + "content": "Short Video QA. We use MVBench (Li et al., 2024b) to evaluate the general short video understanding capabilities of VideoChat-T. VideoChat-T achieves an overall average accuracy of " + }, + { + "bbox": [ + 104, + 510, + 506, + 654 + ], + "type": "inline_equation", + "content": "59.9\\%" + }, + { + "bbox": [ + 104, + 510, + 506, + 654 + ], + "type": "text", + "content": " on MVBench, which is a " + }, + { + "bbox": [ + 104, + 510, + 506, + 654 + ], + "type": "inline_equation", + "content": "0.5\\%" + }, + { + "bbox": [ + 104, + 510, + 506, + 654 + ], + "type": "text", + "content": " decrease compared to VideoChat2. It is important to note that achieving minimal performance loss is a challenging task. According to previous experiences in the field of incremental learning (Van de Ven et al., 2022), models inevitably forget old knowledge while learning new knowledge. VideoChat2 is fine-tuned with 2M data, whereas VideoChat-T is fine-tuned with only 432K data, where 349K annotations are temporal grounding centric, resulting in only a " + }, + { + "bbox": [ + 104, + 510, + 506, + 654 + ], + "type": "inline_equation", + "content": "0.5\\%" + }, + { + "bbox": [ + 104, + 510, + 506, + 654 + ], + "type": "text", + "content": " accuracy loss. 
Previous temporal MLLMs like TimeChat (Ren et al., 2024), although achieving strong temporal localization capabilities, yield much weaker general video QA capability, with an accuracy of only " + }, + { + "bbox": [ + 104, + 510, + 506, + 654 + ], + "type": "inline_equation", + "content": "38.5\\%" + }, + { + "bbox": [ + 104, + 510, + 506, + 654 + ], + "type": "text", + "content": " on MVBench. This demonstrates that the design of our TimeSuite enhances new capabilities for the model while still preserving the original general video understanding capabilities. For a detailed analysis of the performance degradation of MVBench, please refer to Appendix F.2." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 667, + 235, + 679 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 667, + 235, + 679 + ], + "spans": [ + { + "bbox": [ + 105, + 667, + 235, + 679 + ], + "type": "text", + "content": "4.4 QUALITATIVE ANALYSIS" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 688, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 688, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 688, + 504, + 733 + ], + "type": "text", + "content": "Figure 4 presents a qualitative comparison between our model and other methods. In the example on the left, VideoChat-T is capable of answering more complex long video reasoning questions. Our model accurately identifies the temporal location of the \"light a cigarette\" event and determines the correct key clue \"the person in a white coat\" based on the video content. This leads to the inference" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 106, + 80, + 299, + 213 + ], + "blocks": [ + { + "bbox": [ + 106, + 80, + 299, + 213 + ], + "lines": [ + { + "bbox": [ + 106, + 80, + 299, + 213 + ], + "spans": [ + { + "bbox": [ + 106, + 80, + 299, + 213 + ], + "type": "image", + "image_path": "35e92a20134204d271d52add199b9668212da945d3ff96706a6ebda7d2bd99cc.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 220, + 504, + 255 + ], + "lines": [ + { + "bbox": [ + 104, + 220, + 504, + 255 + ], + "spans": [ + { + "bbox": [ + 104, + 220, + 504, + 255 + ], + "type": "text", + "content": "Figure 4: Qualitative comparison between VideoChat-T and other methods. VideoChat-T not only possesses temporal fine-grained perception capabilities but also can perform accurate long video reasoning. Green text indicates correct answers, while red text indicates inappropriate answers." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 310, + 80, + 504, + 205 + ], + "blocks": [ + { + "bbox": [ + 310, + 80, + 504, + 205 + ], + "lines": [ + { + "bbox": [ + 310, + 80, + 504, + 205 + ], + "spans": [ + { + "bbox": [ + 310, + 80, + 504, + 205 + ], + "type": "image", + "image_path": "97a395318cd819ea31c6f8a8f40d2462eee8825dbbf4fe4859d6dca452f7a506.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 106, + 260, + 298, + 300 + ], + "blocks": [ + { + "bbox": [ + 106, + 260, + 298, + 300 + ], + "lines": [ + { + "bbox": [ + 106, + 260, + 298, + 300 + ], + "spans": [ + { + "bbox": [ + 106, + 260, + 298, + 300 + ], + "type": "table", + "html": "
ModelEgoschema FullVideoMME w/o subsCharades-STA R@1 IOU=0.5QVHighlights Hit@1
VideoChat-T (Ours)60.046.348.754.1
w/o TAPE59.145.947.150.4
w/o frz59.045.252.453.7
", + "image_path": "94d9776459cc5e5d0fff9a197b234bec613225a8b63a126dd7bc6e694db72f32.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 312, + 260, + 503, + 308 + ], + "blocks": [ + { + "bbox": [ + 104, + 304, + 299, + 361 + ], + "lines": [ + { + "bbox": [ + 104, + 304, + 299, + 361 + ], + "spans": [ + { + "bbox": [ + 104, + 304, + 299, + 361 + ], + "type": "text", + "content": "Table 3: Performance results of the ablation study on the TAPE. Here, w/o adapter refers to removing our proposed TAPE, and w/o frz refers to not using the training method where the TAPE is frozen during the first epoch." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 312, + 260, + 503, + 308 + ], + "lines": [ + { + "bbox": [ + 312, + 260, + 503, + 308 + ], + "spans": [ + { + "bbox": [ + 312, + 260, + 503, + 308 + ], + "type": "table", + "html": "
ModelEgoschema FullVideoMME w/o subsCharades-STA R@1 IOU=0.5QVHighlights Hit@1
VideoChat-T (Ours)60.046.348.754.1
r/w pooling59.844.840.347.3
r/w clustering59.545.039.840.1
w/o init57.443.442.053.9
", + "image_path": "1ef628e7b61d7ac79750d1dde1f1c939db03d2b5d66922bc048e68475aa032ae.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 309, + 312, + 504, + 367 + ], + "lines": [ + { + "bbox": [ + 309, + 312, + 504, + 367 + ], + "spans": [ + { + "bbox": [ + 309, + 312, + 504, + 367 + ], + "type": "text", + "content": "Table 4: Performance results of the ablation study on the Token Shuffle. Here, r/w refers to replacing Token Shuffle with the other component, and w/o init refers to removing the efficient initialization." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 377, + 504, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 377, + 504, + 422 + ], + "spans": [ + { + "bbox": [ + 104, + 377, + 504, + 422 + ], + "type": "text", + "content": "that \"playing the piano very fast and pressing the keys very hard\" are the true reasons. The example on the right demonstrates our model's fine-grained perception ability. The appearance of \"money in the briefcase\" is very brief, and most models easily overlook this detail. Thanks to its strong fine-grained perception ability, our model precisely captures this visual content." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 436, + 209, + 447 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 436, + 209, + 447 + ], + "spans": [ + { + "bbox": [ + 105, + 436, + 209, + 447 + ], + "type": "text", + "content": "4.5 ABLATION STUDY" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 456, + 504, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 456, + 504, + 578 + ], + "spans": [ + { + "bbox": [ + 104, + 456, + 504, + 578 + ], + "type": "text", + "content": "Role of TAPE. To verify the performance improvement brought by TAPE, ablation experiments were conducted. Table 3 lists the performance results of the conducted adapter-related ablation experiments. It can be observed that when the TAPE is removed, the model's performance on long video understanding and temporal grounding benchmarks decreases. TAPE can adaptively embed positional encodings into video tokens, and the absence of TAPE leads to a certain loss in temporal awareness capability. When we unfroze the TAPE in the first epoch, the performance improved on the temporal grounding task but declined on the long video QA task. This is because the TAPE is highly suited for tasks with strong temporal dependencies. If unfrozen too early, the model may become biased towards fitting temporal grounding tasks. Freezing the TAPE during the first epoch allows the model to first optimize and learn a relatively generalized feature representation, thereby balancing the performance across different tasks." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 582, + 504, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 582, + 504, + 693 + ], + "spans": [ + { + "bbox": [ + 104, + 582, + 504, + 693 + ], + "type": "text", + "content": "Effectiveness of Token Shuffle. To verify the effectiveness of token shuffle, we conducted ablation experiments. Table 4 presents the results of these ablation experiments. We compared token shuffle with conventional methods such as pooling and clustering, and also observed the results after removing efficient initialization. When we replaced token shuffle with pooling or clustering methods, the model's performance declined. 
This is because the efficient initialization of the linear layer in token shuffle makes the initial values of the module equivalent to average pooling, which gradually optimizes better solutions during training. Therefore, our method is inherently superior to pooling. On the other hand, clustering often fails to maintain the spatial/temporal consistency of the video, leading to temporal confusion. When we removed the efficient initialization of the linear layer, the negative impact of random initialization severely damaged the model's original performance." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "content": "Effect of TimePro. We conducted ablation studies to evaluate the effectiveness of the TimePro data components. As shown in Table 5, by gradually adding subsets of TimePro, we observed the model's performance changes across various temporal grounding-centric instruction-tuning data. As we pro" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 127, + 79, + 482, + 156 + ], + "blocks": [ + { + "bbox": [ + 127, + 79, + 482, + 156 + ], + "lines": [ + { + "bbox": [ + 127, + 79, + 482, + 156 + ], + "spans": [ + { + "bbox": [ + 127, + 79, + 482, + 156 + ], + "type": "table", + "html": "
NormalTimeITTGCHDMTGRTLEgoschema FullVideoMME w/o subsCharades-STA R@1 IOU=0.5QVHighlights Hit@1
56.642.68.024.4
57.843.632.225.2
58.344.039.133.9
59.844.941.943.8
60.045.145.848.3
60.046.348.754.1
", + "image_path": "3cbe39df81cbc638085740dff8291a6f2871a0f25d1273b3eb5e76126c5d65ad.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 163, + 504, + 209 + ], + "lines": [ + { + "bbox": [ + 104, + 163, + 504, + 209 + ], + "spans": [ + { + "bbox": [ + 104, + 163, + 504, + 209 + ], + "type": "text", + "content": "Table 5: Performance results of the ablation study on different components of TimePro. We use 82K normal training data as the baseline. TimeIT refers to the training data with five task types from Ren et al. (2024), TGC refers to Temporal Grounded Caption, HD refers to Highlight Detection, MTG refers to Multi-format Temporal Grounding, and RTL refers to Reasoning Temporal Localization." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 126, + 217, + 243, + 300 + ], + "blocks": [ + { + "bbox": [ + 126, + 217, + 243, + 300 + ], + "lines": [ + { + "bbox": [ + 126, + 217, + 243, + 300 + ], + "spans": [ + { + "bbox": [ + 126, + 217, + 243, + 300 + ], + "type": "image", + "image_path": "21867962bd38420a9c54f8c16c40cde0165a85826b166be4e57f7fc2c727e0f1.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 308, + 504, + 352 + ], + "lines": [ + { + "bbox": [ + 104, + 308, + 504, + 352 + ], + "spans": [ + { + "bbox": [ + 104, + 308, + 504, + 352 + ], + "type": "text", + "content": "Figure 5: Performance of VideoChat-T with varying input frame numbers. As the number of input frames increases, the performance of VideoChat-T shows an upward trend in both long video QA and temporal grounding tasks. Due to the over low temporal grounding performance of VideoChat2, its curve is omitted." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 247, + 216, + 364, + 300 + ], + "blocks": [ + { + "bbox": [ + 247, + 216, + 364, + 300 + ], + "lines": [ + { + "bbox": [ + 247, + 216, + 364, + 300 + ], + "spans": [ + { + "bbox": [ + 247, + 216, + 364, + 300 + ], + "type": "image", + "image_path": "312cd59dc4ad40f3e5a575aef589070bd88564e1070e8538a70ffe6a79829bd9.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 368, + 217, + 484, + 300 + ], + "blocks": [ + { + "bbox": [ + 368, + 217, + 484, + 300 + ], + "lines": [ + { + "bbox": [ + 368, + 217, + 484, + 300 + ], + "spans": [ + { + "bbox": [ + 368, + 217, + 484, + 300 + ], + "type": "image", + "image_path": "af3929d838db2730e5cd5fd741a457c44f1e3db2810c9a3c5d45fa025cdb22aa.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 363, + 504, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 363, + 504, + 407 + ], + "spans": [ + { + "bbox": [ + 104, + 363, + 504, + 407 + ], + "type": "text", + "content": "gressively added subsets of TimePro, not only did the model's performance on temporal grounding tasks show a stable and significant improvement, but we also observed a noticeable upward trend in performance on long video benchmarks. This to some extent corroborates that temporal grounding centric tasks have a positive impact on long video understanding." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 413, + 506, + 513 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 413, + 506, + 513 + ], + "spans": [ + { + "bbox": [ + 104, + 413, + 506, + 513 + ], + "type": "text", + "content": "Impact of frames. To investigate the impact of input frame count on model performance, we conducted an ablation study. Figure 5 illustrates the scalability of our model's performance with respect to input frame count. VideoChat-T demonstrates good stability as the input frame count varies, and its performance in long video QA and temporal grounding tasks improves with an increase in frame count. In contrast, the baseline model, VideoChat2, exhibited catastrophic performance degradation when the frame count was significantly increased. As the input frame count increases, the number of visual encoding tokens grows linearly. Excessive visual token input imposes an additional computational burden on the temporal modeling of the LLM. TimeSuite mitigates this by employing Token Shuffle to reduce the number of tokens, ensuring the stable operation of the model." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 528, + 196, + 540 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 528, + 196, + 540 + ], + "spans": [ + { + "bbox": [ + 105, + 528, + 196, + 540 + ], + "type": "text", + "content": "5 CONCLUSION" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 552, + 506, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 552, + 506, + 708 + ], + "spans": [ + { + "bbox": [ + 104, + 552, + 506, + 708 + ], + "type": "text", + "content": "In this paper, we have introduced TimeSuite, a collection of new designs from perspectives of efficient architecture, high-quality data, and new instruction tuning task, to achieve long video understanding by fine-tuning short video MLLMs with temporal grounding-centric data. We address the computational challenges of processing long videos by introducing token shuffle to compress visual tokens. We also propose the TAPE for adaptive position encoding, enhancing the temporal awareness of visual representation. Additionally, our designed Temporal Grounded Caption training task ensures MLLMs to build correspondence between grounded segments and detailed caption, while the TimePro dataset provides comprehensive instruction tuning data for learning more effective temporal perception capability. Experimental results demonstrate that VideoChat-T significantly improves long video understanding, with notable performance gains on Egoschema and VideoMME. Furthermore, VideoChat-T exhibits strong zero-shot temporal grounding capabilities, significantly outperforming the previous MLLMs on temporal grounding. Overall, our TimeSuite provides effective designs for short MLLMs to enhance their performance on temporal grounding and long video QA. We hope our TimeSuite could yield some insights on designing long video MLLMs." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 221, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 221, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 221, + 94 + ], + "type": "text", + "content": "ACKNOWLEDGEMENT" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 105, + 506, + 150 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 105, + 506, + 150 + ], + "spans": [ + { + "bbox": [ + 105, + 105, + 506, + 150 + ], + "type": "text", + "content": "This work is supported by the National Key R&D Program of China (No. 2022ZD0160900), the Fundamental Research Funds for the Central Universities (No. 020214380119), Jiangsu Frontier Technology Research and Development Program (No. BF2024076), and the Collaborative Innovation Center of Novel Software Technology and Industrialization." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 165, + 176, + 177 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 165, + 176, + 177 + ], + "spans": [ + { + "bbox": [ + 106, + 165, + 176, + 177 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 183, + 505, + 731 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 105, + 183, + 505, + 218 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 183, + 505, + 218 + ], + "spans": [ + { + "bbox": [ + 105, + 183, + 505, + 218 + ], + "type": "text", + "content": "Lisa Anne Hendricks, Oliver Wang, Eli Shechtman, Josef Sivic, Trevor Darrell, and Bryan Russell. Localizing moments in video with natural language. In Proceedings of the IEEE international conference on computer vision, pp. 5803-5812, 2017." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 224, + 504, + 257 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 224, + 504, + 257 + ], + "spans": [ + { + "bbox": [ + 107, + 224, + 504, + 257 + ], + "type": "text", + "content": "Jinze Bai, Shuai Bai, Shusheng Yang, Shijie Wang, Sinan Tan, Peng Wang, Junyang Lin, Chang Zhou, and Jingren Zhou. Qwen-vl: A frontier large vision-language model with versatile abilities. arXiv preprint arXiv:2308.12966, 2023." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 262, + 504, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 262, + 504, + 297 + ], + "spans": [ + { + "bbox": [ + 105, + 262, + 504, + 297 + ], + "type": "text", + "content": "Lin Chen, Xin Wei, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Bin Lin, Zhenyu Tang, et al. Sharegpt4video: Improving video understanding and generation with better captions. arXiv preprint arXiv:2406.04325, 2024." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 301, + 504, + 347 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 301, + 504, + 347 + ], + "spans": [ + { + "bbox": [ + 105, + 301, + 504, + 347 + ], + "type": "text", + "content": "Wei-Lin Chiang, Zhuohan Li, Zi Lin, Ying Sheng, Zhanghao Wu, Hao Zhang, Lianmin Zheng, Siyuan Zhuang, Yonghao Zhuang, Joseph E Gonzalez, et al. Vicuna: An open-source chatbot impressing gpt-4 with " + }, + { + "bbox": [ + 105, + 301, + 504, + 347 + ], + "type": "inline_equation", + "content": "90\\%" + }, + { + "bbox": [ + 105, + 301, + 504, + 347 + ], + "type": "text", + "content": " chatgpt quality. See https://vicuna.lmsys.org (accessed 14 April 2023), 2(3):6, 2023." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 353, + 504, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 353, + 504, + 376 + ], + "spans": [ + { + "bbox": [ + 107, + 353, + 504, + 376 + ], + "type": "text", + "content": "Xiangxiang Chu, Zhi Tian, Bo Zhang, Xinlong Wang, and Chunhua Shen. Conditional positional encodings for vision transformers. arXiv preprint arXiv:2102.10882, 2021." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 381, + 504, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 381, + 504, + 415 + ], + "spans": [ + { + "bbox": [ + 107, + 381, + 504, + 415 + ], + "type": "text", + "content": "Yue Fan, Xiaojian Ma, Rujie Wu, Yuntao Du, Jiaqi Li, Zhi Gao, and Qing Li. Videoagent: A memory-augmented multimodal agent for video understanding. arXiv preprint arXiv:2403.11481, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 421, + 504, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 421, + 504, + 456 + ], + "spans": [ + { + "bbox": [ + 107, + 421, + 504, + 456 + ], + "type": "text", + "content": "Chaoyou Fu, Yuhan Dai, Yondong Luo, Lei Li, Shuhuai Ren, Renrui Zhang, Zihan Wang, Chenyu Zhou, Yunhang Shen, Mengdan Zhang, et al. Video-mme: The first-ever comprehensive evaluation benchmark of multi-modal llms in video analysis. arXiv preprint arXiv:2405.21075, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 460, + 504, + 495 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 460, + 504, + 495 + ], + "spans": [ + { + "bbox": [ + 105, + 460, + 504, + 495 + ], + "type": "text", + "content": "Jiyang Gao, Chen Sun, Zhenheng Yang, and Ram Nevatia. Tall: Temporal activity localization via language query. In Proceedings of the IEEE international conference on computer vision, pp. 5267-5275, 2017." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 500, + 504, + 535 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 500, + 504, + 535 + ], + "spans": [ + { + "bbox": [ + 107, + 500, + 504, + 535 + ], + "type": "text", + "content": "Siavash Golkar, Mariel Pettee, Michael Eickenberg, Alberto Bietti, Miles Cranmer, Geraud Krawezik, Francois Lanusse, Michael McCabe, Ruben Ohana, Liam Parker, et al. xval: A continuous number encoding for large language models. arXiv preprint arXiv:2310.02989, 2023." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 540, + 504, + 574 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 540, + 504, + 574 + ], + "spans": [ + { + "bbox": [ + 107, + 540, + 504, + 574 + ], + "type": "text", + "content": "Michael Gygli, Helmut Grabner, Hayko Riemenschneider, and Luc Van Gool. Creating summaries from user videos. In Computer Vision-ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part VII 13, pp. 505-520. Springer, 2014." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 579, + 504, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 579, + 504, + 613 + ], + "spans": [ + { + "bbox": [ + 107, + 579, + 504, + 613 + ], + "type": "text", + "content": "Bin Huang, Xin Wang, Hong Chen, Zihan Song, and Wenwu Zhu. Vtimellm: Empower llm to grasp video moments. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 14271-14280, 2024a." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 619, + 504, + 653 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 619, + 504, + 653 + ], + "spans": [ + { + "bbox": [ + 107, + 619, + 504, + 653 + ], + "type": "text", + "content": "De-An Huang, Shijia Liao, Subhashree Radhakrishnan, Hongxu Yin, Pavlo Molchanov, Zhiding Yu, and Jan Kautz. Lita: Language instructed temporal-localization assistant. arXiv preprint arXiv:2403.19046, 2024b." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 658, + 504, + 682 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 658, + 504, + 682 + ], + "spans": [ + { + "bbox": [ + 107, + 658, + 504, + 682 + ], + "type": "text", + "content": "Gabriel Huang, Bo Pang, Zhenhai Zhu, Clara Rivera, and Radu Soricut. Multimodal pretraining for dense video captioning. arXiv preprint arXiv:2011.11760, 2020." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 107, + 687, + 504, + 731 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 687, + 504, + 731 + ], + "spans": [ + { + "bbox": [ + 107, + 687, + 504, + 731 + ], + "type": "text", + "content": "Lei Huang, Weijiang Yu, Weitao Ma, Weihong Zhong, Zhangyin Feng, Haotian Wang, Qianglong Chen, Weihua Peng, Xiaocheng Feng, Bing Qin, et al. A survey on hallucination in large language models: Principles, taxonomy, challenges, and open questions. arXiv preprint arXiv:2311.05232, 2023." 
+ } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 732 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 505, + 116 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 505, + 116 + ], + "type": "text", + "content": "Ziwei Ji, Nayeon Lee, Rita Frieske, Tiezheng Yu, Dan Su, Yan Xu, Etsuko Ishii, Ye Jin Bang, Andrea Madotto, and Pascale Fung. Survey of hallucination in natural language generation. ACM Computing Surveys, 55(12):1-38, 2023." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 122, + 505, + 157 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 122, + 505, + 157 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 505, + 157 + ], + "type": "text", + "content": "Albert Q Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lucile Saulnier, et al. Mistral 7b. arXiv preprint arXiv:2310.06825, 2023." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 163, + 505, + 208 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 163, + 505, + 208 + ], + "spans": [ + { + "bbox": [ + 105, + 163, + 505, + 208 + ], + "type": "text", + "content": "Peng Jin, Ryuichi Takanobu, Wancai Zhang, Xiaochun Cao, and Li Yuan. Chat-univi: Unified visual representation empowers large language models with image and video understanding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 13700-13710, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 214, + 505, + 248 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 214, + 505, + 248 + ], + "spans": [ + { + "bbox": [ + 105, + 214, + 505, + 248 + ], + "type": "text", + "content": "Ranjay Krishna, Kenji Hata, Frederic Ren, Li Fei-Fei, and Juan Carlos Niebles. Dense-captioning events in videos. In Proceedings of the IEEE international conference on computer vision, pp. 706-715, 2017." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 255, + 505, + 279 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 255, + 505, + 279 + ], + "spans": [ + { + "bbox": [ + 105, + 255, + 505, + 279 + ], + "type": "text", + "content": "Jie Lei, Tamara L Berg, and Mohit Bansal. Detecting moments and highlights in videos via natural language queries. Advances in Neural Information Processing Systems, 34:11846-11858, 2021a." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 285, + 505, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 285, + 505, + 308 + ], + "spans": [ + { + "bbox": [ + 105, + 285, + 505, + 308 + ], + "type": "text", + "content": "Jie Lei, Tamara L Berg, and Mohit Bansal. Detecting moments and highlights in videos via natural language queries. Advances in Neural Information Processing Systems, 34:11846-11858, 2021b." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 314, + 505, + 348 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 314, + 505, + 348 + ], + "spans": [ + { + "bbox": [ + 105, + 314, + 505, + 348 + ], + "type": "text", + "content": "Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Peiyuan Zhang, Yanwei Li, Ziwei Liu, et al. Llava-onevision: Easy visual task transfer. arXiv preprint arXiv:2408.03326, 2024a." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 354, + 505, + 389 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 354, + 505, + 389 + ], + "spans": [ + { + "bbox": [ + 105, + 354, + 505, + 389 + ], + "type": "text", + "content": "Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. In International conference on machine learning, pp. 19730–19742. PMLR, 2023a." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 395, + 505, + 429 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 395, + 505, + 429 + ], + "spans": [ + { + "bbox": [ + 105, + 395, + 505, + 429 + ], + "type": "text", + "content": "KunChang Li, Yinan He, Yi Wang, Yizhuo Li, Wenhai Wang, Ping Luo, Yali Wang, Limin Wang, and Yu Qiao. Videochat: Chat-centric video understanding. arXiv preprint arXiv:2305.06355, 2023b." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 436, + 505, + 471 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 436, + 505, + 471 + ], + "spans": [ + { + "bbox": [ + 105, + 436, + 505, + 471 + ], + "type": "text", + "content": "Kunchang Li, Yali Wang, Yizhuo Li, Yi Wang, Yinan He, Limin Wang, and Yu Qiao. Unmasked teacher: Towards training-efficient video foundation models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 19948-19960, 2023c." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 476, + 505, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 476, + 505, + 521 + ], + "spans": [ + { + "bbox": [ + 105, + 476, + 505, + 521 + ], + "type": "text", + "content": "Kunchang Li, Yali Wang, Yinan He, Yizhuo Li, Yi Wang, Yi Liu, Zun Wang, Jilan Xu, Guo Chen, Ping Luo, et al. Mvbench: A comprehensive multi-modal video understanding benchmark. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 22195-22206, 2024b." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 528, + 505, + 552 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 528, + 505, + 552 + ], + "spans": [ + { + "bbox": [ + 105, + 528, + 505, + 552 + ], + "type": "text", + "content": "Yanwei Li, Chengyao Wang, and Jiaya Jia. Llama-vid: An image is worth 2 tokens in large language models. arXiv preprint arXiv:2311.17043, 2023d." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 557, + 505, + 591 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 557, + 505, + 591 + ], + "spans": [ + { + "bbox": [ + 105, + 557, + 505, + 591 + ], + "type": "text", + "content": "Zhaowei Li, Qi Xu, Dong Zhang, Hang Song, Yiqing Cai, Qi Qi, Ran Zhou, Junting Pan, Zefeng Li, Van Tu Vu, et al. Groundinggpt: Language enhanced multi-modal grounding model. CoRR, 2024c." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 597, + 505, + 622 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 597, + 505, + 622 + ], + "spans": [ + { + "bbox": [ + 105, + 597, + 505, + 622 + ], + "type": "text", + "content": "Bin Lin, Bin Zhu, Yang Ye, Munan Ning, Peng Jin, and Li Yuan. Video-llava: Learning united visual representation by alignment before projection. arXiv preprint arXiv:2311.10122, 2023a." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 628, + 505, + 672 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 628, + 505, + 672 + ], + "spans": [ + { + "bbox": [ + 105, + 628, + 505, + 672 + ], + "type": "text", + "content": "Kevin Qinghong Lin, Pengchuan Zhang, Joya Chen, Shraman Pramanick, Difei Gao, Alex Jinpeng Wang, Rui Yan, and Mike Zheng Shou. Univtg: Towards unified video-language temporal grounding. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 2794-2804, 2023b." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 679, + 505, + 703 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 679, + 505, + 703 + ], + "spans": [ + { + "bbox": [ + 105, + 679, + 505, + 703 + ], + "type": "text", + "content": "Hao Liu, Wilson Yan, Matei Zaharia, and Pieter Abbeel. World model on million-length video and language with ringattention. arXiv preprint arXiv:2402.08268, 2024a." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 708, + 505, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 708, + 505, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 708, + 505, + 732 + ], + "type": "text", + "content": "Ruyang Liu, Chen Li, Haoran Tang, Yixiao Ge, Ying Shan, and Ge Li. St-llm: Large language models are effective temporal learners. arXiv preprint arXiv:2404.00308, 2024b." + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 732 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 107, + 81, + 505, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 505, + 116 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 505, + 116 + ], + "type": "text", + "content": "Muhammad Maaz, Hanoona Rasheed, Salman Khan, and Fahad Shahbaz Khan. 
Video-chatgpt: Towards detailed video understanding via large vision and language models. arXiv preprint arXiv:2306.05424, 2023." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 122, + 504, + 157 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 122, + 504, + 157 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 504, + 157 + ], + "type": "text", + "content": "Karttikeya Mangalam, Raiymbek Akshulakov, and Jitendra Malik. Egoschema: A diagnostic benchmark for very long-form video language understanding. Advances in Neural Information Processing Systems, 36, 2023." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 163, + 504, + 198 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 163, + 504, + 198 + ], + "spans": [ + { + "bbox": [ + 105, + 163, + 504, + 198 + ], + "type": "text", + "content": "WonJun Moon, Sangeek Hyun, SuBeen Lee, and Jae-Pil Heo. Correlation-guided query-dependency calibration in video representation learning for temporal grounding. arXiv preprint arXiv:2311.08835, 2023a." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 205, + 504, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 205, + 504, + 239 + ], + "spans": [ + { + "bbox": [ + 105, + 205, + 504, + 239 + ], + "type": "text", + "content": "WonJun Moon, Sangeek Hyun, SangUk Park, Dongchan Park, and Jae-Pil Heo. Query-dependent video representation for moment retrieval and highlight detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 23023-23033, 2023b." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 246, + 504, + 291 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 246, + 504, + 291 + ], + "spans": [ + { + "bbox": [ + 105, + 246, + 504, + 291 + ], + "type": "text", + "content": "Andreea-Maria Oncescu, Joao F Henriques, Yang Liu, Andrew Zisserman, and Samuel Albanie. Queryd: A video dataset with high-quality text and audio narrations. In ICASSP 2021-2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 2265-2269. IEEE, 2021." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 298, + 504, + 333 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 298, + 504, + 333 + ], + "spans": [ + { + "bbox": [ + 105, + 298, + 504, + 333 + ], + "type": "text", + "content": "Mengxue Qu, Xiaodong Chen, Wu Liu, Alicia Li, and Yao Zhao. Chatvtg: Video temporal grounding via chat with video dialogue large language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 1847-1856, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 339, + 504, + 374 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 339, + 504, + 374 + ], + "spans": [ + { + "bbox": [ + 105, + 339, + 504, + 374 + ], + "type": "text", + "content": "Shuhuai Ren, Linli Yao, Shicheng Li, Xu Sun, and Lu Hou. Timechat: A time-sensitive multimodal large language model for long video understanding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 14313-14323, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 380, + 504, + 404 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 380, + 504, + 404 + ], + "spans": [ + { + "bbox": [ + 105, + 380, + 504, + 404 + ], + "type": "text", + "content": "Share. 
Sharegemini: Scaling up video caption data for multimodal large language models, June 2024. URL https://github.com/Share14/ShareGemini." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 410, + 504, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 410, + 504, + 456 + ], + "spans": [ + { + "bbox": [ + 105, + 410, + 504, + 456 + ], + "type": "text", + "content": "Enxin Song, Wenhao Chai, Guanhong Wang, Yucheng Zhang, Haoyang Zhou, Feiyang Wu, Haozhe Chi, Xun Guo, Tian Ye, Yanting Zhang, et al. Moviechat: From dense token to sparse memory for long video understanding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 18221-18232, 2024a." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 462, + 504, + 496 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 462, + 504, + 496 + ], + "spans": [ + { + "bbox": [ + 105, + 462, + 504, + 496 + ], + "type": "text", + "content": "Enxin Song, Wenhao Chai, Tian Ye, Jenq-Neng Hwang, Xi Li, and Gaoang Wang. Moviechat+: Question-aware sparse memory for long video question answering. arXiv preprint arXiv:2404.17176, 2024b." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 503, + 504, + 538 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 503, + 504, + 538 + ], + "spans": [ + { + "bbox": [ + 105, + 503, + 504, + 538 + ], + "type": "text", + "content": "Yale Song, Jordi Vallmitjana, Amanda Stent, and Alejandro Jaime. Tvsum: Summarizing web videos using titles. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 5179-5187, 2015." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 544, + 504, + 579 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 544, + 504, + 579 + ], + "spans": [ + { + "bbox": [ + 105, + 544, + 504, + 579 + ], + "type": "text", + "content": "Yansong Tang, Dajun Ding, Yongming Rao, Yu Zheng, Danyang Zhang, Lili Zhao, Jiwen Lu, and Jie Zhou. Coin: A large-scale dataset for comprehensive instructional video analysis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 1207-1216, 2019." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 586, + 504, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 586, + 504, + 620 + ], + "spans": [ + { + "bbox": [ + 105, + 586, + 504, + 620 + ], + "type": "text", + "content": "Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, et al. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971, 2023." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 627, + 504, + 650 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 627, + 504, + 650 + ], + "spans": [ + { + "bbox": [ + 105, + 627, + 504, + 650 + ], + "type": "text", + "content": "Gido M Van de Ven, Tinne Tuytelaars, and Andreas S Tolias. Three types of incremental learning. Nature Machine Intelligence, 4(12):1185-1197, 2022." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 656, + 504, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 656, + 504, + 690 + ], + "spans": [ + { + "bbox": [ + 105, + 656, + 504, + 690 + ], + "type": "text", + "content": "Limin Wang, Yuanjun Xiong, Zhe Wang, Yu Qiao, Dahua Lin, Xiaou Tang, and Luc Van Gool. Temporal segment networks for action recognition in videos. IEEE Trans. Pattern Anal. Mach. Intell., 41(11):2740-2755, 2019." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 698, + 504, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 698, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 504, + 732 + ], + "type": "text", + "content": "Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024a." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 732 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 107, + 81, + 505, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 505, + 116 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 505, + 116 + ], + "type": "text", + "content": "Wenhai Wang, Zhe Chen, Xiaokang Chen, Jiannan Wu, Xizhou Zhu, Gang Zeng, Ping Luo, Tong Lu, Jie Zhou, Yu Qiao, et al. Visionllm: Large language model is also an open-ended decoder for vision-centric tasks. Advances in Neural Information Processing Systems, 36, 2024b." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 122, + 504, + 146 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 122, + 504, + 146 + ], + "spans": [ + { + "bbox": [ + 107, + 122, + 504, + 146 + ], + "type": "text", + "content": "Xiaohan Wang, Yuhui Zhang, Orr Zohar, and Serena Yeung-Levy. Videoagent: Long-form video understanding with large language model as agent. arXiv preprint arXiv:2403.10517, 2024c." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 152, + 504, + 185 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 152, + 504, + 185 + ], + "spans": [ + { + "bbox": [ + 106, + 152, + 504, + 185 + ], + "type": "text", + "content": "Xidong Wang, Dingjie Song, Shunian Chen, Chen Zhang, and Benyou Wang. Longllava: Scaling multi-modal llms to 1000 images efficiently via hybrid architecture. arXiv preprint arXiv:2409.02889, 2024d." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 192, + 504, + 225 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 192, + 504, + 225 + ], + "spans": [ + { + "bbox": [ + 106, + 192, + 504, + 225 + ], + "type": "text", + "content": "Yi Wang, Kunchang Li, Xinhao Li, Jiashuo Yu, Yinan He, Guo Chen, Baoqi Pei, Rongkun Zheng, Jilan Xu, Zun Wang, et al. Internvideo2: Scaling video foundation models for multimodal video understanding. arXiv preprint arXiv:2403.15377, 2024e." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 232, + 504, + 266 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 232, + 504, + 266 + ], + "spans": [ + { + "bbox": [ + 105, + 232, + 504, + 266 + ], + "type": "text", + "content": "Yueqian Wang, Xiaojun Meng, Jianxin Liang, Yuxuan Wang, Qun Liu, and Dongyan Zhao. Hawkeye: Training video-text llms for grounding text in videos. arXiv preprint arXiv:2403.10228, 2024f." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 272, + 504, + 318 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 272, + 504, + 318 + ], + "spans": [ + { + "bbox": [ + 106, + 272, + 504, + 318 + ], + "type": "text", + "content": "Yuxuan Wang, Yueqian Wang, Pengfei Wu, Jianxin Liang, Dongyan Zhao, Yang Liu, and Zilong Zheng. Efficient temporal extrapolation of multimodal large language models with temporal grounding bridge. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pp. 9972-9987, 2024g." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 323, + 504, + 357 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 323, + 504, + 357 + ], + "spans": [ + { + "bbox": [ + 105, + 323, + 504, + 357 + ], + "type": "text", + "content": "Ziyang Wang, Shoubin Yu, Elias Stengel-Eskin, Jaehong Yoon, Feng Cheng, Gedas Bertasius, and Mohit Bansal. Videotree: Adaptive tree-based video representation for llm reasoning on long videos. arXiv preprint arXiv:2405.19209, 2024h." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 364, + 504, + 408 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 364, + 504, + 408 + ], + "spans": [ + { + "bbox": [ + 105, + 364, + 504, + 408 + ], + "type": "text", + "content": "Jiannan Wu, Muyan Zhong, Sen Xing, Zeqiang Lai, Zhaoyang Liu, Wenhai Wang, Zhe Chen, Xizhou Zhu, Lewei Lu, Tong Lu, et al. Visionllm v2: An end-to-end generalist multimodal large language model for hundreds of vision-language tasks. arXiv preprint arXiv:2406.08394, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 415, + 504, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 415, + 504, + 449 + ], + "spans": [ + { + "bbox": [ + 105, + 415, + 504, + 449 + ], + "type": "text", + "content": "Junbin Xiao, Angela Yao, Yicong Li, and Tat-Seng Chua. Can i trust your answer? visually grounded video question answering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 13204-13214, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 456, + 504, + 490 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 456, + 504, + 490 + ], + "spans": [ + { + "bbox": [ + 105, + 456, + 504, + 490 + ], + "type": "text", + "content": "Fuzhao Xue, Yukang Chen, Dacheng Li, Qinghao Hu, Ligeng Zhu, Xiuyu Li, Yunhao Fang, Haotian Tang, Shang Yang, Zhijian Liu, et al. 
Longvila: Scaling long-context visual language models for long videos. arXiv preprint arXiv:2408.10188, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 496, + 504, + 531 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 496, + 504, + 531 + ], + "spans": [ + { + "bbox": [ + 105, + 496, + 504, + 531 + ], + "type": "text", + "content": "Shen Yan, Xuehan Xiong, Arsha Nagrani, Anurag Arnab, Zhonghao Wang, Weina Ge, David Ross, and Cordelia Schmid. Unloc: A unified framework for video localization tasks. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 13623-13633, 2023." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 536, + 504, + 571 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 536, + 504, + 571 + ], + "spans": [ + { + "bbox": [ + 105, + 536, + 504, + 571 + ], + "type": "text", + "content": "En Yu, Liang Zhao, Yana Wei, Jinrong Yang, Dongming Wu, Lingyu Kong, Haoran Wei, Tiancai Wang, Zheng Ge, Xiangyu Zhang, et al. Merlin: Empowering multimodal llms with foresight minds. arXiv preprint arXiv:2312.00589, 2023." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 576, + 504, + 611 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 576, + 504, + 611 + ], + "spans": [ + { + "bbox": [ + 105, + 576, + 504, + 611 + ], + "type": "text", + "content": "Abhay Zala, Jaemin Cho, Satwik Kottur, Xilun Chen, Barlas Oguz, Yashar Mehdad, and Mohit Bansal. Hierarchical video-moment retrieval and step-captioning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 23056-23065, 2023." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 616, + 504, + 662 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 616, + 504, + 662 + ], + "spans": [ + { + "bbox": [ + 105, + 616, + 504, + 662 + ], + "type": "text", + "content": "Rowan Zellers, Jiasen Lu, Ximing Lu, Youngjae Yu, Yanpeng Zhao, Mohammadreza Salehi, Aditya Kusupati, Jack Hessel, Ali Farhadi, and Yejin Choi. Merlot reserve: Neural script knowledge through vision and language and sound. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 16375-16387, 2022." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 669, + 504, + 703 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 669, + 504, + 703 + ], + "spans": [ + { + "bbox": [ + 105, + 669, + 504, + 703 + ], + "type": "text", + "content": "Xiangyu Zeng, Mingzhu Xu, Yijun Hu, Haoyu Tang, Yupeng Hu, and Liqiang Nie. Adaptive edge-aware semantic interaction network for salient object detection in optical remote sensing images. IEEE Transactions on Geoscience and Remote Sensing, 2023." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 708, + 504, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 708, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 708, + 504, + 732 + ], + "type": "text", + "content": "Yingsen Zeng, Yujie Zhong, Chengjian Feng, and Lin Ma. Unimd: Towards unifying moment retrieval and temporal action detection. arXiv preprint arXiv:2404.04933, 2024." 
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 504, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 504, + 106 + ], + "type": "text", + "content": "Hang Zhang, Xin Li, and Lidong Bing. Video-llama: An instruction-tuned audio-visual language model for video understanding. arXiv preprint arXiv:2306.02858, 2023." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 111, + 504, + 146 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 111, + 504, + 146 + ], + "spans": [ + { + "bbox": [ + 105, + 111, + 504, + 146 + ], + "type": "text", + "content": "Haoji Zhang, Yiqin Wang, Yansong Tang, Yong Liu, Jiashi Feng, Jifeng Dai, and Xiaojie Jin. Flash-vstream: Memory-based real-time understanding for long video streams. arXiv preprint arXiv:2406.08085, 2024a." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 151, + 504, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 151, + 504, + 185 + ], + "spans": [ + { + "bbox": [ + 105, + 151, + 504, + 185 + ], + "type": "text", + "content": "Peiyuan Zhang, Kaichen Zhang, Bo Li, Guangtao Zeng, Jingkang Yang, Yuanhan Zhang, Ziyue Wang, Haoran Tan, Chunyuan Li, and Ziwei Liu. Long context transfer from language to vision. arXiv preprint arXiv:2406.16852, 2024b." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 191, + 504, + 225 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 191, + 504, + 225 + ], + "spans": [ + { + "bbox": [ + 105, + 191, + 504, + 225 + ], + "type": "text", + "content": "Junjie Zhou, Yan Shu, Bo Zhao, Boya Wu, Shitao Xiao, Xi Yang, Yongping Xiong, Bo Zhang, Tiejun Huang, and Zheng Liu. Mlvu: A comprehensive benchmark for multi-task long video understanding. arXiv preprint arXiv:2406.04264, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 231, + 504, + 264 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 231, + 504, + 264 + ], + "spans": [ + { + "bbox": [ + 105, + 231, + 504, + 264 + ], + "type": "text", + "content": "Luowei Zhou, Chenliang Xu, and Jason Corso. Towards automatic learning of procedures from web instructional videos. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 32, 2018." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 285, + 272, + 297 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 285, + 272, + 297 + ], + "spans": [ + { + "bbox": [ + 105, + 285, + 272, + 297 + ], + "type": "text", + "content": "A IMPLEMENTATION OF TAPE" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 313, + 266, + 325 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 313, + 266, + 325 + ], + "spans": [ + { + "bbox": [ + 105, + 313, + 266, + 325 + ], + "type": "text", + "content": "Algorithm 1 PyTorch snippet of TAPE." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 123, + 328, + 225, + 341 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 328, + 225, + 341 + ], + "spans": [ + { + "bbox": [ + 123, + 328, + 225, + 341 + ], + "type": "text", + "content": "Initialize related package" + } + ] + } + ], + "index": 8 + }, + { + "type": "code", + "bbox": [ + 122, + 345, + 487, + 654 + ], + "blocks": [ + { + "bbox": [ + 122, + 345, + 487, + 654 + ], + "lines": [ + { + "bbox": [ + 122, + 345, + 487, + 654 + ], + "spans": [ + { + "bbox": [ + 122, + 345, + 487, + 654 + ], + "type": "text", + "content": "class TemporalAdapter(nnModule): def __init__(self, merge_len, clip_num, input_dim, mid_dim, output_dim, sample_rate): super().__init_() self.AvgPool = nn.AvgPool1d(merge_len, stride = merge_len) self.upsample = nn.UpSample(scale_factor = sample_rate) self_linear_input = nn.Linear(input_dim, mid_dim) self_linear_output = nn.Linear(mid_dim, output_dim) nn.init_constant_(self_linear_output_weight, 0) nn.init_constant_(self_linear_output.bias, 0) self.Downsample_Depthwise_Separable_Conv1 = nnSEQUENTIAL (nn.Conv1d(mid_dim, mid_dim, merge_len*2+1, stride=sample_rate, padding=merge_len, groups=mid_dim), nn.Conv1d(mid_dim, mid_dim, 1), TransposeLayerNorm(mid_dim), nn.GELU(), ) self.Downsample_Depthwise_Separable_Conv2 = nnSEQUENTIAL (nn.Conv1d(mid_dim, mid_dim, merge_len*2+1, stride=sample_rate, padding=merge_len, groups=mid_dim), nn.Conv1d(mid_dim, mid_dim, 1), TransposeLayerNorm(mid_dim), nn.GELU(), ) self.fc = nnSequential (nn.Conv1d(mid_dim, mid_dim, clip_num+1, stride=1, padding=clip_num//2), TransposeLayerNorm(mid_dim), nn.GELU(), ) self.Conv2 = nnSequential (nn.Conv1d(mid_dim, mid_dim, merge_len+1, stride=1, padding=merge_len//2, groups=mid_dim), nn.Conv1d(mid_dim, mid_dim, 1), TransposeLayerNorm(mid_dim), nn.GELU(), ) self.Conv1 = nnSequential (nn.Conv1d(mid_dim, mid_dim, merge_len+1, stride=1, padding=merge_len//2, groups=mid_dim), nn.Conv1d(mid_dim, mid_dim, 1), TransposeLayerNorm(mid_dim), nn.GELU(), ) def forward(self, input_tokens): time_ad = self(linear_input(input_tokens).transpose(1, 2) time_ad1 = self.AvgPool(time_ad) time_ad2 = self.Downsample_Depthwise_Separable_Conv1(time_adl) time_ad3 = self.Downsample_Depthwise_Separable_Conv2(time_ad2) time_ad3 = self.fc(time_ad3) time_ad2 = self.upsample(time_ad3) + time_ad2 time_ad2 = self.Conv2(time_ad2) time_ad1 = self.upsample(time_ad2) + time_ad1 time_ad1 = self.Conv1(time_ad1) time_ad2 = self.upsample(time_ad2) + time_ad2 time_ad1 = self.Conv1(time_ad1) time_ad2 = self.upsample(time_ad2) + time_ad1 return time_ad_out" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "code_body" + } + ], + "index": 9, + "sub_type": "code", + "guess_lang": "python" + }, + { + "bbox": [ + 104, + 677, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 677, 
+ 504, + 733 + ], + "type": "text", + "content": "Algorithm 1 details the implementation process of TAPE in code form. Specifically, the long video token sequence input_tokens is first compressed in the channel dimension by a linear layer to obtain time_ad, and the sequence length is compressed through a pooling layer. Next, we use a U-Net-like structure composed of one-dimensional depthwise separable convolutions to progressively down-sample the sequence, obtaining three one-dimensional temporal feature sequences with different" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 161 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 161 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 161 + ], + "type": "text", + "content": "time resolutions, namely time_ad1, time_ad2, and time_ad3. Subsequently, a convolution with a sufficiently long window is applied to the shortest temporal feature sequence time_ad3, using zero padding at both ends as anchors to encode the relative temporal position of each token in the sequence. Then, we progressively upsample the temporal feature sequences from short to long, using residual connections to preserve temporal features at different scales. Finally, the temporal feature sequence time_ad_out is restored to the same length as the video features after token shuffling and aligned in the channel dimension through a linear layer." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 178, + 274, + 190 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 178, + 274, + 190 + ], + "spans": [ + { + "bbox": [ + 105, + 178, + 274, + 190 + ], + "type": "text", + "content": "B INSTRUCTION-TUNING DATA" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 203, + 504, + 237 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 203, + 504, + 237 + ], + "spans": [ + { + "bbox": [ + 104, + 203, + 504, + 237 + ], + "type": "text", + "content": "We fine-tuned VideoChat-T using 432K data, which includes 349K instances from TimePro and 82K instances from normal data. All videos were sampled from existing open-source video datasets, with specific information about the relevant data provided in Table 6." + } + ] + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 148, + 249, + 464, + 521 + ], + "blocks": [ + { + "bbox": [ + 148, + 249, + 464, + 521 + ], + "lines": [ + { + "bbox": [ + 148, + 249, + 464, + 521 + ], + "spans": [ + { + "bbox": [ + 148, + 249, + 464, + 521 + ], + "type": "table", + "html": "
<table><tr><td>Set</td><td>Task</td><td>Source</td><td>Instance Num</td></tr>
<tr><td rowspan='15'>TimePro</td><td rowspan='3'>Temporal Video Grounding</td><td>DiDeMo</td><td>32,944</td></tr>
<tr><td>QuerYD</td><td>14,602</td></tr>
<tr><td>HiREST-grounding</td><td>459</td></tr>
<tr><td rowspan='3'>Dense Video Captioning</td><td>ActivityNet-Captions</td><td>10,009</td></tr>
<tr><td>ViTT</td><td>5,086</td></tr>
<tr><td>YouCook2</td><td>8,700</td></tr>
<tr><td rowspan='2'>Video Summarization</td><td>TVSum</td><td>50</td></tr>
<tr><td>SumMe</td><td>25</td></tr>
<tr><td rowspan='2'>Step Localization and Captioning</td><td>COIN</td><td>9,026</td></tr>
<tr><td>HiREST-step</td><td>459</td></tr>
<tr><td>Transcribed Speech Generation</td><td>YT-Temporal</td><td>31,190</td></tr>
<tr><td>Reasoning Temporal Localization</td><td>ActivityNet-RTL</td><td>33,557</td></tr>
<tr><td>Multi-format Temporal Grounding</td><td>InternVid-VTime</td><td>100,000</td></tr>
<tr><td>Highlight Detection</td><td>ActivityNet-HL</td><td>10,340</td></tr>
<tr><td>Temporal Grounded Caption</td><td>CosMo-TGC</td><td>93,118</td></tr>
<tr><td rowspan='6'>Normal</td><td rowspan='2'>Conversation</td><td>VideoChatGPT</td><td>13,303</td></tr>
<tr><td>VideoChat</td><td>13,884</td></tr>
<tr><td rowspan='2'>Video QA</td><td>EgoQA</td><td>7,813</td></tr>
<tr><td>MovieChat-QA</td><td>808</td></tr>
<tr><td>Reasoning</td><td>STAR</td><td>45,731</td></tr>
<tr><td>Caption</td><td>MovieChat-Caption</td><td>808</td></tr>
</table>
", + "image_path": "1b9e6ef83ab9afc63694e00f5519b32bd5204559a5ab4e39d51403d8be6c638c.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 104, + 529, + 504, + 563 + ], + "lines": [ + { + "bbox": [ + 104, + 529, + 504, + 563 + ], + "spans": [ + { + "bbox": [ + 104, + 529, + 504, + 563 + ], + "type": "text", + "content": "Table 6: The complete instruction fine-tuning data used for training. We utilized a total of approximately 432K data points, which can be divided into 349K instances of TimePro and 82K instances of regular video data, covering 13 tasks across 21 datasets." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 577, + 504, + 655 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 577, + 504, + 655 + ], + "spans": [ + { + "bbox": [ + 104, + 577, + 504, + 655 + ], + "type": "text", + "content": "We evaluate the quality of the data from three perspectives: diversity, length, and difficulty. We strive to include different datasets for various tasks, and the distribution of videos in the datasets is as broad as possible. The length of the videos should be controlled within an appropriate range, as excessively long or short videos may pose challenges for training. Each query should clearly describe the video content of the target time segment and avoid corresponding to multiple time segments in the video. Based on these principles, we have screened and integrated existing high-quality datasets, which significantly contribute to enhancing the model's temporal awareness capabilities." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 659, + 504, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 659, + 504, + 704 + ], + "spans": [ + { + "bbox": [ + 104, + 659, + 504, + 704 + ], + "type": "text", + "content": "TimePro encompasses a series of open-source temporal grounding datasets that we have integrated, cleaned, and refined, such as TimeIT (Ren et al., 2024), ANet-RTL (Huang et al., 2024b), and InternVid-VTime (Huang et al., 2024a). These high-quality open-source datasets have been experimentally validated by us. We also added two new self-made datasets, ANet-HL and CosMo-TGC." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": "Temporal Video Grounding. This task involves providing a natural language query and requires outputting the corresponding video's start and end times. 
The datasets include DiDeMo (Anne Hen" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 620 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "content": "dricks et al., 2017), QuerYD (Oncescu et al., 2021), and HiREST-grounding (Zala et al., 2023), aiming to achieve precise temporal localization during user interaction with natural language." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 110, + 506, + 166 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 110, + 506, + 166 + ], + "spans": [ + { + "bbox": [ + 104, + 110, + 506, + 166 + ], + "type": "text", + "content": "Dense Video Captioning. This task requires the model to detect a series of events occurring in a given video and output the corresponding timestamps and coarse-grained descriptions. The datasets for this part include ActivityNet-Captions (Krishna et al., 2017), ViTT (Huang et al., 2020), and YouCook2 (Zhou et al., 2018), which help the model learn the temporal relationships between different events within the video." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 171, + 506, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 171, + 506, + 228 + ], + "spans": [ + { + "bbox": [ + 104, + 171, + 506, + 228 + ], + "type": "text", + "content": "Video Summarization. The goal of this task is not to summarize at the semantic level of natural language, but to determine a set of compressed frames or clips in the form of timestamps, representing the most informative content in a given video. Our datasets include TVSum (Song et al., 2015) and SumMe (Gygli et al., 2014), which effectively combine the model's temporal perception capabilities with its semantic content inference abilities." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 232, + 506, + 277 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 232, + 506, + 277 + ], + "spans": [ + { + "bbox": [ + 104, + 232, + 506, + 277 + ], + "type": "text", + "content": "Step Localization and Captioning. This task differs from dense video captioning as it is designed to segment and describe the important steps within a long video. We have integrated two datasets, COIN (Tang et al., 2019) and HiREST-step (Zala et al., 2023), which can help the model learn the procedural temporal logic relationships of different steps within a single event."
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 281, + 506, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 281, + 506, + 326 + ], + "spans": [ + { + "bbox": [ + 104, + 281, + 506, + 326 + ], + "type": "text", + "content": "Transcribed Speech Generation. The purpose of this task is to predict speech content and its corresponding start and end timestamps based on visual signals in the video. Including the YT-Temporal (Zellers et al., 2022) dataset, this task can be viewed as a weakly supervised event localization and description task." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 331, + 506, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 331, + 506, + 376 + ], + "spans": [ + { + "bbox": [ + 104, + 331, + 506, + 376 + ], + "type": "text", + "content": "Reasoning Temporal Localization. The answers to the questions in this task include both timestamps and explanations. We used the ANet-RTL (Huang et al., 2024b) dataset as training data for this task. By combining temporal localization and reasoning, we can more specifically enhance the model's temporal perception capabilities." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 380, + 506, + 427 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 380, + 506, + 427 + ], + "spans": [ + { + "bbox": [ + 104, + 380, + 506, + 427 + ], + "type": "text", + "content": "Multi-format Temporal Grounding. This task includes both single-turn and multi-turn dialogues, with a variety of question types. We use the InternVid-VTime (Huang et al., 2024a) dataset for training this task. The broader range of task types and more diverse output formats can effectively enhance the model's temporal generalization capabilities." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 430, + 506, + 509 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 430, + 506, + 509 + ], + "spans": [ + { + "bbox": [ + 104, + 430, + 506, + 509 + ], + "type": "text", + "content": "Highlight Detection. Unlike video summarization, this task identifies only the most salient moments of a video in response to a natural language query, without covering the entire scope of the original video (Lei et al., 2021a). We used a custom dataset, ANet-HL, derived from temporal localization data. We extract video segments between the start and end times of the target's appearance and use CLIP to calculate the similarity between each frame's scene and the target. This is converted into discrete saliency levels ranging from 1 to 5, at intervals of 0.5. This task effectively enhances the model's temporal perception capabilities for specific events." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 514, + 506, + 582 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 514, + 506, + 582 + ], + "spans": [ + { + "bbox": [ + 104, + 514, + 506, + 582 + ], + "type": "text", + "content": "Temporal Grounded Caption. This task involves using scene titles as queries, requiring the model to output both the time segments when the scenes appear and the fine-grained subtitles for those segments. We used our custom dataset, CosMo-TGC. This task format, which combines temporal localization and semantic understanding, can effectively prevent large language models from focusing on irrelevant video segments, thereby improving the quality of the model's responses to questions." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 586, + 504, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 586, + 504, + 620 + ], + "spans": [ + { + "bbox": [ + 104, + 586, + 504, + 620 + ], + "type": "text", + "content": "We also used normal data comprising four tasks and six different data sources. These general data help prevent the model from overfitting to temporal grounding-related tasks during training, thereby preserving the model's original capabilities." + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 105, + 639, + 282, + 651 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 639, + 282, + 651 + ], + "spans": [ + { + "bbox": [ + 105, + 639, + 282, + 651 + ], + "type": "text", + "content": "C COMPUTATIONAL EFFICIENCY" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 665, + 506, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 665, + 506, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 665, + 506, + 734 + ], + "type": "text", + "content": "By applying Token Shuffle, we further reduced the computational cost of VideoChat-T, giving it a significant computational advantage over high-performance models like LLaVA-OneVision (Li et al., 2024a) and Qwen2-VL (Wang et al., 2024a). Under the same settings, VideoChat-T uses only 3 tokens per frame, with flops consumption at just " + }, + { + "bbox": [ + 104, + 665, + 506, + 734 + ], + "type": "inline_equation", + "content": "5.1\\%" + }, + { + "bbox": [ + 104, + 665, + 506, + 734 + ], + "type": "text", + "content": " of LLaVA-OneVision. Its inference time on single A100 is only 0.63 seconds, reaching real-time response levels, making it highly suitable for applications requiring rapid response, such as online video understanding." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 107, + 79, + 504, + 119 + ], + "blocks": [ + { + "bbox": [ + 107, + 79, + 504, + 119 + ], + "lines": [ + { + "bbox": [ + 107, + 79, + 504, + 119 + ], + "spans": [ + { + "bbox": [ + 107, + 79, + 504, + 119 + ], + "type": "table", + "html": "
<table><tr><td>Method</td><td>Tokens per frame</td><td>FLOPs (128 frames)</td><td>Inference Time (128 frames, single A100 GPU)</td><td>Charades-STA IoU@0.5</td><td>QVHighlights mAP</td><td>MVBench Avg</td><td>Egoschema Full</td><td>VideoMME Vision</td></tr>
<tr><td>Qwen2-VL (Wang et al., 2024a)</td><td>138</td><td>929.8 T</td><td>Out Of Memory</td><td>15.0</td><td>13.0</td><td>67.0</td><td>66.7</td><td>63.3</td></tr>
<tr><td>LLaVA-OneVision (Li et al., 2024a)</td><td>196</td><td>693.7 T</td><td>4.95 s</td><td>7.3</td><td>14.98</td><td>56.7</td><td>60.1</td><td>58.2</td></tr>
<tr><td>VideoChat-T (Ours)</td><td>3</td><td>35.5 T</td><td>0.63 s</td><td>48.7</td><td>26.5</td><td>59.9</td><td>60.0</td><td>46.3</td></tr>
</table>
", + "image_path": "caee47541cc17bb9a6d7f948345ff67c964770d537438e21fa3a86a3d156a5c2.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 182, + 504, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 182, + 504, + 228 + ], + "spans": [ + { + "bbox": [ + 104, + 182, + 504, + 228 + ], + "type": "text", + "content": "In terms of performance, VideoChat-T significantly outperforms LLaVA-OneVision in temporal grounding tasks. It has a slight advantage on MVBench; both perform comparably on Egoschema; but VideoChat-T performs worse on VideoMME. Given the substantial savings in computational resources with VideoChat-T, we consider the disadvantages on some datasets to be acceptable." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 232, + 506, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 232, + 506, + 289 + ], + "spans": [ + { + "bbox": [ + 104, + 232, + 506, + 289 + ], + "type": "text", + "content": "Moreover, our model's ability to maintain reasonable performance under high compression ratios suggests that the token embedding spaces of contemporary models may be characterized by considerable feature redundancy. This observation presents a promising avenue for future research, as efficient techniques for compressing or discarding redundant features could substantially reduce computational costs without sacrificing model performance, enabling longer context reasoning." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 304, + 298, + 316 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 304, + 298, + 316 + ], + "spans": [ + { + "bbox": [ + 105, + 304, + 298, + 316 + ], + "type": "text", + "content": "D DETAILS OF HYPERPARAMETERS" + } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 207, + 332, + 404, + 513 + ], + "blocks": [ + { + "bbox": [ + 104, + 126, + 504, + 160 + ], + "lines": [ + { + "bbox": [ + 104, + 126, + 504, + 160 + ], + "spans": [ + { + "bbox": [ + 104, + 126, + 504, + 160 + ], + "type": "text", + "content": "Table 7: Comparison of the computational efficiency and performance of VideoChat-T with other methods. Our approach achieves relatively impressive performance with extremely low computational cost." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 207, + 332, + 404, + 513 + ], + "lines": [ + { + "bbox": [ + 207, + 332, + 404, + 513 + ], + "spans": [ + { + "bbox": [ + 207, + 332, + 404, + 513 + ], + "type": "table", + "html": "
<table><tr><td>config</td><td>epoch 1</td><td>epochs 2 & 3</td></tr>
<tr><td>input frame</td><td>192</td><td>128</td></tr>
<tr><td>max text length</td><td>1536</td><td>1024</td></tr>
<tr><td>freeze TAPE</td><td>True</td><td>False</td></tr>
<tr><td>learning rate</td><td>2e-5</td><td>1.5e-5</td></tr>
<tr><td>input resolution</td><td colspan='2'>224</td></tr>
<tr><td>clip frame</td><td colspan='2'>8</td></tr>
<tr><td>merge length</td><td colspan='2'>4</td></tr>
<tr><td>QFormer token (per clip)</td><td colspan='2'>96</td></tr>
<tr><td>LoRA rank</td><td colspan='2'>16</td></tr>
<tr><td>LoRA alpha</td><td colspan='2'>32</td></tr>
<tr><td>LoRA dropout</td><td colspan='2'>0.1</td></tr>
<tr><td>batch size (per GPU)</td><td colspan='2'>2</td></tr>
<tr><td>optimizer</td><td colspan='2'>AdamW</td></tr>
<tr><td>optimizer momentum</td><td colspan='2'>0.9, 0.999</td></tr>
<tr><td>weight decay</td><td colspan='2'>0.02</td></tr>
<tr><td>learning rate schedule</td><td colspan='2'>cosine decay</td></tr>
</table>
", + "image_path": "ca3cd1656aee9afc53e2bd99a4c8c3260116905a553d6e5a926e02ff61453318.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 547, + 504, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 547, + 504, + 602 + ], + "spans": [ + { + "bbox": [ + 104, + 547, + 504, + 602 + ], + "type": "text", + "content": "Table 8 lists the hyperparameters used during different epochs of the training process. In the first epoch, we used a larger number of input frames and froze the TAPE. At the beginning of the second epoch, we unfroze the TAPE and fixed the model's input frames to 128. Following the settings of VideoChat2, we integrated the lora module into the LLM and applied flash attention to accelerate the training process." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 619, + 241, + 631 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 619, + 241, + 631 + ], + "spans": [ + { + "bbox": [ + 105, + 619, + 241, + 631 + ], + "type": "text", + "content": "E FULL PERFORMANCES" + } + ] + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 107, + 647, + 504, + 714 + ], + "blocks": [ + { + "bbox": [ + 143, + 520, + 465, + 533 + ], + "lines": [ + { + "bbox": [ + 143, + 520, + 465, + 533 + ], + "spans": [ + { + "bbox": [ + 143, + 520, + 465, + 533 + ], + "type": "text", + "content": "Table 8: Hyper-parameter Settings During the Training Process of VideoChat-T." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 107, + 647, + 504, + 714 + ], + "lines": [ + { + "bbox": [ + 107, + 647, + 504, + 714 + ], + "spans": [ + { + "bbox": [ + 107, + 647, + 504, + 714 + ], + "type": "table", + "html": "
<tr><td>Model</td><td>LLM</td><td>Avg</td><td>AS</td><td>AP</td><td>AA</td><td>FA</td><td>UA</td><td>OE</td><td>OI</td><td>OS</td><td>MD</td><td>AL</td><td>ST</td><td>AC</td><td>MC</td><td>MA</td><td>SC</td><td>FP</td><td>CO</td><td>EN</td><td>ER</td><td>CI</td></tr>
<tr><td>VideoChatGPT (Maaz et al., 2023)</td><td>7B</td><td>32.7</td><td>23.5</td><td>26.0</td><td>62.0</td><td>22.5</td><td>26.5</td><td>54.0</td><td>28.0</td><td>40.0</td><td>23.0</td><td>20.0</td><td>31.0</td><td>30.5</td><td>25.5</td><td>39.5</td><td>48.5</td><td>29.0</td><td>33.0</td><td>29.5</td><td>26.0</td><td>35.5</td></tr>
<tr><td>VideoLLaMA (Zhang et al., 2023)</td><td>7B</td><td>34.1</td><td>27.5</td><td>25.5</td><td>51.0</td><td>29.0</td><td>39.0</td><td>48.0</td><td>40.5</td><td>38.0</td><td>22.5</td><td>22.5</td><td>43.0</td><td>34.0</td><td>22.5</td><td>32.5</td><td>45.5</td><td>32.5</td><td>40.0</td><td>30.0</td><td>21.0</td><td>37.0</td></tr>
<tr><td>VideoChat (Li et al., 2023b)</td><td>7B</td><td>35.5</td><td>33.5</td><td>26.5</td><td>56.0</td><td>33.5</td><td>40.5</td><td>53.0</td><td>40.5</td><td>30.0</td><td>25.5</td><td>27.0</td><td>48.5</td><td>35.0</td><td>20.5</td><td>42.5</td><td>46.0</td><td>26.5</td><td>41.0</td><td>23.5</td><td>23.5</td><td>36.0</td></tr>
<tr><td>ST-LLM (Liu et al., 2024b)</td><td>7B</td><td>54.9</td><td>66.0</td><td>53.5</td><td>84.0</td><td>44.0</td><td>58.5</td><td>80.5</td><td>73.5</td><td>38.5</td><td>42.5</td><td>31.0</td><td>86.5</td><td>36.5</td><td>56.5</td><td>78.5</td><td>43.0</td><td>44.5</td><td>46.5</td><td>34.5</td><td>41.5</td><td>58.5</td></tr>
<tr><td>VideoChat2 (Li et al., 2024b)</td><td>7B</td><td>60.4</td><td>75.5</td><td>58.0</td><td>83.5</td><td>50.5</td><td>60.5</td><td>87.5</td><td>74.5</td><td>45.0</td><td>47.5</td><td>44.0</td><td>82.5</td><td>37.0</td><td>64.5</td><td>87.5</td><td>51.0</td><td>66.5</td><td>47.0</td><td>35.0</td><td>37.0</td><td>72.5</td></tr>
<tr><td>VideoChat-T</td><td>7B</td><td>59.9</td><td>83.5</td><td>68.5</td><td>80.5</td><td>44.0</td><td>61.0</td><td>71.0</td><td>84.0</td><td>35.5</td><td>48.0</td><td>56.5</td><td>87.0</td><td>46.0</td><td>56.5</td><td>78.0</td><td>49.5</td><td>59.0</td><td>46.0</td><td>37.0</td><td>40.0</td><td>66.5</td></tr>
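As a quick sanity check on the Avg column, the 59.9 reported for VideoChat-T is the plain mean of its 20 per-task accuracies. The snippet below only transcribes the scores from the row above:

```python
# VideoChat-T per-task MVBench accuracies, transcribed from the row above
# (order: AS, AP, AA, FA, UA, OE, OI, OS, MD, AL, ST, AC, MC, MA, SC, FP, CO, EN, ER, CI).
scores = [83.5, 68.5, 80.5, 44.0, 61.0, 71.0, 84.0, 35.5, 48.0, 56.5,
          87.0, 46.0, 56.5, 78.0, 49.5, 59.0, 46.0, 37.0, 40.0, 66.5]
print(sum(scores) / len(scores))  # 59.9, matching the Avg column
```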
", + "image_path": "a3c57aa34b9a7662f0cafe3a8bd4673ab5a234ba215d29502ec0f8057f1d24ee.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 715, + 504, + 738 + ], + "lines": [ + { + "bbox": [ + 104, + 715, + 504, + 738 + ], + "spans": [ + { + "bbox": [ + 104, + 715, + 504, + 738 + ], + "type": "text", + "content": "Table 9: The full performance of VideoChat-T on MVBench. VideoChat-T still demonstrates strong performance, effectively prevents catastrophic forgetting caused by incremental fine-tuning." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 169 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 169 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 169 + ], + "type": "text", + "content": "The performance of VideoChat-T on MVBench is shown in Table 9. Compared to VideoChat2, VideoChat-T only experienced a " + }, + { + "bbox": [ + 104, + 82, + 506, + 169 + ], + "type": "inline_equation", + "content": "0.5\\%" + }, + { + "bbox": [ + 104, + 82, + 506, + 169 + ], + "type": "text", + "content": " accuracy loss. This indicates that our method effectively preserves the capabilities of the base model, preventing catastrophic forgetting caused by incremental fine-tuning. For a detailed analysis of the performance degradation of MVBench, please refer to Appendix F.2. For the Action Localization (AL) task, which requires the model to determine the coarse-grained temporal position of events, the test accuracy improved from " + }, + { + "bbox": [ + 104, + 82, + 506, + 169 + ], + "type": "inline_equation", + "content": "44.0\\%" + }, + { + "bbox": [ + 104, + 82, + 506, + 169 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 82, + 506, + 169 + ], + "type": "inline_equation", + "content": "56.5\\%" + }, + { + "bbox": [ + 104, + 82, + 506, + 169 + ], + "type": "text", + "content": ". This indirectly confirms that our method significantly enhances the model's temporal awareness capabilities." + } + ] + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 106, + 178, + 504, + 282 + ], + "blocks": [ + { + "bbox": [ + 106, + 178, + 504, + 282 + ], + "lines": [ + { + "bbox": [ + 106, + 178, + 504, + 282 + ], + "spans": [ + { + "bbox": [ + 106, + 178, + 504, + 282 + ], + "type": "table", + "html": "
<tr><td rowspan="2">Model</td><td rowspan="2">LLM size</td><td colspan="2">Overall (%)</td><td colspan="2">Short Video (%)</td><td colspan="2">Medium Video (%)</td><td colspan="2">Long Video (%)</td></tr>
<tr><td>w/o subs</td><td>w subs</td><td>w/o subs</td><td>w subs</td><td>w/o subs</td><td>w subs</td><td>w/o subs</td><td>w subs</td></tr>
<tr><td>ST-LLM (Liu et al., 2024b)</td><td>7B</td><td>37.9</td><td>42.3</td><td>45.7</td><td>48.4</td><td>36.8</td><td>41.4</td><td>31.3</td><td>36.9</td></tr>
<tr><td>Video-LLaVA (Lin et al., 2023a)</td><td>7B</td><td>39.9</td><td>41.6</td><td>45.3</td><td>46.1</td><td>38.0</td><td>40.7</td><td>36.2</td><td>38.1</td></tr>
<tr><td>ShareGPT4Video (Chen et al., 2024)</td><td>8B</td><td>39.9</td><td>43.6</td><td>48.3</td><td>53.6</td><td>36.3</td><td>39.3</td><td>35.0</td><td>37.9</td></tr>
<tr><td>Chat-UniVi-v1.5 (Jin et al., 2024)</td><td>7B</td><td>40.6</td><td>45.9</td><td>45.7</td><td>51.2</td><td>40.3</td><td>44.6</td><td>35.8</td><td>41.8</td></tr>
<tr><td>Qwen-VL-Chat (Bai et al., 2023)</td><td>7B</td><td>41.1</td><td>41.9</td><td>46.9</td><td>47.3</td><td>38.7</td><td>40.4</td><td>37.8</td><td>37.9</td></tr>
<tr><td>ShareGemini (Share, 2024)</td><td>7B</td><td>43.2</td><td>47.9</td><td>49.1</td><td>52.8</td><td>41.3</td><td>47.3</td><td>39.1</td><td>43.4</td></tr>
<tr><td>VideoChat2 (Li et al., 2024b)</td><td>7B</td><td>39.5</td><td>43.8</td><td>48.3</td><td>52.8</td><td>37.0</td><td>39.4</td><td>33.2</td><td>39.2</td></tr>
<tr><td>VideoChat-T</td><td>7B</td><td>46.3</td><td>55.8</td><td>53.3</td><td>59.9</td><td>43.8</td><td>54.0</td><td>41.9</td><td>53.4</td></tr>
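To make the subset-wise comparison against the base model explicit, the short snippet below computes the gains of VideoChat-T over VideoChat2 from the two rows above; all numbers are transcribed from the table.

```python
# Accuracy pairs are (w/o subs, w subs), transcribed from the two rows above.
videochat2 = {"overall": (39.5, 43.8), "short": (48.3, 52.8),
              "medium": (37.0, 39.4), "long": (33.2, 39.2)}
videochat_t = {"overall": (46.3, 55.8), "short": (53.3, 59.9),
               "medium": (43.8, 54.0), "long": (41.9, 53.4)}
for subset in videochat2:
    gains = tuple(round(t - b, 1) for t, b in zip(videochat_t[subset], videochat2[subset]))
    print(subset, gains)
# overall (6.8, 12.0), short (5.0, 7.1), medium (6.8, 14.6), long (8.7, 14.2):
# the long-video subset shows the largest gain without subtitles.
```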
", + "image_path": "9fbfb9620bae9079a0ba61060047fd8e6d129aaef5b0c2bd83deda49e64a4c6e.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 283, + 504, + 306 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 283, + 504, + 306 + ], + "spans": [ + { + "bbox": [ + 104, + 283, + 504, + 306 + ], + "type": "text", + "content": "Table 10: The full performance of VideoChat-T on VideoMME. VideoChat-T achieved significant performance improvements, particularly in the long video subset." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 312, + 504, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 312, + 504, + 357 + ], + "spans": [ + { + "bbox": [ + 104, + 312, + 504, + 357 + ], + "type": "text", + "content": "The overall performance of our model on VideoMME is presented in Table 10. VideoChat-T achieved significant improvements on both evaluation benchmarks of VideoMME, which include watching videos only and videos with subtitles. The improvements are particularly notable in the long video subset." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 373, + 219, + 385 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 373, + 219, + 385 + ], + "spans": [ + { + "bbox": [ + 105, + 373, + 219, + 385 + ], + "type": "text", + "content": "F EXTRA ABLATION" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 399, + 271, + 410 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 399, + 271, + 410 + ], + "spans": [ + { + "bbox": [ + 105, + 399, + 271, + 410 + ], + "type": "text", + "content": "F.1 DOMAIN CORRELATION OF DATA" + } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 187, + 422, + 424, + 459 + ], + "blocks": [ + { + "bbox": [ + 187, + 422, + 424, + 459 + ], + "lines": [ + { + "bbox": [ + 187, + 422, + 424, + 459 + ], + "spans": [ + { + "bbox": [ + 187, + 422, + 424, + 459 + ], + "type": "table", + "html": "
<tr><td>Model</td><td>Charades-STA (R@1 IOU=0.5)</td><td>MVBench (avg)</td></tr>
<tr><td>VideoChat-T</td><td>48.7</td><td>59.9</td></tr>
<tr><td>w/o STAR</td><td>47.5 (-1.2)</td><td>59.4 (-0.5)</td></tr>
", + "image_path": "d52dbcd29db220724ab1b19d9d0dfe3b2034156c2c9f9efc3f8c3682565afc72.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 466, + 504, + 502 + ], + "lines": [ + { + "bbox": [ + 104, + 466, + 504, + 502 + ], + "spans": [ + { + "bbox": [ + 104, + 466, + 504, + 502 + ], + "type": "text", + "content": "Table 11: The performance changes of the model after removing STAR. Although the video sources of STAR may have some domain correlation with those of Charades-STA and MVBench, the performance of our model is minimally affected by STAR." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 514, + 506, + 626 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 514, + 506, + 626 + ], + "spans": [ + { + "bbox": [ + 104, + 514, + 506, + 626 + ], + "type": "text", + "content": "We found that the video sources in the STAR dataset might have some domain correlation with the video sources in MVBench and Charades-STA. Therefore, we removed STAR from the training set while keeping other training settings consistent with the original. The performance on benchmarks where the video sources might have domain correlation is shown in Table 11. The model's accuracy on Charades-STA (R@1 IOU=0.5) decreased by " + }, + { + "bbox": [ + 104, + 514, + 506, + 626 + ], + "type": "inline_equation", + "content": "1.2\\%" + }, + { + "bbox": [ + 104, + 514, + 506, + 626 + ], + "type": "text", + "content": ", and the average accuracy on MVBench decreased by " + }, + { + "bbox": [ + 104, + 514, + 506, + 626 + ], + "type": "inline_equation", + "content": "0.5\\%" + }, + { + "bbox": [ + 104, + 514, + 506, + 626 + ], + "type": "text", + "content": ". This indicates that the domain correlation of video sources did not significantly impact performance for our model. Notably, after removing STAR, our normal data volume was reduced to approximately 36K. This implies that, with sufficiently parameter-efficient initialization and appropriate training strategies, using only a small amount of high-quality normal data is sufficient to retain the model's original capabilities." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 639, + 425, + 651 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 639, + 425, + 651 + ], + "spans": [ + { + "bbox": [ + 105, + 639, + 425, + 651 + ], + "type": "text", + "content": "F.2 DEeper INVESTIGATION OF THE PERFORMANCE DROP ON MVBENCH" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 659, + 504, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 659, + 504, + 693 + ], + "spans": [ + { + "bbox": [ + 104, + 659, + 504, + 693 + ], + "type": "text", + "content": "We conducted a deeper investigation into the performance decline on MVBench. Through additional ablation experiments (as shown in Tabel 12), we identified two main factors contributing to the performance drop." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "content": "Architectural Discrepancy: The original VideoChat2 model was designed to process only 16 frames, leading to a mismatch in the learned feature distribution compared to the architecture of VideoChatT. 
As shown in the first two rows of the table, increasing the input frame number for VideoChat2" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 108, + 79, + 504, + 160 + ], + "blocks": [ + { + "bbox": [ + 108, + 79, + 504, + 160 + ], + "lines": [ + { + "bbox": [ + 108, + 79, + 504, + 160 + ], + "spans": [ + { + "bbox": [ + 108, + 79, + 504, + 160 + ], + "type": "table", + "html": "
<tr><td>Method</td><td>post ft data</td><td>data size</td><td>frame num</td><td>token num (per frame)</td><td>MVBench (AVG)</td></tr>
<tr><td>VideoChat2</td><td>-</td><td>-</td><td>16</td><td>12</td><td>60.4</td></tr>
<tr><td>VideoChat2</td><td>-</td><td>-</td><td>128</td><td>12</td><td>42.1</td></tr>
<tr><td>VideoChat-T (Common_Init)</td><td>-</td><td>-</td><td>128</td><td>3</td><td>25.3</td></tr>
<tr><td>VideoChat-T (Ours)</td><td>-</td><td>-</td><td>128</td><td>3</td><td>48.6</td></tr>
<tr><td>VideoChat-T (Ours)</td><td>TimePro+Normal (Ours)</td><td>0.43M</td><td>128</td><td>3</td><td>59.9</td></tr>
<tr><td>VideoChat-T (Ours)</td><td>TimePro+FullVideoChat2</td><td>2M</td><td>128</td><td>3</td><td>62.9</td></tr>
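The token num (per frame) column ties back to Table 8: 96 QFormer tokens per 8-frame clip give 12 tokens per frame, and a merge length of 4 reduces that to 3, so a 128-frame input costs 384 rather than 1536 visual tokens. The sketch below shows the arithmetic plus one plausible merge operator (averaging adjacent tokens); the exact operator is an assumption, not taken from the paper.

```python
import torch

frames, tokens_per_frame, merge_length = 128, 12, 4   # from Tables 8 and 12
merged = tokens_per_frame // merge_length             # 3 tokens per frame
print(frames * tokens_per_frame, frames * merged)     # 1536 vs. 384 visual tokens

# One plausible merge operator: average each group of `merge_length` adjacent tokens.
x = torch.randn(2, frames * tokens_per_frame, 768)    # (batch, tokens, hidden), placeholder
b, n, d = x.shape
x_merged = x.view(b, n // merge_length, merge_length, d).mean(dim=2)
print(x_merged.shape)                                 # torch.Size([2, 384, 768])
```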
", + "image_path": "e70e31bf2b744396da75b43e21554740075609040d00ca2f8013334baf836e66.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 114, + 167, + 495, + 179 + ], + "lines": [ + { + "bbox": [ + 114, + 167, + 495, + 179 + ], + "spans": [ + { + "bbox": [ + 114, + 167, + 495, + 179 + ], + "type": "text", + "content": "Table 12: Performance of VideoChat2 and VideoChat-T on MVBench under different settings." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 199, + 506, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 199, + 506, + 277 + ], + "spans": [ + { + "bbox": [ + 104, + 199, + 506, + 277 + ], + "type": "text", + "content": "resulted in a significant performance drop (from 60.4 to 42.1). When initializing VideoChat-T with VideoChat2, performance was close to random (25.3) due to the newly introduced randomly initialized layers. By applying efficient initialization to these new layers, we partially recovered the original capabilities of the model, bringing the MVBench performance of the un-trained VideoChat-T back to 48.6, representing an improvement of 6.5 compared to the 128-frame VideoChat2. After further fine-tuning, the short-video processing capability of VideoChat-T improved significantly, reaching 59.9." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 282, + 504, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 282, + 504, + 370 + ], + "spans": [ + { + "bbox": [ + 104, + 282, + 504, + 370 + ], + "type": "text", + "content": "Fine-tuning Data Discrepancy: We fine-tuned VideoChat-T using only 432K data, significantly less than the 2M non-grounded regular data used for training VideoChat2. The fine-tuning data for VideoChat2 primarily consisted of short videos of around ten seconds, which closely matched the length distribution of the MVBench evaluation videos, playing a crucial role in improving MVBench performance. To validate our hypothesis, we conducted additional experiments by training our VideoChat-T model using the TimePro and full VideoChat2 training data. It can be observed that VideoChat-T showed a slight improvement in performance on the MVBench dataset, achieving an accuracy of 62.9, which is an increase of 2.5 compared to the original VideoChat2." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 376, + 504, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 376, + 504, + 443 + ], + "spans": [ + { + "bbox": [ + 104, + 376, + 504, + 443 + ], + "type": "text", + "content": "Based on the above, we can conclude the fundamental reasons affecting the model's foundational generalization capabilities. When a model undergoes adjustments, the learned original distribution may not perfectly match the new architecture, making the efficient initialization of new layers crucial. The features learned from the original dataset might be forgotten due to changes in various parameters. Utilizing a more comprehensive and diverse dataset for fine-tuning can restore and even further enhance performance." 
+ } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 127, + 478, + 484, + 533 + ], + "blocks": [ + { + "bbox": [ + 105, + 456, + 395, + 467 + ], + "lines": [ + { + "bbox": [ + 105, + 456, + 395, + 467 + ], + "spans": [ + { + "bbox": [ + 105, + 456, + 395, + 467 + ], + "type": "text", + "content": "F.3 ASSOCIATION BETWEEN PERFORMANCE AND MODEL DESIGN" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 127, + 478, + 484, + 533 + ], + "lines": [ + { + "bbox": [ + 127, + 478, + 484, + 533 + ], + "spans": [ + { + "bbox": [ + 127, + 478, + 484, + 533 + ], + "type": "table", + "html": "
<tr><td>Method</td><td>FT Data</td><td>Charades-STA IOU=0.5</td><td>QVHighlight mAP</td><td>MVBench Avg</td><td>Egoschema Full</td><td>VideoMME w/o subs</td></tr>
<tr><td>TimeChat</td><td>TimeIT+Valley</td><td>32.2</td><td>14.5</td><td>38.5</td><td>33.0</td><td>30.2</td></tr>
<tr><td>TimeChat</td><td>TimePro+Normal</td><td>34.2</td><td>16.3</td><td>41.6</td><td>38.9</td><td>33.4</td></tr>
<tr><td>VideoChat-T</td><td>TimePro+Normal</td><td>48.7</td><td>26.5</td><td>59.9</td><td>60.0</td><td>46.3</td></tr>
", + "image_path": "2050e3f990134bcd5ee5f66999a9af1feccafb67a8f70c10c65785883179abb3.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 540, + 504, + 563 + ], + "lines": [ + { + "bbox": [ + 104, + 540, + 504, + 563 + ], + "spans": [ + { + "bbox": [ + 104, + 540, + 504, + 563 + ], + "type": "text", + "content": "Table 13: Comparison of other model architectures trained on our dataset with our method, demonstrating the impact of the overall model structure design." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 577, + 506, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 577, + 506, + 654 + ], + "spans": [ + { + "bbox": [ + 104, + 577, + 506, + 654 + ], + "type": "text", + "content": "To eliminate the influence of training data and auxiliary tasks, and to more clearly evaluate the association between performance and model design, we fine-tuned TimeChat using the full set of fine-tuning data and auxiliary tasks from VideoChat-T. Table 13 presents the performance of TimeChat, fine-tuned with our data, across five datasets. It can be observed that TimeChat, fine-tuned with our data, shows improvements across all benchmarks. However, its performance still lags significantly behind VideoChat-T. This indicates that an efficient fine-tuning architecture design and high-quality, diverse datasets are both essential and complementary." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 667, + 282, + 678 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 667, + 282, + 678 + ], + "spans": [ + { + "bbox": [ + 105, + 667, + 282, + 678 + ], + "type": "text", + "content": "F.4 VALIDATION OF TRANSFERABILITY" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "content": "To verify the robustness of our TimeSuite for other MLLMs, we transferred our method to Llava-OneVision (Li et al., 2024a). Table 14 shows the performance changes of Llava-OneVision after applying our TimeSuite. It can be seen that when we apply the full set of methods in TimeSuite to Llava-OneVision, the model's performance on two different long-video evaluation benchmarks" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 127, + 79, + 483, + 128 + ], + "blocks": [ + { + "bbox": [ + 127, + 79, + 483, + 128 + ], + "lines": [ + { + "bbox": [ + 127, + 79, + 483, + 128 + ], + "spans": [ + { + "bbox": [ + 127, + 79, + 483, + 128 + ], + "type": "table", + "html": "
<tr><td>Method</td><td>Charades-STA IOU=0.5</td><td>QVHighlight mAP</td><td>VideoMME w/o subs</td><td>MLVU Avg</td><td>MVBench Avg</td></tr>
<tr><td>Llava-OneVision (baseline)</td><td>7.3</td><td>15.0</td><td>58.2</td><td>64.7</td><td>56.7</td></tr>
<tr><td>Llava-OneVision-T (Ours)</td><td>42.5</td><td>21.7</td><td>61.4</td><td>69.4</td><td>56.1</td></tr>
", + "image_path": "13ffe7f9c75f9998a7d1704ae8920b30bf5392370e2081b05de584158f20f245.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 191, + 504, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 191, + 504, + 213 + ], + "spans": [ + { + "bbox": [ + 104, + 191, + 504, + 213 + ], + "type": "text", + "content": "improves (+3.2 on VideoMME and +4.7 on MLVU), effectively demonstrating the robustness of our TimeSuite for different MLLMs." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 228, + 367, + 239 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 228, + 367, + 239 + ], + "spans": [ + { + "bbox": [ + 105, + 228, + 367, + 239 + ], + "type": "text", + "content": "F.5 EXPLORATIONS OF DATA CONFIGRATIONS OF TIMEPRO" + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 127, + 251, + 483, + 293 + ], + "blocks": [ + { + "bbox": [ + 104, + 136, + 504, + 170 + ], + "lines": [ + { + "bbox": [ + 104, + 136, + 504, + 170 + ], + "spans": [ + { + "bbox": [ + 104, + 136, + 504, + 170 + ], + "type": "text", + "content": "Table 14: Performance comparison of TimeSuite migration to other MLLMs. The application of our method shows a certain improvement in long video comprehension, demonstrating the transferability of our approach." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 127, + 251, + 483, + 293 + ], + "lines": [ + { + "bbox": [ + 127, + 251, + 483, + 293 + ], + "spans": [ + { + "bbox": [ + 127, + 251, + 483, + 293 + ], + "type": "table", + "html": "
<tr><td>Method</td><td>MVBench Avg</td><td>Egoschema Full</td><td>VideoMME w/o subs</td><td>Charades-STA IOU=0.5</td><td>QVHighlight mAP</td></tr>
<tr><td>TimePro615K+Normal82K (old version)</td><td>60.0</td><td>61.0</td><td>46.3</td><td>45.4</td><td>25.7</td></tr>
<tr><td>TimePro349K+Normal82K (Ours)</td><td>59.9</td><td>60.0</td><td>46.3</td><td>48.7</td><td>26.5</td></tr>
", + "image_path": "3790862ec03db5b7017f9227c34d426e2e1ad7cafeb723dbfdc0a261b24da217.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 300, + 504, + 324 + ], + "lines": [ + { + "bbox": [ + 104, + 300, + 504, + 324 + ], + "spans": [ + { + "bbox": [ + 104, + 300, + 504, + 324 + ], + "type": "text", + "content": "Table 15: Comparison of different versions of our proposed TimePro. More data does not necessarily lead to higher overall performance, highlighting the importance of data quality." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 338, + 504, + 416 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 338, + 504, + 416 + ], + "spans": [ + { + "bbox": [ + 104, + 338, + 504, + 416 + ], + "type": "text", + "content": "In the early version of TimePro, we employed datasets comprising 309K Multi-format Temporal Grounding instances, 150K Temporal Grounded Caption instances and other data. Through extensive experimentation (as shown in Tabel 15), we discovered that removing low-quality data while retaining high-quality instances could significantly reduce training time without compromising performance. Consequently, we pruned these two part datasets to 100K and 93K instances, respectively. The data distribution presented in the paper represents the optimized and relatively balanced configuration we arrived at." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 432, + 194, + 444 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 432, + 194, + 444 + ], + "spans": [ + { + "bbox": [ + 105, + 432, + 194, + 444 + ], + "type": "text", + "content": "G DISCUSSION" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 457, + 484, + 479 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 457, + 484, + 479 + ], + "spans": [ + { + "bbox": [ + 105, + 457, + 484, + 479 + ], + "type": "text", + "content": "G.1 CAN THE OVERALL PERFORMANCE OF MLLMS BE ENHANCED BY CONTINUOUSLY INTEGRATING EXPERT TASKS?" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 489, + 504, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 489, + 504, + 567 + ], + "spans": [ + { + "bbox": [ + 104, + 489, + 504, + 567 + ], + "type": "text", + "content": "By appropriately fine-tuning the Multimodal Large Language Model (MLLM), we have developed a general MLLM with powerful zero-shot temporal grounding capabilities. Its performance, after fine-tuning on the training set of evaluation benchmarks, can rival the current state-of-the-art supervised expert models. Based on these results, we can boldly speculate whether it is possible to internalize the capabilities of expert models such as spatial grounding, tracking and detection (Zeng et al., 2023) into the MLLM itself, without using any external expert decoders, to enhance the comprehensive understanding performance of the MLLM and achieve a unified generalist MLLM for multiple tasks." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 571, + 504, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 571, + 504, + 694 + ], + "spans": [ + { + "bbox": [ + 104, + 571, + 504, + 694 + ], + "type": "text", + "content": "Merlin (Yu et al., 2023) and VisionLLM (Wang et al., 2024b) have already attempted something similar, but its performance is limited by the reasoning capabilities and language representation bottlenecks of the LLM. There is still a significant gap between its performance and that of expert models for various tasks. We observed similar phenomena in our experiments. The temporal grounding task only requires outputting two timestamps, and the task format is relatively simple, so our model achieved good results. However, the highlight detection task requires outputting multiple discrete timestamps and their corresponding saliency scores. The model needs to accurately predict dozens of numbers in language form to answer the question correctly. Our model performed well only on data with fewer timestamps. Therefore, how to simplify the complex output format of expert tasks into the language representation of LLMs, or to design special processing procedures to simplify complex expert tasks, is a question worth exploring." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "content": "Moreover, designing diverse data formats is also crucial for enhancing the expert capabilities of MLLMs. Compared to classic expert models, MLLMs have a natural advantage in task type diversity and can enhance their performance through various different variants tasks of a single capability." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "text", + "content": "For temporal grounding tasks, we found that enhancing task diversity has a significant effect on improving the model's temporal perception generalization ability. We can boldly speculate that if there are sufficiently diverse training data task types, most tasks with relatively simple output formats can achieve results comparable to expert models through appropriate instruction fine-tuning." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 132, + 506, + 210 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 132, + 506, + 210 + ], + "spans": [ + { + "bbox": [ + 104, + 132, + 506, + 210 + ], + "type": "text", + "content": "Through the integration of diverse expert tasks and the optimization of language representations, MLLMs can achieve substantial improvements in their overall capabilities. This allows them to effectively comprehend and address complex tasks, rivaling or even exceeding the performance of specialized expert models within specific domains. Looking ahead, MLLMs have the potential to evolve into highly versatile AI models, transcending traditional conversational and QA capabilities. They will be equipped to handle a wide range of complex expert tasks across various domains, such as vision, language, and reasoning." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 236, + 495, + 259 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 236, + 495, + 259 + ], + "spans": [ + { + "bbox": [ + 104, + 236, + 495, + 259 + ], + "type": "text", + "content": "G.2 WHY DOES TEMPORAL GROUNDING DATA LEAD TO ACCURACY LOSS IN SHORT-TERM VIDEOS?" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 273, + 504, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 273, + 504, + 307 + ], + "spans": [ + { + "bbox": [ + 104, + 273, + 504, + 307 + ], + "type": "text", + "content": "We conducted ablation experiments using different combinations of temporal grounding data and regular data. The accuracy of VideoChat-T on MVBench after fine-tuning with various data combinations is shown in Table 16." + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 207, + 327, + 403, + 411 + ], + "blocks": [ + { + "bbox": [ + 207, + 327, + 403, + 411 + ], + "lines": [ + { + "bbox": [ + 207, + 327, + 403, + 411 + ], + "spans": [ + { + "bbox": [ + 207, + 327, + 403, + 411 + ], + "type": "table", + "html": "
<tr><td>FT Data</td><td>MVBench (AVG)</td></tr>
<tr><td>TimeIT</td><td>54.7</td></tr>
<tr><td>TimeIT+Normal</td><td>55.3</td></tr>
<tr><td>Normal</td><td>56.1</td></tr>
<tr><td>TimePro</td><td>57.4</td></tr>
<tr><td>TimePro+Normal (Ours)</td><td>59.9</td></tr>
", + "image_path": "de6e6aaab81cb54eefcf31bcb060a69ffd1354b82ac2e2170d6377afdfec2cf8.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 120, + 418, + 488, + 430 + ], + "lines": [ + { + "bbox": [ + 120, + 418, + 488, + 430 + ], + "spans": [ + { + "bbox": [ + 120, + 418, + 488, + 430 + ], + "type": "text", + "content": "Table 16: Performance VideoChat-T on MVBench under different fine-tuning data settings." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 456, + 504, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 456, + 504, + 556 + ], + "spans": [ + { + "bbox": [ + 104, + 456, + 504, + 556 + ], + "type": "text", + "content": "The diversity of grounding data formats in the past has often been limited, which can lead to overfitting on Temporal Grounding tasks and cause the model to lose its general question-answering capability. We compared the TimeIT dataset proposed in TimeChat (Ren et al., 2024) with our TimePro dataset on MVBench. As shown in the Table 16, fine-tuning with only TimeIT resulted in the lowest accuracy, and the combined use of TimeIT+Normal also performed slightly worse than using Normal alone. This indicates that monotonous grounding data indeed damages the model's original performance (as shown in Figure 1 at the beginning of the paper, TimeChat loses some of its general question-answering capability after fine-tuning, where it outputs localization times for general questions)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 561, + 506, + 671 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 561, + 506, + 671 + ], + "spans": [ + { + "bbox": [ + 104, + 561, + 506, + 671 + ], + "type": "text", + "content": "In contrast, our TimePro dataset includes diverse data, encompassing 9 different task types from 15 datasets, which helps mitigate the generalization loss caused by homogeneous grounding data types. Additionally, our dataset integrates Grounding with various general tasks. For instance, Grounded Caption requires detailed descriptions of corresponding video segments, while Reasoning Temporal Localization demands the model to reason about questions. This approach significantly enhances the model's generalization ability and minimizes the impact on its original capability (e.g., short video accuracy). As demonstrated in the Table 16, the performance of using only TimePro exceeds that of using Normal alone, and the combined use of TimePro and Normal far surpasses all other combinations. This also confirms that our TimePro effectively preserves the model's original performance." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 677, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 506, + 733 + ], + "type": "text", + "content": "Overall, using a single type of expert task training data can easily lead to model overfitting, resulting in significant loss of the model's original capabilities. To preserve the model's foundational generalization abilities, it is essential to use diversified training data. Additionally, incorporating data of various types and distributions, such as text, images, and videos, can further enhance the model's generalization capabilities." 
+ } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 503, + 105 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 503, + 105 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 503, + 105 + ], + "type": "text", + "content": "G.3 COULD TRAINING THE MODEL ON BOTH TEMPORAL AND NON-TEMPORAL GROUNDING DATA MITIGATE PERFORMANCE LOSS IN SHORT-TERM VIDEOS?" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 114, + 504, + 159 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 114, + 504, + 159 + ], + "spans": [ + { + "bbox": [ + 104, + 114, + 504, + 159 + ], + "type": "text", + "content": "To address this question, we conducted additional ablation experiments. By training VideoChat-T with different combinations of temporal and non-temporal grounding data, we were able to clearly observe the effects of both types of data on the model's performance. The results of the experiments are shown in the Table 17." + } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 148, + 167, + 462, + 240 + ], + "blocks": [ + { + "bbox": [ + 148, + 167, + 462, + 240 + ], + "lines": [ + { + "bbox": [ + 148, + 167, + 462, + 240 + ], + "spans": [ + { + "bbox": [ + 148, + 167, + 462, + 240 + ], + "type": "table", + "html": "
<tr><td>FT Data</td><td>MVBench Avg</td><td>VideoMME w/o subs</td><td>Charades-STA R1@0.5</td></tr>
<tr><td>Normal</td><td>56.1</td><td>42.6</td><td>8.0</td></tr>
<tr><td>TimePro</td><td>57.4</td><td>46.0</td><td>45.6</td></tr>
<tr><td>TimePro+Normal (Ours)</td><td>59.9</td><td>46.3</td><td>48.7</td></tr>
", + "image_path": "23e060406cc7ca5dd024e0fd494c5e7c33a333e7c1b025f0cab54e825a31606b.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 247, + 504, + 271 + ], + "lines": [ + { + "bbox": [ + 104, + 247, + 504, + 271 + ], + "spans": [ + { + "bbox": [ + 104, + 247, + 504, + 271 + ], + "type": "text", + "content": "Table 17: Performance comparison of VideoChat-T using different combinations of temporal grounding and non-temporal grounding data." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 285, + 506, + 374 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 285, + 506, + 374 + ], + "spans": [ + { + "bbox": [ + 104, + 285, + 506, + 374 + ], + "type": "text", + "content": "It can be observed that the combined use of TimePro+Normal for VideoChat-T achieves the highest performance in short video QA, long video QA, and temporal grounding tasks. This not only demonstrates that using both temporal grounding and non-temporal grounding data can reduce performance loss in short videos, but also reveals that the effects of temporal and non-temporal grounding data are complementary across various tasks. The distinct differences between temporal grounding and non-temporal grounding tasks can respectively compensate for the model's shortcomings in different task perspectives and feature distributions. The simultaneous use of both types of data can effectively enhance the model's overall capabilities." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 390, + 195, + 403 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 390, + 195, + 403 + ], + "spans": [ + { + "bbox": [ + 105, + 390, + 195, + 403 + ], + "type": "text", + "content": "H CASE STUDY" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 415, + 263, + 427 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 415, + 263, + 427 + ], + "spans": [ + { + "bbox": [ + 105, + 415, + 263, + 427 + ], + "type": "text", + "content": "H.1 MORE QUALITATIVE ANALYSIS" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 436, + 504, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 436, + 504, + 471 + ], + "spans": [ + { + "bbox": [ + 104, + 436, + 504, + 471 + ], + "type": "text", + "content": "To further qualitatively analyze our model, we supplemented it with three types of examples. These examples are about long video QA, short video QA, and captioning tasks, all of which include temporal grounding." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 475, + 504, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 475, + 504, + 520 + ], + "spans": [ + { + "bbox": [ + 104, + 475, + 504, + 520 + ], + "type": "text", + "content": "More qualitative comparisons about long video QA are shown in Figure 6. VideoChat-T effectively handles various questions across different domains. By better perceiving the temporal relationships of different events occurring in long videos, it can more accurately and deeply understand the detailed content of the entire video." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 525, + 504, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 525, + 504, + 571 + ], + "spans": [ + { + "bbox": [ + 104, + 525, + 504, + 571 + ], + "type": "text", + "content": "More qualitative comparisons about short video QA are shown in Figure 7. VideoChat-T effectively retains the original capabilities of the base model. Through parameter-efficient initialization methods and appropriate training strategies, we minimize the damage to the base model's capabilities caused by new architectures and data." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 575, + 504, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 575, + 504, + 609 + ], + "spans": [ + { + "bbox": [ + 104, + 575, + 504, + 609 + ], + "type": "text", + "content": "More qualitative comparisons about captioning are shown in Figure 8. Although VideoChat2 describes more local details in some scenarios compared to VideoChat-T, VideoChat-T focuses more on a series of temporal events, which aligns better with how humans typically describe videos." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 622, + 204, + 634 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 622, + 204, + 634 + ], + "spans": [ + { + "bbox": [ + 105, + 622, + 204, + 634 + ], + "type": "text", + "content": "H.2 SHORTCOMINGS" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "text", + "content": "We also conducted a qualitative analysis of the shortcomings of VideoChat-T through examples. As shown in Figure 9, VideoChat-T performs poorly on examples with complex logic. In the left example, although VideoChat-T accurately identified the timing of the event, it failed to fully explain the motivation behind the man opening the isolation door, which was \"to fight the hijackers of the space elevator, seize the controller, and thus save the people in the entire space elevator.\" In the right example, VideoChat-T correctly identified the event where Mr. 
Bean reached out to touch his desk mate's table, but it incorrectly explained the true reason for this action, which was \"to cover up the fact that he was copying his desk mate's exam by pretending to wipe dust off the desk.\"" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 106, + 79, + 299, + 338 + ], + "blocks": [ + { + "bbox": [ + 106, + 79, + 299, + 338 + ], + "lines": [ + { + "bbox": [ + 106, + 79, + 299, + 338 + ], + "spans": [ + { + "bbox": [ + 106, + 79, + 299, + 338 + ], + "type": "image", + "image_path": "e52d5f38eabd8a9e955e523265302051c3b150ffd1cb3d8bde4b9df1f3a4805f.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 140, + 345, + 468, + 357 + ], + "lines": [ + { + "bbox": [ + 140, + 345, + 468, + 357 + ], + "spans": [ + { + "bbox": [ + 140, + 345, + 468, + 357 + ], + "type": "text", + "content": "Figure 6: More qualitative comparisons in temporal grounding & long video QA." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 310, + 79, + 504, + 337 + ], + "blocks": [ + { + "bbox": [ + 310, + 79, + 504, + 337 + ], + "lines": [ + { + "bbox": [ + 310, + 79, + 504, + 337 + ], + "spans": [ + { + "bbox": [ + 310, + 79, + 504, + 337 + ], + "type": "image", + "image_path": "7a0c7dcb93ea05e6d55e7cd110f7f6731f43f222eef388781bf8d351bc78fe07.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 106, + 368, + 300, + 489 + ], + "blocks": [ + { + "bbox": [ + 106, + 368, + 300, + 489 + ], + "lines": [ + { + "bbox": [ + 106, + 368, + 300, + 489 + ], + "spans": [ + { + "bbox": [ + 106, + 368, + 300, + 489 + ], + "type": "image", + "image_path": "1c8ccced94116db04aa8157061d63ee3b4fc32ba71d140812c5ca8a41962a7a9.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 139, + 498, + 469, + 511 + ], + "lines": [ + { + "bbox": [ + 139, + 498, + 469, + 511 + ], + "spans": [ + { + "bbox": [ + 139, + 498, + 469, + 511 + ], + "type": "text", + "content": "Figure 7: More qualitative comparisons in temporal grounding & short video QA." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 310, + 368, + 504, + 490 + ], + "blocks": [ + { + "bbox": [ + 310, + 368, + 504, + 490 + ], + "lines": [ + { + "bbox": [ + 310, + 368, + 504, + 490 + ], + "spans": [ + { + "bbox": [ + 310, + 368, + 504, + 490 + ], + "type": "image", + "image_path": "6ce4c7c9de45e48dbb9ea0a84be1a3868ed6615bd5840777217d3668c4e9131e.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 531, + 506, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 531, + 506, + 588 + ], + "spans": [ + { + "bbox": [ + 104, + 531, + 506, + 588 + ], + "type": "text", + "content": "Due to the preponderance of single-turn, perceptual questions in our training data and the lack of multi-step reasoning data with complex logic, our model struggles to handle more challenging scenarios that demand intricate logical reasoning. To address this limitation, we propose constructing data in a chain-of-thought format to guide the model through multi-step reasoning, enabling it to delve deeper into the underlying motivations and causal relationships within a video." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 106, + 162, + 299, + 312 + ], + "blocks": [ + { + "bbox": [ + 106, + 162, + 299, + 312 + ], + "lines": [ + { + "bbox": [ + 106, + 162, + 299, + 312 + ], + "spans": [ + { + "bbox": [ + 106, + 162, + 299, + 312 + ], + "type": "image", + "image_path": "80a2a36824b3311ed40441b8c36bf0418f18e8c27ac646dea3885618de83ec4b.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 149, + 320, + 460, + 333 + ], + "lines": [ + { + "bbox": [ + 149, + 320, + 460, + 333 + ], + "spans": [ + { + "bbox": [ + 149, + 320, + 460, + 333 + ], + "type": "text", + "content": "Figure 8: More qualitative comparisons in temporal grounding & captioning." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 310, + 162, + 504, + 312 + ], + "blocks": [ + { + "bbox": [ + 310, + 162, + 504, + 312 + ], + "lines": [ + { + "bbox": [ + 310, + 162, + 504, + 312 + ], + "spans": [ + { + "bbox": [ + 310, + 162, + 504, + 312 + ], + "type": "image", + "image_path": "2778c174efb9e72fc415a2a83c66f0215f739af772831010d5f229d7adb68871.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 106, + 505, + 293, + 616 + ], + "blocks": [ + { + "bbox": [ + 106, + 505, + 293, + 616 + ], + "lines": [ + { + "bbox": [ + 106, + 505, + 293, + 616 + ], + "spans": [ + { + "bbox": [ + 106, + 505, + 293, + 616 + ], + "type": "image", + "image_path": "623ccbd51596cb694021ccac483f15bdda05f6f38d7caa766cd8ea5fdc15ce4a.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 105, + 623, + 506, + 647 + ], + "lines": [ + { + "bbox": [ + 105, + 623, + 506, + 647 + ], + "spans": [ + { + "bbox": [ + 105, + 623, + 506, + 647 + ], + "type": "text", + "content": "Figure 9: Examples of poor performance by VideoChat-T. While it accurately identifies the time of events, it struggles to answer questions that involve more complex logic." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 315, + 505, + 503, + 614 + ], + "blocks": [ + { + "bbox": [ + 315, + 505, + 503, + 614 + ], + "lines": [ + { + "bbox": [ + 315, + 505, + 503, + 614 + ], + "spans": [ + { + "bbox": [ + 315, + 505, + 503, + 614 + ], + "type": "image", + "image_path": "d038dda1b056cb1d2be4292749a2aebf0529a81ae05fae500e01321ca9708227.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2025/Timer-XL_ Long-Context Transformers for Unified Time Series Forecasting/1000abc3-3f82-4c7b-a0aa-1b66e4569e7b_content_list.json b/2025/Timer-XL_ Long-Context Transformers for Unified Time Series Forecasting/1000abc3-3f82-4c7b-a0aa-1b66e4569e7b_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..8911b462d38a079720e3b83f9875de6ec0e3a094 --- /dev/null +++ b/2025/Timer-XL_ Long-Context Transformers for Unified Time Series Forecasting/1000abc3-3f82-4c7b-a0aa-1b66e4569e7b_content_list.json @@ -0,0 +1,3259 @@ +[ + { + "type": "text", + "text": "TIMER-XL: LONG-CONTEXT TRANSFORMERS FOR UNIFIED TIME SERIES FORECASTING", + "text_level": 1, + "bbox": [ + 171, + 99, + 785, + 146 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yong Liu\\*, Guo Qin\\*, Xiangdong Huang, Jianmin Wang, Mingsheng Long\\* \nSchool of Software, 
BNrist, Tsinghua University, Beijing 100084, China \n{liuyong21, qinguo24}@ mails.tsinghua.edu.cn \n{huangxdong, jimwang, mingsheng}@tsinghua.edu.cn", + "bbox": [ + 179, + 169, + 699, + 227 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ABSTRACT", + "text_level": 1, + "bbox": [ + 450, + 262, + 547, + 277 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We present Timer-XL, a causal Transformer for unified time series forecasting. To uniformly predict multidimensional time series, we generalize next token prediction, predominantly adopted for 1D token sequences, to multivariate next token prediction. The paradigm formulates various forecasting tasks as a long-context prediction problem. We opt for decoder-only Transformers that capture causal dependencies from varying-length contexts for unified forecasting, making predictions on non-stationary univariate time series, multivariate series with complicated dynamics and correlations, as well as covariate-informed contexts that include exogenous variables. Technically, we propose a universal TimeAttention to capture fine-grained intra- and inter-series dependencies of flattened time series tokens (patches), which is further enhanced by deft position embedding for temporal causality and variable equivalence. Timer-XL achieves state-of-the-art performance across task-specific forecasting benchmarks through a unified approach. Based on large-scale pre-training, Timer-XL achieves state-of-the-art zero-shot performance, making it a promising architecture for pre-trained time series models. Code is available at this repository: https://github.com/thuml/Timer-XL.", + "bbox": [ + 228, + 295, + 769, + 518 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 INTRODUCTION", + "text_level": 1, + "bbox": [ + 171, + 546, + 336, + 561 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Transformers have been extensively applied to time series forecasting, becoming the backbone of task-specific models (Zhou et al., 2021; Wu et al., 2021) and pre-trained models (Das et al., 2023). While the majority of prior works have focused on long-term forecasting, reliable predictions are made by considering endogenous variations and exogenous correlations in the context (Box, 2013). Besides, the context length of pre-trained Transformers determines the maximum input and output length during inference. Therefore, long-context Transformers are more versatile than shorter ones, facilitating long-sequence and high-resolution generation (Yin et al., 2023; Wang et al., 2024a).", + "bbox": [ + 169, + 578, + 826, + 678 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "However, existing Transformers in the time series field crucially encounter the context bottleneck. As shown in Figure 1, unlike Transformers for natural language and vision that learn dependencies among thousands to millions of tokens (Kirillov et al., 2023; OpenAI, 2023), time-series Transformers typically operate around limited contexts of up to hundreds of time series tokens (patches) (Nie et al., 2022). For univariate forecasting, a short-context input leads to insufficient learning of global tendencies, struggling to address non-stationarity in real-world time series (Hyndman, 2018). 
For multivariate forecasting, increasing research has demonstrated the effectiveness of explicitly capturing intra- and inter-channel dependencies (Zhang & Yan, 2022; Liu et al., 2023; 2024a), highlighting the practical urgency of extending the context length to encompass inter-correlated time series.", + "bbox": [ + 169, + 681, + 826, + 809 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recently, causal Transformers characterized by the decoder-only architecture have become a predominant choice of large language models (Zhao et al., 2023) and garnered increasing attention in the development of large time series models (Rasul et al., 2023; Ansari et al., 2024). Based on contextual flexibility and autoregressive next token prediction, one model can accommodate varying lookback and prediction lengths (Liu et al., 2024b). Therefore, pre-training on longer contexts not only empowers them with the fundamental capability to incorporate more contextual information but", + "bbox": [ + 169, + 814, + 828, + 902 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Equal Contribution", + "bbox": [ + 191, + 910, + 316, + 924 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/0a23332ffe30348e29683222f624e59a9642b12c38b219e0830c57188039601e.jpg", + "image_caption": [ + "Figure 1: We compare the context length (measured by token number) of Transformers in different modalities and propose Timer-XL that increases the length to thousands of patch tokens. Given the generality across contexts, Timer-XL is a versatile solution for various forecasting tasks." + ], + "image_footnote": [], + "bbox": [ + 173, + 104, + 344, + 284 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/be99a7a17398a5e28835a3acb6957e447d5b6e901b66ad0a133862b2f5676c6a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 375, + 99, + 826, + 287 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "also enhances the model versatility toward a one-for-all foundation model. Regarding any-variate and any-length time series as one context, previous work (Liu et al., 2024a) has achieved unified modeling on flattened tokens based on noncausal Transformers. However, our empirical results (Figure 3) reveal that encoder-only forecasters may encounter performance degradation in long-context forecasting, while decoder-only Transformers can mitigate this degradation well.", + "bbox": [ + 169, + 354, + 826, + 425 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this work, we generalize the training objective of language modeling to multivariate next token prediction, achieving unified time series forecasting that covers tasks in Figure 1 (right). Based on the decoder-only architecture, we propose TimeAttention to facilitate Transformers on multidimensional time series, presenting Kronecker-based masking mechanism to train time-series Transformers in a channel-dependent approach. With specialized position embedding for multivariate series, TimeAttention is aware of the chronological order of time points and achieves permutation-equivalence (Zaheer et al., 2017) on variables. 
We enlarge the context to thousands of patch tokens and achieve state-of-the-art on univariate, multivariate, and covariate-informed forecasting benchmarks. By pre-training on large-scale datasets, we present Timer-XL as an extra long version of pre-trained time-series Transformers (Timer) (Liu et al., 2024c), which outperforms recent large models in zero-shot forecasting. Our contributions lie in three aspects:", + "bbox": [ + 169, + 431, + 826, + 584 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We propose multivariate next token prediction and unified time series forecasting, strengthening Transformers with enlarged contexts to make information-complete predictions.", + "- We introduce TimeAttention, a novel causal self-attention tailored for multidimensional time series, facilitating intra- and inter-series modeling with positional awareness and maintaining causality and scalability of Transformers.", + "- We propose Timer-XL, a versatile Transformer for one-for-all forecasting, which mitigates performance degradation in long-context time series, achieves state-of-the-art performance in task-specific benchmarks, and presents notable zero-shot performance by pre-training." + ], + "bbox": [ + 215, + 598, + 825, + 724 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 RELATED WORK", + "text_level": 1, + "bbox": [ + 171, + 752, + 346, + 767 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Transformers (Vaswani et al., 2017) for time series forecasting have undergone rapid advancements. Initial Transformer-based forecasters primarily focused on long-term forecasting (Li et al., 2019; Zhou et al., 2021; Wu et al., 2021; Sun & Zhang, 2024). However, the context length is not growing in pace, which hinders Transformers from making information-complete predictions. Another advancement has focused on multivariate forecasting. Unlike natural language, time series are multidimensional and inherently correlated (Hyndman, 2018). To learn intra- and inter-series dependencies, different tokenization of time-series Transformers has been proposed, including point-wise (Lim et al., 2021), patch-wise (Nie et al., 2022), and variable-wise (Liu et al., 2023) approaches, with deftly tailored architectures (Zhang & Yan, 2022; Wang et al., 2024b). However, few works highlight that multidimensional time series can be uniformly tackled by long-context Transformers without architectural", + "bbox": [ + 169, + 784, + 826, + 924 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "modification. In this work, we leverage causal Transformers, which excel at handling long-context sequences, and unify time series forecasting tasks into multivariate next token prediction.", + "bbox": [ + 169, + 103, + 823, + 132 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Recently, time-series Transformers have experienced the evolution from small task-specific models to pre-trained large models (Das et al., 2023; Woo et al., 2024; Ansari et al., 2024). Among them, decoder-only Transformer is predominantly adopted as the backbone of large language models (Touvron et al., 2023; OpenAI, 2023), positioning as a scalable choice for general time series analysis (Liu et al., 2024c). 
By independently predicting each token with supervision, decoder-only models are also multi-length forecasters (Liu et al., 2024b), avoiding resource-intensive training and lookback-search. However, existing decoder-only Transformers are generally pre-trained in a channel-independent approach, making them inaccessible to inter-series dependencies.", + "bbox": [ + 169, + 138, + 826, + 251 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Prior work has employed encoder-only Transformers to capture dependencies of multivariate time series (Liu et al., 2024a). However, our empirical study found that this architecture can be incompatible with causal forecasting, limiting the performance of Transformers. To implement next token prediction and multivariate forecasting in a single Transformer, we renovate the attention module, which disentangles fine-grained token dependencies into variable dependencies and temporal causal masks, capturing intra- and inter-series dependencies with causality and scalability maintained. In Table 1, we list representative time-series Transformers and highlight their differences.", + "bbox": [ + 169, + 257, + 826, + 354 + ], + "page_idx": 2 + }, + { + "type": "table", + "img_path": "images/53d14dbf58273f8a2925249ca212ea2e7d06a99b9b5f75655a5b81019e1094cb.jpg", + "table_caption": [ + "Table 1: Comparison among representative time-series Transformers." + ], + "table_footnote": [], + "table_body": "
ModelPatchTST (2022)iTrans. (2023)TimeXer (2024b)UniTST (2024a)Moirai (2024)Timer (2024c)Timer-XL (Ours)
Intra-Series✓✗✓✓✓✓✓
Inter-Series✗✓✓✓✓✗✓
Causal Trm.✗✗✗✗✗✓✓
Pre-Trained✗✗✗✗✓✓✓
", + "bbox": [ + 173, + 387, + 823, + 477 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 APPROACH", + "text_level": 1, + "bbox": [ + 171, + 502, + 299, + 517 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this section, we first introduce a decoder-only Transformer to illustrate the procedure of next token prediction on univariate time series. As an extension, we design TimeAttention and propose Timer-XL for unified time series forecasting. It is applicable to univariate, multivariate, and covariate-informed scenarios by generalizing the context from 1D sequences to 2D time series.", + "bbox": [ + 169, + 532, + 823, + 589 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1 TIMER", + "text_level": 1, + "bbox": [ + 171, + 604, + 263, + 618 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Timer (Liu et al., 2024c) is a time-series Transformer trained by next token prediction (Bengio et al., 2000), which regards single-dimensional time series as non-overlapping patch tokens.", + "bbox": [ + 169, + 631, + 826, + 660 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Next Token Prediction Given an univariate time series $\\mathbf{X} = \\{x_{1},\\dots ,x_{TP}\\}$ of length $TP$ , a time series token is defined as $P$ consecutive time points, also termed as the patch token:", + "bbox": [ + 169, + 672, + 823, + 702 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {x} _ {i} = \\left\\{x _ {(i - 1) P + 1}, \\dots , x _ {i P} \\right\\} \\in \\mathbb {R} ^ {P}, i = 1, \\dots , T. \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 331, + 705, + 823, + 723 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The training objective is to independently predict the next patch token to maximize the likelihood:", + "bbox": [ + 169, + 724, + 815, + 739 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nP (\\mathbf {X}) = \\prod_ {i = 1} ^ {T} p \\left(\\mathbf {x} _ {i + 1} \\mid \\mathbf {x} _ {\\leq i}\\right), \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 408, + 743, + 825, + 785 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "which is realized by a decoder-only architecture with the block number $L$ and model dimension $D$ :", + "bbox": [ + 169, + 787, + 821, + 801 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {h} _ {i} ^ {0} = \\mathbf {W} _ {e} \\mathbf {x} _ {i}, i = 1, \\dots , T,\n$$\n", + "text_format": "latex", + "bbox": [ + 380, + 804, + 566, + 821 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {H} ^ {l} = \\operatorname {T r m B l o c k} \\left(\\mathbf {H} ^ {l - 1}\\right), l = 1, \\dots , L, \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 380, + 824, + 823, + 842 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\{\\hat {\\mathbf {x}} _ {i + 1} \\} = \\mathbf {H} ^ {L} \\mathbf {W} _ {d}, i = 1, \\dots , T.\n$$\n", + "text_format": "latex", + "bbox": [ + 352, + 845, + 576, + 863 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "For simplicity, we omit the block index $l$ . Timer adopts $\\mathbf{W}_e$ , $\\mathbf{W}_d \\in \\mathbb{R}^{D \\times P}$ that independently embed and project the token embeddings as $\\mathbf{H} = \\{\\mathbf{h}_i\\} \\in \\mathbb{R}^{T \\times D}$ . TrmBlock includes feed-forward network and self-attention with the temporal causal mask $\\mathcal{T} \\in \\mathbb{R}^{T \\times T}$ . 
$\\mathbf{h}_i \\in \\mathbb{R}^D$ is the context representation of the previous $i$ tokens. All predicted $\\hat{\\mathbf{x}}_{i+1}$ are supervised with ground truth via MSE loss.", + "bbox": [ + 169, + 867, + 823, + 925 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2 GENERALIZE 1D SEQUENCES TO 2D TIME SERIES", + "text_level": 1, + "bbox": [ + 171, + 103, + 565, + 118 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "For the enlarged context with the additional dimension, our proposed attention mechanism aims to (1) thoroughly capture intra- and inter-series dependencies and (2) preserve causality within the temporal dimension. Without loss of generality, we illustrate this with the case of multivariate forecasting.", + "bbox": [ + 169, + 128, + 826, + 174 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Multivariate Next Token Prediction Given a multivariate time series $\\mathbf{X} \\in \\mathbb{R}^{N \\times TP}$ with the number of variables $N$ , the time series token $\\mathbf{x}_{m,i}$ is defined as the $i$ -th patch of the $m$ -th variable:", + "bbox": [ + 169, + 185, + 823, + 215 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {x} _ {m, i} = \\left\\{\\mathbf {X} _ {m, (i - 1) P + 1}, \\dots , \\mathbf {X} _ {m, i P} \\right\\} \\in \\mathbb {R} ^ {P}, m = 1, \\dots , N, i = 1, \\dots , T. \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 250, + 218, + 825, + 234 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The training objective is still to independently predict the next token. Unlike before, each prediction is made based on tokens of the previous time $(\\leq i)$ from all $N$ variables:", + "bbox": [ + 169, + 237, + 823, + 266 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nP (\\mathbf {X}) = \\prod_ {m = 1} ^ {N} \\prod_ {i = 1} ^ {T} p \\left(\\mathbf {x} _ {m, i + 1} \\mid \\mathbf {x} _ {:, \\leq i}\\right) = \\prod_ {m = 1} ^ {N} \\prod_ {i = 1} ^ {T} p \\left(\\mathbf {x} _ {m, i + 1} \\mid \\mathbf {x} _ {1, \\leq i}, \\dots , \\mathbf {x} _ {N, \\leq i}\\right). \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 250, + 268, + 825, + 309 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Compared with Equation 2, the multivariate context length increases from $T$ to $NT$ . 
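As a concrete illustration (our own arithmetic, assuming the patch length $P = 96$), the ERA5-MS setting used later with $N = 7$ stations and input-3072 gives $T = 3072 / 96 = 32$ patches per variable, so each prediction attends over a context of $NT = 224$ tokens instead of 32. 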
In return, this paradigm learns causal dependencies within each sequence while incorporating exogenous variable correlations from other sequences, making it a universal forecasting paradigm that outperforms channel-independent (Nie et al., 2022) or variable-centric models (Liu et al., 2023).", + "bbox": [ + 169, + 311, + 825, + 368 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Technically, we independently apply $\\mathbf{W}_e\\in \\mathbb{R}^{D\\times P}$ on each token to obtain the patch-wise representation $\\mathbf{h}_{m,i}\\in \\mathbb{R}^D$ , which will encompass contextual information from $Ni$ tokens through Transformer blocks and be eventually projected by $\\mathbf{W}_d\\in \\mathbb{R}^{D\\times P}$ into the predicted patch token $\\hat{\\mathbf{x}}_{m,i + 1}$ .", + "bbox": [ + 169, + 373, + 825, + 420 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Position Embedding Position embedding has not been sufficiently explored in time-series Transformers. To avoid the inherent permutation-invariance of self-attention, positional embedding is required to reflect the chronological order of tokens on the temporal dimension. As for the variable dimension, shuffling the input order of variables should not affect anything other than the output order of variables. Formally, the processing on multiple variables should be permutation-equivalent (Zaheer et al., 2017).", + "bbox": [ + 169, + 431, + 826, + 503 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To meet the above requirements, we adopt RoPE (Su et al., 2024), a widely utilized position embedding, on the temporal dimension. For the variable dimension, we use two learnable scalars in each head to keep the permutation-equivalence of variables (Woo et al., 2024). Beyond simply incorporating them together, we provide detailed ablations in Section E.3 to demonstrate the effectiveness:", + "bbox": [ + 169, + 508, + 825, + 566 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {A} _ {m n, i j} = \\mathbf {h} _ {m, i} ^ {\\top} \\mathbf {W} _ {q} \\mathbf {R} _ {\\theta , i - j} \\mathbf {W} _ {k} ^ {\\top} \\mathbf {h} _ {n, j} + u \\cdot \\mathbb {1} (m = n) + v \\cdot \\mathbb {1} (m \\neq n), \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 266, + 566, + 823, + 585 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathbf{W}_q, \\mathbf{W}_k, \\mathbf{W}_v \\in \\mathbb{R}^{D \\times d_k}$ and $d_k$ is the dimension of the query, key, and value. $\\mathbf{R}_{\\theta,t} \\in \\mathbb{R}^{d_k \\times d_k}$ is the rotary matrix with rotation degree $t \\cdot \\theta$ , $\\mathbb{1}(\\cdot)$ is the indicator function, and $u, v \\in \\mathbb{R}$ are learnable parameters for the token to distinguish its endogenous and exogenous time series.", + "bbox": [ + 169, + 588, + 823, + 633 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "TimeAttention In contrast to variable-wise (Liu et al., 2023) and non-causal patch-wise tokens (Nie et al., 2022; Woo et al., 2024), our TimeAttention aims to capture causal patch-wise dependencies within and among all variables. Concretely, we sort patch tokens by flattening their 2D indices into 1D indices in the temporal-first manner, which is illustrated in the upper left of Figure 2. 
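A minimal sketch of this construction (our own illustration previewing Equations 7-8 below, not the official code) reads:

import torch

N, T = 2, 3                                 # two variables, three patches each
T_mask = torch.tril(torch.ones(T, T))       # temporal causal mask: 1 iff j <= i
C = torch.ones(N, N)                        # all-one variable dependency (multivariate case)
dep = torch.kron(C, T_mask)                 # (NT, NT) token dependencies in temporal-first order
bias = torch.zeros_like(dep).masked_fill(dep == 0, float('-inf'))  # Mask(C kron T)
# Tokens are ordered (A,1), (A,2), (A,3), (B,1), (B,2), (B,3); the row of the
# 2nd token of series A recovers the dependency set {1, 2, 4, 5} discussed below:
print(dep[1])                               # tensor([1., 1., 0., 1., 1., 0.])
# Covariate-informed case: swap in a custom graph, e.g. C = [[1,1,1],[0,1,0],[0,0,1]].

The resulting bias is added to the attention scores of Equation 6 before Softmax. 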
Note that the order of variables does not matter, since Equation 6 guarantees their permutation-equivalence.", + "bbox": [ + 169, + 646, + 825, + 717 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We provide an intuitive example to illustrate the causal dependencies within multivariate time series: consider the 2nd token of time series A. To predict its next token, its representation $\\mathbf{h}$ should depend on exactly the tokens $\\{1,2,4,5\\}$ . Similarly, we provide all causal dependencies of each token in Figure 12. Based on the visualized attention mask and variable dependencies presented in Figure 2, where all variables are inter-correlated, all token dependencies in $\\mathcal{A}$ can be formally disentangled by the Kronecker product into (1) the adjacency matrix of the variable dependency graph $\\mathcal{C} \\in \\mathbb{R}^{N \\times N}$ and (2) the causal temporal mask $\\mathcal{T} \\in \\mathbb{R}^{T \\times T}$ :", + "bbox": [ + 169, + 722, + 825, + 821 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {T} _ {i, j} = \\left\\{ \\begin{array}{l l} 1 & \\text{if } j \\leq i, \\\\ 0 & \\text{otherwise,} \\end{array} \\right. \\mathcal {C} _ {m, n} = \\left\\{ \\begin{array}{l l} 1 & \\text{if variable } m \\text{ is dependent on } n, \\\\ 0 & \\text{otherwise.} \\end{array} \\right. \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 259, + 824, + 825, + 857 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Let the Kronecker product $\\otimes : (\\mathbb{R}^{N \\times N}, \\mathbb{R}^{T \\times T}) \\mapsto \\mathbb{R}^{NT \\times NT}$ take two matrices and produce a block matrix. Consequently, TimeAttention is formulated as follows:", + "bbox": [ + 169, + 859, + 823, + 888 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname{TimeAttention} (\\mathbf {H}) = \\operatorname{Softmax} \\left(\\frac {\\operatorname{Mask} (\\mathcal {C} \\otimes \\mathcal {T}) + \\mathcal {A}}{\\sqrt {d _ {k}}}\\right) \\mathbf {H} \\mathbf {W} _ {v}, \\operatorname{Mask} (\\mathcal {M}) = \\left\\{ \\begin{array}{l l} 0 & \\text{if } \\mathcal {M} _ {i, j} = 1, \\\\ - \\infty & \\text{if } \\mathcal {M} _ {i, j} = 0. \\end{array} \\right. \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 181, + 891, + 825, + 929 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/5d7910511fbc43aba8665601afd2df9d133202af450ca9b630a9f2b1ffa9530a.jpg", + "image_caption": [ + "Figure 2: Illustration of TimeAttention. For univariate series, the temporal mask $\\mathcal{T}$ keeps the causality. Given multivariate patch tokens sorted in a temporal-first order, we adopt the variable dependencies $\\mathcal{C}$ , an all-one matrix, as the left operand of the Kronecker product, expanding the temporal mask into a block matrix, which exactly reflects the dependencies of multivariate next token prediction. The formulation is also generalizable to univariate and covariate-informed contexts with pre-defined variable dependencies."
+ ], + "image_footnote": [], + "bbox": [ + 173, + 99, + 823, + 318 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Eventually, token representations in $\\mathbf{H} = \\{\\mathbf{h}_{m,i}\\} \\in \\mathbb{R}^{NT\\times D}$ will be independently processed by the feed-forward network and layer normalization, and fed into the next Transformer block.", + "bbox": [ + 169, + 412, + 823, + 443 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Unified Time Series Forecasting In multivariate forecasting, the variable dependency forms the complete graph, presenting an all-one matrix $\\mathcal{C}$ . By generalizing TimeAttention to multiple sequences, Transformers can leverage their length-flexibility to encompass relevant covariates as well. In this case, Timer-XL is adapted in two steps: (1) formulate the customized variable dependency as $\\mathcal{C}$ and (2) optimize the model using the supervision of target variables. An example (target- $A$ -covariate- $B$ ) of TimeAttention is illustrated on the right of Figure 2. In a nutshell, we adopt position embeddings for the temporal and variable dimensions. To achieve unified time series forecasting, we flatten 2D time series into a unified context and capture fine-grained causal token dependencies.", + "bbox": [ + 169, + 458, + 826, + 571 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4 EXPERIMENTS", + "text_level": 1, + "bbox": [ + 171, + 590, + 328, + 606 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We conduct evaluations of Timer-XL in three aspects, including (1) supervised training as a task-specific forecaster, (2) large-scale pre-training as a zero-shot forecaster, and (3) assessing the effectiveness of TimeAttention and the model efficiency. Given that the long-context forecasting paradigm has received less attention in the community, partly because its benefit is concealed by performance saturation on previous benchmarks (Makridakis et al., 2020; Wu et al., 2022), we establish new long-context forecasting benchmarks. Detailed experimental configurations are provided in Appendix B.", + "bbox": [ + 169, + 625, + 826, + 709 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1 UNIVARIATE TIME SERIES FORECASTING", + "text_level": 1, + "bbox": [ + 171, + 727, + 503, + 739 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Setup Due to the insufficient dataset length when extending contexts in univariate datasets (Makridakis et al., 2020), we adopt multivariate datasets from Liu et al. (2023). Although these datasets are originally multivariate, they are predicted in a univariate manner via channel independence. Different from the previous long-term forecasting setting, we focus on reliable prediction based on a long context. Therefore, we fix the prediction horizon and increase the lookback length to monthly and yearly levels. We also establish a long-context univariate benchmark based on the challenging 40-year ECMWF Reanalysis v5 dataset (Hersbach et al., 2020), where yearly contexts are adopted to predict the land-surface temperature of a single site (ERA5-S).", + "bbox": [ + 169, + 753, + 826, + 866 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Results As shown in Figure 3, the accuracy of univariate prediction can generally be improved by extending the daily context to monthly. We draw a similar conclusion on ERA5 (Table 15), where extending the context consistently helps for each specific model architecture. 
Notably, Timer-XL with", + "bbox": [ + 169, + 881, + 823, + 925 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/79fb7a53377762632a2d1083e3b63f76c73377f78a711bb58b721826bb32661e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 174, + 99, + 500, + 170 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/a9b005998cd76436a1c88c4b9d2a8d95352de906af3c71c720b28e89a67c1a9e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 500, + 99, + 823, + 170 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/d100c225ac9464f9239272523ac5c6506ac7f3812e369816cdc3d45165ebed62.jpg", + "image_caption": [ + "Figure 3: Univariate forecasting (pred-96) of well-acknowledged benchmarks under channel independence (Nie et al., 2022). We increase the lookback length to encompass monthly and yearly contexts." + ], + "image_footnote": [], + "bbox": [ + 176, + 172, + 500, + 244 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/c92323559b403877fc1b283efc799de5a0b7712866bb329b6d210cb0b798ac4b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 172, + 821, + 244 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "decoder-only architecture outperforms the encoder-only Transformer and the linear forecaster in excessively long contexts. Further, we conduct representation analysis in Appendix E.4, revealing that Timer-XL is proficient at adaptively selecting information from vast observations and thus achieves breakthrough performance. It is also noteworthy that performance improves slowly with monthly contexts and deteriorates with yearly ones, which may stem from the increased noise and training difficulty inherent in the data; improving context efficiency is thus left as a future direction. Table 2 provides results on ERA5-S. Timer-XL consistently outperforms PatchTST on all sites, which can be credited to the maintenance of causality and token-wise supervision in the decoder-only architecture.", + "bbox": [ + 169, + 294, + 826, + 405 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Non-stationary Forecasting We delve into the widespread non-stationarity in univariate tasks. It is commonly tackled by normalization (Kim et al., 2021), which greatly improves Transformer performance in previous benchmarks. However, we find this improvement may be caused by the insufficient time span and training samples of these datasets. While normalization simplifies learning by aligning series with different means and variances to the same distribution, it limits the model capacity of Transformers, preventing them from learning variations among windows. The by-products can be mode collapse and oversmoothed predictions. In Table 2 and Table 16, we evaluate the performance on ERA5 and datasets from Wu et al. (2022), which validates that Timer-XL can achieve better results even without instance normalization.", + "bbox": [ + 169, + 421, + 826, + 532 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/67e09f08fcc44247e4817ba8accddf80c0f8503a9eae5f83da73f3b0fc50d4d5.jpg", + "table_caption": [ + "Table 2: Univariate forecasting (input-3072-pred-96) of ERA5-S, encompassing 117k time points in each station (40 years). 
We evaluate PatchTST and Timer-XL with and without normalization (Kim et al., 2021). + Norm. indicates using the normalization. We train one model for each site separately." + ], + "table_footnote": [], + "table_body": "
StationBeijingHongkongLondonNew YorkParisSeoulShanghaiAverage
ModelMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAE
PatchTST0.07910.2210.1890.3270.2770.4150.1860.3340.2660.4070.09400.2380.1370.2890.1750.319
+ Norm.0.07970.2200.1910.3230.2810.4190.1840.3340.2720.4110.09140.2330.1360.2870.1760.319
Timer-XL0.07390.2100.1790.3160.2620.4040.1820.3270.2540.3990.09010.2290.1340.2820.1680.310
+ Norm.0.07420.2100.1830.3170.2780.4180.1810.3300.2640.4070.08960.2270.1330.2810.1720.313
", + "bbox": [ + 176, + 599, + 821, + 700 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2 MULTIVARIATE TIME SERIES FORECASTING", + "text_level": 1, + "bbox": [ + 171, + 727, + 522, + 739 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "**Setup** We follow iTransformer (Liu et al., 2023) to evaluate multivariate forecasting performance. Toward a one-for-all forecaster, we evaluate performance of rolling forecast, that is, we trained one model for all prediction horizons by integrating the previous prediction into the lookback window in the next iteration. We further establish long-context multivariate forecasting benchmarks: ERA5 multi-station land-surface temperature prediction (ERA5-MS), and the global temperature and wind speed forecasting challenge (GTWSF) (Wu et al., 2023), to learn complex temporal dynamics and variable correlations with sufficient training samples.", + "bbox": [ + 169, + 753, + 826, + 852 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Results As shown in Tables 3-4 and Figure 4, Timer-XL achieves the best results on both previous and new benchmarks. Essentially, Transformers that explicitly capture inter-series dependencies, such as UniTST (Liu et al., 2024a) and iTransformer, reasonably achieve decent performance in Table 3. Beyond iTransformer, Timer-XL can model fine-grained patch-wise temporal dependencies. With", + "bbox": [ + 169, + 867, + 826, + 925 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "TimeAttention, Timer-XL outperforms Timer especially on high-dimensional time series (13.2% in ECL and 6.3% in Traffic, with thousands of tokens in the context). Compared with the encoder-only UniTST, decoder-only Transformers excel at generalizing across varying prediction lengths in Table 4.", + "bbox": [ + 169, + 103, + 826, + 148 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/7d45ba03ee83b18b2f20d6f15575a4d2df1639ef3ffab172a100980176335e37.jpg", + "image_caption": [ + "Figure 4: Multivariate forecasting of GTWSF (2-day-pred-1-day), involving 3850 worldwide stations spanning two years. Results of the baseline models are officially reported by Ding et al. (2024)." + ], + "image_footnote": [], + "bbox": [ + 178, + 155, + 821, + 300 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/9ab83abc4d6716d5cf2526e9ca5a34b057878018d220d3b1183925fb12fdfc7b.jpg", + "table_caption": [ + "Table 3: Multivariate forecasting (96-pred-96) of well-acknowledged benchmarks. All models are trained from scratch. Results of baseline models are officially reported by Liu et al. (2023)." + ], + "table_footnote": [], + "table_body": "
ModelsTimer-XL(Ours)Timer(2024c)UniTST(2024a)iTransformer(2023)DLinear(2023)PatchTST(2022)TimesNet(2022)Stationary(2022b)Autoformer(2021)
MetricMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAE
ECL0.1380.2330.1590.2440.1390.2350.1480.2400.1970.2820.1810.2700.1680.2720.1690.2730.2010.317
ETTh10.3810.3990.3860.4010.3850.4020.3860.4050.3860.4000.4140.4190.3840.4020.5130.4910.4490.459
Traffic0.3870.2600.4130.2650.3890.2650.3950.2680.6500.3960.4620.2950.5930.3210.6120.3380.6130.388
Weather0.1650.2090.1760.2150.1650.2100.1740.2140.1960.2550.1770.2180.1720.2200.1730.2230.2660.336
Solar-Energy0.2000.2290.2040.2340.2030.2320.2030.2370.2900.3780.2340.2860.2500.2920.2150.2490.8840.711
", + "bbox": [ + 176, + 386, + 823, + 520 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/9392748096ddc1333b1f929d4867af98b1d21e6efbde5736e65cc4d816ed5949.jpg", + "table_caption": [ + "Table 4: Multivariate forecasting (672-pred-{96, 192, 336, 720}) of well-acknowledged benchmarks. We evaluate one-for-all forecasters following Liu et al. (2024b): rolling forecasting for four forecast lengths with one model. Averaged results are reported here and full results are provided in Table 12." + ], + "table_footnote": [], + "table_body": "
ModelsTimer-XL(Ours)Timer(2024c)UniTST(2024a)iTransformer(2023)DLinear(2023)PatchTST(2022)TimesNet(2022)Stationary(2022b)Autoformer(2021)
MetricMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAE
ECL0.1550.2460.1610.2510.1630.2570.1640.2580.1650.2650.1690.2680.2010.3030.2650.3580.2890.379
ETTh10.4090.4300.4180.4360.4290.4470.4210.4450.4260.4440.4120.4350.4950.4910.5050.5130.5170.528
Traffic0.3740.2550.3840.2590.3850.2650.3840.2740.4230.2980.3910.2750.6020.3220.6300.3470.6840.433
Weather0.2400.2730.2320.2700.2310.2720.2660.2910.2390.2910.2260.2680.2640.2930.3080.3290.4350.455
Solar-Energy0.1980.2490.2330.2490.2410.2750.2130.2910.2220.2830.2020.2690.2130.2950.2540.3150.2650.325
", + "bbox": [ + 176, + 578, + 821, + 712 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Ablation Study Patching (Nie et al., 2022) has been demonstrated as an effective tokenization approach for time series, leading to the boom of Transformers in supervised deep forecasters and large time series models. To better cope with multivariate time series forecasting, we compared typical models on real-world benchmarks to address key questions: (1) whether to conduct explicit inter-series modeling or not (channel independence) and (2) whether to use decoder-only or encoder-only Transformers. The combination presents four Transformers in Table 5, which shows that Timer-XL combines the advantages of explicit inter-series modeling and the decoder-only architecture, which is suitable for multivariate time series forecasting with sufficient training samples.", + "bbox": [ + 169, + 727, + 826, + 840 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3 COVARIATE-INFORMED TIME SERIES FORECASTING", + "text_level": 1, + "bbox": [ + 171, + 856, + 578, + 871 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "**Setup** For the covariate-informed forecasting, we adopt the well-acknowledged electricity price forecasting (EPF) task (Lago et al., 2021). Each subset contains electricity price as the endogenous variable and two exogenous variables. Therefore, the variable dependency for Timer-XL is formulated", + "bbox": [ + 169, + 881, + 825, + 925 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/d222f793e029a06ec91e2b4412e138e04eed2191dab3f4edefb70c5797760874.jpg", + "table_caption": [ + "Table 5: Multivariate forecasting (input-3072-pred-96) of ERA5-MS (40 years and 7 stations). We fairly evaluate Transformers that adopt patched time series. CI. indicates whether the Transformer uses channel independence (Nie et al., 2022). Arch. categorizes them into the encoder-only (E) and decoder-only (D) architectures. Different from ERA5-S in Table 2, we train one model for all sites." + ], + "table_footnote": [], + "table_body": "
StationBeijingHongkongLondonNew YorkParisSeoulShanghaiAverage
ModelCI.Arch.MSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAE
PatchTSTYesE0.08150.2220.1900.3260.2750.4140.1850.3330.2650.4070.09770.2400.1390.2900.1760.319
UniTSTNoE0.07530.2130.1790.3180.2690.4100.1850.3300.2560.4010.09010.2300.1350.2840.1700.312
TimerYesD0.07340.2100.1820.3190.2680.4070.1830.3290.2550.3990.08770.2260.1320.2810.1690.310
Timer-XLNoD0.07360.2090.1740.3090.2630.4040.1820.3270.2520.3960.08720.2250.1300.2780.1660.307
", + "bbox": [ + 176, + 167, + 823, + 277 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "as $\\mathcal{C} = [[1,1,1],[0,1,0],[0,0,1]]$ . To investigate whether to learn causal or noncausal patch-wise dependencies in covariates, we implement two versions of Timer-XL: the original one with temporal causal mask $\\mathcal{T}$ , and the noncausal one with $\\mathcal{T}$ replaced by an all-one matrix.", + "bbox": [ + 169, + 287, + 823, + 333 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Results As shown in Table 6, Timer-XL outperforms state-of-the-art models in covariate-informed tasks. Compared with TimeXer (Wang et al., 2024b), which treats an entire covariate as a token, Timer-XL learns fine-grained patch-wise dependencies. By the noncausal version of Timer-XL, we surprisingly find consistent conclusions with endogenous variables: results will be better if Timer-XL learns causal dependencies within exogenous variables. It again validates that next token prediction that maintains causality has a higher upper limit of performance.", + "bbox": [ + 169, + 345, + 826, + 431 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/04c943f1b98e55267a342571b7185f18ea5f6aa7e389f37d4635b3ad9e249b59.jpg", + "table_caption": [ + "Table 6: Covariate-informed forecasting (168-pred-24) of EPF. We implement two versions of TimerXL: Noncausal indicates that we do not maintain the causality within covariates by replacing temporal causal mask with all-one matrix. Results of baselines are officially reported by Wang et al. (2024b)." + ], + "table_footnote": [], + "table_body": "
ModelsTimer-XL (Ours)Timer-XL (Noncausal)TimeXer (2024b)iTransformer (2023)DLinear (2023)PatchTST (2022)Crossformer (2022)TimesNet (2022)Autoformer (2021)
MetricMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAE
NP0.2340.2620.2370.2650.2380.2680.2650.3000.3090.3210.2670.2840.2450.2890.2500.2890.4020.398
PJM0.0890.1870.0920.1880.0880.1880.0970.1970.1080.2150.1060.2090.1490.1980.0970.1950.1680.267
BE0.3710.2430.4100.2790.3790.2430.3940.2700.4630.3130.4030.2640.4360.2940.4190.2880.5000.333
FR0.3810.2040.4060.2200.3840.2080.4390.2330.4290.2600.4110.2200.4400.2160.4310.2340.5190.295
DE0.4340.4150.4350.4150.4400.4180.4790.4430.5200.4630.4610.4320.5400.4230.5020.4460.6740.544
Average0.3020.2620.3160.2730.3060.2650.3350.2890.3660.3140.3300.2820.3620.2840.3400.2900.4530.368
", + "bbox": [ + 176, + 489, + 821, + 643 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.4 PRE-TRAINED TIME-SERIES TRANSFORMERS", + "text_level": 1, + "bbox": [ + 171, + 660, + 532, + 674 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "**Setup** Pre-training enriches time-series Transformers with generalizable forecasting capabilities. The outcome large time series model can cope with widespread challenges of few-shot and zero-shot forecasting. In this section, we conduct univariate pre-training on UTSD (Liu et al., 2024c) and LOTSA (Woo et al., 2024) and evaluate zero-shot performance on benchmarks from Wu et al. (2022). We further conduct large-scale multivariate pre-training on our ERA5-Large dataset, which spans 40 years and encompasses 4920 stations. Subsequently, we evaluate three types of generalization results comparing PatchTST (encoder-only Transformer) and Timer-XL (decoder-only Transformer): pre-training on $80\\%$ stations and $80\\%$ time span and then forecast on the remaining stations (variable generalization), remaining time span (temporal generalization), and remaining split of time span and stations (variable and temporal generalization). To evaluate the benefit of pre-training with longer context, we compare the zero-shot performance of Timer (2024c) and Timer-XL, where the context length of pre-training is increased from 1440 to 2880.", + "bbox": [ + 169, + 686, + 826, + 854 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Results We compare generalization performance on ERA5-Large in the middle of Figure 5 (a). Timer-XL achieves better results than PatchTST in all cases, revealing that decoder-only architecture has stronger generalization capability. Figure 5 (b) compares zero-shot performance of two pretrained Transformers with different context lengths, where Timer-XL outperforms previous Timer on", + "bbox": [ + 169, + 867, + 826, + 925 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "all benchmark datasets, validating that long-context pre-training enhances large time series models. In Table 7, we provide a comprehensive zero-shot evaluation under a comparable pre-training scale and model size, where Timer-XL achieves notable performance with better sample efficiency. The versatility and scalability make it a promising backbone of foundation models.", + "bbox": [ + 169, + 103, + 826, + 161 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/f809df11e6c77a504c9d6611bd294faa583499ade1ad86527e9586aeba9856e7.jpg", + "image_caption": [ + "Figure 5: Illustration of one-for-all generalization (left). Based on the contextual flexibility, Timer-XL can predict heterogeneous time series, indicating three directions of generalization shown on the left. We compare performance when generalizing across the time and variables (middle), and zero-shot results across datasets (right), emphasizing the benefit of long-context pre-training." 
+ ], + "image_footnote": [], + "bbox": [ + 181, + 178, + 413, + 311 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/9bf8e911d60cef9234044b26c6892c245b20cac153d33c0d445242aa07ca1ca9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 415, + 178, + 594, + 314 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/8d040fb43b89d37fe62e4580b2b8519c97cb6723bc57bc269df004eaa12a00a8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 596, + 178, + 815, + 314 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/e633e516bc4779be5567e3700747af241a230c792a27c7811d8fe1bd3fe08b09.jpg", + "table_caption": [ + "Table 7: Averaged results of zero-shot forecasting. A lower MSE or MAE indicates a better prediction. Corresponding prediction lengths include $\\{96,192,336,720\\}$ . Full results of all prediction lengths are provided in Table 13. $1^{\\text{st}}$ Count represents the number of wins achieved by a model under all prediction lengths and datasets. The detailed configuration of Timer-XLBase is provided in Table 11." + ], + "table_footnote": [ + "* Datasets used for pre-training are not evaluated on the corresponding models, denoted by a dash (-).", + "* Traffic (from PEMS) is generally used during the pre-training of large models and thus not evaluated here.", + "* Our model checkpoint is available at https://huggingface.co/thuml/timer-base-84m." + ], + "table_body": "
ModelsTimer-XLBase(Ours)Time-MoEBase(2024)Time-MoELarge(2024)Time-MoEUltra(2024)MoiraiSmall(2024)MoiraiBase(2024)MoiraiLarge(2024)TimesFM(2023)MOMENT(2024)ChronosBase(2024)ChronosLarge(2024)
MetricMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAE
ETTm10.3730.3920.3940.4150.3760.4050.3560.3910.4360.4100.4060.3850.4220.3910.4330.4180.6700.5360.6450.5000.5550.465
ETTm20.2730.3360.3170.3650.3160.3610.2880.3440.3070.3470.3110.3370.3290.3430.3280.3460.3160.3650.3100.3500.2950.338
ETTh10.4040.4170.4000.4240.3940.4190.4120.4260.4280.4270.4170.4190.4800.4390.4730.4430.6830.5660.5910.4680.5880.466
ETTh20.3470.3880.3660.4040.4050.4150.3710.3990.3610.3840.3620.3820.3670.3770.3920.4060.3610.4090.4050.4100.4550.427
ECL0.1740.278------0.2180.3030.1870.2740.1860.270--0.7650.6860.2140.2780.2040.273
Weather0.2560.2940.2650.2970.2700.3000.2560.2880.2750.2860.2870.2810.2640.273--0.2940.3260.2920.3150.2790.306
\\( 1^{st} \\) Count15102130107000511001200002
", + "bbox": [ + 176, + 462, + 823, + 608 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.5 MODEL ANALYSIS", + "text_level": 1, + "bbox": [ + 171, + 667, + 344, + 681 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Model Efficiency To evaluate the model efficiency of Timer-XL with respect to the context length, it is essential to recognize the distinct characteristics of time series data compared to 1D sequences. Unlike natural language, the time series modality is characterized by the variable number $N$ and the input length. We adopt two representative multivariate datasets with different $N$ , and provide the memory footprint and training speed under gradually prolonged input. We evaluate typical approaches to handle multivariate series: (1) Timer-XL and Moiria that adopt channel dependence; (2) Timer that adopts channel independence. Intuitively, the complexity of the first type is $\\mathcal{O}(N^2 T^2)$ while the complexity of self-attention under channel independence is $\\mathcal{O}(NT^2)$ . However, results shown in Figure 6 reveal that measured overheads of Timer-XL is much less than $N$ times of Timer.", + "bbox": [ + 169, + 694, + 826, + 819 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Since the previous analysis of model efficiency on time-series Transformer predominantly focuses on self-attention on 1D time series, we initially present a theoretical derivation of the computational complexity of Transformers on 2D time series, including the parameter counts, memory footprint, and FLOPs in Table 8. We find that other parts of Transformers, such as feed-forward network, have a complexity of $\\mathcal{O}(NT)$ no matter which approach is adopted to handle multivariate time series. They also account for dominant overheads in existing benchmarks since the context length is not large enough, confirming our empirical results. Further, we introduce FlashAttention (Dao et al., 2022) to", + "bbox": [ + 169, + 825, + 826, + 925 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "improve the model efficiency, which is computationally equivalent and reduces the overall memory footprint of Timer-XL to $\\mathcal{O}(NT)$ without affecting performance.", + "bbox": [ + 169, + 103, + 823, + 132 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/553584e7183e72b2a768d3fda64cf4138bca7bc170d557ad9865987ffb71461b.jpg", + "image_caption": [ + "Weather (21 Variables)" + ], + "image_footnote": [], + "bbox": [ + 186, + 151, + 500, + 234 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/4a2d17d81021b75adf6eab2fc8be89ebd6b9c6f957e6ade84aeff1196f10832b.jpg", + "image_caption": [ + "Weather (21 Variables)" + ], + "image_footnote": [], + "bbox": [ + 504, + 151, + 816, + 234 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/5ef83dc7f21995e29764acb949b98abb763fe9ee4fd5749b588be0ac111d733f.jpg", + "image_caption": [ + "ECL (321 Variables)", + "Figure 6: Efficiency analysis. We compare representative time-series Transformers on multivariate datasets with variable numbers ranging from ten to hundred and increase the lookback length." 
+ ], + "image_footnote": [], + "bbox": [ + 181, + 247, + 500, + 330 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/8ede19e9b3c61c84a7349767d6ba8a18094a3bb91cf8ce891ea014c63de2403a.jpg", + "image_caption": [ + "ECL (321 Variables)" + ], + "image_footnote": [], + "bbox": [ + 506, + 247, + 816, + 330 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/80e46e2ca1351ea7849005b58e526640e11d4b5e879f7a64bde7ae578895fec0.jpg", + "image_caption": [ + "Learned Attention", + "Figure 7: Visualization of TimeAttention. It is from the first sample of length 672 in the test split of Traffic. We visualize the last 10 variables, each containing 7 tokens. We present the auto-correlation function (ACF) plot. Auto-correlation can be reflected by the distribution of attention scores (bottom right). We average TimeAttention across sub-blocks, which indicates the Pearson correlations (upper right)." + ], + "image_footnote": [], + "bbox": [ + 178, + 382, + 464, + 551 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/3a06aff90573f0a821016699aae13f8670884469cca930d8e1961cdd607296d9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 464, + 372, + 821, + 465 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/06321afc1ca0e3eb0b4cc245978620970bd5a8d172f6f7db5b451bbe63abb12b.jpg", + "image_caption": [ + "Sub-Block(3, 3)" + ], + "image_footnote": [], + "bbox": [ + 483, + 477, + 818, + 551 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Representation Analysis In addition to the enhanced performance, fine-grained token dependencies offer improved interpretability. We present a showcase visualization from Traffic in Figure 7. It is observed that sub-matrices along the diagonal generally receive greater attention, which reasonably reveals the predominant dependencies within the endogenous variable. By zooming in on the sub-block that corresponds to Variable-3, we observe that the attention distribution of the last row indicates certain strong dependencies among patch tokens. This observation is also supported by the auto-correlation function (ACF) plot, which reveals auto-correlations at certain lags, explaining why the model pays special attention to these tokens. Furthermore, we average each sub-matrix into one scalar. The resulting matrix also illustrates the Pearson correlations present in the raw data.", + "bbox": [ + 169, + 623, + 826, + 750 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "5 CONCLUSION AND FUTURE WORK", + "text_level": 1, + "bbox": [ + 171, + 768, + 495, + 782 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "In this paper, we emphasize the efficacy of causal Transformers in the forecasting of long-context time series. To facilitate long-context Transformers on diverse tasks, we propose multivariate next token prediction, a novel paradigm to predict multidimensional series with covariates. We present Timer-XL enhanced by TimeAttention as an extra-long version of pre-trained time-series Transformers. It simultaneously captures temporal dynamics and variable correlations by enhanced self-attention. In addition to achieving state-of-the-art performance on extensive benchmarks, we establish challenging benchmarks for long-context forecasting. By pre-training on large-scale heterogeneous time series, Timer-XL demonstrates notable zero-shot performance as a large time-series model. 
In the future, we will improve computational efficiency and build large domain-specific models with Timer-XL.", + "bbox": [ + 169, + 799, + 826, + 925 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "ACKNOWLEDGMENTS", + "text_level": 1, + "bbox": [ + 171, + 102, + 356, + 118 + ], + "page_idx": 10 + }, + { + "type": "ref_text", + "text": "This work was supported by the National Natural Science Foundation of China (U2342217 and 62021002), the BNRist Project, and the National Engineering Research Center for Big Data Software.", + "bbox": [ + 171, + 132, + 826, + 162 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 171, + 183, + 287, + 198 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Abdul Fatir Ansari, Lorenzo Stella, Caner Turkmen, Xiyuan Zhang, Pedro Mercado, Huibin Shen, Oleksandr Shchur, Syama Sundar Rangapuram, Sebastian Pineda Arango, Shubham Kapoor, et al. Chronos: Learning the language of time series. arXiv preprint arXiv:2403.07815, 2024.", + "Yoshua Bengio, Réjean Ducharme, and Pascal Vincent. A neural probabilistic language model. Advances in neural information processing systems, 13, 2000.", + "George Box. Box and jenkins: time series analysis, forecasting and control. In A Very British Affair: Six Britons and the Development of Time Series Analysis During the 20th Century, pp. 161-215. Springer, 2013.", + "Defu Cao, Yujing Wang, Juanyong Duan, Ce Zhang, Xia Zhu, Congrui Huang, Yunhai Tong, Bixiong Xu, Jing Bai, Jie Tong, et al. Spectral temporal graph neural network for multivariate time-series forecasting. Advances in neural information processing systems, 33:17766-17778, 2020.", + "Tri Dao, Dan Fu, Stefano Ermon, Atri Rudra, and Christopher Ré. Flashattention: Fast and memory-efficient exact attention with io-awareness. Advances in Neural Information Processing Systems, 35:16344-16359, 2022.", + "Abhimanyu Das, Weihao Kong, Rajat Sen, and Yichen Zhou. A decoder-only foundation model for time-series forecasting. arXiv preprint arXiv:2310.10688, 2023.", + "Xiaohan Ding, Yiyuan Zhang, Yixiao Ge, Sijie Zhao, Lin Song, Xiangyu Yue, and Ying Shan. Unireplknet: A universal perception large-kernel convnet for audio video point cloud time-series and image recognition. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 5513-5524, 2024.", + "Mononito Goswami, Konrad Szafer, Arjun Choudhry, Yifu Cai, Shuo Li, and Artur Dubrawski. Moment: A family of open time-series foundation models. arXiv preprint arXiv:2402.03885, 2024.", + "Hans Hersbach, Bill Bell, Paul Berrisford, Shoji Hirahara, András Horányi, Joaquín Muñoz-Sabater, Julien Nicolas, Carole Peubey, Raluca Radu, Dinand Schepers, et al. The era5 global reanalysis. Quarterly Journal of the Royal Meteorological Society, 146(730):1999-2049, 2020.", + "RJ Hyndman. Forecasting: principles and practice. OTexts, 2018.", + "Taesung Kim, Jinhee Kim, Yunwon Tae, Cheonbok Park, Jang-Ho Choi, and Jaegul Choo. Reversible instance normalization for accurate time-series forecasting against distribution shift. In International Conference on Learning Representations, 2021.", + "Diederik P Kingma and Jimmy Ba. 
Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014.", + "Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 4015-4026, 2023.", + "Jesus Lago, Grzegorz Marcjasz, Bart De Schutter, and Rafal Weron. Forecasting day-ahead electricity prices: A review of state-of-the-art algorithms, best practices and an open-access benchmark. Applied Energy, 293:116983, 2021.", + "Guokun Lai, Wei-Cheng Chang, Yiming Yang, and Hanxiao Liu. Modeling long-and short-term temporal patterns with deep neural networks. In The 41st international ACM SIGIR conference on research & development in information retrieval, pp. 95-104, 2018." + ], + "bbox": [ + 171, + 205, + 828, + 924 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 488, + 946, + 506, + 960 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Shiyang Li, Xiaoyong Jin, Yao Xuan, Xiyou Zhou, Wenhu Chen, Yu-Xiang Wang, and Xifeng Yan. Enhancing the locality and breaking the memory bottleneck of transformer on time series forecasting. Advances in neural information processing systems, 32, 2019.", + "Bryan Lim, Sercan Ö Arık, Nicolas Loeff, and Tomas Pfister. Temporal fusion transformers for interpretable multi-horizon time series forecasting. International Journal of Forecasting, 37(4): 1748-1764, 2021.", + "Juncheng Liu, Chenghao Liu, Gerald Woo, Yiwei Wang, Bryan Hooi, Caiming Xiong, and Doyen Sahoo. Unitst: Effectively modeling inter-series and intra-series dependencies for multivariate time series forecasting. arXiv preprint arXiv:2406.04975, 2024a.", + "Minhao Liu, Ailing Zeng, Muxi Chen, Zhijian Xu, Qiuxia Lai, Lingna Ma, and Qiang Xu. Scinet: Time series modeling and forecasting with sample convolution and interaction. Advances in Neural Information Processing Systems, 35:5816-5828, 2022a.", + "Shizhan Liu, Hang Yu, Cong Liao, Jianguo Li, Weiyao Lin, Alex X Liu, and Schahram Dustdar. Pyraformer: Low-complexity pyramidal attention for long-range time series modeling and forecasting. In International conference on learning representations, 2021.", + "Yong Liu, Haixu Wu, Jianmin Wang, and Mingsheng Long. Non-stationary transformers: Exploring the stationarity in time series forecasting. Advances in Neural Information Processing Systems, 35: 9881-9893, 2022b.", + "Yong Liu, Tengge Hu, Haoran Zhang, Haixu Wu, Shiyu Wang, Lintao Ma, and Mingsheng Long. itransformer: Inverted transformers are effective for time series forecasting. arXiv preprint arXiv:2310.06625, 2023.", + "Yong Liu, Guo Qin, Xiangdong Huang, Jianmin Wang, and Mingsheng Long. Autotimes: Autoregressive time series forecasters via large language models. arXiv preprint arXiv:2402.02370, 2024b.", + "Yong Liu, Haoran Zhang, Chenyu Li, Xiangdong Huang, Jianmin Wang, and Mingsheng Long. Timer: Generative pre-trained transformers are large time series models. In Forty-first International Conference on Machine Learning, 2024c.", + "Spyros Makridakis, Evangelos Spiliotis, and Vassilios Assimakopoulos. The M4 competition: 100,000 time series and 61 forecasting methods. 
International Journal of Forecasting, 36(1):54-74, 2020.", + "Yuqi Nie, Nam H Nguyen, Phanwadee Sinthong, and Jayant Kalagnanam. A time series is worth 64 words: Long-term forecasting with transformers. arXiv preprint arXiv:2211.14730, 2022.", + "OpenAI. GPT-4 technical report. arXiv preprint arXiv:2303.08774, 2023.", + "Boris N Oreshkin, Dmitri Carpov, Nicolas Chapados, and Yoshua Bengio. N-beats: Neural basis expansion analysis for interpretable time series forecasting. arXiv preprint arXiv:1905.10437, 2019.", + "Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. Pytorch: An imperative style, high-performance deep learning library. Advances in neural information processing systems, 32, 2019.", + "PEMS. Traffic Dataset. http://pems.dot.ca.gov/.", + "Ofir Press, Noah A Smith, and Mike Lewis. Train short, test long: Attention with linear biases enables input length extrapolation. arXiv preprint arXiv:2108.12409, 2021.", + "Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J Liu. Exploring the limits of transfer learning with a unified text-to-text transformer. The Journal of Machine Learning Research, 21(1):5485-5551, 2020." + ], + "bbox": [ + 171, + 102, + 826, + 924 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 488, + 946, + 508, + 960 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Kashif Rasul, Arjun Ashok, Andrew Robert Williams, Arian Khorasani, George Adamopoulos, Rishika Bhagwatkar, Marin Biloš, Hera Ghonia, Nadhir Vincent Hassen, Anderson Schneider, et al. Lag-llama: Towards foundation models for time series forecasting. arXiv preprint arXiv:2310.08278, 2023.", + "David Salinas, Valentin Flunkert, Jan Gasthaus, and Tim Januschowski. DeepAR: Probabilistic forecasting with autoregressive recurrent networks. International journal of forecasting, 36(3): 1181-1191, 2020.", + "Xiaoming Shi, Shiyu Wang, Yuqi Nie, Dianqi Li, Zhou Ye, Qingsong Wen, and Ming Jin. Time-moe: Billion-scale time series foundation models with mixture of experts. arXiv preprint arXiv:2409.16040, 2024.", + "Jianlin Su, Murtadha Ahmed, Yu Lu, Shengfeng Pan, Wen Bo, and Yunfeng Liu. Roformer: Enhanced transformer with rotary position embedding. Neurocomputing, 568:127063, 2024.", + "Huihui Sun and Xiaofeng Zhang. Study on coded permutation entropy of finite length gaussian white noise time series. Chinese Journal of Electronics, 33(1):185-194, 2024.", + "Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, et al. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971, 2023.", + "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in neural information processing systems, 30, 2017.", + "Xindi Wang, Mahsa Salmani, Parsa Omidi, Xiangyu Ren, Mehdi Rezagholizadeh, and Armaghan Eshaghi. Beyond the limits: A survey of techniques to extend the context length in large language models. 
arXiv preprint arXiv:2402.02244, 2024a.", + "Yuxuan Wang, Haixu Wu, Jiaxiang Dong, Yong Liu, Yunzhong Qiu, Haoran Zhang, Jianmin Wang, and Mingsheng Long. Timexer: Empowering transformers for time series forecasting with exogenous variables. arXiv preprint arXiv:2402.19072, 2024b.", + "Gerald Woo, Chenghao Liu, Akshit Kumar, Caiming Xiong, Silvio Savarese, and Doyen Sahoo. Unified training of universal time series forecasting transformers. arXiv preprint arXiv:2402.02592, 2024.", + "Haixu Wu, Jiehui Xu, Jianmin Wang, and Mingsheng Long. Autoformer: Decomposition transformers with auto-correlation for long-term series forecasting. Advances in Neural Information Processing Systems, 34:22419-22430, 2021.", + "Haixu Wu, Tengge Hu, Yong Liu, Hang Zhou, Jianmin Wang, and Mingsheng Long. Timesnet: Temporal 2d-variation modeling for general time series analysis. arXiv preprint arXiv:2210.02186, 2022.", + "Haixu Wu, Hang Zhou, Mingsheng Long, and Jianmin Wang. Interpretable weather forecasting for worldwide stations with a unified deep model. Nature Machine Intelligence, 5(6):602-611, 2023.", + "Shukang Yin, Chaoyou Fu, Sirui Zhao, Ke Li, Xing Sun, Tong Xu, and Enhong Chen. A survey on multimodal large language models. arXiv preprint arXiv:2306.13549, 2023.", + "Manzil Zaheer, Satwik Kottur, Siamak Ravanbakhsh, Barnabas Poczos, Russ R Salakhutdinov, and Alexander J Smola. Deep sets. Advances in neural information processing systems, 30, 2017.", + "Ailing Zeng, Muxi Chen, Lei Zhang, and Qiang Xu. Are transformers effective for time series forecasting? In Proceedings of the AAAI conference on artificial intelligence, volume 37, pp. 11121-11128, 2023.", + "Yunhao Zhang and Junchi Yan. Crossformer: Transformer utilizing cross-dimension dependency for multivariate time series forecasting. In The Eleventh International Conference on Learning Representations, 2022." + ], + "bbox": [ + 171, + 102, + 826, + 925 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Wayne Xin Zhao, Kun Zhou, Junyi Li, Tianyi Tang, Xiaolei Wang, Yupeng Hou, Yingqian Min, Beichen Zhang, Junjie Zhang, Zican Dong, et al. A survey of large language models. arXiv preprint arXiv:2303.18223, 2023.", + "bbox": [ + 171, + 103, + 826, + 147 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Haoyi Zhou, Shanghang Zhang, Jieqi Peng, Shuai Zhang, Jianxin Li, Hui Xiong, and Wancai Zhang. Informer: Beyond efficient transformer for long sequence time-series forecasting. In Proceedings of the AAAI conference on artificial intelligence, volume 35, pp. 11106-11115, 2021.", + "bbox": [ + 171, + 152, + 826, + 198 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A PROOF OF MODEL EFFICIENCY", + "text_level": 1, + "bbox": [ + 171, + 220, + 470, + 237 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A.1 SETUPS", + "text_level": 1, + "bbox": [ + 171, + 252, + 272, + 266 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Consider an input univariate time series divided into $T$ tokens according to the patch size $P$ , which is fed into a vanilla Transformer. The training objective is to predict the next token of $P$ time points. 
We generalize the derivation from 1D sequences to 2D time series according to the different approaches to handling multivariate data with variable number $N$. We adopt the same notation as before: the Transformer consists of $L$ blocks with model dimension $D$. The multi-head attention mechanism has $H$ heads, each with a dimension of $d_{k}$ for query, key, and value, and $d_{k} = \\frac{D}{H}$. The intermediate dimension of the feed-forward network is set as $D_{\\mathrm{ff}} = \\alpha D$. The results are summarized in Table 8; we provide the detailed proofs in the following sections.", "bbox": [169, 277, 826, 391], "page_idx": 13 },
{ "type": "table", "img_path": "images/aee443b20369e87818db55b6a02afcebd785e671a0ec2689101915dc48c905af.jpg", "table_caption": ["Table 8: Parameter count and computational complexity of Transformers for multivariate time series."], "table_footnote": ["* $L$ is the block number of Transformers. $D$ is the dimension of embeddings (the hidden dimension of FFN $D_{\\mathrm{ff}}$ is set as $\\alpha D$). $H$ is the head number and the dimension of query, key, and value $d_k = D / H$. The overhead is to train on a multivariate time series ($N$ variables and $TP$ time points) with patch token length $P$ and context length $T$. Set $N = 1$ for training on univariate time series."], "table_body": "
Metric | Type | Count | Complexity
FLOPs (Training Speed) | Channel Independence | 12(PDNT + L(D + H)NT^2 + (2 + α)LD^2NT) | O(LDNT(D + T))
FLOPs (Training Speed) | Channel Dependence | 12(PDNT + L(D + H)N^2T^2 + (2 + α)LD^2NT) | O(LDNT(D + NT))
Parameters | Encoder-Only | (4 + 2α)LD^2 + 4LD + (1 + T)PD | O(LD^2)
Parameters | Decoder-Only | (4 + 2α)LD^2 + 4LD + 2PD | O(LD^2)
Memory Footprint | Self-Attention | 4(D + P)NT + (32 + 8α)LDNT + 4LHN^2T^2 | O(LHN^2T^2)
Memory Footprint | FlashAttention | 4(D + P)NT + (32 + 8α)LDNT | O(LDNT)
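To make the closed-form counts in Table 8 concrete, the following is a minimal Python sketch that evaluates the channel-independence row; the function names and the default hyperparameters (D = 512, H = 8, L = 4, α = 4, P = 96) are illustrative assumptions for this document, not released code.

```python
# Sketch: evaluate the closed-form counts of Table 8 (channel independence).
# Helper names and default hyperparameters are illustrative assumptions.

def training_flops(T, N=1, P=96, D=512, H=8, L=4, alpha=4):
    """Training FLOPs of an L-layer Transformer on N series of T patch tokens."""
    return 12 * (P * D * N * T + L * (D + H) * N * T**2 + (2 + alpha) * L * D**2 * N * T)

def parameter_count(T, P=96, D=512, L=4, alpha=4, token_wise=True):
    """Parameters with a token-wise projector (decoder-only) or a flatten head."""
    head = 2 * P * D if token_wise else (1 + T) * P * D
    return (4 + 2 * alpha) * L * D**2 + 4 * L * D + head

def activation_bytes(T, N=1, P=96, D=512, H=8, L=4, alpha=4, flash=True):
    """Stored activations in bytes; FlashAttention avoids the 4LHN^2T^2 term."""
    base = 4 * (D + P) * N * T + (32 + 8 * alpha) * L * D * N * T
    return base if flash else base + 4 * L * H * N**2 * T**2

# With the defaults and N = 1, training_flops(T) == 24960*T**2 + 76087296*T,
# matching the numeric derivation in Section A.2.
print(training_flops(100), parameter_count(100), activation_bytes(100))
```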
", + "bbox": [ + 173, + 431, + 823, + 527 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A.2 FLOPs", + "text_level": 1, + "bbox": [ + 171, + 578, + 272, + 592 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "As a preliminary, the multiplication between matrix $\\mathbf{A} \\in \\mathbb{R}^{n \\times m}$ and matrix $\\mathbf{C} \\in \\mathbb{R}^{m \\times p}$ requires $mnp$ multiplications and $mnp$ additions, resulting in $2mnp$ floating-point operations. Given batched matrices $\\mathbf{A} \\in \\mathbb{R}^{B \\times n \\times m}$ and $\\mathbf{C} \\in \\mathbb{R}^{B \\times m \\times p}$ , $B$ times matrix multiplications will be performed. It is evident that the batch size is a linear multiplier. Thus, we first omit $B$ to calculate the operations of dealing with one univariate series, and then we will reintroduce it to analyze channel independence.", + "bbox": [ + 169, + 603, + 826, + 676 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "The computational cost of Transformers can be primarily categorized into two types: (1) multi-head attention calculation and (2) linear transformations. In contrast, the operations of layer normalization, residual connection, activation functions, and position embedding with the complexity of $\\mathcal{O}(TD)$ are less significant. Therefore, we derive the computational complexity mainly with respect to the above two types by delving into the forwarding process of one univariate series.", + "bbox": [ + 169, + 680, + 826, + 752 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Patch Embedding The tokenized time series $\\{\\mathbf{x}_i\\} \\in \\mathbb{R}^{T\\times P}$ is mapped into the embedding space through the patch-wise embedding $\\mathbf{W}_e\\in \\mathbb{R}^{D\\times P}$ , resulting in $2PDT$ operations.", + "bbox": [ + 169, + 765, + 823, + 796 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Self-Attention The calculation of self-attention begins with the computation of query, key and value by multiplying the patch embeddings with matrices $\\mathbf{W}_q$ , $\\mathbf{W}_k$ , $\\mathbf{W}_v \\in \\mathbb{R}^{D \\times d_k}$ respectively in $H$ heads, which incurs a computational cost of $6HDd_kT = 6D^2T$ and yields $\\mathbf{Q}$ , $\\mathbf{K}$ , $\\mathbf{V} \\in \\mathbb{R}^{H \\times T \\times d_k}$ . Next, the dot product $\\mathbf{Q}\\mathbf{K}^\\top \\in \\mathbb{R}^{H \\times T \\times T}$ is conducted in each head, leading to $2Hd_kT^2 = 2DT^2$ operations. Following this, the Pre-Softmax map is divided by $\\sqrt{d_k}$ and processed through Softmax, which includes exponentiation, summation, and normalization of each element, resulting in $4HT^2$ operations. The subsequent multiplication with $\\mathbf{V}$ incurs $2Hd_kT^2 = 2DT^2$ operations. 
Finally, multiple heads are concatenated and multiplied by $\\mathbf{W}_o \\in \\mathbb{R}^{D \\times D}$, contributing $2D^2T$ operations.", "bbox": [169, 809, 828, 926], "page_idx": 13 },
{ "type": "header", "text": "Published as a conference paper at ICLR 2025", "bbox": [171, 32, 478, 47], "page_idx": 13 },
{ "type": "page_number", "text": "14", "bbox": [488, 946, 508, 960], "page_idx": 13 },
{ "type": "text", "text": "Feed-Forward Network It first projects the token representations into the dimension $D_{\\mathrm{ff}}$ and subsequently projects them back to the dimension $D$, resulting in $4\\alpha D^2 T$ operations in total.", "bbox": [169, 103, 823, 133], "page_idx": 14 },
{ "type": "text", "text": "Patch Projection For encoder-only models, all token representations are flattened and mapped directly to $P$ time points by $\\mathbf{W}_d\\in \\mathbb{R}^{TD\\times P}$. In contrast, the token-wise projector $\\mathbf{W}_d\\in \\mathbb{R}^{D\\times P}$ in decoder-only models independently maps each token to the predicted next token. In both cases, the number of operations is $2PDT$, but the token-wise projector results in a smaller parameter count.", "bbox": [169, 151, 826, 208], "page_idx": 14 },
{ "type": "text", "text": "The forward operations of an $L$-layer Transformer thus sum to $4PDT + 4L(D + H)T^2 + (8 + 4\\alpha)LD^2 T$. Considering that the majority of operations in Transformers are binary operations (e.g., matrix multiplications), the gradients for both matrices are computed separately. As a result, the number of operations in backpropagation is twice that of the forward pass. Therefore, the total operations of training a Transformer on a univariate series consisting of $T$ patches, each of length $P$, is derived as:", "bbox": [169, 214, 826, 286], "page_idx": 14 },
{ "type": "equation", "text": "\n$$\nf(T) = 12PDT + 12L(D + H)T^{2} + (24 + 12\\alpha)LD^{2}T.\n$$\n", "text_format": "latex", "bbox": [303, 292, 691, 311], "page_idx": 14 },
{ "type": "text", "text": "Plugging in typical hyperparameters of current time-series Transformers and forecasting benchmarks, $D = 512$, $H = 8$, $L = 4$, $\\alpha = 4$, and $P = 96$, we obtain:", "bbox": [169, 320, 825, 349], "page_idx": 14 },
{ "type": "equation", "text": "\n$$\nf(T) = 24960T^{2} + 76087296T \\propto 3.28 \\times 10^{-4}T^{2} + T.\n$$\n", "text_format": "latex", "bbox": [310, 358, 684, 375], "page_idx": 14 },
{ "type": "text", "text": "Due to the prevalence of short contexts in the time series field, where $T \\ll D$ leads to a significant coefficient on the $\\mathcal{O}(T)$ term, we find that the primary computational burden of time-series Transformers lies in the linear transformations with $\\mathcal{O}(T)$ cost, rather than in multi-head self-attention with $\\mathcal{O}(T^2)$ complexity.", "bbox": [169, 385, 826, 429], "page_idx": 14 },
{ "type": "text", "text": "For multivariate series with $N$ variables, FLOPs are influenced by the handling of multivariate data.
When adopting channel independence (Timer and PatchTST), $N$ can be regarded as the batch size $B$:", "bbox": [169, 434, 826, 463], "page_idx": 14 },
{ "type": "equation", "text": "\n$$\nNf(T) = 12PDNT + 12L(D + H)NT^{2} + (24 + 12\\alpha)LD^{2}NT. \\tag{9}\n$$\n", "text_format": "latex", "bbox": [274, 470, 825, 489], "page_idx": 14 },
{ "type": "text", "text": "For models that capture fine-grained intra- and inter-series dependencies in multivariate series (Timer-XL and UniTST), $N$ is reflected in the enlarged number of tokens:", "bbox": [169, 498, 823, 527], "page_idx": 14 },
{ "type": "equation", "text": "\n$$\nf(NT) = 12PDNT + 12L(D + H)N^{2}T^{2} + (24 + 12\\alpha)LD^{2}NT. \\tag{10}\n$$\n", "text_format": "latex", "bbox": [269, 536, 825, 553], "page_idx": 14 },
{ "type": "text", "text": "Notably, FLOPs are not entirely equivalent to actual runtime. While FlashAttention increases the overall FLOPs due to its recomputation process, it reduces the number of memory reads and writes. Given that computation is significantly faster than memory access on GPUs, using FlashAttention can actually lead to further improvements in runtime performance.", "bbox": [169, 561, 825, 619], "page_idx": 14 },
{ "type": "text", "text": "A.3 PARAMETER COUNT", "text_level": 1, "bbox": [171, 638, 361, 652], "page_idx": 14 },
{ "type": "text", "text": "From the above analysis, we observe that the parameter count of Transformers includes the following:", "bbox": [169, 666, 826, 681], "page_idx": 14 },
{ "type": "text", "text": "Patch Embedding $\\mathbf{W}_e\\in \\mathbb{R}^{D\\times P}$ to obtain patch embeddings.", "bbox": [169, 698, 596, 714], "page_idx": 14 },
{ "type": "text", "text": "Self-Attention $\\mathbf{W}_q, \\mathbf{W}_k, \\mathbf{W}_v \\in \\mathbb{R}^{D \\times d_k}$ of $H$ heads and $\\mathbf{W}_o \\in \\mathbb{R}^{D \\times D}$ for all heads.", "bbox": [169, 729, 741, 747], "page_idx": 14 },
{ "type": "text", "text": "Feed-Forward Network $\\mathbf{W}_{\\mathrm{ffn1}}, \\mathbf{W}_{\\mathrm{ffn2}} \\in \\mathbb{R}^{D \\times D_{\\mathrm{ff}}}$ in the feed-forward network.", "bbox": [169, 765, 679, 781], "page_idx": 14 },
{ "type": "text", "text": "Layer Normalization It contains the weight $\\mathbf{W} \\in \\mathbb{R}^D$ and the bias $\\mathbf{b} \\in \\mathbb{R}^D$. Every Transformer block includes two normalizations, after multi-head attention and the feed-forward network respectively.", "bbox": [169, 797, 826, 829], "page_idx": 14 },
{ "type": "text", "text": "Patch Projection $\\mathbf{W}_d\\in \\mathbb{R}^{TD\\times P}$ in the flatten head and $\\mathbf{W}_d\\in \\mathbb{R}^{D\\times P}$ in the token-wise projection.", "bbox": [169, 844, 794, 861], "page_idx": 14 },
{ "type": "text", "text": "In sum, the total count of parameters in time-series Transformers can be expressed as:", "bbox": [169, 867, 736, 882], "page_idx": 14 },
{ "type": "equation", "text": "\n$$\n\\text{Parameter Count} = \\left\\{ \\begin{array}{ll} (4 + 2\\alpha)LD^{2} + 4LD + (1 + T)PD, & \\text{using flatten head}, \\\\ (4 + 2\\alpha)LD^{2} + 4LD + 2PD, & \\text{using token-wise projection}. \\end{array} \\right. \\tag{11}\n$$\n", "text_format": "latex", "bbox": [183, 893, 825, 926], "page_idx": 14 },
{ "type": "header", "text": "Published as a conference paper at ICLR 2025", "bbox": [171, 32, 478, 47], "page_idx": 14 },
{ "type": "page_number", "text": "15", "bbox": [488, 946, 506, 959], "page_idx": 14 },
{ "type": "text", "text": "A.4 MEMORY FOOTPRINT", "text_level": 1, "bbox": [171, 104, 370, 118], "page_idx": 15 },
{ "type": "text", "text": "The memory footprint during training can be primarily categorized into three parts: activation values stored for backpropagation, model parameters, and optimizer parameters.", "bbox": [169, 128, 823, 159], "page_idx": 15 },
{ "type": "text", "text": "Regardless of other precision types (e.g., FP16), model parameters and gradients are typically stored as 32-bit floating-point numbers, with each parameter occupying 4 bytes of memory. For time-series Transformers, the memory footprint of activation values is given as follows:", "bbox": [169, 165, 825, 207], "page_idx": 15 },
{ "type": "text", "text": "Patch Embedding Gradient computation for $\\mathbf{W}_e$ preserves its input $\\{\\mathbf{x}_i\\} \\in \\mathbb{R}^{T\\times P}$ of $4PT$ bytes.", "bbox": [169, 220, 826, 238], "page_idx": 15 },
{ "type": "text", "text": "Self-Attention Gradient calculation for $\\mathbf{W}_q, \\mathbf{W}_k, \\mathbf{W}_v \\in \\mathbb{R}^{D \\times d_k}$ requires their input $\\mathbf{H} \\in \\mathbb{R}^{T \\times D}$, amounting to a total of $4DT$ bytes. The dot product for the attention map also needs to store $\\mathbf{Q}, \\mathbf{K}, \\mathbf{V} \\in \\mathbb{R}^{H \\times T \\times d_k}$, which collectively require $12DT$ bytes of memory. Gradient computation of $\\mathbf{W}_o \\in \\mathbb{R}^{D \\times D}$ necessitates the concatenated multi-head attention representations $\\mathbf{H} \\in \\mathbb{R}^{T \\times D}$, which occupy $4DT$ bytes. If memory-efficient attention mechanisms like FlashAttention (Dao et al., 2022) are not applied, the outcome $\\mathbf{Q}\\mathbf{K}^\\top$ will be stored and occupies $4HT^2$ bytes. If FlashAttention is adopted instead, this storage overhead can be avoided.", "bbox": [169, 251, 826, 352], "page_idx": 15 },
{ "type": "text", "text": "Feed-Forward Network The ReLU activation function is typically employed in this module. The input $\\mathbf{H} \\in \\mathbb{R}^{T \\times D}$ must be retained, requiring a total of $4DT$ bytes. Additionally, the product $\\mathbf{W}_{\\mathrm{ffn1}}\\mathbf{H}$ also needs to be stored, amounting to $4D_{\\mathrm{ff}}T$ bytes.
Similarly, the output activations of ReLU, which serve as the input for subsequent linear transformations, necessitate another $4D_{\\mathrm{ff}}T$ bytes.", "bbox": [169, 366, 823, 424], "page_idx": 15 },
{ "type": "text", "text": "Layer Normalization Each Transformer block encompasses two layer normalizations, with each normalization retaining its input, resulting in a memory requirement of $8DT$ bytes.", "bbox": [169, 438, 823, 468], "page_idx": 15 },
{ "type": "text", "text": "Patch Projection To perform backpropagation for $W_{d} \\in \\mathbb{R}^{D \\times P}$, it is necessary to retain its input $\\mathbf{H} \\in \\mathbb{R}^{T \\times D}$, resulting in a total memory requirement of $4DT$ bytes.", "bbox": [169, 481, 823, 512], "page_idx": 15 },
{ "type": "text", "text": "The total activation values of the entire model occupying GPU memory are given as follows:", "bbox": [169, 517, 826, 532], "page_idx": 15 },
{ "type": "equation", "text": "\n$$\n\\text{Memory Footprint} = \\left\\{ \\begin{array}{ll} 4(D + P)T + (32 + 8\\alpha)LDT + 4LHT^{2}, & \\text{w/o FlashAttention}, \\\\ 4(D + P)T + (32 + 8\\alpha)LDT, & \\text{with FlashAttention}. \\end{array} \\right. \\tag{12}\n$$\n", "text_format": "latex", "bbox": [187, 540, 825, 574], "page_idx": 15 },
{ "type": "text", "text": "The derived occupancy of activation values increases proportionally with the batch size $B$. For multivariate series, $N$ can be used as a multiplier under channel independence. For channel-dependence models, we can substitute $T$ with $NT$ as before. The total memory footprint is the sum of the activation values and the parameters of the model and optimizer, the latter being proportional to the parameter count derived in Equation 11. Due to the limited model size in the time series field, the memory consumption of parameters is minimal and can be considered negligible in practice. Therefore, the overall memory footprint is predominantly determined by the memory occupied by activation values.", "bbox": [169, 582, 825, 680], "page_idx": 15 },
{ "type": "text", "text": "B EXPERIMENTAL DETAILS", "text_level": 1, "bbox": [171, 700, 419, 715], "page_idx": 15 },
{ "type": "text", "text": "B.1 DATASETS", "text_level": 1, "bbox": [171, 731, 290, 744], "page_idx": 15 },
{ "type": "text", "text": "We conduct experiments on well-acknowledged benchmarks to evaluate the performance of the proposed Timer-XL: (1) ETT (Zhou et al., 2021) contains 7 factors of electricity transformers from July 2016 to July 2018, recorded every hour or 15 minutes. (2) Weather (Wu et al., 2021) includes 21 meteorological factors collected every 10 minutes from the Max Planck Biogeochemistry Institute Weather Station in 2020. (3) ECL (Wu et al., 2021) records the hourly electricity consumption data of 321 clients. (4) Traffic (Wu et al., 2021) collects hourly road occupancy rates measured by 862 sensors on the San Francisco Bay area highways from January 2015 to December 2016. (5) Solar-Energy (Lai et al., 2018) records the solar power production of 137 PV plants in 2006, sampled every 10 minutes. (7) PEMS (Liu et al., 2022a) contains records from the public traffic network in California collected in 5-minute time windows.
(8) EPF (Lago et al., 2021) includes five subsets that span six years. Each contains the electricity price as the endogenous variable to be predicted and two exogenous variables of the day-ahead electricity markets. (9) GTWSF (Wu et al.,", + "bbox": [ + 169, + 757, + 826, + 925 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 488, + 948, + 508, + 960 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "2023) is a dataset collected from the National Centers for Environmental Information (NCEI). This large-scale collection contains hourly averaged wind speed and temperature data from 3850 stations with different geographical scales and densities each, spanning from 2019 to 2021. (10) UTSD (Liu et al., 2024c) is a multi-domain time series dataset, which includes seven domains with a hierarchy of four volumes. We adopt the largest volume that encompasses 1 billion time points for pre-training.", + "bbox": [ + 169, + 103, + 823, + 175 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "We further establish challenging forecasting benchmarks based on the ECMWF Reanalysis v5 (ERA5) dataset (Hersbach et al., 2020) to prevent potential overfitting and performance saturation of deep forecasters in existing benchmarks. Concretely, ERA5 is the fifth generation ECMWF atmospheric reanalysis of the global climate covering the period from January 1940 to the present, which provides hourly estimates of a large number of atmospheric, land, and oceanic climate variables, and includes information about uncertainties for all variables at reduced spatial and temporal resolutions. Due to its pattern sufficiency of temporal dynamics and variable correlations, we could establish practical benchmarks to thoroughly evaluate the performance for univariate and multivariate forecasting, as well as adopt it for large-scale pre-training to develop domain-specific large time series models.", + "bbox": [ + 169, + 180, + 826, + 306 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Our datasets are constructed as follows:", + "bbox": [ + 171, + 311, + 436, + 325 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- ERA5-S: To establish a realistic univariate forecasting benchmark, we start from the basic principle of forecastability and make the prediction on sufficient lookback lengths. Instead of the short time span of training in previous benchmarks (generally no more than 2 years), we curated a three-hour frequency dataset spanning 40 years (January 1979 to December 2018) from ERA5, encompassing 116880 time points. In order to prevent overfitting on a single time series, we selected worldwide stations to form seven subsets.", + "- ERA5-MS: Each univariate series of ERA5-S provides partial observations governed by the spatio-temporal global weather system. Since discovering the global spatio-temporal correlations presents a fundamental challenge in meteorology, we convert ERA5-S into ERA5-MS by using seven subsets as a challenging multivariate forecasting benchmark. 
Based on the average results in Tables 2 and 5, we can validate the existence of multi-station correlations among the selected stations, which enhance the average prediction accuracy.",
    "- ERA5-Large: To explore the purely data-driven approach to building domain-specific large time series models, we further expand the number of stations to form ERA5-Large, a dataset that evenly covers 4920 worldwide meteorological stations and spans 40 years. We establish the dataset for pre-training, which is expected to generalize across time (train on past observations and generalize to the future) and across stations (train on partial stations and generalize to other unseen stations). The total number of time points is around half a billion."
  ],
  "bbox": [215, 340, 826, 601],
  "page_idx": 16
},
{ "type": "text", "text": "We follow the same data processing and train-validation-test split protocol used in TimesNet (Wu et al., 2022), where the train, validation, and test datasets are divided according to chronological order to prevent data leakage. Detailed dataset descriptions and prediction settings are provided in Table 9.", "bbox": [169, 613, 826, 657], "page_idx": 16 },
{ "type": "text", "text": "B.2 BASELINE MODELS", "text_level": 1, "bbox": [171, 672, 354, 686], "page_idx": 16 },
{ "type": "text", "text": "We aim to present Timer-XL as a foundation model for unified time series forecasting. We thoroughly include well-acknowledged and advanced models for each forecasting task. For univariate time series forecasting, we compare Timer-XL with PatchTST (Nie et al., 2022) under channel independence. For multivariate time series prediction, we report official results from Liu et al. (2023; 2024b); Ding et al. (2024), including UniRepLKNet (2024), iTransformer (2023), Corrformer (2023), DLinear (2023), TimesNet (2022), Non-stationary Transformer (2022b), Pyraformer (2021), Autoformer (2021), StemGNN (2020), DeepAR (2020), and N-BEATS (2019). We further reproduce the performance of related Transformers, Timer (2024c) and UniTST (2024a), based on their official repositories. For covariate-informed time series forecasting, we report the official results of TimeXer (2024b). For zero-shot forecasting, we follow Liu et al. (2024c) and predict future length-96 windows in well-acknowledged datasets. In total, more than 20 baselines are included for a complete comparison.", "bbox": [169, 699, 828, 853], "page_idx": 16 },
{ "type": "text", "text": "B.3 IMPLEMENTATION DETAILS", "text_level": 1, "bbox": [171, 869, 410, 883], "page_idx": 16 },
{ "type": "text", "text": "All the experiments are implemented in PyTorch (Paszke et al., 2019) on NVIDIA A100 Tensor Core GPUs. We employ the Adam optimizer (Kingma & Ba, 2014) and the MSE loss for model optimization.", "bbox": [169, 895, 826, 925], "page_idx": 16 },
{ "type": "header", "text": "Published as a conference paper at ICLR 2025", "bbox": [171, 32, 478, 47], "page_idx": 16 },
{ "type": "page_number", "text": "17", "bbox": [488, 946, 508, 959], "page_idx": 16 },
{ "type": "table", "img_path": "images/edeb74890cf2ec87c63a080f175c702a03198c33d618859b8de9fecd1baaa226.jpg", "table_caption": ["Table 9: Dataset descriptions. Dim.
denotes the number of variables (For univariate forecasting, we adopt channel independence (Nie et al., 2022) or train separate models on each variable). Dataset Length denotes the number of time points in the (train, validation, test) splits." + ], + "table_footnote": [], + "table_body": "
Tasks | Dataset | Dim. | Training Setting | Dataset Length | Information (Frequency)
Univariate Forecasting | ETTh1 | 7 | {24, 96, 168, 672, 2880}→96 | (8545, 2881, 2881) | Electricity (Hourly)
Univariate Forecasting | ECL | 321 | {24, 96, 168, 672, 2880, 8832}→96 | (18317, 2633, 5261) | Electricity (Hourly)
Univariate Forecasting | Traffic | 862 | {24, 96, 168, 672, 2880, 8832}→96 | (12185, 1757, 3509) | Transportation (Hourly)
Univariate Forecasting | PEMS03 | 358 | {96, 288, 1152, 2016, 8064}→96 | (15617, 5135, 5135) | Transportation (5 mins)
Univariate Forecasting | ERA5-S | 7 | 3072→96 | (81816, 11688, 23376) | Climate (3 Hours)
Multivariate Forecasting | ETTh1, ETTh2 | 7 | {96, 672}→{96, 192, 336, 720} | (8545, 2881, 2881) | Electricity (Hourly)
Multivariate Forecasting | ETTm1, ETTm2 | 7 | {96, 672}→{96, 192, 336, 720} | (34465, 11521, 11521) | Electricity (15 mins)
Multivariate Forecasting | ECL | 321 | {96, 672}→{96, 192, 336, 720} | (18317, 2633, 5261) | Electricity (Hourly)
Multivariate Forecasting | Traffic | 862 | {96, 672}→{96, 192, 336, 720} | (12185, 1757, 3509) | Transportation (Hourly)
Multivariate Forecasting | Weather | 21 | {96, 672}→{96, 192, 336, 720} | (36792, 5271, 10540) | Climate (10 mins)
Multivariate Forecasting | Solar-Energy | 137 | {96, 672}→{96, 192, 336, 720} | (36601, 5161, 10417) | Energy (10 mins)
Multivariate Forecasting | ERA5-MS | 7 | 3072→96 | (81816, 11688, 23376) | Climate (3 Hours)
Multivariate Forecasting | GTWSF | 3850 | 48→24 | (12280, 1755, 3509) | Wu et al. (2023)
Forecasting with Covariates | NP | 1+2 | 168→24 | (36500, 5219, 10460) | Electricity (Hourly)
Forecasting with Covariates | PJM | 1+2 | 168→24 | (36500, 5219, 10460) | Electricity (Hourly)
Forecasting with Covariates | BE | 1+2 | 168→24 | (36500, 5219, 10460) | Electricity (Hourly)
Forecasting with Covariates | FR | 1+2 | 168→24 | (36500, 5219, 10460) | Electricity (Hourly)
Forecasting with Covariates | DE | 1+2 | 168→24 | (36500, 5219, 10460) | Electricity (Hourly)
Pre-training | ERA5-Large | 4920 | 3072→96 | (81816, 11688, 23376) | Climate (3 Hours)
Pre-training | UTSD | - | 2880→96 | (868778970, 96530996, -) | Liu et al. (2024c)
Pre-training | LOTSA | - | 2880→96 | (231082956489, -, -) | Woo et al. (2024)
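As noted above, all splits follow chronological order to prevent data leakage. The sketch below illustrates this protocol with the (train, validation, test) lengths of Table 9; the helper name and the simple border arithmetic are our own simplification of TimesNet-style data loaders, which additionally handle lookback overlap.

```python
import numpy as np

def chronological_split(series: np.ndarray, n_train: int, n_val: int, n_test: int):
    """Split a (time, variables) array in chronological order (no shuffling),
    using the 'Dataset Length' triplets of Table 9."""
    train = series[:n_train]
    val = series[n_train:n_train + n_val]
    test = series[n_train + n_val:n_train + n_val + n_test]
    return train, val, test

# e.g., ETTh1 in Table 9: (8545, 2881, 2881) time points over 7 variables
data = np.random.randn(8545 + 2881 + 2881, 7)  # placeholder series
train, val, test = chronological_split(data, 8545, 2881, 2881)
```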
", + "bbox": [ + 173, + 191, + 823, + 592 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/ae48b0c91207781b8299f66108f862a41b2dfc21e7c5ee0fadf0b0003ebd3f6c.jpg", + "table_caption": [ + "Table 10: Performance robustness of Timer-XL. The prediction settings and results keep the same with Table 12. The standard deviation is obtained from three random seeds." + ], + "table_footnote": [], + "table_body": "
Dataset | ECL | ETTh1 | Traffic
Horizon | MSE MAE | MSE MAE | MSE MAE
96 | 0.127±0.001 0.219±0.001 | 0.364±0.002 0.397±0.001 | 0.340±0.002 0.238±0.001
192 | 0.145±0.001 0.236±0.001 | 0.405±0.002 0.424±0.001 | 0.360±0.001 0.247±0.001
336 | 0.159±0.001 0.252±0.001 | 0.427±0.003 0.439±0.002 | 0.377±0.002 0.256±0.002
720 | 0.187±0.003 0.277±0.003 | 0.439±0.002 0.459±0.004 | 0.418±0.003 0.279±0.002
Dataset | Solar-Energy | Weather | ERA5-MS
Horizon | MSE MAE | MSE MAE | MSE MAE
96 | 0.162±0.003 0.221±0.002 | 0.157±0.002 0.205±0.001 | 0.164±0.001 0.307±0.000
192 | 0.187±0.003 0.239±0.002 | 0.206±0.003 0.250±0.002 | -
336 | 0.205±0.003 0.255±0.002 | 0.259±0.003 0.291±0.003 | -
720 | 0.238±0.003 0.279±0.003 | 0.337±0.002 0.344±0.002 | -
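The ± terms above are standard deviations over three runs with different random seeds; schematically (the numbers below are placeholders for illustration, not reported results):

```python
import numpy as np

# MSE of one setting under three random seeds (placeholder values)
mse_runs = np.array([0.126, 0.127, 0.128])
print(f"{mse_runs.mean():.3f}±{mse_runs.std(ddof=1):.3f}")  # e.g., 0.127±0.001
```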
", + "bbox": [ + 173, + 705, + 823, + 888 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 490, + 948, + 506, + 959 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "We adopt channel independence from Nie et al. (2022) in univariate time series forecasting. Based on the prevalence of patch-level tokenization in the time series field, we reproduce typical Transformers: PatchTST (2022), Timer (2024c), and UniTST (2024a) based on their official repositories, and keep their model hyperparameters and training configurations the same to evaluate the inherent capability of base models. The results of other baselines are based on the benchmark provided by Liu et al. (2023; 2024b); Ding et al. (2024); Wang et al. (2024b), which is fairly built on the configurations provided by their original paper. Detailed experimental configurations are provided in Table 11. We also report the standard deviations under three runs with different random seeds in Table 10, which exhibits that the performance of Timer-XL is stable.", + "bbox": [ + 169, + 103, + 826, + 229 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "For the metrics, we adopt the symmetric mean absolute percentage error (SMAPE), a metric that is independent of the numerical range, to evaluate one-for-all generalization performance on ERA5-Large. For other experiments, we adopt the root mean square error (MSE) and mean absolute error (MAE) that follows previous work. These metrics can be calculated as follows:", + "bbox": [ + 169, + 236, + 826, + 292 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {S M A P E} = \\frac {2 0 0}{T} \\sum_ {i = 1} ^ {T} \\frac {| \\mathbf {X} _ {i} - \\widehat {\\mathbf {X}} _ {i} |}{| \\mathbf {X} _ {i} | + | \\widehat {\\mathbf {X}} _ {i} |}, \\mathrm {M S E} = \\sum_ {i = 1} ^ {T} | \\mathbf {X} _ {i} - \\widehat {\\mathbf {X}} _ {i} | ^ {2}, \\mathrm {M A E} = \\sum_ {i = 1} ^ {T} | \\mathbf {X} _ {i} - \\widehat {\\mathbf {X}} _ {i} |.\n$$\n", + "text_format": "latex", + "bbox": [ + 218, + 309, + 777, + 352 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Here $\\mathbf{X} \\in \\mathbb{R}^T$ is a univariate time series and $\\widehat{\\mathbf{X}}$ is the corresponding prediction. For multivariate time series, we further calculate the mean metric in the variable dimension.", + "bbox": [ + 169, + 369, + 823, + 402 + ], + "page_idx": 18 + }, + { + "type": "table", + "img_path": "images/2281e5e515e4ed79c036c0ecad9a5c9334e13d1e07ecf79709364cd138acdc82.jpg", + "table_caption": [ + "Table 11: Experimental configurations of Timer-XL and other baseline Transformers. All the experiments adopt the ADAM (2014) optimizer with the default hyperparameter $(\\beta_{1},\\beta_{2}) = (0.9,0.999)$ ." + ], + "table_footnote": [ + "* $L$ is the layer number of Transformers, $D$ is the dimension of token embedding (the hidden dimension of FFN is set as $4D$ ), $d_k$ is the dimension of query, key, and value, $H$ is the multi-head number, $P$ is the patch size, and LR is the initial learning rate." + ], + "table_body": "
Experiment | Model | Dataset | L | D | d_k | H | P | LR | Loss | Batch Size | Epochs
Univariate Forecasting | Timer-XL | ECL | 3 | 512 | 64 | 8 | 96 | 0.0005 | MSE | 2048 | 10
Univariate Forecasting | | Traffic | 3 | 512 | 64 | 8 | 96 | 0.001 | MSE | 2048 | 10
Univariate Forecasting | PatchTST | ETTh1 | 1 | 512 | 64 | 8 | 96 | 0.0005 | MSE | 256 | 10
Univariate Forecasting | | PEMS03 | 3 | 512 | 64 | 8 | 96 | 0.0005 | MSE | 2048 | 10
Univariate Forecasting | | ERA5-S | 1 | 512 | 64 | 8 | 96 | 0.0005 | MSE | 2048 | 10
Multivariate Forecasting | Timer-XL | Global Temp. | 3 | 1024 | 128 | 8 | 24 | 0.0001 | MSE | 8 | 10
Multivariate Forecasting | | Global Wind | 3 | 1024 | 128 | 8 | 24 | 0.0001 | MSE | 8 | 10
Multivariate Forecasting | | ECL | 5 | 512 | 64 | 8 | 96 | 0.0005 | MSE | 4 | 10
Multivariate Forecasting | UniTST | Traffic | 4 | 512 | 64 | 8 | 96 | 0.0005 | MSE | 4 | 10
Multivariate Forecasting | Timer | ETTh1 | 1 | 1024 | 128 | 8 | 96 | 0.0001 | MSE | 32 | 10
Multivariate Forecasting | PatchTST | Weather | 4 | 512 | 64 | 8 | 96 | 0.0005 | MSE | 32 | 10
Multivariate Forecasting | | Solar. | 6 | 512 | 64 | 8 | 96 | 0.0001 | MSE | 16 | 10
Multivariate Forecasting | | ERA5-MS | 3 | 512 | 64 | 8 | 96 | 0.0001 | MSE | 256 | 10
Forecasting with Covariates | Timer-XL | NP | 3 | 512 | 64 | 8 | 24 | 0.0001 | MSE | 4 | 10
Forecasting with Covariates | TimeXer | PJM | 2 | 512 | 64 | 8 | 24 | 0.0001 | MSE | 16 | 10
Forecasting with Covariates | Timer | BE | 2 | 512 | 64 | 8 | 24 | 0.0001 | MSE | 16 | 10
Forecasting with Covariates | PatchTST | FR | 2 | 512 | 64 | 8 | 24 | 0.0001 | MSE | 16 | 10
Forecasting with Covariates | | DE | 2 | 512 | 64 | 8 | 24 | 0.0001 | MSE | 16 | 10
Pre-training | Timer-XL | ERA5-Large | 4 | 512 | 64 | 8 | 96 | 0.0001 | MSE | 40960 | 10
Pre-training | PatchTST | ERA5-Large | 4 | 512 | 64 | 8 | 96 | 0.0001 | MSE | 40960 | 10
Pre-training | Timer-XL | UTSD | 8 | 1024 | 128 | 8 | 96 | 0.00005 | MSE | 16384 | 10
Pre-training | Timer (Liu et al., 2024c) | UTSD | 8 | 1024 | 128 | 8 | 96 | 0.00005 | MSE | 16384 | 10
Pre-training | Timer-XL | LOTSA | 8 | 1024 | 128 | 8 | 96 | 0.001 | MSE | 32768 | -
Pre-training | Moirai_Small (Woo et al., 2024) | LOTSA | 6 | 384 | 64 | 6 | - | - | - | - | -
Pre-training | Moirai_Base (Woo et al., 2024) | LOTSA | 12 | 768 | 64 | 12 | - | - | - | - | -
Pre-training | Moirai_Large (Woo et al., 2024) | LOTSA | 24 | 1024 | 64 | 16 | - | - | - | - | -
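A minimal PyTorch sketch of the optimization setup shared by the configurations in Table 11 (Adam with the default betas and MSE loss), together with the metrics defined in the previous section; the placeholder `model` and the chosen learning rate (one of the values in the LR column) are illustrative assumptions:

```python
import torch
import torch.nn as nn

model = nn.Linear(672, 96)  # placeholder forecaster (lookback 672 -> horizon 96)
optimizer = torch.optim.Adam(model.parameters(), lr=5e-4, betas=(0.9, 0.999))
criterion = nn.MSELoss()  # training objective used throughout Table 11

def smape(x: torch.Tensor, x_hat: torch.Tensor) -> torch.Tensor:
    """Symmetric MAPE (percent) of a univariate series, per the equation above."""
    return (200.0 / x.numel()) * ((x - x_hat).abs() / (x.abs() + x_hat.abs())).sum()

def mse(x: torch.Tensor, x_hat: torch.Tensor) -> torch.Tensor:
    return ((x - x_hat) ** 2).sum()  # summed form, following the equation above

def mae(x: torch.Tensor, x_hat: torch.Tensor) -> torch.Tensor:
    return (x - x_hat).abs().sum()
```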
", + "bbox": [ + 173, + 453, + 823, + 885 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 490, + 948, + 506, + 959 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "C HYPERPARAMETER SENSITIVITY", + "text_level": 1, + "bbox": [ + 171, + 102, + 483, + 118 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "We evaluate the hyperparameter sensitivity of Timer-XL on the ERA5-MS benchmark, as illustrated in Figure 8, concerning the following factors: the number of layers $L$ , the patch size $P$ , and the lookback length during inference. Our findings indicate that performance of Timer-XL generally improves with increases with $L$ , suggesting that Timer-XL is a scalable deep forecaster. Furthermore, our analysis of the influence of $P$ reveals that the optimal patch size is generally close to the predicted length, since it avoids multi-step error accumulations. Toward better long-term forecasting performance, it leaves a future improvement to adopt different patch sizes of input and output tokens. Finally, we investigate the impact of input length during inference. We discover that the optimal lookback length of during is not necessarily the length during training. Given that decoder-only Transformers can accommodate inference inputs shorter than those used during training, this finding is noteworthy and indicates the potential to improve the performance.", + "bbox": [ + 169, + 133, + 826, + 287 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/cebfa93fdf65c5dedcd6a2129ae37dfc15aa582d985a0e942aa4fa452f60e1d1.jpg", + "image_caption": [ + "Figure 8: Hyperparameter sensitivity of Timer-XL (input-3072-pred-96 on ERA5-MS), including the number of Transformer blocks $L$ , the patch size $P$ , and the input lookback length during inference." + ], + "image_footnote": [], + "bbox": [ + 173, + 301, + 380, + 424 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/c7726f346387b4ab3ccd5d840f7a19815834dce529f19809602030ebc9128ab9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 392, + 301, + 602, + 424 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/a79c3fa6fec9c0be936f9ec27b8924c91c2d991884f2d7d68907de2e14823559.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 607, + 301, + 821, + 425 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "D SHOWCASES", + "text_level": 1, + "bbox": [ + 171, + 478, + 316, + 493 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "To facilitate a clear comparison among various models, we present additional prediction visualization from diverse datasets in Figure 9 and 10. Showcases are randomly selected from Timer-XL and the following time-series Transformers: PatchTST (2022), Timer (2024c), and UniTST (2024a). 
Among them, Timer-XL presents the most accurate predictions.", + "bbox": [ + 169, + 510, + 823, + 566 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/d699be3b2fd3908c3ca12fdf475d384dbb566bb4d865c10627bb733d6ddd92b3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 173, + 580, + 346, + 681 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/6d90511cc3496cfb8d5a97248d1778df7469251c0dcc59fa940de526a409af99.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 349, + 580, + 504, + 680 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/d114e928a62ac831b1cbf006925321e2a07f555b859858ad5414ea8bba53f2e8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 580, + 663, + 680 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/82463877c629c28c01f047e385704d22f618c0d300a023ddb12b1976be4472d0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 666, + 580, + 821, + 680 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/bf35f570271c578659781cd8afcca3dc1a7fbec741cbb0d7c973b65e5eb075ed.jpg", + "image_caption": [ + "Figure 9: Visualization results on univariate time series dataset. We adopt the forecasting setting of 2880-pred-96 on ECL, ETTh1 and Traffic, and 2016-pred-96 on PEMS." + ], + "image_footnote": [], + "bbox": [ + 174, + 686, + 346, + 776 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/435d076294cf93f1f7b0cab8c0ee6ea66ae7d35e0c25c4e48e63d5433090c1d7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 349, + 686, + 504, + 776 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/8e3c7732312931f766822c9df0458873ad18b2a188a44071c71b027028749804.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 686, + 663, + 776 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/32d52704c334011db11f7cb6fdf4573af0300500b3a921eda8fb2459beb77dbd.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 668, + 686, + 821, + 776 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "E SUPPLEMENTARY RESULTS", + "text_level": 1, + "bbox": [ + 171, + 837, + 434, + 852 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "E.1 FULL RESULT OF MULTIVARIATE FORECASTING", + "text_level": 1, + "bbox": [ + 171, + 869, + 552, + 883 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Table 12 provides the complete results of the one-for-all multivariate forecasting benchmark across well-acknowledged datasets. We evaluate Timer-XL and baseline models by rolling forecasting: each", + "bbox": [ + 169, + 895, + 823, + 925 + ], + "page_idx": 19 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 946, + 509, + 959 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/5e8296caa3edd268c7052da86f65f090ebbf6acf60789400e80184b721ab16b9.jpg", + "image_caption": [ + "Figure 10: Visualization results on multivariate time series dataset. We adopt the forecasting setting of 672-pred-96 on ETTh1 (7 Variables) and Traffic (862 Variables)." 
],
"image_footnote": [],
"bbox": [174, 102, 823, 300],
"page_idx": 20
},
{ "type": "text", "text": "model is trained with input length 672 and output length 96, and the predicted values are integrated as part of the input in the next iteration until reaching the desired forecast length in \\{96, 192, 336, 720\\}.", "bbox": [169, 356, 826, 386], "page_idx": 20 },
{ "type": "text", "text": "We highlight that this benchmark evaluates the fundamental versatility of deep forecasters, aiming to avoid the burden of extensive training and model storage and to better serve real-world forecasting requirements. On this benchmark, time-series Transformers significantly stand out from other baseline models, and our proposed Timer-XL achieves state-of-the-art performance, making it a strong fundamental backbone for a one-for-all forecaster.", "bbox": [169, 391, 826, 460], "page_idx": 20 },
{ "type": "text", "text": "E.2 FULL RESULT OF ZERO-SHOT FORECASTING", "text_level": 1, "bbox": [171, 479, 529, 494], "page_idx": 20 },
{ "type": "text", "text": "Table 13 provides the full results of zero-shot forecasting on the benchmark from Wu et al. (2022). We build Timer-XL based on the configuration in Table 11, which is pre-trained on the aggregated datasets of UTSD (Liu et al., 2024c) and LOTSA (Woo et al., 2024). The patch size of Timer-XL is set as 96, and we conduct rolling forecasts to obtain the desired forecast length in $\\{96, 192, 336, 720\\}$.", "bbox": [169, 506, 826, 563], "page_idx": 20 },
{ "type": "text", "text": "We evaluate the most advanced large models based on their official model checkpoints, including Time-MoE (Shi et al., 2024), Moirai (Woo et al., 2024), TimesFM (Das et al., 2023), MOMENT (Goswami et al., 2024), and Chronos (Ansari et al., 2024). We conduct zero-shot evaluations on datasets that are not included during the pre-training of the corresponding models. For each evaluated model, we use its maximum input length during inference. The metrics (MSE/MAE) are averaged over all predicted windows in the test split.", "bbox": [169, 569, 826, 654], "page_idx": 20 },
{ "type": "text", "text": "E.3 ABLATION STUDY OF TIMEATTENTION", "text_level": 1, "bbox": [171, 671, 488, 684], "page_idx": 20 },
{ "type": "text", "text": "We conduct evaluations on TimeAttention to validate the effectiveness of its position embeddings. As for the variable embedding, distinguishing endogenous from exogenous variables improves performance. Based on our observation of the learned $u > v$, we find that a token reasonably pays more attention to tokens of the endogenous variable. This provides a prior that masks out minor dependencies and focuses less on exogenous variables.
For the temporal dimension, the alternative position embeddings are inferior to RoPE: RoPE applies an affine (rotary) transformation, while the others are additive and thus more easily conflated with the additive embedding used for variables.", "bbox": [169, 698, 823, 796], "page_idx": 20 },
{ "type": "text", "text": "E.4 SUPPLEMENTARY RESULTS OF LONG-CONTEXT FORECASTING", "text_level": 1, "bbox": [171, 814, 653, 828], "page_idx": 20 },
{ "type": "text", "text": "Long context is a basic indicator of foundation models, supporting emergent capabilities such as prompting, in-context learning, and retrieval-augmented generation. However, the long-context forecasting paradigm receives less attention in the current community, which can be attributed to the lack of benchmarks. In the meteorological ERA5, contexts of more than a year are necessary to contain specific cycles (such as El Niño). In Table 15, the performance of Timer-XL and DLinear generally improves with increased context length. By contrast, it reveals the performance", "bbox": [169, 840, 825, 925], "page_idx": 20 },
{ "type": "header", "text": "Published as a conference paper at ICLR 2025", "bbox": [173, 32, 478, 47], "page_idx": 20 },
{ "type": "page_number", "text": "21", "bbox": [488, 948, 506, 959], "page_idx": 20 },
{ "type": "table", "img_path": "images/ea8eef19601bc8ab862161aa3f0584b058f8fbebe4c2ccfc6a294e055a883ecc.jpg", "table_caption": ["Table 12: Full multivariate forecasting results: we conduct rolling forecasts with a single model trained on each dataset (lookback length is 672) to accomplish four forecast lengths in $\\{96, 192, 336, 720\\}$."], "table_footnote": [], "table_body": "
Models | Timer-XL (Ours) | Timer (2024c) | UniTST (2024a) | iTransformer (2023) | DLinear (2023) | PatchTST (2022) | TimesNet (2022) | Stationary (2022b) | Autoformer (2021)
Metric | MSE MAE | MSE MAE | MSE MAE | MSE MAE | MSE MAE | MSE MAE | MSE MAE | MSE MAE | MSE MAE
ETTh1 96 | 0.364 0.397 | 0.371 0.404 | 0.379 0.415 | 0.387 0.418 | 0.369 0.400 | 0.373 0.403 | 0.452 0.463 | 0.452 0.478 | 0.467 0.499
ETTh1 192 | 0.405 0.424 | 0.407 0.429 | 0.415 0.438 | 0.416 0.437 | 0.405 0.422 | 0.405 0.425 | 0.474 0.477 | 0.484 0.510 | 0.492 0.523
ETTh1 336 | 0.427 0.439 | 0.434 0.445 | 0.440 0.454 | 0.434 0.450 | 0.435 0.445 | 0.423 0.440 | 0.493 0.489 | 0.511 0.522 | 0.519 0.531
ETTh1 720 | 0.439 0.459 | 0.461 0.466 | 0.482 0.482 | 0.447 0.473 | 0.493 0.508 | 0.445 0.471 | 0.560 0.534 | 0.571 0.543 | 0.589 0.560
ETTh1 Avg | 0.409 0.430 | 0.418 0.436 | 0.429 0.447 | 0.421 0.445 | 0.426 0.444 | 0.412 0.435 | 0.495 0.491 | 0.505 0.513 | 0.517 0.528
ETTh2 96 | 0.277 0.343 | 0.285 0.344 | 0.343 0.398 | 0.304 0.362 | 0.305 0.371 | 0.289 0.347 | 0.340 0.374 | 0.348 0.403 | 0.358 0.397
ETTh2 192 | 0.348 0.391 | 0.365 0.400 | 0.376 0.420 | 0.372 0.407 | 0.412 0.439 | 0.360 0.393 | 0.402 0.414 | 0.408 0.448 | 0.435 0.451
ETTh2 336 | 0.375 0.418 | 0.412 0.440 | 0.399 0.435 | 0.418 0.440 | 0.527 0.508 | 0.389 0.420 | 0.452 0.452 | 0.424 0.457 | 0.454 0.475
ETTh2 720 | 0.409 0.458 | 0.468 0.487 | 0.419 0.457 | 0.463 0.476 | 0.830 0.653 | 0.398 0.440 | 0.462 0.468 | 0.448 0.476 | 0.479 0.492
ETTh2 Avg | 0.352 0.402 | 0.382 0.418 | 0.384 0.428 | 0.389 0.421 | 0.518 0.493 | 0.359 0.400 | 0.414 0.427 | 0.407 0.446 | 0.431 0.454
ETTm1 96 | 0.290 0.341 | 0.281 0.338 | 0.289 0.348 | 0.311 0.365 | 0.307 0.350 | 0.285 0.346 | 0.338 0.375 | 0.414 0.414 | 0.466 0.466
ETTm1 192 | 0.337 0.369 | 0.330 0.368 | 0.332 0.375 | 0.353 0.390 | 0.337 0.368 | 0.329 0.372 | 0.371 0.387 | 0.524 0.482 | 0.504 0.496
ETTm1 336 | 0.374 0.392 | 0.367 0.393 | 0.365 0.397 | 0.387 0.411 | 0.366 0.387 | 0.363 0.394 | 0.410 0.411 | 0.541 0.497 | 0.574 0.530
ETTm1 720 | 0.437 0.428 | 0.432 0.433 | 0.421 0.431 | 0.452 0.445 | 0.419 0.419 | 0.421 0.426 | 0.478 0.450 | 0.578 0.509 | 0.596 0.558
ETTm1 Avg | 0.359 0.382 | 0.352 0.383 | 0.352 0.388 | 0.376 0.403 | 0.357 0.381 | 0.349 0.385 | 0.399 0.406 | 0.514 0.475 | 0.535 0.512
ETTm2 96 | 0.175 0.257 | 0.175 0.257 | 0.171 0.260 | 0.183 0.272 | 0.167 0.263 | 0.172 0.259 | 0.187 0.267 | 0.237 0.306 | 0.255 0.339
ETTm2 192 | 0.242 0.301 | 0.239 0.301 | 0.228 0.230 | 0.250 0.315 | 0.230 0.311 | 0.233 0.299 | 0.249 0.309 | 0.330 0.387 | 0.279 0.335
ETTm2 336 | 0.293 0.337 | 0.293 0.342 | 0.282 0.336 | 0.311 0.356 | 0.298 0.361 | 0.280 0.331 | 0.321 0.351 | 0.404 0.424 | 0.331 0.374
ETTm2 720 | 0.376 0.390 | 0.392 0.407 | 0.380 0.398 | 0.417 0.419 | 0.432 0.446 | 0.357 0.382 | 0.497 0.403 | 0.525 0.486 | 0.413 0.450
ETTm2 Avg | 0.271 0.322 | 0.275 0.327 | 0.265 0.306 | 0.290 0.340 | 0.282 0.345 | 0.261 0.318 | 0.314 0.333 | 0.374 0.401 | 0.320 0.374
ECL 96 | 0.127 0.219 | 0.129 0.221 | 0.130 0.225 | 0.133 0.229 | 0.138 0.238 | 0.132 0.232 | 0.184 0.288 | 0.185 0.287 | 0.256 0.357
ECL 192 | 0.145 0.236 | 0.148 0.239 | 0.150 0.244 | 0.158 0.258 | 0.152 0.251 | 0.151 0.250 | 0.192 0.295 | 0.282 0.368 | 0.291 0.376
ECL 336 | 0.159 0.252 | 0.164 0.256 | 0.166 0.262 | 0.168 0.262 | 0.167 0.268 | 0.171 0.272 | 0.200 0.303 | 0.289 0.377 | 0.290 0.379
ECL 720 | 0.187 0.277 | 0.201 0.289 | 0.206 0.297 | 0.205 0.294 | 0.203 0.302 | 0.222 0.318 | 0.228 0.325 | 0.305 0.399 | 0.320 0.403
ECL Avg | 0.155 0.246 | 0.161 0.251 | 0.163 0.257 | 0.164 0.258 | 0.165 0.265 | 0.169 0.268 | 0.201 0.303 | 0.265 0.358 | 0.289 0.379
Traffic 96 | 0.340 0.238 | 0.348 0.240 | 0.359 0.250 | 0.353 0.259 | 0.399 0.285 | 0.359 0.255 | 0.593 0.315 | 0.610 0.322 | 0.675 0.412
Traffic 192 | 0.360 0.247 | 0.369 0.250 | 0.373 0.257 | 0.373 0.267 | 0.409 0.290 | 0.377 0.265 | 0.596 0.317 | 0.626 0.346 | 0.679 0.423
Traffic 336 | 0.377 0.256 | 0.388 0.260 | 0.386 0.265 | 0.386 0.275 | 0.422 0.297 | 0.393 0.276 | 0.600 0.319 | 0.633 0.352 | 0.688 0.440
Traffic 720 | 0.418 0.279 | 0.431 0.285 | 0.421 0.286 | 0.425 0.296 | 0.461 0.319 | 0.436 0.305 | 0.619 0.335 | 0.651 0.366 | 0.693 0.457
Traffic Avg | 0.374 0.255 | 0.384 0.259 | 0.385 0.265 | 0.384 0.274 | 0.423 0.298 | 0.391 0.275 | 0.602 0.322 | 0.630 0.347 | 0.684 0.433
Weather 96 | 0.157 0.205 | 0.151 0.202 | 0.152 0.206 | 0.174 0.225 | 0.169 0.229 | 0.149 0.202 | 0.169 0.228 | 0.185 0.241 | 0.355 0.409
Weather 192 | 0.206 0.250 | 0.196 0.245 | 0.198 0.249 | 0.227 0.268 | 0.211 0.268 | 0.194 0.245 | 0.222 0.269 | 0.286 0.325 | 0.421 0.450
Weather 336 | 0.259 0.291 | 0.249 0.288 | 0.251 0.291 | 0.290 0.309 | 0.258 0.306 | 0.244 0.285 | 0.290 0.310 | 0.323 0.347 | 0.452 0.465
Weather 720 | 0.337 0.344 | 0.330 0.344 | 0.322 0.340 | 0.374 0.360 | 0.320 0.362 | 0.317 0.338 | 0.376 0.364 | 0.436 0.401 | 0.513 0.496
Weather Avg | 0.240 0.273 | 0.232 0.270 | 0.231 0.272 | 0.266 0.291 | 0.239 0.291 | 0.226 0.268 | 0.264 0.293 | 0.308 0.329 | 0.435 0.455
Solar-Energy 96 | 0.162 0.221 | 0.212 0.230 | 0.190 0.240 | 0.183 0.265 | 0.193 0.258 | 0.168 0.237 | 0.180 0.272 | 0.199 0.290 | 0.206 0.296
Solar-Energy 192 | 0.187 0.239 | 0.232 0.246 | 0.223 0.264 | 0.205 0.283 | 0.214 0.274 | 0.189 0.257 | 0.199 0.286 | 0.243 0.307 | 0.254 0.328
Solar-Energy 336 | 0.205 0.255 | 0.237 0.253 | 0.250 0.283 | 0.224 0.299 | 0.233 0.291 | 0.212 0.277 | 0.220 0.301 | 0.264 0.322 | 0.272 0.330
Solar-Energy 720 | 0.238 0.279 | 0.252 0.266 | 0.292 0.311 | 0.239 0.316 | 0.246 0.307 | 0.240 0.305 | 0.251 0.321 | 0.310 0.339 | 0.326 0.347
Solar-Energy Avg | 0.198 0.249 | 0.233 0.249 | 0.241 0.275 | 0.213 0.291 | 0.222 0.283 | 0.202 0.269 | 0.213 0.295 | 0.254 0.315 | 0.265 0.325
1st Count | 23
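A sketch of the rolling-forecast protocol behind Table 12: a single input-672/output-96 forecaster is applied autoregressively, feeding each prediction back into the context until the desired length is reached. The `model` interface below is a placeholder assumption:

```python
import torch

@torch.no_grad()
def rolling_forecast(model, context: torch.Tensor, pred_len: int) -> torch.Tensor:
    """Roll a fixed input-672/output-96 forecaster until pred_len points exist."""
    preds = []
    while sum(p.shape[-1] for p in preds) < pred_len:
        window = context[..., -672:]                   # most recent lookback window
        step = model(window)                           # (..., 96) next segment
        preds.append(step)
        context = torch.cat([context, step], dim=-1)   # feed predictions back
    return torch.cat(preds, dim=-1)[..., :pred_len]    # pred_len in {96, 192, 336, 720}
```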
", + "bbox": [ + 176, + 227, + 820, + 839 + ], + "page_idx": 21 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 21 + }, + { + "type": "table", + "img_path": "images/df82303335f7a17529bf97a572706050aec2649fac1387afa26d0099c989a53e.jpg", + "table_caption": [ + "Table 13: Full results of zero-shot forecasting. A lower MSE or MAE indicates a better prediction. $1^{\\mathrm{st}}$ Count represents the number of wins achieved by a model under all prediction lengths and datasets." + ], + "table_footnote": [ + "* Dataset for pre-training is not evaluated on corresponding models, which is denoted by a dash (-).", + "* Traffic from (PEMS) is generally used during the pre-training of large models and thus not evaluated here.", + "* Our model checkpoint is available at https://huggingface.co/thuml/timer-base-84m." + ], + "table_body": "
Models | Timer-XL_Base (Ours) | Time-MoE_Base (2024) | Time-MoE_Large (2024) | Time-MoE_Ultra (2024) | Moirai_Small (2024) | Moirai_Base (2024) | Moirai_Large (2024) | TimesFM (2023) | MOMENT (2024) | Chronos_Base (2024) | Chronos_Large (2024)
Metric | MSE MAE | MSE MAE | MSE MAE | MSE MAE | MSE MAE | MSE MAE | MSE MAE | MSE MAE | MSE MAE | MSE MAE | MSE MAE
ETTm1 96 | 0.317 0.356 | 0.338 0.368 | 0.309 0.357 | 0.281 0.341 | 0.418 0.392 | 0.363 0.356 | 0.380 0.361 | 0.361 0.370 | 0.654 0.527 | 0.454 0.408 | 0.457 0.403
ETTm1 192 | 0.358 0.381 | 0.353 0.388 | 0.346 0.381 | 0.305 0.358 | 0.431 0.405 | 0.388 0.375 | 0.412 0.383 | 0.414 0.405 | 0.662 0.532 | 0.567 0.477 | 0.530 0.450
ETTm1 336 | 0.386 0.401 | 0.381 0.413 | 0.373 0.408 | 0.369 0.395 | 0.433 0.412 | 0.416 0.392 | 0.436 0.400 | 0.445 0.429 | 0.672 0.537 | 0.662 0.525 | 0.577 0.481
ETTm1 720 | 0.430 0.431 | 0.504 0.493 | 0.475 0.477 | 0.469 0.472 | 0.462 0.432 | 0.460 0.418 | 0.462 0.420 | 0.512 0.471 | 0.692 0.551 | 0.900 0.591 | 0.660 0.526
ETTm1 Avg | 0.373 0.392 | 0.394 0.415 | 0.376 0.405 | 0.356 0.391 | 0.436 0.410 | 0.406 0.385 | 0.422 0.391 | 0.433 0.418 | 0.670 0.536 | 0.645 0.500 | 0.555 0.465
ETTm2 96 | 0.189 0.277 | 0.201 0.291 | 0.197 0.286 | 0.198 0.288 | 0.214 0.288 | 0.205 0.273 | 0.211 0.274 | 0.202 0.270 | 0.260 0.335 | 0.199 0.274 | 0.197 0.271
ETTm2 192 | 0.241 0.315 | 0.258 0.334 | 0.250 0.322 | 0.235 0.312 | 0.284 0.332 | 0.275 0.316 | 0.281 0.318 | 0.289 0.321 | 0.289 0.350 | 0.261 0.322 | 0.254 0.314
ETTm2 336 | 0.286 0.348 | 0.324 0.373 | 0.337 0.375 | 0.293 0.348 | 0.331 0.362 | 0.329 0.350 | 0.341 0.355 | 0.360 0.366 | 0.324 0.369 | 0.326 0.366 | 0.313 0.353
ETTm2 720 | 0.375 0.402 | 0.488 0.464 | 0.480 0.461 | 0.427 0.428 | 0.402 0.408 | 0.437 0.411 | 0.485 0.428 | 0.462 0.430 | 0.394 0.409 | 0.455 0.439 | 0.416 0.415
ETTm2 Avg | 0.273 0.336 | 0.317 0.365 | 0.316 0.361 | 0.288 0.344 | 0.307 0.347 | 0.311 0.337 | 0.329 0.343 | 0.328 0.346 | 0.316 0.365 | 0.310 0.350 | 0.295 0.338
ETTh1 96 | 0.369 0.391 | 0.357 0.381 | 0.350 0.382 | 0.349 0.379 | 0.401 0.402 | 0.376 0.392 | 0.381 0.388 | 0.414 0.404 | 0.688 0.557 | 0.440 0.393 | 0.441 0.390
ETTh1 192 | 0.405 0.413 | 0.384 0.404 | 0.388 0.412 | 0.395 0.413 | 0.435 0.421 | 0.412 0.413 | 0.434 0.415 | 0.465 0.434 | 0.688 0.560 | 0.492 0.426 | 0.502 0.524
ETTh1 336 | 0.418 0.423 | 0.411 0.434 | 0.411 0.430 | 0.447 0.453 | 0.438 0.434 | 0.433 0.428 | 0.485 0.445 | 0.503 0.456 | 0.675 0.563 | 0.550 0.462 | 0.576 0.467
ETTh1 720 | 0.423 0.441 | 0.449 0.477 | 0.427 0.455 | 0.457 0.462 | 0.439 0.454 | 0.447 0.444 | 0.611 0.510 | 0.511 0.481 | 0.683 0.585 | 0.882 0.591 | 0.835 0.583
ETTh1 Avg | 0.404 0.417 | 0.400 0.424 | 0.394 0.419 | 0.412 0.426 | 0.428 0.427 | 0.417 0.419 | 0.480 0.439 | 0.473 0.443 | 0.683 0.566 | 0.591 0.468 | 0.588 0.466
ETTh2 96 | 0.283 0.342 | 0.305 0.359 | 0.302 0.354 | 0.292 0.352 | 0.297 0.336 | 0.294 0.330 | 0.296 0.330 | 0.315 0.349 | 0.342 0.396 | 0.308 0.343 | 0.320 0.345
ETTh2 192 | 0.340 0.379 | 0.351 0.386 | 0.364 0.385 | 0.347 0.379 | 0.368 0.381 | 0.365 0.375 | 0.361 0.371 | 0.388 0.395 | 0.354 0.402 | 0.384 0.392 | 0.406 0.399
ETTh2 336 | 0.366 0.400 | 0.391 0.418 | 0.417 0.425 | 0.406 0.419 | 0.370 0.393 | 0.376 0.390 | 0.390 0.390 | 0.422 0.427 | 0.356 0.407 | 0.429 0.430 | 0.492 0.453
ETTh2 720 | 0.397 0.431 | 0.419 0.454 | 0.537 0.496 | 0.439 0.447 | 0.411 0.426 | 0.416 0.433 | 0.423 0.418 | 0.443 0.454 | 0.395 0.434 | 0.501 0.477 | 0.603 0.511
ETTh2 Avg | 0.347 0.388 | 0.366 0.404 | 0.405 0.415 | 0.371 0.399 | 0.361 0.384 | 0.362 0.382 | 0.367 0.377 | 0.392 0.406 | 0.361 0.409 | 0.405 0.410 | 0.455 0.427
ECL 96 | 0.141 0.237 | - | - | - | - | - | - | - | - | - | -
ECL 192 | 0.159 0.254 | - | - | - | - | - | - | - | - | - | -
ECL 336 | 0.177 0.272 | - | - | - | - | - | - | - | - | - | -
ECL 720 | 0.219 0.308 | - | - | - | - | - | - | - | - | - | -
ECL Avg | 0.174 0.278 | - | - | - | - | - | - | - | - | - | -
Weather 96 | 0.171 0.225 | - | - | - | - | - | - | - | - | - | -
Weather 192 | 0.221 0.271 | - | - | - | - | - | - | - | - | - | -
Weather 336 | 0.274 0.311 | - | - | - | - | - | - | - | - | - | -
Weather 720 | 0.356 0.370 | - | - | - | - | - | - | - | - | - | -
Weather Avg | 0.256 0.294 | - | - | - | - | - | - | - | - | - | -
1st Count | 1510213010700051100120002
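The zero-shot protocol described above can be sketched as a sliding-window evaluation that uses each model's maximum input length and averages MSE/MAE over all predicted windows of the test split; the forecaster interface below is a placeholder assumption:

```python
import torch

@torch.no_grad()
def zero_shot_eval(model, test_series: torch.Tensor, max_input: int, horizon: int = 96):
    """Average MSE/MAE over all length-`horizon` windows of one test series."""
    mses, maes = [], []
    length = test_series.shape[-1]
    for end in range(max_input, length - horizon + 1):
        context = test_series[..., end - max_input:end]   # maximum input length
        target = test_series[..., end:end + horizon]
        pred = model(context)                             # placeholder pre-trained model
        mses.append(((pred - target) ** 2).mean().item())
        maes.append((pred - target).abs().mean().item())
    return sum(mses) / len(mses), sum(maes) / len(maes)
```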
", + "bbox": [ + 176, + 178, + 821, + 580 + ], + "page_idx": 22 + }, + { + "type": "table", + "img_path": "images/535dd2678173801b4e371e0ddfab3b75b17ef7b397ed9875f0a61f6bf3a672fd.jpg", + "table_caption": [ + "Table 14: Embedding ablation in TimeAttention. For the temporal dimension, we compare prevalent relative and absolute position embeddings. As for the variable dimension, we explore the effectiveness of the variable embedding that distinguishes endogenous and exogenous variables." + ], + "table_footnote": [], + "table_body": "
Design | Temporal | Variable | Traffic (MSE MAE) | Weather (MSE MAE) | Solar-Energy (MSE MAE) | ERA5-MS (MSE MAE)
Timer-XL | RoPE (2024) | with | 0.340 0.238 | 0.157 0.205 | 0.162 0.221 | 0.164 0.307
Replace | ALiBi (2021) | with | 0.351 0.246 | 0.162 0.212 | 0.188 0.210 | 0.167 0.308
Replace | Relative (2020) | with | 0.361 0.250 | 0.163 0.214 | 0.197 0.215 | 0.168 0.309
Replace | Absolute (2017) | with | 0.381 0.270 | 0.159 0.207 | 0.171 0.204 | 0.165 0.306
w/o | RoPE (2024) | w/o | 0.361 0.254 | 0.171 0.217 | 0.181 0.221 | 0.235 0.373
w/o | w/o | w/o | 0.363 0.253 | 0.164 0.215 | 0.194 0.215 | 0.167 0.309
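RoPE, the strongest temporal embedding in Table 14, rotates queries and keys instead of adding a bias, which keeps it disjoint from the additive variable embedding. Below is a minimal single-head sketch of the standard rotary formulation (Su et al., 2024), not the exact Timer-XL implementation:

```python
import torch

def rope(x: torch.Tensor, base: float = 10000.0) -> torch.Tensor:
    """Apply rotary position embedding to (..., seq_len, dim) with even dim."""
    seq_len, dim = x.shape[-2], x.shape[-1]
    pos = torch.arange(seq_len, dtype=x.dtype).unsqueeze(-1)      # (seq_len, 1)
    freq = base ** (-torch.arange(0, dim, 2, dtype=x.dtype) / dim)
    angle = pos * freq                                            # (seq_len, dim/2)
    cos, sin = angle.cos(), angle.sin()
    x1, x2 = x[..., 0::2], x[..., 1::2]
    out = torch.empty_like(x)
    out[..., 0::2] = x1 * cos - x2 * sin   # rotate each 2D pair by pos * freq
    out[..., 1::2] = x1 * sin + x2 * cos
    return out  # applied to queries and keys before the dot product
```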
", + "bbox": [ + 173, + 752, + 831, + 887 + ], + "page_idx": 22 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/ea692af7f9c14e27355f9cc560e2556cfcb2e4f62c0bebaeeebdf1a1b68d30dd.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 176, + 103, + 504, + 219 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/0528daa29f797b75cecd95ce4f5e853b4bfa4258b386d370e67434d2bcdd22bf.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 517, + 112, + 826, + 215 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/b48dafd63fd63593b1db526a2162e08aafa9da846564383f156ed3bc3af3dfa1.jpg", + "image_caption": [ + "Figure 11: Case studies of learned attention in encoder-/decoder-only Transformers." + ], + "image_footnote": [], + "bbox": [ + 176, + 224, + 506, + 339 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/d1f3b533ee13e593845504f471dde99e153a915b60dcf400c032ae496a7eac2b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 517, + 234, + 825, + 335 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "degradation of PatchTST. Similar to the observations in Figure 3, the encoder-only architecture produces inferior predictions after thousands of time points, which can be concealed due to the short context adopted in previous benchmarks. Although PatchTST has conducted an initial exploration in the context of hundreds of time points, it inappropriately works in ever-long contexts. Therefore, we believe that context bottlenecks deserve further exploration in this community.", + "bbox": [ + 169, + 380, + 823, + 450 + ], + "page_idx": 23 + }, + { + "type": "table", + "img_path": "images/a1c342d369cdba7a3b2b11e76c67658ad5a342d12d35635d85cc25c22c782cdd.jpg", + "table_caption": [ + "Table 15: Performance on ERA5 (pred-1day). Lookback lengths vary from daily to yearly contexts." + ], + "table_footnote": [], + "table_body": "
Models | Timer-XL | PatchTST | DLinear
Metric | MSE MAE | MSE MAE | MSE MAE
Lookback-8 (1 Day) | 0.0847 0.2100 | 0.0897 0.2196 | 0.0970 0.2276
Lookback-32 (4 Days) | 0.0713 0.1928 | 0.0778 0.2080 | 0.0841 0.2113
Lookback-56 (1 Week) | 0.0688 0.1891 | 0.0785 0.2082 | 0.0814 0.2081
Lookback-224 (1 Month) | 0.0675 0.1868 | 0.0745 0.2042 | 0.0788 0.2048
Lookback-960 (4 Months) | 0.0667 0.1863 | 0.1194 0.2696 | 0.0773 0.2031
Lookback-2944 (1 Year) | 0.0663 0.1857 | 0.1109 0.2638 | 0.0763 0.2024
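Since decoder-only Transformers accept inference inputs shorter than the training context, the lookbacks in Table 15 can be evaluated with a single trained model by truncating the context; a sketch with a placeholder `model` (one-day horizon = 8 three-hourly points on ERA5):

```python
import torch

LOOKBACKS = [8, 32, 56, 224, 960, 2944]  # daily to yearly contexts (Table 15)

@torch.no_grad()
def eval_lookbacks(model, series: torch.Tensor, horizon: int = 8):
    """Predict one day ahead from progressively longer contexts of one model."""
    results = {}
    for lb in LOOKBACKS:
        context = series[..., -(lb + horizon):-horizon]  # last `lb` observed points
        target = series[..., -horizon:]
        pred = model(context)[..., :horizon]
        results[lb] = ((pred - target) ** 2).mean().item()
    return results
```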
", + "bbox": [ + 173, + 493, + 826, + 650 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Representation Analysis We further delve into long-context modeling from the perspective of learned representations. As shown in Figure 11, the decoder-only model can selectively focus on the previous context while PatchTST wrongly focuses on noisy parts. Since causality is the basis of forecasting, using causal masks leads to coherent token embeddings, while the unmasked attention mechanism may break the causality and prevent the model from telling each tokens.", + "bbox": [ + 169, + 670, + 823, + 741 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Normalization Section 4.1 has discussed instance normalization (Kim et al., 2021). It generally improves the performance of the previous encoder-only Transformers but leads to special problems in decoder-only Transformers (e.g., unmatched statistics in multi-step autoregression). However, it is indicative that Timer-XL without ReVIN can achieve competitive performance on well-acknowledged benchmarks in Table 16, while the performance of PatchTST may heavily rely on this normalization.", + "bbox": [ + 169, + 755, + 826, + 825 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "E.5 ILLUSTRATION OF TIMEATTENTION", + "text_level": 1, + "bbox": [ + 171, + 842, + 465, + 856 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Although the formulation to generalize from 1D sequences to multivariate time series is straightforward, Timer-XL is built on a decoder-only Transformer, an underexploited backbone among current time series models. As shown in Figure 12, challenges lie in capturing fine-grained dependencies between all variables in the patch level, while maintaining temporal causality in multiple sequences.", + "bbox": [ + 169, + 867, + 826, + 925 + ], + "page_idx": 23 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 23 + }, + { + "type": "table", + "img_path": "images/2896537c0bd20abbef90f4319d3f887646983044366ec717bc89af867eec5895.jpg", + "table_caption": [ + "Table 16: Evaluations (672-pred-96) on the effect of ReVIN (Kim et al., 2021) on Transformers." + ], + "table_footnote": [], + "table_body": "
Models | Timer-XL with ReVIN | Timer-XL w/o ReVIN | PatchTST with ReVIN | PatchTST w/o ReVIN
Metric | MSE MAE | MSE MAE | MSE MAE | MSE MAE
ETTh1 | 0.364 0.397 | 0.370 0.401 | 0.370 0.399 | 0.421 0.448
Weather | 0.157 0.205 | 0.151 0.205 | 0.149 0.198 | 0.173 0.242
ECL | 0.127 0.219 | 0.130 0.225 | 0.129 0.222 | 0.138 0.244
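Section E.5 formulates TimeAttention's grouped causality over flattened (variable, time) tokens via a Kronecker product of variable dependencies and a temporal causal mask. A minimal sketch, assuming variable-major flattening and all-to-all variable dependencies:

```python
import torch

def time_attention_mask(num_vars: int, num_patches: int) -> torch.Tensor:
    """Boolean (N*T, N*T) mask for flattened tokens: attend to every variable,
    but only to past or current patches (grouped causality)."""
    variable = torch.ones(num_vars, num_vars)                 # all-to-all variables
    temporal = torch.tril(torch.ones(num_patches, num_patches))
    # Kron(variable dependency, temporal causality); assumes tokens are ordered
    # variable-major, i.e., all patches of variable 1, then variable 2, ...
    return torch.kron(variable, temporal).bool()

mask = time_attention_mask(num_vars=3, num_patches=4)  # (12, 12)
# scores.masked_fill(~mask, float("-inf")) would enforce causality in attention.
```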
", + "bbox": [ + 176, + 131, + 823, + 226 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/a576faa6ff22bb6735f90db34194db5a7caf1608a31e20c518c7337fc4a9bf76.jpg", + "image_caption": [ + "(a) Univariate" + ], + "image_footnote": [], + "bbox": [ + 173, + 258, + 401, + 462 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/c661a2004600bd871e7770694566693df277b6790453406217428e8dfd8c73a7.jpg", + "image_caption": [ + "(b) Multivariate" + ], + "image_footnote": [], + "bbox": [ + 433, + 258, + 647, + 462 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/79a2f6434987c9c6c32267b735d53096628ec361f836171c9f619020a6e96774.jpg", + "image_caption": [ + "Figure 12: Illustration of TimeAttention for modeling univariate and multivariate time series." + ], + "image_footnote": [], + "bbox": [ + 665, + 258, + 825, + 463 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Technically, we introduce the masking formulation, whose key lies in the grouped causality of flattened 2D sequences. We derive it based on the Kronecker Product, which disentangles the large attention map into formalizable temporal and variable dependencies. It can be naturally extended to covariates or pre-defined variable dependencies, which may inspire a lot of future explorations.", + "bbox": [ + 169, + 503, + 826, + 561 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "F LIMITATIONS", + "text_level": 1, + "bbox": [ + 171, + 580, + 318, + 595 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Timer-XL is a unified model for time series forecasting. It can be used for task-specific training or scalable pre-training, handling varying-length and multivariate time series. As an autoregressive model, Timer-XL necessitates iterative generation for long-term forecasting, which may lead to error accumulation and inflexibility in the output length. In the future, we plan to incorporate multi-resolution patches for input and output series. Furthermore, given that Timer-XL explicitly captures fine-grained token dependencies, there remains significant potential to reduce the complexity of TimeAttention, particularly in high-dimensional and lengthy time series. 
Finally, we will investigate the factors contributing to the stagnation of Transformer performance in extremely long contexts, and seek insights in the time series modality to improve context efficiency.", + "bbox": [ + 169, + 612, + 826, + 739 + ], + "page_idx": 24 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 24 + } +] \ No newline at end of file diff --git a/2025/Timer-XL_ Long-Context Transformers for Unified Time Series Forecasting/1000abc3-3f82-4c7b-a0aa-1b66e4569e7b_model.json b/2025/Timer-XL_ Long-Context Transformers for Unified Time Series Forecasting/1000abc3-3f82-4c7b-a0aa-1b66e4569e7b_model.json new file mode 100644 index 0000000000000000000000000000000000000000..8b4300941934887fe182688c725179d2500c11ad --- /dev/null +++ b/2025/Timer-XL_ Long-Context Transformers for Unified Time Series Forecasting/1000abc3-3f82-4c7b-a0aa-1b66e4569e7b_model.json @@ -0,0 +1,4100 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.1, + 0.787, + 0.147 + ], + "angle": 0, + "content": "TIMER-XL: LONG-CONTEXT TRANSFORMERS FOR UNIFIED TIME SERIES FORECASTING" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.17, + 0.7, + 0.228 + ], + "angle": 0, + "content": "Yong Liu\\*, Guo Qin\\*, Xiangdong Huang, Jianmin Wang, Mingsheng Long\\* \nSchool of Software, BNrist, Tsinghua University, Beijing 100084, China \n{liuyong21, qinguo24}@ mails.tsinghua.edu.cn \n{huangxdong, jimwang, mingsheng}@tsinghua.edu.cn" + }, + { + "type": "title", + "bbox": [ + 0.451, + 0.263, + 0.548, + 0.279 + ], + "angle": 0, + "content": "ABSTRACT" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.296, + 0.77, + 0.52 + ], + "angle": 0, + "content": "We present Timer-XL, a causal Transformer for unified time series forecasting. To uniformly predict multidimensional time series, we generalize next token prediction, predominantly adopted for 1D token sequences, to multivariate next token prediction. The paradigm formulates various forecasting tasks as a long-context prediction problem. We opt for decoder-only Transformers that capture causal dependencies from varying-length contexts for unified forecasting, making predictions on non-stationary univariate time series, multivariate series with complicated dynamics and correlations, as well as covariate-informed contexts that include exogenous variables. Technically, we propose a universal TimeAttention to capture fine-grained intra- and inter-series dependencies of flattened time series tokens (patches), which is further enhanced by deft position embedding for temporal causality and variable equivalence. Timer-XL achieves state-of-the-art performance across task-specific forecasting benchmarks through a unified approach. Based on large-scale pre-training, Timer-XL achieves state-of-the-art zero-shot performance, making it a promising architecture for pre-trained time series models. Code is available at this repository: https://github.com/thuml/Timer-XL." 
+ }, + { + "type": "title", + "bbox": [ + 0.173, + 0.547, + 0.338, + 0.563 + ], + "angle": 0, + "content": "1 INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.579, + 0.828, + 0.679 + ], + "angle": 0, + "content": "Transformers have been extensively applied to time series forecasting, becoming the backbone of task-specific models (Zhou et al., 2021; Wu et al., 2021) and pre-trained models (Das et al., 2023). While the majority of prior works have focused on long-term forecasting, reliable predictions are made by considering endogenous variations and exogenous correlations in the context (Box, 2013). Besides, the context length of pre-trained Transformers determines the maximum input and output length during inference. Therefore, long-context Transformers are more versatile than shorter ones, facilitating long-sequence and high-resolution generation (Yin et al., 2023; Wang et al., 2024a)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.683, + 0.827, + 0.81 + ], + "angle": 0, + "content": "However, existing Transformers in the time series field crucially encounter the context bottleneck. As shown in Figure 1, unlike Transformers for natural language and vision that learn dependencies among thousands to millions of tokens (Kirillov et al., 2023; OpenAI, 2023), time-series Transformers typically operate around limited contexts of up to hundreds of time series tokens (patches) (Nie et al., 2022). For univariate forecasting, a short-context input leads to insufficient learning of global tendencies, struggling to address non-stationarity in real-world time series (Hyndman, 2018). For multivariate forecasting, increasing research has demonstrated the effectiveness of explicitly capturing intra- and inter-channel dependencies (Zhang & Yan, 2022; Liu et al., 2023; 2024a), highlighting the practical urgency of extending the context length to encompass inter-correlated time series." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.815, + 0.829, + 0.903 + ], + "angle": 0, + "content": "Recently, causal Transformers characterized by the decoder-only architecture have become a predominant choice of large language models (Zhao et al., 2023) and garnered increasing attention in the development of large time series models (Rasul et al., 2023; Ansari et al., 2024). Based on contextual flexibility and autoregressive next token prediction, one model can accommodate varying lookback and prediction lengths (Liu et al., 2024b). Therefore, pre-training on longer contexts not only empowers them with the fundamental capability to incorporate more contextual information but" + }, + { + "type": "page_footnote", + "bbox": [ + 0.192, + 0.911, + 0.318, + 0.925 + ], + "angle": 0, + "content": "*Equal Contribution" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.105, + 0.346, + 0.285 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.377, + 0.101, + 0.828, + 0.288 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.293, + 0.825, + 0.337 + ], + "angle": 0, + "content": "Figure 1: We compare the context length (measured by token number) of Transformers in different modalities and propose Timer-XL that increases the length to thousands of patch tokens. 
Given the generality across contexts, Timer-XL is a versatile solution for various forecasting tasks." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.355, + 0.827, + 0.426 + ], + "angle": 0, + "content": "also enhances the model versatility toward a one-for-all foundation model. Regarding any-variate and any-length time series as one context, previous work (Liu et al., 2024a) has achieved unified modeling on flattened tokens based on noncausal Transformers. However, our empirical results (Figure 3) reveal that encoder-only forecasters may encounter performance degradation in long-context forecasting, while decoder-only Transformers can mitigate this degradation well." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.432, + 0.828, + 0.585 + ], + "angle": 0, + "content": "In this work, we generalize the training objective of language modeling to multivariate next token prediction, achieving unified time series forecasting that covers the tasks in Figure 1 (right). Based on the decoder-only architecture, we propose TimeAttention to facilitate Transformers on multidimensional time series, presenting a Kronecker-based masking mechanism to train time-series Transformers in a channel-dependent approach. With specialized position embedding for multivariate series, TimeAttention is aware of the chronological order of time points and achieves permutation-equivalence (Zaheer et al., 2017) on variables. We enlarge the context to thousands of patch tokens and achieve state-of-the-art results on univariate, multivariate, and covariate-informed forecasting benchmarks. By pre-training on large-scale datasets, we present Timer-XL as an extra-long version of pre-trained time-series Transformers (Timer) (Liu et al., 2024c), which outperforms recent large models in zero-shot forecasting. Our contributions lie in three aspects:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.599, + 0.826, + 0.628 + ], + "angle": 0, + "content": "- We propose multivariate next token prediction and unified time series forecasting, strengthening Transformers with enlarged contexts to make information-complete predictions." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.633, + 0.825, + 0.675 + ], + "angle": 0, + "content": "- We introduce TimeAttention, a novel causal self-attention tailored for multidimensional time series, facilitating intra- and inter-series modeling with positional awareness and maintaining the causality and scalability of Transformers." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.681, + 0.825, + 0.725 + ], + "angle": 0, + "content": "- We propose Timer-XL, a versatile Transformer for one-for-all forecasting, which mitigates performance degradation in long-context time series, achieves state-of-the-art performance in task-specific benchmarks, and presents notable zero-shot performance by pre-training." + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.599, + 0.826, + 0.725 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.753, + 0.347, + 0.768 + ], + "angle": 0, + "content": "2 RELATED WORK" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.785, + 0.828, + 0.925 + ], + "angle": 0, + "content": "Transformers (Vaswani et al., 2017) for time series forecasting have undergone rapid advancements. Initial Transformer-based forecasters primarily focused on long-term forecasting (Li et al., 2019; Zhou et al., 2021; Wu et al., 2021; Sun & Zhang, 2024). 
However, the context length has not kept pace, which hinders Transformers from making information-complete predictions. Another line of advancement has focused on multivariate forecasting. Unlike natural language, time series are multidimensional and inherently correlated (Hyndman, 2018). To learn intra- and inter-series dependencies, different tokenizations for time-series Transformers have been proposed, including point-wise (Lim et al., 2021), patch-wise (Nie et al., 2022), and variable-wise (Liu et al., 2023) approaches, with deftly tailored architectures (Zhang & Yan, 2022; Wang et al., 2024b). However, few works highlight that multidimensional time series can be uniformly tackled by long-context Transformers without architectural" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.133 + ], + "angle": 0, + "content": "modification. In this work, we leverage causal Transformers, which excel at handling long-context sequences, and unify time series forecasting tasks into multivariate next token prediction." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.14, + 0.827, + 0.252 + ], + "angle": 0, + "content": "Recently, time-series Transformers have experienced the evolution from small task-specific models to pre-trained large models (Das et al., 2023; Woo et al., 2024; Ansari et al., 2024). Among them, the decoder-only Transformer is predominantly adopted as the backbone of large language models (Touvron et al., 2023; OpenAI, 2023), positioning it as a scalable choice for general time series analysis (Liu et al., 2024c). By independently predicting each token with supervision, decoder-only models are also multi-length forecasters (Liu et al., 2024b), avoiding resource-intensive training and lookback search. However, existing decoder-only Transformers are generally pre-trained in a channel-independent approach, leaving them blind to inter-series dependencies." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.258, + 0.828, + 0.356 + ], + "angle": 0, + "content": "Prior work has employed encoder-only Transformers to capture dependencies of multivariate time series (Liu et al., 2024a). However, our empirical study found that this architecture can be incompatible with causal forecasting, limiting the performance of Transformers. To implement next token prediction and multivariate forecasting in a single Transformer, we renovate the attention module, which disentangles fine-grained token dependencies into variable dependencies and temporal causal masks, capturing intra- and inter-series dependencies with causality and scalability maintained. In Table 1, we list representative time-series Transformers and highlight their differences." + }, + { + "type": "table_caption", + "bbox": [ + 0.27, + 0.372, + 0.726, + 0.386 + ], + "angle": 0, + "content": "Table 1: Comparison among representative time-series Transformers." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.388, + 0.825, + 0.478 + ], + "angle": 0, + "content": "
ModelPatchTST (2022)iTTrans. (2023)TimeXer (2024b)UniTST (2024a)Moirai (2024)Timer (2024c)Timer-XL (Ours)
Intra-Series
Inter-Series
Causal Trm.
Pre-Trained
" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.503, + 0.3, + 0.518 + ], + "angle": 0, + "content": "3 APPROACH" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.533, + 0.825, + 0.59 + ], + "angle": 0, + "content": "In this section, we first introduce a decoder-only Transformer to illustrate the procedure of next token prediction on univariate time series. As an extension, we design TimeAttention and propose Timer-XL for unified time series forecasting. It is applicable to univariate, multivariate, and covariate-informed scenarios by generalizing the context from 1D sequences to 2D time series." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.606, + 0.264, + 0.619 + ], + "angle": 0, + "content": "3.1 TIMER" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.632, + 0.827, + 0.661 + ], + "angle": 0, + "content": "Timer (Liu et al., 2024c) is a time-series Transformer trained by next token prediction (Bengio et al., 2000), which regards single-dimensional time series as non-overlapping patch tokens." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.674, + 0.825, + 0.703 + ], + "angle": 0, + "content": "Next Token Prediction Given an univariate time series \\(\\mathbf{X} = \\{x_{1},\\dots ,x_{TP}\\}\\) of length \\(TP\\), a time series token is defined as \\(P\\) consecutive time points, also termed as the patch token:" + }, + { + "type": "equation", + "bbox": [ + 0.333, + 0.707, + 0.825, + 0.724 + ], + "angle": 0, + "content": "\\[\n\\mathbf {x} _ {i} = \\left\\{x _ {(i - 1) P + 1}, \\dots , x _ {i P} \\right\\} \\in \\mathbb {R} ^ {P}, i = 1, \\dots , T. \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.726, + 0.816, + 0.741 + ], + "angle": 0, + "content": "The training objective is to independently predict the next patch token to maximize the likelihood:" + }, + { + "type": "equation", + "bbox": [ + 0.41, + 0.744, + 0.826, + 0.786 + ], + "angle": 0, + "content": "\\[\nP (\\mathbf {X}) = \\prod_ {i = 1} ^ {T} p \\left(\\mathbf {x} _ {i + 1} \\mid \\mathbf {x} _ {\\leq i}\\right), \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.788, + 0.822, + 0.803 + ], + "angle": 0, + "content": "which is realized by a decoder-only architecture with the block number \\( L \\) and model dimension \\( D \\):" + }, + { + "type": "equation", + "bbox": [ + 0.381, + 0.805, + 0.568, + 0.822 + ], + "angle": 0, + "content": "\\[\n\\mathbf {h} _ {i} ^ {0} = \\mathbf {W} _ {e} \\mathbf {x} _ {i}, i = 1, \\dots , T,\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.381, + 0.825, + 0.825, + 0.843 + ], + "angle": 0, + "content": "\\[\n\\mathbf {H} ^ {l} = \\operatorname {T r m B l o c k} \\left(\\mathbf {H} ^ {l - 1}\\right), l = 1, \\dots , L, \\tag {3}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.354, + 0.847, + 0.577, + 0.864 + ], + "angle": 0, + "content": "\\[\n\\{\\hat {\\mathbf {x}} _ {i + 1} \\} = \\mathbf {H} ^ {L} \\mathbf {W} _ {d}, i = 1, \\dots , T.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.868, + 0.825, + 0.926 + ], + "angle": 0, + "content": "For simplicity, we omit the block index \\(l\\). Timer adopts \\(\\mathbf{W}_e\\), \\(\\mathbf{W}_d \\in \\mathbb{R}^{D \\times P}\\) that independently embed and project the token embeddings as \\(\\mathbf{H} = \\{\\mathbf{h}_i\\} \\in \\mathbb{R}^{T \\times D}\\). TrmBlock includes feed-forward network and self-attention with the temporal causal mask \\(\\mathcal{T} \\in \\mathbb{R}^{T \\times T}\\). 
\\(\\mathbf{h}_i \\in \\mathbb{R}^D\\) is the context representation of the previous \\(i\\) tokens. All predicted \\(\\hat{\\mathbf{x}}_{i+1}\\) are supervised with the ground truth via the MSE loss." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.104, + 0.566, + 0.119 + ], + "angle": 0, + "content": "3.2 GENERALIZE 1D SEQUENCES TO 2D TIME SERIES" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.13, + 0.827, + 0.175 + ], + "angle": 0, + "content": "For the enlarged context with the additional dimension, our proposed attention mechanism aims to (1) thoroughly capture intra- and inter-series dependencies and (2) preserve causality within the temporal dimension. Without loss of generality, we illustrate this with the case of multivariate forecasting." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.186, + 0.825, + 0.217 + ], + "angle": 0, + "content": "Multivariate Next Token Prediction Given a multivariate time series \\(\\mathbf{X} \\in \\mathbb{R}^{N \\times TP}\\) with the number of variables \\(N\\), the time series token \\(\\mathbf{x}_{m,i}\\) is defined as the \\(i\\)-th patch of the \\(m\\)-th variable:" + }, + { + "type": "equation", + "bbox": [ + 0.251, + 0.219, + 0.826, + 0.236 + ], + "angle": 0, + "content": "\\[\n\\mathbf {x} _ {m, i} = \\left\\{\\mathbf {X} _ {m, (i - 1) P + 1}, \\dots , \\mathbf {X} _ {m, i P} \\right\\} \\in \\mathbb {R} ^ {P}, m = 1, \\dots , N, i = 1, \\dots , T. \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.238, + 0.825, + 0.267 + ], + "angle": 0, + "content": "The training objective is still to independently predict the next token. Unlike before, each prediction is made based on tokens of the previous time \\((\\leq i)\\) from all \\(N\\) variables:" + }, + { + "type": "equation", + "bbox": [ + 0.251, + 0.269, + 0.826, + 0.31 + ], + "angle": 0, + "content": "\\[\nP (\\mathbf {X}) = \\prod_ {m = 1} ^ {N} \\prod_ {i = 1} ^ {T} p \\left(\\mathbf {x} _ {m, i + 1} \\mid \\mathbf {x} _ {:, \\leq i}\\right) = \\prod_ {m = 1} ^ {N} \\prod_ {i = 1} ^ {T} p \\left(\\mathbf {x} _ {m, i + 1} \\mid \\mathbf {x} _ {1, \\leq i}, \\dots , \\mathbf {x} _ {N, \\leq i}\\right). \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.312, + 0.826, + 0.369 + ], + "angle": 0, + "content": "Compared with Equation 2, the multivariate context length increases from \\( T \\) to \\( NT \\). In exchange, the benefit is that this paradigm learns causal dependencies within each sequence while incorporating exogenous variable correlations from other sequences, making it a universal forecasting paradigm that outperforms channel-independent (Nie et al., 2022) or variable-centric models (Liu et al., 2023)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.374, + 0.826, + 0.421 + ], + "angle": 0, + "content": "Technically, we independently apply \\(\\mathbf{W}_e\\in \\mathbb{R}^{D\\times P}\\) on each token to obtain the patch-wise representation \\(\\mathbf{h}_{m,i}\\in \\mathbb{R}^D\\), which will encompass contextual information from \\(Ni\\) tokens through Transformer blocks and be eventually projected by \\(\\mathbf{W}_d\\in \\mathbb{R}^{D\\times P}\\) into the predicted patch token \\(\\hat{\\mathbf{x}}_{m,i + 1}\\)."
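For concreteness, the following is a minimal NumPy sketch of the patch tokenization and temporal-first flattening behind Equations 4-5. It is not from the official repository; the array names and sizes are illustrative, and a single shared patch embedding is assumed.

```python
import numpy as np

N, T, P, D = 3, 4, 24, 32            # variables, tokens per variable, patch length, model dim
X = np.random.randn(N, T * P)        # multivariate series of shape [N, T*P]

# Equation 4: token x_{m,i} collects the P consecutive points X[m, (i-1)P : iP].
tokens = X.reshape(N, T, P)

# Temporal-first flattening: token (m, i) lands at 1D index m*T + i, so each
# variable's patches remain chronologically ordered inside the 1D context.
context = tokens.reshape(N * T, P)   # context length grows from T to N*T

W_e = np.random.randn(D, P) / np.sqrt(P)  # shared patch embedding, W_e in R^{D x P}
H0 = context @ W_e.T                      # token embeddings H^0 of shape [N*T, D]
```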
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.433, + 0.827, + 0.504 + ], + "angle": 0, + "content": "Position Embedding Position embedding has not been sufficiently explored in time-series Transformers. To avoid the inherent permutation-invariance of self-attention, positional embedding is required to reflect the chronological order of tokens on the temporal dimension. As for the variable dimension, shuffling the input order of variables should not affect anything other than the output order of variables. Formally, the processing on multiple variables should be permutation-equivalent (Zaheer et al., 2017)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.51, + 0.826, + 0.567 + ], + "angle": 0, + "content": "To meet the above requirements, we adopt RoPE (Su et al., 2024), a widely utilized position embedding, on the temporal dimension. For the variable dimension, we use two learnable scalars in each head to keep the permutation-equivalence of variables (Woo et al., 2024). Beyond simply incorporating them together, we provide detailed ablations in Section E.3 to demonstrate the effectiveness:" + }, + { + "type": "equation", + "bbox": [ + 0.267, + 0.568, + 0.825, + 0.586 + ], + "angle": 0, + "content": "\\[\n\\mathcal {A} _ {m n, i j} = \\mathbf {h} _ {m, i} ^ {\\top} \\mathbf {W} _ {q} \\mathbf {R} _ {\\theta , i - j} \\mathbf {W} _ {k} ^ {\\top} \\mathbf {h} _ {n, j} + u \\cdot \\mathbb {1} (m = n) + v \\cdot \\mathbb {1} (m \\neq n), \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.589, + 0.825, + 0.634 + ], + "angle": 0, + "content": "where \\(\\mathbf{W}_q, \\mathbf{W}_k, \\mathbf{W}_v \\in \\mathbb{R}^{D \\times d_k}\\) and \\(d_k\\) is the dimension of the query, key, and value. \\(\\mathbf{R}_{\\theta,t} \\in \\mathbb{R}^{d_k \\times d_k}\\) is the rotary matrix with rotation degree \\(t \\cdot \\theta\\), \\(\\mathbb{1}(\\cdot)\\) is the indicator function, and \\(u, v \\in \\mathbb{R}\\) are learnable parameters that let a token distinguish its endogenous and exogenous time series." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.647, + 0.826, + 0.718 + ], + "angle": 0, + "content": "TimeAttention In contrast to variable-wise (Liu et al., 2023) and non-causal patch-wise tokens (Nie et al., 2022; Woo et al., 2024), our TimeAttention aims to capture causal patch-wise dependencies within and among all variables. Concretely, we sort patch tokens by flattening their 2D indices into 1D indices in the temporal-first manner, which is illustrated in the upper left of Figure 2. Note that the order of variables does not matter, since Equation 6 guarantees their permutation-equivalence." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.723, + 0.826, + 0.822 + ], + "angle": 0, + "content": "We provide an intuitive example to illustrate the causal dependencies within multivariate time series: consider the 2nd token of time series A. To predict its next token, its representation h should depend exactly on tokens {1, 2, 4, 5}. Similarly, we provide all causal dependencies of each token in Figure 12. 
Based on the visualized attention mask and variable dependencies presented in Figure 2, where all variables are inter-correlated, all token dependencies in \\(\\mathcal{A}\\) can be formally disentangled by the Kronecker product into (1) the adjacency matrix of the variable dependency graph \\(\\mathcal{C} \\in \\mathbb{R}^{N \\times N}\\) and (2) the causal temporal mask \\(\\mathcal{T} \\in \\mathbb{R}^{T \\times T}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.26, + 0.825, + 0.826, + 0.858 + ], + "angle": 0, + "content": "\\[\n\\mathcal {T} _ {i, j} = \\left\\{ \\begin{array}{l l} 1 & \\text {i f} j \\leq i, \\\\ 0 & \\text {o t h e r w i s e ,} \\end{array} \\right. \\mathcal {C} _ {m, n} = \\left\\{ \\begin{array}{l l} 1 & \\text {i f v a r i a b l e} m \\text {i s d e p e n d e n t o n} n, \\\\ 0 & \\text {o t h e r w i s e .} \\end{array} \\right. \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.861, + 0.825, + 0.89 + ], + "angle": 0, + "content": "Let the Kronecker product \\(\\otimes : (\\mathbb{R}^{N \\times N}, \\mathbb{R}^{T \\times T}) \\mapsto \\mathbb{R}^{NT \\times NT}\\) take two matrices and produce a block matrix. Consequently, TimeAttention is formulated as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.182, + 0.892, + 0.826, + 0.93 + ], + "angle": 0, + "content": "\\[\n\\operatorname {T i m e A t t e n t i o n} (\\mathbf {H}) = \\operatorname {S o f t m a x} \\left(\\frac {\\operatorname {M a s k} (\\mathcal {C} \\otimes \\mathcal {T}) + \\mathcal {A}}{\\sqrt {d _ {k}}}\\right) \\mathbf {H} \\mathbf {W} _ {v}, \\operatorname {M a s k} (\\mathcal {M}) = \\left\\{ \\begin{array}{l l} 0 & \\text {i f} \\mathcal {M} _ {i, j} = 1, \\\\ - \\infty & \\text {i f} \\mathcal {M} _ {i, j} = 0. \\end{array} \\right. \\tag {8}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.101, + 0.825, + 0.319 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.324, + 0.829, + 0.397 + ], + "angle": 0, + "content": "Figure 2: Illustration of TimeAttention. For univariate series, temporal mask \\(\\mathcal{T}\\) keeps the causality. Given multivariate patch tokens sorted in a temporal-first order, we adopt the variable dependencies \\(\\mathcal{C}\\), an all-one matrix, as the left-operand of Kronecker product, expanding temporal mask to a block matrix, which exactly reflects dependencies of multivariate next token prediction. The formulation is also generalizable to univariate and covariate-informed contexts with pre-defined variable dependency." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.413, + 0.825, + 0.444 + ], + "angle": 0, + "content": "Eventually, token representations in \\(\\mathbf{H} = \\{\\mathbf{h}_{m,i}\\} \\in \\mathbb{R}^{NT\\times D}\\) will be independently processed by feed-forward network and layer normalization, and fed into the next Transformer block." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.459, + 0.827, + 0.572 + ], + "angle": 0, + "content": "Unified Time Series Forecasting In multivariate forecasting, the variable dependency forms the complete graph, presenting an all-one matrix \\(\\mathcal{C}\\). 
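To make Equations 7-8 concrete, here is a hedged NumPy sketch that builds the Kronecker-based attention mask; the variable names are our own rather than from the paper's code, and replacing the all-one \(\mathcal{C}\) with a custom adjacency matrix yields the covariate-informed case.

```python
import numpy as np

N, T = 2, 3
Tmask = np.tril(np.ones((T, T)))  # Equation 7: T_{i,j} = 1 iff j <= i (temporal causality)
C = np.ones((N, N))               # complete variable-dependency graph (multivariate case)

# Kronecker product: block (m, n) of the NT x NT mask equals Tmask when
# variable m depends on variable n, and is a zero block otherwise.
M = np.kron(C, Tmask)

# Mask(.) in Equation 8: allowed positions add 0 and forbidden positions add
# -inf to the attention scores before the softmax.
additive_mask = np.where(M == 1.0, 0.0, -np.inf)

# Covariate-informed example (target A, covariate B): A attends to A and B,
# while B attends only to itself, as on the right of Figure 2.
C_cov = np.array([[1.0, 1.0], [0.0, 1.0]])
M_cov = np.kron(C_cov, Tmask)
```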
By generalizing TimeAttention on multiple sequences, Transformers can leverage their length-flexibility to encompass relevant covariates as well. In this case, Timer-XL is adapted in two steps: (1) formulate the customized variable dependency as \\(\\mathcal{C}\\) and (2) optimize the model using the supervision of target variables. An example (target-\\(A\\)-covariate-\\(B\\)) of TimeAttention is illustrated on the right of Figure 2. In a nutshell, we adopt position embeddings for the temporal and variable dimensions. To achieve unified time series forecasting, we flatten 2D time series into a unified context and capture fine-grained causal token dependencies." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.592, + 0.329, + 0.607 + ], + "angle": 0, + "content": "4 EXPERIMENTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.625, + 0.828, + 0.71 + ], + "angle": 0, + "content": "We conduct evaluations of Timer-XL in three aspects, including (1) supervised training as a task-specific forecaster, (2) large-scale pre-training as a zero-shot forecaster, and (3) assessing the effectiveness of TimeAttention and model efficiency. Given that the long-context forecasting paradigm receives less attention in the community, partly concealed by the performance saturation on previous benchmarks (Makridakis et al., 2020; Wu et al., 2022), we establish new long-context forecasting benchmarks. Detailed experimental configurations are provided in Appendix B." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.728, + 0.504, + 0.741 + ], + "angle": 0, + "content": "4.1 UNIVARIATE TIME SERIES FORECASTING" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.754, + 0.827, + 0.867 + ], + "angle": 0, + "content": "**Setup** Due to the insufficient dataset length when extending contexts in univariate datasets (Makridakis et al., 2020), we adopt multivariate datasets from Liu et al. (2023). Although these datasets are originally multivariate, they are predicted in a univariate manner via channel independence. Different from the previous long-term forecasting setting, we focus on reliable prediction based on a long context. Therefore, we fix the prediction horizon and increase the lookback length to monthly and yearly levels. We also establish a long-context univariate benchmark based on the challenging 40-year ECMWF Reanalysis v5 dataset (Hersbach et al., 2020), where yearly contexts are adopted to predict the land-surface temperature of a single site (ERA5-S)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.882, + 0.825, + 0.926 + ], + "angle": 0, + "content": "Results As shown in Figure 3, the accuracy of univariate prediction can generally be improved by extending the daily context to monthly. We draw a similar conclusion on ERA5 (Table 15), where extending the context consistently helps for a given model architecture. 
Notably, Timer-XL with" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "image", + "bbox": [ + 0.175, + 0.1, + 0.5, + 0.171 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.1, + 0.825, + 0.171 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.177, + 0.174, + 0.5, + 0.245 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.174, + 0.822, + 0.245 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.252, + 0.828, + 0.281 + ], + "angle": 0, + "content": "Figure 3: Univariate forecasting (pred-96) of well-acknowledged benchmarks under channel independence (Nie et al., 2022). We increase the lookback length to encompass monthly and yearly contexts." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.295, + 0.828, + 0.406 + ], + "angle": 0, + "content": "decoder-only architecture outperforms the encoder-only Transformer and the linear forecaster in excessively long contexts. Further, we conduct representation analysis in Appendix E.4, revealing that Timer-XL is proficient at adaptively selecting information from vast observations and thus achieves breakthrough performance. It is also noteworthy that the performance of monthly and yearly contexts improves only slowly and eventually deteriorates, which may stem from the increased noise and training difficulty inherent in the data; improving context efficiency is thus left as a future direction. Table 2 provides results on ERA5-S. Timer-XL consistently outperforms PatchTST on all sites, which can be credited to the maintenance of causality and token-wise supervision in the decoder-only architecture." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.422, + 0.828, + 0.534 + ], + "angle": 0, + "content": "Non-stationary Forecasting We delve into the widespread non-stationarity in univariate tasks. It is commonly tackled by normalization (Kim et al., 2021), which greatly improves Transformer performance in previous benchmarks. However, we find this improvement may be caused by the insufficient time span and training samples of these datasets. While normalization simplifies learning by aligning series with different means and variances to the same distribution, it limits the model capacity of Transformers, preventing them from learning variations among windows. The by-products can be mode collapse and oversmoothed predictions. In Table 2 and Table 16, we evaluate the performance on ERA5 and datasets from Wu et al. (2022), which validates that Timer-XL can achieve better results even without instance normalization." + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.552, + 0.828, + 0.595 + ], + "angle": 0, + "content": "Table 2: Univariate forecasting (input-3072-pred-96) of ERA5-S, encompassing 117k time points in each station (40 years). We evaluate PatchTST and Timer-XL with and without normalization (Kim et al., 2021). +Norm. indicates using the normalization. We train one model for each site separately." + }, + { + "type": "table", + "bbox": [ + 0.177, + 0.6, + 0.822, + 0.702 + ], + "angle": 0, + "content": "
StationBeijingHongkongLondonNew YorkParisSeoulShanghaiAverage
ModelMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAE
PatchTST0.07910.2210.1890.3270.2770.4150.1860.3340.2660.4070.09400.2380.1370.2890.1750.319
+ Norm.0.07970.2200.1910.3230.2810.4190.1840.3340.2720.4110.09140.2330.1360.2870.1760.319
Timer-XL0.07390.2100.1790.3160.2620.4040.1820.3270.2540.3990.09010.2290.1340.2820.1680.310
+ Norm.0.07420.2100.1830.3170.2780.4180.1810.3300.2640.4070.08960.2270.1330.2810.1720.313
" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.728, + 0.524, + 0.741 + ], + "angle": 0, + "content": "4.2 MULTIVARIATE TIME SERIES FORECASTING" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.754, + 0.828, + 0.853 + ], + "angle": 0, + "content": "**Setup** We follow iTransformer (Liu et al., 2023) to evaluate multivariate forecasting performance. Toward a one-for-all forecaster, we evaluate performance of rolling forecast, that is, we trained one model for all prediction horizons by integrating the previous prediction into the lookback window in the next iteration. We further establish long-context multivariate forecasting benchmarks: ERA5 multi-station land-surface temperature prediction (ERA5-MS), and the global temperature and wind speed forecasting challenge (GTWSF) (Wu et al., 2023), to learn complex temporal dynamics and variable correlations with sufficient training samples." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.868, + 0.828, + 0.926 + ], + "angle": 0, + "content": "Results As shown in Tables 3-4 and Figure 4, Timer-XL achieves the best results on both previous and new benchmarks. Essentially, Transformers that explicitly capture inter-series dependencies, such as UniTST (Liu et al., 2024a) and iTransformer, reasonably achieve decent performance in Table 3. Beyond iTransformer, Timer-XL can model fine-grained patch-wise temporal dependencies. With" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.828, + 0.149 + ], + "angle": 0, + "content": "TimeAttention, Timer-XL outperforms Timer especially on high-dimensional time series (13.2% in ECL and 6.3% in Traffic, with thousands of tokens in the context). Compared with the encoder-only UniTST, decoder-only Transformers excel at generalizing across varying prediction lengths in Table 4." + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.156, + 0.822, + 0.301 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.308, + 0.825, + 0.339 + ], + "angle": 0, + "content": "Figure 4: Multivariate forecasting of GTWSF (2-day-pred-1-day), involving 3850 worldwide stations spanning two years. Results of the baseline models are officially reported by Ding et al. (2024)." + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.354, + 0.825, + 0.384 + ], + "angle": 0, + "content": "Table 3: Multivariate forecasting (96-pred-96) of well-acknowledged benchmarks. All models are trained from scratch. Results of baseline models are officially reported by Liu et al. (2023)." + }, + { + "type": "table", + "bbox": [ + 0.177, + 0.387, + 0.825, + 0.521 + ], + "angle": 0, + "content": "
ModelsTimer-XL(Ours)Timer(2024c)UniTST(2024a)iTransformer(2023)DLinear(2023)PatchTST(2022)TimesNet(2022)Stationary(2022b)Autoformer(2021)
MetricMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAE
ECL0.1380.2330.1590.2440.1390.2350.1480.2400.1970.2820.1810.2700.1680.2720.1690.2730.2010.317
ETTh10.3810.3990.3860.4010.3850.4020.3860.4050.3860.4000.4140.4190.3840.4020.5130.4910.4490.459
Traffic0.3870.2600.4130.2650.3890.2650.3950.2680.6500.3960.4620.2950.5930.3210.6120.3380.6130.388
Weather0.1650.2090.1760.2150.1650.2100.1740.2140.1960.2550.1770.2180.1720.2200.1730.2230.2660.336
Solar-Energy0.2000.2290.2040.2340.2030.2320.2030.2370.2900.3780.2340.2860.2500.2920.2150.2490.8840.711
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.532, + 0.827, + 0.576 + ], + "angle": 0, + "content": "Table 4: Multivariate forecasting (672-pred-{96, 192, 336, 720}) of well-acknowledged benchmarks. We evaluate one-for-all forecasters following Liu et al. (2024b): rolling forecasting for four forecast lengths with one model. Averaged results are reported here and full results are provided in Table 12." + }, + { + "type": "table", + "bbox": [ + 0.177, + 0.579, + 0.822, + 0.713 + ], + "angle": 0, + "content": "
ModelsTimer-XL(Ours)Timer(2024c)UniTST(2024a)iTransformer(2023)DLinear(2023)PatchTST(2022)TimesNet(2022)Stationary(2022b)Autoformer(2021)
MetricMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAE
ECL0.1550.2460.1610.2510.1630.2570.1640.2580.1650.2650.1690.2680.2010.3030.2650.3580.2890.379
ETTh10.4090.4300.4180.4360.4290.4470.4210.4450.4260.4440.4120.4350.4950.4910.5050.5130.5170.528
Traffic0.3740.2550.3840.2590.3850.2650.3840.2740.4230.2980.3910.2750.6020.3220.6300.3470.6840.433
Weather0.2400.2730.2320.2700.2310.2720.2660.2910.2390.2910.2260.2680.2640.2930.3080.3290.4350.455
Solar-Energy0.1980.2490.2330.2490.2410.2750.2130.2910.2220.2830.2020.2690.2130.2950.2540.3150.2650.325
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.728, + 0.827, + 0.841 + ], + "angle": 0, + "content": "Ablation Study Patching (Nie et al., 2022) has been demonstrated as an effective tokenization approach for time series, leading to the boom of Transformers in supervised deep forecasters and large time series models. To better cope with multivariate time series forecasting, we compared typical models on real-world benchmarks to address key questions: (1) whether to conduct explicit inter-series modeling or not (channel independence) and (2) whether to use decoder-only or encoder-only Transformers. The combination presents four Transformers in Table 5, which shows that Timer-XL combines the advantages of explicit inter-series modeling and the decoder-only architecture, which is suitable for multivariate time series forecasting with sufficient training samples." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.857, + 0.58, + 0.872 + ], + "angle": 0, + "content": "4.3 COVARIATE-INFORMED TIME SERIES FORECASTING" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.882, + 0.826, + 0.926 + ], + "angle": 0, + "content": "**Setup** For the covariate-informed forecasting, we adopt the well-acknowledged electricity price forecasting (EPF) task (Lago et al., 2021). Each subset contains electricity price as the endogenous variable and two exogenous variables. Therefore, the variable dependency for Timer-XL is formulated" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.106, + 0.825, + 0.163 + ], + "angle": 0, + "content": "Table 5: Multivariate forecasting (input-3072-pred-96) of ERA5-MS (40 years and 7 stations). We fairly evaluate Transformers that adopt patched time series. CI. indicates whether the Transformer uses channel independence (Nie et al., 2022). Arch. categorizes them into the encoder-only (E) and decoder-only (D) architectures. Different from ERA5-S in Table 2, we train one model for all sites." + }, + { + "type": "table", + "bbox": [ + 0.177, + 0.168, + 0.825, + 0.278 + ], + "angle": 0, + "content": "
StationBeijingHongkongLondonNew YorkParisSeoulShanghaiAverage
ModelCI.Arch.MSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAE
PatchTSTYesE0.08150.2220.1900.3260.2750.4140.1850.3330.2650.4070.09770.2400.1390.2900.1760.319
UniTSTNoE0.07530.2130.1790.3180.2690.4100.1850.3300.2560.4010.09010.2300.1350.2840.1700.312
TimerYesD0.07340.2100.1820.3190.2680.4070.1830.3290.2550.3990.08770.2260.1320.2810.1690.310
Timer-XLNoD0.07360.2090.1740.3090.2630.4040.1820.3270.2520.3960.08720.2250.1300.2780.1660.307
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.289, + 0.825, + 0.334 + ], + "angle": 0, + "content": "as \\(\\mathcal{C} = [[1,1,1],[0,1,0],[0,0,1]]\\). To investigate whether to learn causal or noncausal patch-wise dependencies in covariates, we implement two versions of Timer-XL: the original one with temporal causal mask \\(\\mathcal{T}\\), and the noncausal one with \\(\\mathcal{T}\\) replaced by an all-one matrix." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.347, + 0.827, + 0.432 + ], + "angle": 0, + "content": "Results As shown in Table 6, Timer-XL outperforms state-of-the-art models in covariate-informed tasks. Compared with TimeXer (Wang et al., 2024b), which treats an entire covariate as a token, Timer-XL learns fine-grained patch-wise dependencies. By the noncausal version of Timer-XL, we surprisingly find consistent conclusions with endogenous variables: results will be better if Timer-XL learns causal dependencies within exogenous variables. It again validates that next token prediction that maintains causality has a higher upper limit of performance." + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.443, + 0.827, + 0.487 + ], + "angle": 0, + "content": "Table 6: Covariate-informed forecasting (168-pred-24) of EPF. We implement two versions of TimerXL: Noncausal indicates that we do not maintain the causality within covariates by replacing temporal causal mask with all-one matrix. Results of baselines are officially reported by Wang et al. (2024b)." + }, + { + "type": "table", + "bbox": [ + 0.177, + 0.491, + 0.822, + 0.645 + ], + "angle": 0, + "content": "
ModelsTimer-XL (Ours)Timer-XL (Noncausal)TimeXer (2024b)iTransformer (2023)DLinear (2023)PatchTST (2022)Crossformer (2022)TimesNet (2022)Autoformer (2021)
MetricMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAE
NP0.2340.2620.2370.2650.2380.2680.2650.3000.3090.3210.2670.2840.2450.2890.2500.2890.4020.398
PJM0.0890.1870.0920.1880.0880.1880.0970.1970.1080.2150.1060.2090.1490.1980.0970.1950.1680.267
BE0.3710.2430.4100.2790.3790.2430.3940.2700.4630.3130.4030.2640.4360.2940.4190.2880.5000.333
FR0.3810.2040.4060.2200.3840.2080.4390.2330.4290.2600.4110.2200.4400.2160.4310.2340.5190.295
DE0.4340.4150.4350.4150.4400.4180.4790.4430.5200.4630.4610.4320.5400.4230.5020.4460.6740.544
Average0.3020.2620.3160.2730.3060.2650.3350.2890.3660.3140.3300.2820.3620.2840.3400.2900.4530.368
" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.661, + 0.533, + 0.675 + ], + "angle": 0, + "content": "4.4 PRE-TRAINED TIME-SERIES TRANSFORMERS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.687, + 0.827, + 0.855 + ], + "angle": 0, + "content": "**Setup** Pre-training enriches time-series Transformers with generalizable forecasting capabilities. The outcome large time series model can cope with widespread challenges of few-shot and zero-shot forecasting. In this section, we conduct univariate pre-training on UTSD (Liu et al., 2024c) and LOTSA (Woo et al., 2024) and evaluate zero-shot performance on benchmarks from Wu et al. (2022). We further conduct large-scale multivariate pre-training on our ERA5-Large dataset, which spans 40 years and encompasses 4920 stations. Subsequently, we evaluate three types of generalization results comparing PatchTST (encoder-only Transformer) and Timer-XL (decoder-only Transformer): pre-training on \\(80\\%\\) stations and \\(80\\%\\) time span and then forecast on the remaining stations (variable generalization), remaining time span (temporal generalization), and remaining split of time span and stations (variable and temporal generalization). To evaluate the benefit of pre-training with longer context, we compare the zero-shot performance of Timer (2024c) and Timer-XL, where the context length of pre-training is increased from 1440 to 2880." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.868, + 0.827, + 0.926 + ], + "angle": 0, + "content": "Results We compare generalization performance on ERA5-Large in the middle of Figure 5 (a). Timer-XL achieves better results than PatchTST in all cases, revealing that decoder-only architecture has stronger generalization capability. Figure 5 (b) compares zero-shot performance of two pretrained Transformers with different context lengths, where Timer-XL outperforms previous Timer on" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.828, + 0.162 + ], + "angle": 0, + "content": "all benchmark datasets, validating that long-context pre-training enhances large time series models. In Table 7, we provide a comprehensive zero-shot evaluation under a comparable pre-training scale and model size, where Timer-XL achieves notable performance with better sample efficiency. The versatility and scalability make it a promising backbone of foundation models." + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.179, + 0.415, + 0.312 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.416, + 0.179, + 0.596, + 0.315 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.598, + 0.179, + 0.816, + 0.315 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.323, + 0.828, + 0.381 + ], + "angle": 0, + "content": "Figure 5: Illustration of one-for-all generalization (left). Based on the contextual flexibility, Timer-XL can predict heterogeneous time series, indicating three directions of generalization shown on the left. We compare performance when generalizing across the time and variables (middle), and zero-shot results across datasets (right), emphasizing the benefit of long-context pre-training." 
+ }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.397, + 0.828, + 0.456 + ], + "angle": 0, + "content": "Table 7: Averaged results of zero-shot forecasting. A lower MSE or MAE indicates a better prediction. Corresponding prediction lengths include \\(\\{96,192,336,720\\}\\). Full results of all prediction lengths are provided in Table 13. \\(1^{\\text{st}}\\) Count represents the number of wins achieved by a model under all prediction lengths and datasets. The detailed configuration of Timer-XLBase is provided in Table 11." + }, + { + "type": "table", + "bbox": [ + 0.177, + 0.463, + 0.825, + 0.609 + ], + "angle": 0, + "content": "
ModelsTimer-XLBase(Ours)Time-MoEBase(2024)Time-MoELarge(2024)Time-MoEUltra(2024)MoiraiSmall(2024)MoiraiBase(2024)MoiraiLarge(2024)TimesFM(2023)MOMENT(2024)ChronosBase(2024)ChronosLarge(2024)
MetricMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAE
ETTm10.3730.3920.3940.4150.3760.4050.3560.3910.4360.4100.4060.3850.4220.3910.4330.4180.6700.5360.6450.5000.5550.465
ETTm20.2730.3360.3170.3650.3160.3610.2880.3440.3070.3470.3110.3370.3290.3430.3280.3460.3160.3650.3100.3500.2950.338
ETTh10.4040.4170.4000.4240.3940.4190.4120.4260.4280.4270.4170.4190.4800.4390.4730.4430.6830.5660.5910.4680.5880.466
ETTh20.3470.3880.3660.4040.4050.4150.3710.3990.3610.3840.3620.3820.3670.3770.3920.4060.3610.4090.4050.4100.4550.427
ECL0.1740.278------0.2180.3030.1870.2740.1860.270--0.7650.6860.2140.2780.2040.273
Weather0.2560.2940.2650.2970.2700.3000.2560.2880.2750.2860.2870.2810.2640.273--0.2940.3260.2920.3150.2790.306
\\( 1^{st} \\) Count15102130107000511001200002
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.197, + 0.61, + 0.721, + 0.622 + ], + "angle": 0, + "content": "* Dataset for pre-training is not evaluated on corresponding models, which is denoted by a dash (-)." + }, + { + "type": "table_footnote", + "bbox": [ + 0.199, + 0.622, + 0.76, + 0.633 + ], + "angle": 0, + "content": "* Traffic from (PEMS) is generally used during the pre-training of large models and thus not evaluated here." + }, + { + "type": "table_footnote", + "bbox": [ + 0.199, + 0.633, + 0.641, + 0.644 + ], + "angle": 0, + "content": "* Our model checkpoint is available at https://huggingface.co/thuml/timer-base-84m." + }, + { + "type": "list", + "bbox": [ + 0.197, + 0.61, + 0.76, + 0.644 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.668, + 0.345, + 0.682 + ], + "angle": 0, + "content": "4.5 MODEL ANALYSIS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.695, + 0.827, + 0.82 + ], + "angle": 0, + "content": "Model Efficiency To evaluate the model efficiency of Timer-XL with respect to the context length, it is essential to recognize the distinct characteristics of time series data compared to 1D sequences. Unlike natural language, the time series modality is characterized by the variable number \\(N\\) and the input length. We adopt two representative multivariate datasets with different \\(N\\), and provide the memory footprint and training speed under gradually prolonged input. We evaluate typical approaches to handle multivariate series: (1) Timer-XL and Moiria that adopt channel dependence; (2) Timer that adopts channel independence. Intuitively, the complexity of the first type is \\(\\mathcal{O}(N^2 T^2)\\) while the complexity of self-attention under channel independence is \\(\\mathcal{O}(NT^2)\\). However, results shown in Figure 6 reveal that measured overheads of Timer-XL is much less than \\(N\\) times of Timer." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.827, + 0.828, + 0.926 + ], + "angle": 0, + "content": "Since the previous analysis of model efficiency on time-series Transformer predominantly focuses on self-attention on 1D time series, we initially present a theoretical derivation of the computational complexity of Transformers on 2D time series, including the parameter counts, memory footprint, and FLOPs in Table 8. We find that other parts of Transformers, such as feed-forward network, have a complexity of \\(\\mathcal{O}(NT)\\) no matter which approach is adopted to handle multivariate time series. They also account for dominant overheads in existing benchmarks since the context length is not large enough, confirming our empirical results. Further, we introduce FlashAttention (Dao et al., 2022) to" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.133 + ], + "angle": 0, + "content": "improve the model efficiency, which is computationally equivalent and reduces the overall memory footprint of Timer-XL to \\(\\mathcal{O}(NT)\\) without affecting performance." 
+ }, + { + "type": "image_caption", + "bbox": [ + 0.295, + 0.142, + 0.403, + 0.152 + ], + "angle": 0, + "content": "Weather (21 Variables)" + }, + { + "type": "image", + "bbox": [ + 0.187, + 0.152, + 0.5, + 0.235 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.61, + 0.142, + 0.719, + 0.152 + ], + "angle": 0, + "content": "Weather (21 Variables)" + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.152, + 0.817, + 0.235 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.306, + 0.238, + 0.403, + 0.248 + ], + "angle": 0, + "content": "ECL (321 Variables)" + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.248, + 0.5, + 0.332 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.622, + 0.238, + 0.719, + 0.248 + ], + "angle": 0, + "content": "ECL (321 Variables)" + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.248, + 0.817, + 0.332 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.335, + 0.825, + 0.364 + ], + "angle": 0, + "content": "Figure 6: Efficiency analysis. We compare representative time-series Transformers on multivariate datasets with variable numbers ranging from ten to hundred and increase the lookback length." + }, + { + "type": "image_caption", + "bbox": [ + 0.272, + 0.374, + 0.359, + 0.383 + ], + "angle": 0, + "content": "Learned Attention" + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.383, + 0.465, + 0.552 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.465, + 0.373, + 0.822, + 0.466 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.493, + 0.469, + 0.567, + 0.479 + ], + "angle": 0, + "content": "Sub-Block(3, 3)" + }, + { + "type": "image", + "bbox": [ + 0.484, + 0.478, + 0.82, + 0.552 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.557, + 0.827, + 0.614 + ], + "angle": 0, + "content": "Figure 7: Visualization of TimeAttention. It is from the first sample of a length 672 in the test split of Traffic. We visualize the last 10 variables with each contains 7 tokens. We present auto-correlation function plot. Auto-correlation can be reflected by the distribution of attention scores (bottom right). We average TimeAttention across sub-blocks, which indicates Pearson correlations (upper right)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.624, + 0.827, + 0.75 + ], + "angle": 0, + "content": "Representation Analysis In addition to the enhanced performance, fine-grained token dependencies offer improved interpretability. We present a showcase visualization from Traffic in Figure 7. It is observed that sub-matrices along the diagonal generally receive greater attention, which reasonably reveals predominant dependencies within the endogenous variable. By zooming in a sub-block that corresponds to Variable-3, we observe that the attention distribution of the last row can indicate certain strong dependencies among patch tokens. This observation is also supported by the auto-correlation function plot (ACF), which reveals auto-correlations with certain lags and thus the model pays special attention to these tokens. Furthermore, we average each sub-matrix into one scalar. The outcome matrix can also illustrate Pearson correlations presented in the raw data." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.769, + 0.496, + 0.784 + ], + "angle": 0, + "content": "5 CONCLUSION AND FUTURE WORK" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.8, + 0.827, + 0.926 + ], + "angle": 0, + "content": "In this paper, we emphasize the efficacy of causal Transformers in the forecasting of long-context time series. To facilitate long-context Transformers on diverse tasks, we propose multivariate next token prediction, a novel paradigm to predict multidimensional series with covariates. We present Timer-XL enhanced by TimeAttention as an extra-long version of pre-trained time-series Transformers. It simultaneously captures temporal dynamics and variable correlations by enhanced self-attention. In addition to achieving state-of-the-art performance on extensive benchmarks, we establish challenging benchmarks for long-context forecasting. By pre-training on large-scale heterogeneous time series, Timer-XL demonstrates notable zero-shot performance as a large time-series model. In the future, we will improve computational efficiency and build large domain-specific models with Timer-XL." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.103, + 0.357, + 0.119 + ], + "angle": 0, + "content": "ACKNOWLEDGMENTS" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.133, + 0.828, + 0.164 + ], + "angle": 0, + "content": "This work was supported by the National Natural Science Foundation of China (U2342217 and 62021002), the BNRist Project, and the National Engineering Research Center for Big Data Software." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.184, + 0.289, + 0.199 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.207, + 0.829, + 0.251 + ], + "angle": 0, + "content": "Abdul Fatir Ansari, Lorenzo Stella, Caner Turkmen, Xiyuan Zhang, Pedro Mercado, Huibin Shen, Oleksandr Shchur, Syama Sundar Rangapuram, Sebastian Pineda Arango, Shubham Kapoor, et al. Chronos: Learning the language of time series. arXiv preprint arXiv:2403.07815, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.259, + 0.829, + 0.289 + ], + "angle": 0, + "content": "Yoshua Bengio, Réjean Ducharme, and Pascal Vincent. A neural probabilistic language model. Advances in neural information processing systems, 13, 2000." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.298, + 0.829, + 0.341 + ], + "angle": 0, + "content": "George Box. Box and jenkins: time series analysis, forecasting and control. In A Very British Affair: Six Britons and the Development of Time Series Analysis During the 20th Century, pp. 161-215. Springer, 2013." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.35, + 0.829, + 0.393 + ], + "angle": 0, + "content": "Defu Cao, Yujing Wang, Juanyong Duan, Ce Zhang, Xia Zhu, Congrui Huang, Yunhai Tong, Bixiong Xu, Jing Bai, Jie Tong, et al. Spectral temporal graph neural network for multivariate time-series forecasting. Advances in neural information processing systems, 33:17766-17778, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.402, + 0.829, + 0.444 + ], + "angle": 0, + "content": "Tri Dao, Dan Fu, Stefano Ermon, Atri Rudra, and Christopher Ré. 
Flashattention: Fast and memory-efficient exact attention with io-awareness. Advances in Neural Information Processing Systems, 35:16344-16359, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.453, + 0.829, + 0.484 + ], + "angle": 0, + "content": "Abhimanyu Das, Weihao Kong, Rajat Sen, and Yichen Zhou. A decoder-only foundation model for time-series forecasting. arXiv preprint arXiv:2310.10688, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.492, + 0.829, + 0.549 + ], + "angle": 0, + "content": "Xiaohan Ding, Yiyuan Zhang, Yixiao Ge, Sijie Zhao, Lin Song, Xiangyu Yue, and Ying Shan. Unireplknet: A universal perception large-kernel convnet for audio video point cloud time-series and image recognition. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 5513-5524, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.558, + 0.829, + 0.601 + ], + "angle": 0, + "content": "Mononito Goswami, Konrad Szafer, Arjun Choudhry, Yifu Cai, Shuo Li, and Artur Dubrawski. Moment: A family of open time-series foundation models. arXiv preprint arXiv:2402.03885, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.61, + 0.829, + 0.654 + ], + "angle": 0, + "content": "Hans Hersbach, Bill Bell, Paul Berrisford, Shoji Hirahara, András Horányi, Joaquín Muñoz-Sabater, Julien Nicolas, Carole Peubey, Raluca Radu, Dinand Schepers, et al. The era5 global reanalysis. Quarterly Journal of the Royal Meteorological Society, 146(730):1999-2049, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.662, + 0.611, + 0.679 + ], + "angle": 0, + "content": "RJ Hyndman. Forecasting: principles and practice. OTexts, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.687, + 0.829, + 0.731 + ], + "angle": 0, + "content": "Taesung Kim, Jinhee Kim, Yunwon Tae, Cheonbok Park, Jang-Ho Choi, and Jaegul Choo. Reversible instance normalization for accurate time-series forecasting against distribution shift. In International Conference on Learning Representations, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.739, + 0.829, + 0.768 + ], + "angle": 0, + "content": "Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.777, + 0.829, + 0.821 + ], + "angle": 0, + "content": "Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 4015-4026, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.83, + 0.829, + 0.873 + ], + "angle": 0, + "content": "Jesus Lago, Grzegorz Marcjasz, Bart De Schutter, and Rafal Weron. Forecasting day-ahead electricity prices: A review of state-of-the-art algorithms, best practices and an open-access benchmark. Applied Energy, 293:116983, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.882, + 0.829, + 0.925 + ], + "angle": 0, + "content": "Guokun Lai, Wei-Cheng Chang, Yiming Yang, and Hanxiao Liu. Modeling long-and short-term temporal patterns with deep neural networks. In The 41st international ACM SIGIR conference on research & development in information retrieval, pp. 95-104, 2018." 
+ }, + { + "type": "list", + "bbox": [ + 0.173, + 0.207, + 0.829, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.961 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.826, + 0.148 + ], + "angle": 0, + "content": "Shiyang Li, Xiaoyong Jin, Yao Xuan, Xiyou Zhou, Wenhu Chen, Yu-Xiang Wang, and Xifeng Yan. Enhancing the locality and breaking the memory bottleneck of transformer on time series forecasting. Advances in neural information processing systems, 32, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.157, + 0.827, + 0.2 + ], + "angle": 0, + "content": "Bryan Lim, Sercan Ö Arık, Nicolas Loeff, and Tomas Pfister. Temporal fusion transformers for interpretable multi-horizon time series forecasting. International Journal of Forecasting, 37(4): 1748-1764, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.211, + 0.825, + 0.256 + ], + "angle": 0, + "content": "Juncheng Liu, Chenghao Liu, Gerald Woo, Yiwei Wang, Bryan Hooi, Caiming Xiong, and Doyen Sahoo. Unitst: Effectively modeling inter-series and intra-series dependencies for multivariate time series forecasting. arXiv preprint arXiv:2406.04975, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.265, + 0.827, + 0.309 + ], + "angle": 0, + "content": "Minhao Liu, Ailing Zeng, Muxi Chen, Zhijian Xu, Qiuxia Lai, Lingna Ma, and Qiang Xu. Scinet: Time series modeling and forecasting with sample convolution and interaction. Advances in Neural Information Processing Systems, 35:5816-5828, 2022a." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.319, + 0.827, + 0.363 + ], + "angle": 0, + "content": "Shizhan Liu, Hang Yu, Cong Liao, Jianguo Li, Weiyao Lin, Alex X Liu, and Schahram Dustdar. Pyraformer: Low-complexity pyramidal attention for long-range time series modeling and forecasting. In International conference on learning representations, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.373, + 0.827, + 0.415 + ], + "angle": 0, + "content": "Yong Liu, Haixu Wu, Jianmin Wang, and Mingsheng Long. Non-stationary transformers: Exploring the stationarity in time series forecasting. Advances in Neural Information Processing Systems, 35: 9881-9893, 2022b." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.426, + 0.827, + 0.469 + ], + "angle": 0, + "content": "Yong Liu, Tengge Hu, Haoran Zhang, Haixu Wu, Shiyu Wang, Lintao Ma, and Mingsheng Long. itransformer: Inverted transformers are effective for time series forecasting. arXiv preprint arXiv:2310.06625, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.48, + 0.827, + 0.523 + ], + "angle": 0, + "content": "Yong Liu, Guo Qin, Xiangdong Huang, Jianmin Wang, and Mingsheng Long. Autotimes: Autoregressive time series forecasters via large language models. arXiv preprint arXiv:2402.02370, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.534, + 0.827, + 0.578 + ], + "angle": 0, + "content": "Yong Liu, Haoran Zhang, Chenyu Li, Xiangdong Huang, Jianmin Wang, and Mingsheng Long. Timer: Generative pre-trained transformers are large time series models. In Forty-first International Conference on Machine Learning, 2024c."
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.588, + 0.827, + 0.618 + ], + "angle": 0, + "content": "Spyros Makridakis, Evangelos Spiliotis, and Vassilios Assimakopoulos. The m4 competition: 100,000 time series and 61 forecasting methods. International Journal of Forecasting, 36(1):54-74, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.628, + 0.825, + 0.658 + ], + "angle": 0, + "content": "Yuqi Nie, Nam H Nguyen, Phanwadee Sinthong, and Jayant Kalagnanam. A time series is worth 64 words: Long-term forecasting with transformers. arXiv preprint arXiv:2211.14730, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.668, + 0.71, + 0.684 + ], + "angle": 0, + "content": "OpenAI. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.693, + 0.827, + 0.736 + ], + "angle": 0, + "content": "Boris N Oreshkin, Dmitri Carpov, Nicolas Chapados, and Yoshua Bengio. N-beats: Neural basis expansion analysis for interpretable time series forecasting. arXiv preprint arXiv:1905.10437, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.747, + 0.827, + 0.804 + ], + "angle": 0, + "content": "Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. Pytorch: An imperative style, high-performance deep learning library. Advances in neural information processing systems, 32, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.816, + 0.565, + 0.831 + ], + "angle": 0, + "content": "PEMS. Traffic Dataset. http://pems.dot.ca.gov/." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.842, + 0.825, + 0.872 + ], + "angle": 0, + "content": "Ofir Press, Noah A Smith, and Mike Lewis. Train short, test long: Attention with linear biases enables input length extrapolation. arXiv preprint arXiv:2108.12409, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.882, + 0.825, + 0.925 + ], + "angle": 0, + "content": "Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J Liu. Exploring the limits of transfer learning with a unified text-to-text transformer. The Journal of Machine Learning Research, 21(1):5485-5551, 2020." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.827, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.961 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.828, + 0.162 + ], + "angle": 0, + "content": "Kashif Rasul, Arjun Ashok, Andrew Robert Williams, Arian Khorasani, George Adamopoulos, Rishika Bhagwatkar, Marin Biloš, Hera Ghonia, Nadhir Vincent Hassen, Anderson Schneider, et al. Lag-llama: Towards foundation models for time series forecasting. arXiv preprint arXiv:2310.08278, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.169, + 0.827, + 0.213 + ], + "angle": 0, + "content": "David Salinas, Valentin Flunkert, Jan Gasthaus, and Tim Januschowski. DeepAR: Probabilistic forecasting with autoregressive recurrent networks. International journal of forecasting, 36(3): 1181-1191, 2020."
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.221, + 0.827, + 0.266 + ], + "angle": 0, + "content": "Xiaoming Shi, Shiyu Wang, Yuqi Nie, Dianqi Li, Zhou Ye, Qingsong Wen, and Ming Jin. Time-moe: Billion-scale time series foundation models with mixture of experts. arXiv preprint arXiv:2409.16040, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.273, + 0.825, + 0.306 + ], + "angle": 0, + "content": "Jianlin Su, Murtadha Ahmed, Yu Lu, Shengfeng Pan, Wen Bo, and Yunfeng Liu. Roformer: Enhanced transformer with rotary position embedding. Neurocomputing, 568:127063, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.311, + 0.825, + 0.344 + ], + "angle": 0, + "content": "Huihui Sun and Xiaofeng Zhang. Study on coded permutation entropy of finite length gaussian white noise time series. Chinese Journal of Electronics, 33(1):185-194, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.35, + 0.825, + 0.394 + ], + "angle": 0, + "content": "Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, et al. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.402, + 0.825, + 0.446 + ], + "angle": 0, + "content": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in neural information processing systems, 30, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.454, + 0.825, + 0.499 + ], + "angle": 0, + "content": "Xindi Wang, Mahsa Salmani, Parsa Omidi, Xiangyu Ren, Mehdi Rezagholizadeh, and Armaghan Eshaghi. Beyond the limits: A survey of techniques to extend the context length in large language models. arXiv preprint arXiv:2402.02244, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.506, + 0.827, + 0.551 + ], + "angle": 0, + "content": "Yuxuan Wang, Haixu Wu, Jiaxiang Dong, Yong Liu, Yunzhong Qiu, Haoran Zhang, Jianmin Wang, and Mingsheng Long. Timexer: Empowering transformers for time series forecasting with exogenous variables. arXiv preprint arXiv:2402.19072, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.558, + 0.827, + 0.602 + ], + "angle": 0, + "content": "Gerald Woo, Chenghao Liu, Akshit Kumar, Caiming Xiong, Silvio Savarese, and Doyen Sahoo. Unified training of universal time series forecasting transformers. arXiv preprint arXiv:2402.02592, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.61, + 0.825, + 0.655 + ], + "angle": 0, + "content": "Haixu Wu, Jiehui Xu, Jianmin Wang, and Mingsheng Long. Autoformer: Decomposition transformers with auto-correlation for long-term series forecasting. Advances in Neural Information Processing Systems, 34:22419-22430, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.663, + 0.827, + 0.705 + ], + "angle": 0, + "content": "Haixu Wu, Tengge Hu, Yong Liu, Hang Zhou, Jianmin Wang, and Mingsheng Long. Timesnet: Temporal 2d-variation modeling for general time series analysis. arXiv preprint arXiv:2210.02186, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.714, + 0.827, + 0.746 + ], + "angle": 0, + "content": "Haixu Wu, Hang Zhou, Mingsheng Long, and Jianmin Wang. Interpretable weather forecasting for worldwide stations with a unified deep model. Nature Machine Intelligence, 5(6):602-611, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.753, + 0.825, + 0.785 + ], + "angle": 0, + "content": "Shukang Yin, Chaoyou Fu, Sirui Zhao, Ke Li, Xing Sun, Tong Xu, and Enhong Chen. A survey on multimodal large language models. arXiv preprint arXiv:2306.13549, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.791, + 0.825, + 0.823 + ], + "angle": 0, + "content": "Manzil Zaheer, Satwik Kottur, Siamak Ravanbakhsh, Barnabas Poczos, Russ R Salakhutdinov, and Alexander J Smola. Deep sets. Advances in neural information processing systems, 30, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.829, + 0.827, + 0.873 + ], + "angle": 0, + "content": "Ailing Zeng, Muxi Chen, Lei Zhang, and Qiang Xu. Are transformers effective for time series forecasting? In Proceedings of the AAAI conference on artificial intelligence, volume 37, pp. 11121-11128, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.881, + 0.825, + 0.926 + ], + "angle": 0, + "content": "Yunhao Zhang and Junchi Yan. Crossformer: Transformer utilizing cross-dimension dependency for multivariate time series forecasting. In The Eleventh International Conference on Learning Representations, 2022." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.828, + 0.926 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.104, + 0.827, + 0.148 + ], + "angle": 0, + "content": "Wayne Xin Zhao, Kun Zhou, Junyi Li, Tianyi Tang, Xiaolei Wang, Yupeng Hou, Yingqian Min, Beichen Zhang, Junjie Zhang, Zican Dong, et al. A survey of large language models. arXiv preprint arXiv:2303.18223, 2023." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.154, + 0.828, + 0.199 + ], + "angle": 0, + "content": "Haoyi Zhou, Shanghang Zhang, Jieqi Peng, Shuai Zhang, Jianxin Li, Hui Xiong, and Wancai Zhang. Informer: Beyond efficient transformer for long sequence time-series forecasting. In Proceedings of the AAAI conference on artificial intelligence, volume 35, pp. 11106-11115, 2021." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.222, + 0.471, + 0.238 + ], + "angle": 0, + "content": "A PROOF OF MODEL EFFICIENCY" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.253, + 0.274, + 0.267 + ], + "angle": 0, + "content": "A.1 SETUPS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.279, + 0.828, + 0.392 + ], + "angle": 0, + "content": "Consider an input univariate time series divided into \\(T\\) tokens according to the patch size \\(P\\), which is fed into the vanilla Transformer. The training objective is to predict the next token of \\(P\\) time points. We will generalize the derivation from 1D sequences to 2D time series based on different approaches to handling multivariate data with the variable number \\(N\\). We adopt the same notations as before: the Transformer consists of \\(L\\) blocks with model dimension \\(D\\). The multi-head attention mechanism has \\(H\\) heads, each with a dimension of \\(d_{k}\\) for query, key, and value, where \\(d_{k} = \\frac{D}{H}\\). The intermediate dimension of the feed-forward network is set as \\(D_{\\mathrm{ff}} = \\alpha D\\). The results are summarized in Table 8; we provide the detailed proofs in the following sections."
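+ }, + { + "type": "text", + "content": "The closed-form counts derived in the following subsections and summarized in Table 8 can be sanity-checked numerically. Below is a minimal sketch under the stated setup; the function names are ours, and the default values follow the typical hyperparameters used in Section A.2." + }, + { + "type": "code", + "content": "
def training_flops(N, T, L=4, D=512, H=8, P=96, alpha=4, channel_dependence=False):
    # FLOPs to train on an N-variable series of T patch tokens (Equations 9 and 10).
    attn = (N * T) ** 2 if channel_dependence else N * T ** 2
    return 12 * P * D * N * T + 12 * L * (D + H) * attn + (24 + 12 * alpha) * L * D ** 2 * N * T

def parameter_count(T, L=4, D=512, P=96, alpha=4, flatten_head=False):
    # Parameters of an L-block Transformer (Equation 11).
    head = (1 + T) * P * D if flatten_head else 2 * P * D
    return (4 + 2 * alpha) * L * D ** 2 + 4 * L * D + head

def activation_bytes(N, T, L=4, D=512, H=8, P=96, alpha=4, flash_attention=True):
    # Stored activations in bytes for an N-variable, T-token series (Equation 12 with T -> NT).
    base = 4 * (D + P) * N * T + (32 + 8 * alpha) * L * D * N * T
    return base if flash_attention else base + 4 * L * H * (N * T) ** 2

# With the typical hyperparameters, univariate training FLOPs reduce to 24960*T^2 + 76087296*T:
assert training_flops(N=1, T=7) == 24960 * 7 ** 2 + 76087296 * 7
"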
+ }, + { + "type": "table_caption", + "bbox": [ + 0.172, + 0.413, + 0.827, + 0.429 + ], + "angle": 0, + "content": "Table 8: Parameter count and computational complexity of Transformers for multivariate time series." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.432, + 0.825, + 0.529 + ], + "angle": 0, + "content": "
Metric | Type | Count | Complexity
FLOPs (Training Speed) | Channel Independence | 12(PDNT + L(D + H)NT^2 + (2 + α)LD^2NT) | O(LDNT(D + T))
FLOPs (Training Speed) | Channel Dependence | 12(PDNT + L(D + H)N^2T^2 + (2 + α)LD^2NT) | O(LDNT(D + NT))
Parameters | Encoder-Only | (4 + 2α)LD^2 + 4LD + (1 + T)PD | O(LD^2)
Parameters | Decoder-Only | (4 + 2α)LD^2 + 4LD + 2PD | O(LD^2)
Memory Footprint | Self-Attention | 4(D + P)NT + (32 + 8α)LDNT + 4LHN^2T^2 | O(LHN^2T^2)
Memory Footprint | FlashAttention | 4(D + P)NT + (32 + 8α)LDNT | O(LDNT)
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.188, + 0.53, + 0.826, + 0.563 + ], + "angle": 0, + "content": "* \\( L \\) is the block number of Transformers. \\( D \\) is the dimension of embeddings (the hidden dimension of FFN \\( D_{\\mathrm{ff}} \\) is set as \\( \\alpha D \\)). \\( H \\) is the head number and the dimension of query, key, and value \\( d_k = D / H \\). The overhead is to train on a multivariate time series (\\( N \\)-variables and \\( TP \\) time points) with patch token length \\( P \\) and context length \\( T \\). Set \\( N = 1 \\) for training on univariate time series." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.579, + 0.273, + 0.593 + ], + "angle": 0, + "content": "A.2 FLOPs" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.604, + 0.827, + 0.677 + ], + "angle": 0, + "content": "As a preliminary, the multiplication between matrix \\(\\mathbf{A} \\in \\mathbb{R}^{n \\times m}\\) and matrix \\(\\mathbf{C} \\in \\mathbb{R}^{m \\times p}\\) requires \\(mnp\\) multiplications and \\(mnp\\) additions, resulting in \\(2mnp\\) floating-point operations. Given batched matrices \\(\\mathbf{A} \\in \\mathbb{R}^{B \\times n \\times m}\\) and \\(\\mathbf{C} \\in \\mathbb{R}^{B \\times m \\times p}\\), \\(B\\) times matrix multiplications will be performed. It is evident that the batch size is a linear multiplier. Thus, we first omit \\(B\\) to calculate the operations of dealing with one univariate series, and then we will reintroduce it to analyze channel independence." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.681, + 0.827, + 0.753 + ], + "angle": 0, + "content": "The computational cost of Transformers can be primarily categorized into two types: (1) multi-head attention calculation and (2) linear transformations. In contrast, the operations of layer normalization, residual connection, activation functions, and position embedding with the complexity of \\( \\mathcal{O}(TD) \\) are less significant. Therefore, we derive the computational complexity mainly with respect to the above two types by delving into the forwarding process of one univariate series." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.766, + 0.825, + 0.797 + ], + "angle": 0, + "content": "Patch Embedding The tokenized time series \\(\\{\\mathbf{x}_i\\} \\in \\mathbb{R}^{T\\times P}\\) is mapped into the embedding space through the patch-wise embedding \\(\\mathbf{W}_e\\in \\mathbb{R}^{D\\times P}\\), resulting in \\(2PDT\\) operations." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.81, + 0.829, + 0.927 + ], + "angle": 0, + "content": "Self-Attention The calculation of self-attention begins with the computation of query, key and value by multiplying the patch embeddings with matrices \\(\\mathbf{W}_q\\), \\(\\mathbf{W}_k\\), \\(\\mathbf{W}_v \\in \\mathbb{R}^{D \\times d_k}\\) respectively in \\(H\\) heads, which incurs a computational cost of \\(6HDd_kT = 6D^2T\\) and yields \\(\\mathbf{Q}\\), \\(\\mathbf{K}\\), \\(\\mathbf{V} \\in \\mathbb{R}^{H \\times T \\times d_k}\\). Next, the dot product \\(\\mathbf{Q}\\mathbf{K}^\\top \\in \\mathbb{R}^{H \\times T \\times T}\\) is conducted in each head, leading to \\(2Hd_kT^2 = 2DT^2\\) operations. Following this, the Pre-Softmax map is divided by \\(\\sqrt{d_k}\\) and processed through Softmax, which includes exponentiation, summation, and normalization of each element, resulting in \\(4HT^2\\) operations. The subsequent multiplication with \\(\\mathbf{V}\\) incurs \\(2Hd_kT^2 = 2DT^2\\) operations. 
Finally, multiple heads are concatenated and multiplied by \\(\\mathbf{W}_o \\in \\mathbb{R}^{D \\times D}\\), contributing \\(2D^2T\\) operations." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.961 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.134 + ], + "angle": 0, + "content": "Feed-Forward Network It first projects the token representations into the dimension \\( D_{\\mathrm{ff}} \\) and subsequently projects them back to the dimension \\( D \\), resulting in a total of \\( 4\\alpha D^2 T \\) operations." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.152, + 0.827, + 0.209 + ], + "angle": 0, + "content": "Patch Projection For encoder-only models, all token representations are flattened and mapped directly to \\(P\\) time points by \\(\\mathbf{W}_d\\in \\mathbb{R}^{TD\\times P}\\). In contrast, the token-wise projector \\(\\mathbf{W}_d\\in \\mathbb{R}^{D\\times P}\\) in decoder-only models independently maps each token to the predicted next token. In both cases, the number of operations is \\(2PDT\\), but the token-wise projector results in a smaller parameter count." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.215, + 0.828, + 0.287 + ], + "angle": 0, + "content": "The forward-pass operations of an \\( L \\)-layer Transformer sum to \\( 4PDT + 4L(D + H)T^2 + (8 + 4\\alpha)LD^2 T \\). Considering that the majority of operations in Transformers are binary operations (e.g., matrix multiplications), the gradients for both matrices are computed separately. As a result, the number of operations in backpropagation is twice that of the forward pass. Therefore, the total operations of training a Transformer on a univariate series consisting of \\( T \\) patches, each of length \\( P \\), is derived as:" + }, + { + "type": "equation", + "bbox": [ + 0.304, + 0.294, + 0.692, + 0.312 + ], + "angle": 0, + "content": "\\[\nf(T) = 12PDT + 12L(D + H)T^{2} + (24 + 12\\alpha)LD^{2}T.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.321, + 0.826, + 0.35 + ], + "angle": 0, + "content": "Plugging in typical hyperparameters of current time-series Transformers and forecasting benchmarks, \\( D = 512 \\), \\( H = 8 \\), \\( L = 4 \\), \\( \\alpha = 4 \\), \\( T = 7 \\), and \\( P = 96 \\), we obtain:" + }, + { + "type": "equation", + "bbox": [ + 0.311, + 0.359, + 0.686, + 0.376 + ], + "angle": 0, + "content": "\\[\nf(T) = 24960T^{2} + 76087296T \\propto 3.28 \\times 10^{-4} T^{2} + T.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.386, + 0.827, + 0.43 + ], + "angle": 0, + "content": "Due to the prevalence of short contexts in the time series field, where \\( T \\ll D \\) leads to a significant coefficient on the \\( \\mathcal{O}(T) \\) term, we find that the primary computational burden of time-series Transformers lies in the linear transformations with \\( \\mathcal{O}(T) \\) cost, rather than in multi-head self-attention with \\( \\mathcal{O}(T^2) \\) complexity." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.435, + 0.827, + 0.464 + ], + "angle": 0, + "content": "For multivariate series with \\(N\\) variables, FLOPs is influenced by the handling of multivariate data. 
When adopting channel independence (Timer and PatchTST), \\(N\\) can be regarded as the batch size \\(B\\):" + }, + { + "type": "equation", + "bbox": [ + 0.275, + 0.472, + 0.826, + 0.49 + ], + "angle": 0, + "content": "\\[\nNf(T) = 12PDNT + 12L(D + H)NT^{2} + (24 + 12\\alpha)LD^{2}NT. \\tag{9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.499, + 0.825, + 0.528 + ], + "angle": 0, + "content": "For models that capture fine-grained intra- and inter-series dependencies in multivariate series (Timer-XL and UniTST), \\(N\\) is reflected in the enlarged number of tokens:" + }, + { + "type": "equation", + "bbox": [ + 0.271, + 0.537, + 0.826, + 0.554 + ], + "angle": 0, + "content": "\\[\nf(NT) = 12PDNT + 12L(D + H)N^{2}T^{2} + (24 + 12\\alpha)LD^{2}NT. \\tag{10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.563, + 0.826, + 0.62 + ], + "angle": 0, + "content": "Notably, FLOPs is not entirely equivalent to actual runtime. While FlashAttention increases the overall FLOPs due to its recomputation process, it reduces the number of memory reads and writes. Given that computation is significantly faster than memory access on GPUs, using FlashAttention can actually lead to further improvements in runtime performance." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.639, + 0.362, + 0.654 + ], + "angle": 0, + "content": "A.3 PARAMETER COUNT" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.667, + 0.827, + 0.683 + ], + "angle": 0, + "content": "From the above analysis, we observe that the parameter count of Transformers includes the following:" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.699, + 0.597, + 0.715 + ], + "angle": 0, + "content": "Patch Embedding \\(\\mathbf{W}_e\\in \\mathbb{R}^{D\\times P}\\) to obtain patch embeddings." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.731, + 0.743, + 0.748 + ], + "angle": 0, + "content": "Self-Attention \\(\\mathbf{W}_q, \\mathbf{W}_k, \\mathbf{W}_v \\in \\mathbb{R}^{D \\times d_k}\\) of \\(H\\) heads and \\(\\mathbf{W}_o \\in \\mathbb{R}^{D \\times D}\\) for all heads." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.766, + 0.68, + 0.782 + ], + "angle": 0, + "content": "Feed-Forward Network \\(\\mathbf{W}_{\\mathrm{ffn1}}, \\mathbf{W}_{\\mathrm{ffn2}} \\in \\mathbb{R}^{D \\times D_{\\mathrm{ff}}}\\) of the feed-forward network." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.799, + 0.827, + 0.83 + ], + "angle": 0, + "content": "Layer Normalization It contains the weight \\(\\mathbf{W} \\in \\mathbb{R}^D\\) and the bias \\(\\mathbf{b} \\in \\mathbb{R}^D\\). Every Transformer block includes two normalizations, after multi-head attention and the feed-forward network respectively." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.845, + 0.795, + 0.862 + ], + "angle": 0, + "content": "Patch Projection \\(\\mathbf{W}_d\\in \\mathbb{R}^{TD\\times P}\\) in the flatten head and \\(\\mathbf{W}_d\\in \\mathbb{R}^{D\\times P}\\) in token-wise projection."
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.868, + 0.738, + 0.883 + ], + "angle": 0, + "content": "In sum, the total count of parameters in time-series Transformers can be expressed as:" + }, + { + "type": "equation", + "bbox": [ + 0.184, + 0.894, + 0.826, + 0.927 + ], + "angle": 0, + "content": "\\[\n\\text{Parameter Count} = \\left\\{ \\begin{array}{ll} (4 + 2\\alpha)LD^{2} + 4LD + (1 + T)PD, & \\text{using flatten head}, \\\\ (4 + 2\\alpha)LD^{2} + 4LD + 2PD, & \\text{using token-wise projection}. \\end{array} \\right. \\tag{11}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.105, + 0.371, + 0.119 + ], + "angle": 0, + "content": "A.4 MEMORY FOOTPRINT" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.13, + 0.825, + 0.16 + ], + "angle": 0, + "content": "The memory footprint during training can be primarily categorized into three parts: activation values stored for backpropagation, model parameters, and optimizer parameters." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.166, + 0.826, + 0.208 + ], + "angle": 0, + "content": "Regardless of other precision types (e.g., FP16), model parameters and gradients are typically stored as 32-bit floating-point numbers, with each parameter occupying 4 bytes of memory. For time-series Transformers, the memory footprint of activation values is given as follows:" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.222, + 0.827, + 0.239 + ], + "angle": 0, + "content": "Patch Embedding Gradient computation for \\(\\mathbf{W}_e\\) preserves its input \\(\\{\\mathbf{x}_i\\} \\in \\mathbb{R}^{T\\times P}\\) of \\(4PT\\) bytes." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.252, + 0.828, + 0.353 + ], + "angle": 0, + "content": "Self-Attention Gradient calculation for \\(\\mathbf{W}_q, \\mathbf{W}_k, \\mathbf{W}_v \\in \\mathbb{R}^{D \\times d_k}\\) requires their inputs \\(\\mathbf{H} \\in \\mathbb{R}^{T \\times D}\\), amounting to a total of \\(4DT\\) bytes. The dot product for the attention map also needs to store \\(\\mathbf{Q}, \\mathbf{K}, \\mathbf{V} \\in \\mathbb{R}^{H \\times T \\times d_k}\\), which collectively require a total of \\(12DT\\) bytes of memory. Gradient computation of \\(\\mathbf{W}_o \\in \\mathbb{R}^{D \\times D}\\) necessitates the concatenated multi-head attention representations \\(\\mathbf{H} \\in \\mathbb{R}^{T \\times D}\\), which occupy \\(4DT\\) bytes. If memory-efficient attention mechanisms like FlashAttention (Dao et al., 2022) are not applied, the outcome \\(\\mathbf{Q}\\mathbf{K}^\\top\\) will be stored and occupy \\(4HT^2\\) bytes. Instead, if FlashAttention is adopted, this storage overhead can be avoided." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.367, + 0.825, + 0.425 + ], + "angle": 0, + "content": "Feed-Forward Network The ReLU activation function is typically employed in this module. The input \\(\\mathbf{H} \\in \\mathbb{R}^{T \\times D}\\) must be retained, requiring a total of \\(4DT\\) bytes. Additionally, the product \\(\\mathbf{W}_{\\mathrm{ffn1}}\\mathbf{H}\\) also needs to be stored, amounting to \\(4D_{\\mathrm{ff}}T\\) bytes. 
Similarly, the output activations of ReLU, which serve as the input for subsequent linear transformations, necessitate another \\(4D_{\\mathrm{ff}}T\\) bytes." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.439, + 0.825, + 0.469 + ], + "angle": 0, + "content": "Layer Normalization Each Transformer block encompasses two layer normalizations, with each normalization retaining its input, resulting in a memory requirement of \\(8DT\\) bytes." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.482, + 0.825, + 0.513 + ], + "angle": 0, + "content": "Patch Projection To perform backpropagation for \\( W_{d} \\in \\mathbb{R}^{D \\times P} \\), it is necessary to retain its input \\( \\mathbf{H} \\in \\mathbb{R}^{T \\times D} \\), resulting in a total memory requirement of \\( 4DT \\) bytes." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.518, + 0.827, + 0.534 + ], + "angle": 0, + "content": "The formula for the total activation values of the entire model occupying GPU memory is as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.188, + 0.541, + 0.826, + 0.575 + ], + "angle": 0, + "content": "\\[\n\\text{Memory Footprint} = \\left\\{ \\begin{array}{ll} 4(D + P)T + (32 + 8\\alpha)LDT + 4LHT^{2}, & \\text{w/o FlashAttention}, \\\\ 4(D + P)T + (32 + 8\\alpha)LDT, & \\text{with FlashAttention}. \\end{array} \\right. \\tag{12}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.583, + 0.826, + 0.681 + ], + "angle": 0, + "content": "The derived occupancy of activation values increases proportionally with the batch size \\( B \\). For multivariate series under channel independence, \\( N \\) can be used as such a multiplier. For channel-dependence models, we can substitute \\( T \\) with \\( NT \\) as before. The total memory footprint is the sum of activation values and the parameters of the model and optimizer, the latter being proportional to the parameter count derived in Equation 11. Due to the limited model size in the time series field, the memory consumption of parameters is minimal and can be considered negligible in practice. Therefore, the overall memory footprint is predominantly determined by the occupied memory of activation values." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.701, + 0.421, + 0.716 + ], + "angle": 0, + "content": "B EXPERIMENTAL DETAILS" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.732, + 0.292, + 0.746 + ], + "angle": 0, + "content": "B.1 DATASETS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.758, + 0.828, + 0.926 + ], + "angle": 0, + "content": "We conduct experiments on well-acknowledged benchmarks to evaluate the performance of the proposed Timer-XL: (1) ETT (Zhou et al., 2021) contains 7 factors of electricity transformers from July 2016 to July 2018, recorded every hour or 15 minutes. (2) Weather (Wu et al., 2021) includes 21 meteorological factors collected every 10 minutes from the Max Planck Biogeochemistry Institute Weather Station in 2020. (3) ECL (Wu et al., 2021) records the hourly electricity consumption data of 321 clients. (4) Traffic (Wu et al., 2021) collects hourly road occupancy rates measured by 862 sensors on the San Francisco Bay area highways from January 2015 to December 2016. (5) Solar-Energy (Lai et al., 2018) records the solar power production of 137 PV plants in 2006, sampled every 10 minutes. 
(6) PEMS (Liu et al., 2022a) contains records from the public traffic network in California collected in 5-minute time windows. (7) EPF (Lago et al., 2021) includes five subsets that span six years. Each contains the electricity price as the endogenous variable to be predicted and two exogenous variables of the day-ahead electricity markets. (8) GTWSF (Wu et al.," + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.509, + 0.961 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.825, + 0.176 + ], + "angle": 0, + "content": "2023) is a dataset collected from the National Centers for Environmental Information (NCEI). This large-scale collection contains hourly averaged wind speed and temperature data from 3850 stations with different geographical scales and densities, spanning from 2019 to 2021. (9) UTSD (Liu et al., 2024c) is a multi-domain time series dataset, which includes seven domains with a hierarchy of four volumes. We adopt the largest volume that encompasses 1 billion time points for pre-training." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.181, + 0.827, + 0.307 + ], + "angle": 0, + "content": "We further establish challenging forecasting benchmarks based on the ECMWF Reanalysis v5 (ERA5) dataset (Hersbach et al., 2020) to prevent potential overfitting and performance saturation of deep forecasters on existing benchmarks. Concretely, ERA5 is the fifth-generation ECMWF atmospheric reanalysis of the global climate covering the period from January 1940 to the present, which provides hourly estimates of a large number of atmospheric, land, and oceanic climate variables, and includes information about uncertainties for all variables at reduced spatial and temporal resolutions. Given its rich patterns of temporal dynamics and variable correlations, we can establish practical benchmarks to thoroughly evaluate performance on univariate and multivariate forecasting, as well as adopt it for large-scale pre-training to develop domain-specific large time series models." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.313, + 0.437, + 0.327 + ], + "angle": 0, + "content": "Our datasets are constructed as follows:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.341, + 0.825, + 0.425 + ], + "angle": 0, + "content": "- ERA5-S: To establish a realistic univariate forecasting benchmark, we start from the basic principle of forecastability and make predictions on sufficient lookback lengths. Instead of the short training spans of previous benchmarks (generally no more than 2 years), we curate a three-hour-frequency dataset spanning 40 years (January 1979 to December 2018) from ERA5, encompassing 116880 time points. To prevent overfitting on a single time series, we select worldwide stations to form seven subsets." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.43, + 0.827, + 0.514 + ], + "angle": 0, + "content": "- ERA5-MS: Each univariate series of ERA5-S provides partial observations governed by the spatio-temporal global weather system. Since discovering the global spatio-temporal correlations presents a fundamental challenge in meteorology, we convert ERA5-S into ERA5-MS by using the seven subsets as a challenging multivariate forecasting benchmark. 
Based on the average results in Tables 2 and 5, we can validate the existence of multi-station correlations among the selected stations, which enhance the average prediction accuracy." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.518, + 0.827, + 0.602 + ], + "angle": 0, + "content": "- ERA5-Large: To explore a purely data-driven approach to building domain-specific large time series models, we further expand the number of stations to form ERA5-Large, a dataset that evenly covers 4920 meteorological stations worldwide and spans 40 years. We establish the dataset for pre-training, which is expected to generalize across time (train on past observations and generalize to the future) and across stations (train on partial stations and generalize to other unseen stations). The total number of time points is around half a billion." + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.341, + 0.827, + 0.602 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.614, + 0.827, + 0.658 + ], + "angle": 0, + "content": "We follow the same data processing and train-validation-test split protocol used in TimesNet (Wu et al., 2022), where the train, validation, and test datasets are divided according to chronological order to prevent data leakage. Detailed dataset descriptions and prediction settings are provided in Table 9." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.674, + 0.356, + 0.687 + ], + "angle": 0, + "content": "B.2 BASELINE MODELS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.7, + 0.829, + 0.854 + ], + "angle": 0, + "content": "We aim to present Timer-XL as a foundation model for unified time series forecasting. We thoroughly include well-acknowledged and advanced models for each forecasting task. For univariate time series forecasting, we compare Timer-XL with PatchTST (Nie et al., 2022) under channel independence. For multivariate time series prediction, we report official results from Liu et al. (2023; 2024b); Ding et al. (2024), including UniRepLKNet (2024), iTransformer (2023), Corrformer (2023), DLinear (2023), TimesNet (2022), Non-stationary Transformer (2022b), Pyraformer (2021), Autoformer (2021), StemGNN (2020), DeepAR (2020), and N-BEATS (2019). We further reproduce the performance of related Transformers, Timer (2024c) and UniTST (2024a), based on their official repositories. For covariate-informed time series forecasting, we report the official results of TimeXer (2024b). For zero-shot forecasting, we follow Liu et al. (2024c) and predict future length-96 windows on well-acknowledged datasets. In total, more than 20 baselines are included for a complete comparison." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.87, + 0.411, + 0.884 + ], + "angle": 0, + "content": "B.3 IMPLEMENTATION DETAILS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.828, + 0.926 + ], + "angle": 0, + "content": "All the experiments are implemented in PyTorch (Paszke et al., 2019) on NVIDIA A100 Tensor Core GPUs. We employ the Adam optimizer (Kingma & Ba, 2014) and MSE loss for model optimization." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.509, + 0.961 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.479, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.145, + 0.825, + 0.189 + ], + "angle": 0, + "content": "Table 9: Dataset descriptions. Dim. 
denotes the number of variables (For univariate forecasting, we adopt channel independence (Nie et al., 2022) or train separate models on each variable). Dataset Length denotes the number of time points in the (train, validation, test) splits." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.193, + 0.825, + 0.593 + ], + "angle": 0, + "content": "
Tasks | Dataset | Dim. | Training Setting | Dataset Length | Information (Frequency)
Univariate Forecasting | ETTh1 | 7 | {24, 96, 168, 672, 2880}→96 | (8545, 2881, 2881) | Electricity (Hourly)
 | ECL | 321 | {24, 96, 168, 672, 2880, 8832}→96 | (18317, 2633, 5261) | Electricity (Hourly)
 | Traffic | 862 | {24, 96, 168, 672, 2880, 8832}→96 | (12185, 1757, 3509) | Transportation (Hourly)
 | PEMS03 | 358 | {96, 288, 1152, 2016, 8064}→96 | (15617, 5135, 5135) | Transportation (5 mins)
 | ERA5-S | 7 | 3072→96 | (81816, 11688, 23376) | Climate (3 Hours)
Multivariate Forecasting | ETTh1, ETTh2 | 7 | {96, 672}→{96, 192, 336, 720} | (8545, 2881, 2881) | Electricity (Hourly)
 | ETTm1, ETTm2 | 7 | {96, 672}→{96, 192, 336, 720} | (34465, 11521, 11521) | Electricity (15 mins)
 | ECL | 321 | {96, 672}→{96, 192, 336, 720} | (18317, 2633, 5261) | Electricity (Hourly)
 | Traffic | 862 | {96, 672}→{96, 192, 336, 720} | (12185, 1757, 3509) | Transportation (Hourly)
 | Weather | 21 | {96, 672}→{96, 192, 336, 720} | (36792, 5271, 10540) | Climate (10 mins)
 | Solar-Energy | 137 | {96, 672}→{96, 192, 336, 720} | (36601, 5161, 10417) | Energy (10 mins)
 | ERA5-MS | 7 | 3072→96 | (81816, 11688, 23376) | Climate (3 Hours)
 | GTWSF | 3850 | 48→24 | (12280, 1755, 3509) | Wu et al. (2023)
Forecasting with Covariates | NP | 1+2 | 168→24 | (36500, 5219, 10460) | Electricity (Hourly)
 | PJM | 1+2 | 168→24 | (36500, 5219, 10460) | Electricity (Hourly)
 | BE | 1+2 | 168→24 | (36500, 5219, 10460) | Electricity (Hourly)
 | FR | 1+2 | 168→24 | (36500, 5219, 10460) | Electricity (Hourly)
 | DE | 1+2 | 168→24 | (36500, 5219, 10460) | Electricity (Hourly)
Pre-training | ERA5-Large | 4920 | 3072→96 | (81816, 11688, 23376) | Climate (3 Hours)
 | UTSD | - | 2880→96 | (868778970, 96530996, -) | Liu et al. (2024c)
 | LOTSA | - | 2880→96 | (231082956489, -, -) | Woo et al. (2024)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.673, + 0.825, + 0.701 + ], + "angle": 0, + "content": "Table 10: Performance robustness of Timer-XL. The prediction settings and results keep the same with Table 12. The standard deviation is obtained from three random seeds." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.706, + 0.825, + 0.889 + ], + "angle": 0, + "content": "
Dataset | ECL | ETTh1 | Traffic
Horizon | MSE | MAE | MSE | MAE | MSE | MAE
96 | 0.127±0.001 | 0.219±0.001 | 0.364±0.002 | 0.397±0.001 | 0.340±0.002 | 0.238±0.001
192 | 0.145±0.001 | 0.236±0.001 | 0.405±0.002 | 0.424±0.001 | 0.360±0.001 | 0.247±0.001
336 | 0.159±0.001 | 0.252±0.001 | 0.427±0.003 | 0.439±0.002 | 0.377±0.002 | 0.256±0.002
720 | 0.187±0.003 | 0.277±0.003 | 0.439±0.002 | 0.459±0.004 | 0.418±0.003 | 0.279±0.002
Dataset | Solar-Energy | Weather | ERA5-MS
Horizon | MSE | MAE | MSE | MAE | MSE | MAE
96 | 0.162±0.003 | 0.221±0.002 | 0.157±0.002 | 0.205±0.001 | 0.164±0.001 | 0.307±0.000
192 | 0.187±0.003 | 0.239±0.002 | 0.206±0.003 | 0.250±0.002
336 | 0.205±0.003 | 0.255±0.002 | 0.259±0.003 | 0.291±0.003
720 | 0.238±0.003 | 0.279±0.003 | 0.337±0.002 | 0.344±0.002
" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.231 + ], + "angle": 0, + "content": "We adopt channel independence from Nie et al. (2022) in univariate time series forecasting. Based on the prevalence of patch-level tokenization in the time series field, we reproduce typical Transformers: PatchTST (2022), Timer (2024c), and UniTST (2024a) based on their official repositories, and keep their model hyperparameters and training configurations the same to evaluate the inherent capability of base models. The results of other baselines are based on the benchmark provided by Liu et al. (2023; 2024b); Ding et al. (2024); Wang et al. (2024b), which is fairly built on the configurations provided by their original paper. Detailed experimental configurations are provided in Table 11. We also report the standard deviations under three runs with different random seeds in Table 10, which exhibits that the performance of Timer-XL is stable." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.237, + 0.828, + 0.294 + ], + "angle": 0, + "content": "For the metrics, we adopt the symmetric mean absolute percentage error (SMAPE), a metric that is independent of the numerical range, to evaluate one-for-all generalization performance on ERA5-Large. For other experiments, we adopt the root mean square error (MSE) and mean absolute error (MAE) that follows previous work. These metrics can be calculated as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.22, + 0.31, + 0.778, + 0.353 + ], + "angle": 0, + "content": "\\[\n\\mathrm {S M A P E} = \\frac {2 0 0}{T} \\sum_ {i = 1} ^ {T} \\frac {| \\mathbf {X} _ {i} - \\widehat {\\mathbf {X}} _ {i} |}{| \\mathbf {X} _ {i} | + | \\widehat {\\mathbf {X}} _ {i} |}, \\mathrm {M S E} = \\sum_ {i = 1} ^ {T} | \\mathbf {X} _ {i} - \\widehat {\\mathbf {X}} _ {i} | ^ {2}, \\mathrm {M A E} = \\sum_ {i = 1} ^ {T} | \\mathbf {X} _ {i} - \\widehat {\\mathbf {X}} _ {i} |.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.371, + 0.825, + 0.403 + ], + "angle": 0, + "content": "Here \\(\\mathbf{X} \\in \\mathbb{R}^T\\) is a univariate time series and \\(\\widehat{\\mathbf{X}}\\) is the corresponding prediction. For multivariate time series, we further calculate the mean metric in the variable dimension." + }, + { + "type": "table_caption", + "bbox": [ + 0.172, + 0.422, + 0.827, + 0.452 + ], + "angle": 0, + "content": "Table 11: Experimental configurations of Timer-XL and other baseline Transformers. All the experiments adopt the ADAM (2014) optimizer with the default hyperparameter \\((\\beta_{1},\\beta_{2}) = (0.9,0.999)\\)." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.454, + 0.825, + 0.886 + ], + "angle": 0, + "content": "
Experiment | Model | Dataset | Configuration | Training Process
 | | | L | D | d_k | H | P | LR | Loss | Batch Size | Epochs
Univariate Forecasting | Timer-XL | ECL | 3 | 512 | 64 | 8 | 96 | 0.0005 | MSE | 2048 | 10
 | | Traffic | 3 | 512 | 64 | 8 | 96 | 0.001 | MSE | 2048 | 10
 | PatchTST | ETTh1 | 1 | 512 | 64 | 8 | 96 | 0.0005 | MSE | 256 | 10
 | | PEMS03 | 3 | 512 | 64 | 8 | 96 | 0.0005 | MSE | 2048 | 10
 | | ERA5-S | 1 | 512 | 64 | 8 | 96 | 0.0005 | MSE | 2048 | 10
Multivariate Forecasting | Timer-XL | Global Temp. | 3 | 1024 | 128 | 8 | 24 | 0.0001 | MSE | 8 | 10
 | | Global Wind | 3 | 1024 | 128 | 8 | 24 | 0.0001 | MSE | 8 | 10
 | | ECL | 5 | 512 | 64 | 8 | 96 | 0.0005 | MSE | 4 | 10
 | UniTST | Traffic | 4 | 512 | 64 | 8 | 96 | 0.0005 | MSE | 4 | 10
 | Timer | ETTh1 | 1 | 1024 | 128 | 8 | 96 | 0.0001 | MSE | 32 | 10
 | PatchTST | Weather | 4 | 512 | 64 | 8 | 96 | 0.0005 | MSE | 32 | 10
 | | Solar. | 6 | 512 | 64 | 8 | 96 | 0.0001 | MSE | 16 | 10
 | | ERA5-MS | 3 | 512 | 64 | 8 | 96 | 0.0001 | MSE | 256 | 10
Forecasting with Covariates | Timer-XL | NP | 3 | 512 | 64 | 8 | 24 | 0.0001 | MSE | 4 | 10
 | TimeXer | PJM | 2 | 512 | 64 | 8 | 24 | 0.0001 | MSE | 16 | 10
 | Timer | BE | 2 | 512 | 64 | 8 | 24 | 0.0001 | MSE | 16 | 10
 | PatchTST | FR | 2 | 512 | 64 | 8 | 24 | 0.0001 | MSE | 16 | 10
 | | DE | 2 | 512 | 64 | 8 | 24 | 0.0001 | MSE | 16 | 10
Pre-training | Timer-XL | ERA5-Large | 4 | 512 | 64 | 8 | 96 | 0.0001 | MSE | 40960 | 10
 | PatchTST | | 4 | 512 | 64 | 8 | 96 | 0.0001 | MSE | 40960 | 10
 | Timer-XL | UTSD | 8 | 1024 | 128 | 8 | 96 | 0.00005 | MSE | 16384 | 10
 | Timer (Liu et al., 2024c) | | 8 | 1024 | 128 | 8 | 96 | 0.00005 | MSE | 16384 | 10
 | Timer-XL | | 8 | 1024 | 128 | 8 | 96 | 0.001 | MSE | 32768 | -
 | Moirai-Small | LOTSA | 6 | 384 | 64 | 6 | -
 | Moirai-Base (Woo et al., 2024) | | 12 | 768 | 64 | 12 | -
 | Moirai-Large | | 24 | 1024 | 64 | 16 | -
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.188, + 0.887, + 0.825, + 0.91 + ], + "angle": 0, + "content": "* \\( L \\) is the layer number of Transformers, \\( D \\) is the dimension of token embedding (the hidden dimension of FFN is set as \\( 4D \\)), \\( d_k \\) is the dimension of query, key, and value, \\( H \\) is the multi-head number, \\( P \\) is the patch size, and LR is the initial learning rate." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.103, + 0.485, + 0.119 + ], + "angle": 0, + "content": "C HYPERPARAMETER SENSITIVITY" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.135, + 0.827, + 0.289 + ], + "angle": 0, + "content": "We evaluate the hyperparameter sensitivity of Timer-XL on the ERA5-MS benchmark, as illustrated in Figure 8, concerning the following factors: the number of layers \\( L \\), the patch size \\( P \\), and the lookback length during inference. Our findings indicate that performance of Timer-XL generally improves with increases with \\( L \\), suggesting that Timer-XL is a scalable deep forecaster. Furthermore, our analysis of the influence of \\( P \\) reveals that the optimal patch size is generally close to the predicted length, since it avoids multi-step error accumulations. Toward better long-term forecasting performance, it leaves a future improvement to adopt different patch sizes of input and output tokens. Finally, we investigate the impact of input length during inference. We discover that the optimal lookback length of during is not necessarily the length during training. Given that decoder-only Transformers can accommodate inference inputs shorter than those used during training, this finding is noteworthy and indicates the potential to improve the performance." + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.302, + 0.382, + 0.425 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.393, + 0.302, + 0.603, + 0.425 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.609, + 0.302, + 0.822, + 0.426 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.429, + 0.825, + 0.457 + ], + "angle": 0, + "content": "Figure 8: Hyperparameter sensitivity of Timer-XL (input-3072-pred-96 on ERA5-MS), including the number of Transformer blocks \\( L \\), the patch size \\( P \\), and the input lookback length during inference." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.479, + 0.318, + 0.494 + ], + "angle": 0, + "content": "D SHOWCASES" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.511, + 0.825, + 0.568 + ], + "angle": 0, + "content": "To facilitate a clear comparison among various models, we present additional prediction visualization from diverse datasets in Figure 9 and 10. Showcases are randomly selected from Timer-XL and the following time-series Transformers: PatchTST (2022), Timer (2024c), and UniTST (2024a). Among them, Timer-XL presents the most accurate predictions." 
+ }, + { + "type": "image", + "bbox": [ + 0.174, + 0.582, + 0.348, + 0.682 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.351, + 0.582, + 0.505, + 0.681 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.582, + 0.664, + 0.681 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.668, + 0.582, + 0.822, + 0.681 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.176, + 0.687, + 0.347, + 0.777 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.351, + 0.688, + 0.505, + 0.777 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.688, + 0.664, + 0.777 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.669, + 0.688, + 0.822, + 0.777 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.788, + 0.825, + 0.816 + ], + "angle": 0, + "content": "Figure 9: Visualization results on univariate time series datasets. We adopt the forecasting setting of 2880-pred-96 on ECL, ETTh1 and Traffic, and 2016-pred-96 on PEMS." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.838, + 0.436, + 0.853 + ], + "angle": 0, + "content": "E SUPPLEMENTARY RESULTS" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.87, + 0.553, + 0.884 + ], + "angle": 0, + "content": "E.1 FULL RESULT OF MULTIVARIATE FORECASTING" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.825, + 0.926 + ], + "angle": 0, + "content": "Table 12 provides the complete results of the one-for-all multivariate forecasting benchmark across well-acknowledged datasets. We evaluate Timer-XL and baseline models by rolling forecasting: each" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "image", + "bbox": [ + 0.175, + 0.103, + 0.825, + 0.301 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.309, + 0.825, + 0.339 + ], + "angle": 0, + "content": "Figure 10: Visualization results on multivariate time series datasets. We adopt the forecasting setting of 672-pred-96 on ETTh1 (7 Variables) and Traffic (862 Variables)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.357, + 0.827, + 0.387 + ], + "angle": 0, + "content": "model is trained with input length 672 and output length 96, and the predicted values are integrated as part of the input in the next iteration until reaching the desired forecast length in \\{96, 192, 336, 720\\}." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.392, + 0.828, + 0.462 + ], + "angle": 0, + "content": "We highlight that this benchmark evaluates the fundamental versatility of deep forecasters, aiming to eliminate the burden of training and storing a separate model for each setting, toward better practice for real-world forecasting requirements. On this benchmark, time-series Transformers significantly stand out from other baseline models, and our proposed Timer-XL achieves state-of-the-art performance, making it a strong backbone for a one-for-all forecaster."
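+ }, + { + "type": "text", + "content": "A minimal sketch of this rolling-forecast protocol is given below; the model interface, tensor shapes, and names are illustrative assumptions, not the released implementation." + }, + { + "type": "code", + "content": "
import torch

def rolling_forecast(model, context, horizon, pred_len=96):
    # Autoregressively extend a context of shape (batch, length, variables) by
    # horizon points, feeding each length-pred_len prediction back as input.
    lookback = context.shape[1]
    preds = []
    window = context
    for _ in range(0, horizon, pred_len):
        step = model(window)[:, -pred_len:, :]   # predict the next pred_len points
        preds.append(step)
        window = torch.cat([window, step], dim=1)[:, -lookback:, :]  # slide the lookback
    return torch.cat(preds, dim=1)[:, :horizon, :]
"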
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.48, + 0.53, + 0.495 + ], + "angle": 0, + "content": "E.2 FULL RESULT OF ZERO-SHOT FORECASTING" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.507, + 0.827, + 0.564 + ], + "angle": 0, + "content": "Table 13 provides the full results of zero-shot forecasting on the benchmark from Wu et al. (2022). We build Timer-XL based on the configuration in Table 11, which is pre-trained on the aggregated datasets of UTSD (Liu et al., 2024c) and LOTSA (Woo et al., 2024). The patch size of Timer-XL is set as 96, and we conduct rolling forecast to obtain the desired forecast length in \\{96, 192, 336, 720\\}." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.57, + 0.828, + 0.655 + ], + "angle": 0, + "content": "We evaluate the most advanced large models based on their official model checkpoints, including TimeMoE (Shi et al., 2024), Moirai (Woo et al., 2024), TimesFM (Das et al., 2023), MOMENT (Goswami et al., 2024), and Chronos (Ansari et al., 2024). We conduct zero-shot evaluations on datasets that are not included during the pre-training of the corresponding models. For each evaluated model, we use its maximum input length during inference. The metric (MSE/MAE) is averaged over all predicted windows in the test split." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.672, + 0.49, + 0.685 + ], + "angle": 0, + "content": "E.3 ABLATION STUDY OF TIMEATTENTION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.699, + 0.825, + 0.797 + ], + "angle": 0, + "content": "We conduct evaluations on TimeAttention to validate the effectiveness of position embeddings. As for the variable embedding, the distinction between endogenous and exogenous variables can improve performance. Based on the observation of the learned \\( u > v \\), we find that a token reasonably pays more attention to tokens of the endogenous variable. This provides a prior that masks out minor dependencies and focuses less on exogenous variables. For the temporal dimension, other position embeddings are inferior to RoPE: RoPE applies a rotary (multiplicative) transformation, while the others are additive and thereby more easily confounded with the likewise additive embedding for variables." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.815, + 0.655, + 0.829 + ], + "angle": 0, + "content": "E.4 SUPPLEMENTARY RESULTS OF LONG-CONTEXT FORECASTING" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.841, + 0.826, + 0.926 + ], + "angle": 0, + "content": "Long context is a basic indicator of foundation models, which can support emergent capabilities such as prompting, in-context learning, retrieval-augmented generation, etc. However, the long-context forecasting paradigm receives less attention in the current community, which can be due to the lack of benchmarks. In the meteorological ERA5, it is necessary to support contexts of more than a year to contain a specific cycle (such as El Niño). In Table 15, the performance of Timer-XL and DLinear generally improves with the increased context length. 
By contrast, it reveals the performance" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.479, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "table_caption", + "bbox": [ + 0.172, + 0.195, + 0.825, + 0.224 + ], + "angle": 0, + "content": "Table 12: Full multivariate forecasting results: we conduct rolling forecast with a single model trained on each dataset (lookback length is 672) and accomplish four forecast lengths in \\(\\{96, 192, 336, 720\\}\\)." + }, + { + "type": "table", + "bbox": [ + 0.178, + 0.228, + 0.821, + 0.84 + ], + "angle": 0, + "content": "
ModelsTimer-XL(Ours)Timer(2024c)UniTST(2024a)iTransformer(2023)DLinear(2023)PatchTST(2022)TimesNet(2022)Stationary(2022b)Autoformer(2021)
MetricMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAE
ETTh1960.3640.3970.3710.4040.3790.4150.3870.4180.3690.4000.3730.4030.4520.4630.4520.4780.4670.499
1920.4050.4240.4070.4290.4150.4380.4160.4370.4050.4220.4050.4250.4740.4770.4840.5100.4920.523
3360.4270.4390.4340.4450.4400.4540.4340.4500.4350.4450.4230.4400.4930.4890.5110.5220.5190.531
7200.4390.4590.4610.4660.4820.4820.4470.4730.4930.5080.4450.4710.5600.5340.5710.5430.5890.560
Avg0.4090.4300.4180.4360.4290.4470.4210.4450.4260.4440.4120.4350.4950.4910.5050.5130.5170.528
ETTh2960.2770.3430.2850.3440.3430.3980.3040.3620.3050.3710.2890.3470.3400.3740.3480.4030.3580.397
1920.3480.3910.3650.4000.3760.4200.3720.4070.4120.4390.3600.3930.4020.4140.4080.4480.4350.451
3360.3750.4180.4120.4400.3990.4350.4180.4400.5270.5080.3890.4200.4520.4520.4240.4570.4540.475
7200.4090.4580.4680.4870.4190.4570.4630.4760.8300.6530.3980.4400.4620.4680.4480.4760.4790.492
Avg0.3520.4020.3820.4180.3840.4280.3890.4210.5180.4930.3590.4000.4140.4270.4070.4460.4310.454
ETTm1960.2900.3410.2810.3380.2890.3480.3110.3650.3070.3500.2850.3460.3380.3750.4140.4140.4660.466
1920.3370.3690.3300.3680.3320.3750.3530.3900.3370.3680.3290.3720.3710.3870.5240.4820.5040.496
3360.3740.3920.3670.3930.3650.3970.3870.4110.3660.3870.3630.3940.4100.4110.5410.4970.5740.530
7200.4370.4280.4320.4330.4210.4310.4520.4450.4190.4190.4210.4260.4780.4500.5780.5090.5960.558
Avg0.3590.3820.3520.3830.3520.3880.3760.4030.3570.3810.3490.3850.3990.4060.5140.4750.5350.512
ETTm2960.1750.2570.1750.2570.1710.2600.1830.2720.1670.2630.1720.2590.1870.2670.2370.3060.2550.339
1920.2420.3010.2390.3010.2280.2300.2500.3150.2300.3110.2330.2990.2490.3090.3300.3870.2790.335
3360.2930.3370.2930.3420.2820.3360.3110.3560.2980.3610.2800.3310.3210.3510.4040.4240.3310.374
7200.3760.3900.3920.4070.3800.3980.4170.4190.4320.4460.3570.3820.4970.4030.5250.4860.4130.450
Avg0.2710.3220.2750.3270.2650.3060.2900.3400.2820.3450.2610.3180.3140.3330.3740.4010.3200.374
ECL960.1270.2190.1290.2210.1300.2250.1330.2290.1380.2380.1320.2320.1840.2880.1850.2870.2560.357
1920.1450.2360.1480.2390.1500.2440.1580.2580.1520.2510.1510.2500.1920.2950.2820.3680.2910.376
3360.1590.2520.1640.2560.1660.2620.1680.2620.1670.2680.1710.2720.2000.3030.2890.3770.2900.379
7200.1870.2770.2010.2890.2060.2970.2050.2940.2030.3020.2220.3180.2280.3250.3050.3990.3200.403
Avg0.1550.2460.1610.2510.1630.2570.1640.2580.1650.2650.1690.2680.2010.3030.2650.3580.2890.379
Traffic960.3400.2380.3480.2400.3590.2500.3530.2590.3990.2850.3590.2550.5930.3150.6100.3220.6750.412
1920.3600.2470.3690.2500.3730.2570.3730.2670.4090.2900.3770.2650.5960.3170.6260.3460.6790.423
3360.3770.2560.3880.2600.3860.2650.3860.2750.4220.2970.3930.2760.6000.3190.6330.3520.6880.440
7200.4180.2790.4310.2850.4210.2860.4250.2960.4610.3190.4360.3050.6190.3350.6510.3660.6930.457
Avg0.3740.2550.3840.2590.3850.2650.3840.2740.4230.2980.3910.2750.6020.3220.6300.3470.6840.433
Weather960.1570.2050.1510.2020.1520.2060.1740.2250.1690.2290.1490.2020.1690.2280.1850.2410.3550.409
1920.2060.2500.1960.2450.1980.2490.2270.2680.2110.2680.1940.2450.2220.2690.2860.3250.4210.450
3360.2590.2910.2490.2880.2510.2910.2900.3090.2580.3060.2440.2850.2900.3100.3230.3470.4520.465
7200.3370.3440.3300.3440.3220.3400.3740.3600.3200.3620.3170.3380.3760.3640.4360.4010.5130.496
Avg0.2400.2730.2320.2700.2310.2720.2660.2910.2390.2910.2260.2680.2640.2930.3080.3290.4350.455
Solar-Energy960.1620.2210.2120.2300.1900.2400.1830.2650.1930.2580.1680.2370.1800.2720.1990.2900.2060.296
1920.1870.2390.2320.2460.2230.2640.2050.2830.2140.2740.1890.2570.1990.2860.2430.3070.2540.328
3360.2050.2550.2370.2530.2500.2830.2240.2990.2330.2910.2120.2770.2200.3010.2640.3220.2720.330
7200.2380.2790.2520.2660.2920.3110.2390.3160.2460.3070.2400.3050.2510.3210.3100.3390.3260.347
Avg0.1980.2490.2330.2490.2410.2750.2130.2910.2220.2830.2020.2690.2130.2950.2540.3150.2650.325
\\( 1^{\\text{st}} \\)Count23
" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "table_caption", + "bbox": [ + 0.172, + 0.145, + 0.825, + 0.174 + ], + "angle": 0, + "content": "Table 13: Full results of zero-shot forecasting. A lower MSE or MAE indicates a better prediction. \\(1^{\\mathrm{st}}\\) Count represents the number of wins achieved by a model under all prediction lengths and datasets." + }, + { + "type": "table", + "bbox": [ + 0.177, + 0.179, + 0.823, + 0.582 + ], + "angle": 0, + "content": "
ModelsTimer-XLBase(Ours)Time-MoEBase(2024)Time-MoELarge(2024)Time-MoEUltra(2024)MoiraiSmall(2024)MoiraiBase(2024)MoiraiLarge(2024)TimesFM(2023)MOMENT(2024)ChronosBase(2024)ChronosLarge(2024)
MetricMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAE
ETTm1960.3170.3560.3380.3680.3090.3570.2810.3410.4180.3920.3630.3560.3800.3610.3610.3700.6540.5270.4540.4080.4570.403
1920.3580.3810.3530.3880.3460.3810.3050.3580.4310.4050.3880.3750.4120.3830.4140.4050.6620.5320.5670.4770.5300.450
3360.3860.4010.3810.4130.3730.4080.3690.3950.4330.4120.4160.3920.4360.4000.4450.4290.6720.5370.6620.5250.5770.481
7200.4300.4310.5040.4930.4750.4770.4690.4720.4620.4320.4600.4180.4620.4200.5120.4710.6920.5510.9000.5910.6600.526
Avg0.3730.3920.3940.4150.3760.4050.3560.3910.4360.4100.4060.3850.4220.3910.4330.4180.6700.5360.6450.5000.5550.465
ETTm2960.1890.2770.2010.2910.1970.2860.1980.2880.2140.2880.2050.2730.2110.2740.2020.2700.2600.3350.1990.2740.1970.271
1920.2410.3150.2580.3340.2500.3220.2350.3120.2840.3320.2750.3160.2810.3180.2890.3210.2890.3500.2610.3220.2540.314
3360.2860.3480.3240.3730.3370.3750.2930.3480.3310.3620.3290.3500.3410.3550.3600.3660.3240.3690.3260.3660.3130.353
7200.3750.4020.4880.4640.4800.4610.4270.4280.4020.4080.4370.4110.4850.4280.4620.4300.3940.4090.4550.4390.4160.415
Avg0.2730.3360.3170.3650.3160.3610.2880.3440.3070.3470.3110.3370.3290.3430.3280.3460.3160.3650.3100.3500.2950.338
ETTh1960.3690.3910.3570.3810.3500.3820.3490.3790.4010.4020.3760.3920.3810.3880.4140.4040.6880.5570.4400.3930.4410.390
1920.4050.4130.3840.4040.3880.4120.3950.4130.4350.4210.4120.4130.4340.4150.4650.4340.6880.5600.4920.4260.5020.524
3360.4180.4230.4110.4340.4110.4300.4470.4530.4380.4340.4330.4280.4850.4450.5030.4560.6750.5630.5500.4620.5760.467
7200.4230.4410.4490.4770.4270.4550.4570.4620.4390.4540.4470.4440.6110.5100.5110.4810.6830.5850.8820.5910.8350.583
Avg0.4040.4170.4000.4240.3940.4190.4120.4260.4280.4270.4170.4190.4800.4390.4730.4430.6830.5660.5910.4680.5880.466
ETTh2960.2830.3420.3050.3590.3020.3540.2920.3520.2970.3360.2940.3300.2960.3300.3150.3490.3420.3960.3080.3430.3200.345
1920.3400.3790.3510.3860.3640.3850.3470.3790.3680.3810.3650.3750.3610.3710.3880.3950.3540.4020.3840.3920.4060.399
3360.3660.4000.3910.4180.4170.4250.4060.4190.3700.3930.3760.3900.3900.3900.4220.4270.3560.4070.4290.4300.4920.453
7200.3970.4310.4190.4540.5370.4960.4390.4470.4110.4260.4160.4330.4230.4180.4430.4540.3950.4340.5010.4770.6030.511
Avg0.3470.3880.3660.4040.4050.4150.3710.3990.3610.3840.3620.3820.3670.3770.3920.4060.3610.4090.4050.4100.4550.427
ECL960.1410.237
1920.1590.254
3360.1770.272
7200.2190.308
Avg0.1740.278
Weather960.1710.225
1920.2210.271
3360.2740.311
7200.3560.370
Avg0.2560.294
\\( 1^stCount \\)1510213010700051100120002
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.198, + 0.584, + 0.721, + 0.595 + ], + "angle": 0, + "content": "* Dataset for pre-training is not evaluated on corresponding models, which is denoted by a dash (-)." + }, + { + "type": "table_footnote", + "bbox": [ + 0.199, + 0.595, + 0.76, + 0.606 + ], + "angle": 0, + "content": "* Traffic from (PEMS) is generally used during the pre-training of large models and thus not evaluated here." + }, + { + "type": "table_footnote", + "bbox": [ + 0.199, + 0.606, + 0.642, + 0.618 + ], + "angle": 0, + "content": "* Our model checkpoint is available at https://huggingface.co/thuml/timer-base-84m." + }, + { + "type": "list", + "bbox": [ + 0.198, + 0.584, + 0.76, + 0.618 + ], + "angle": 0, + "content": null + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.705, + 0.825, + 0.748 + ], + "angle": 0, + "content": "Table 14: Embedding ablation in TimeAttention. For the temporal dimension, we compare prevalent relative and absolute position embeddings. As for the variable dimension, we explore the effectiveness of the variable embedding that distinguishes endogenous and exogenous variables." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.753, + 0.833, + 0.888 + ], + "angle": 0, + "content": "
DesignTemporalVariableTrafficWeatherSolar-EnergyERA5-MS
MSEMAEMSEMAEMSEMAEMSEMAE
Timer-XLRoPE (2024)with0.3400.2380.1570.2050.1620.2210.1640.307
ReplaceALiBi (2021)with0.3510.2460.1620.2120.1880.2100.1670.308
Relative (2020)with0.3610.2500.1630.2140.1970.2150.1680.309
Absolute (2017)with0.3810.2700.1590.2070.1710.2040.1650.306
w/oRoPE (2024)w/o0.3610.2540.1710.2170.1810.2210.2350.373
w/ow/o0.3630.2530.1640.2150.1940.2150.1670.309
" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "23" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "image", + "bbox": [ + 0.177, + 0.104, + 0.506, + 0.22 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.519, + 0.113, + 0.828, + 0.217 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.177, + 0.226, + 0.508, + 0.34 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.519, + 0.235, + 0.826, + 0.337 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.221, + 0.35, + 0.774, + 0.365 + ], + "angle": 0, + "content": "Figure 11: Case studies of learned attention in encoder-/decoder-only Transformers." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.381, + 0.825, + 0.452 + ], + "angle": 0, + "content": "degradation of PatchTST. Similar to the observations in Figure 3, the encoder-only architecture produces inferior predictions after thousands of time points, which can be concealed due to the short context adopted in previous benchmarks. Although PatchTST has conducted an initial exploration in the context of hundreds of time points, it inappropriately works in ever-long contexts. Therefore, we believe that context bottlenecks deserve further exploration in this community." + }, + { + "type": "table_caption", + "bbox": [ + 0.172, + 0.474, + 0.825, + 0.491 + ], + "angle": 0, + "content": "Table 15: Performance on ERA5 (pred-1day). Lookback lengths vary from daily to yearly contexts." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.494, + 0.827, + 0.651 + ], + "angle": 0, + "content": "
ModelsTimer-XLPatchTSTDLinear
MetricMSEMAEMSEMAEMSEMAE
Lookback-8 (1 Day)0.08470.21000.08970.21960.09700.2276
Lookback-32 (4 Day)0.07130.19280.07780.20800.08410.2113
Lookback-56 (1 Week)0.06880.18910.07850.20820.08140.2081
Lookback-224 (1 Month)0.06750.18680.07450.20420.07880.2048
Lookback-960 (4 Month)0.06670.18630.11940.26960.07730.2031
Lookback-2944 (1 Year)0.06630.18570.11090.26380.07630.2024
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.671, + 0.825, + 0.742 + ], + "angle": 0, + "content": "Representation Analysis We further delve into long-context modeling from the perspective of learned representations. As shown in Figure 11, the decoder-only model can selectively focus on the previous context while PatchTST wrongly focuses on noisy parts. Since causality is the basis of forecasting, using causal masks leads to coherent token embeddings, while the unmasked attention mechanism may break the causality and prevent the model from telling each tokens." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.756, + 0.827, + 0.827 + ], + "angle": 0, + "content": "Normalization Section 4.1 has discussed instance normalization (Kim et al., 2021). It generally improves the performance of the previous encoder-only Transformers but leads to special problems in decoder-only Transformers (e.g., unmatched statistics in multi-step autoregression). However, it is indicative that Timer-XL without ReVIN can achieve competitive performance on well-acknowledged benchmarks in Table 16, while the performance of PatchTST may heavily rely on this normalization." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.843, + 0.466, + 0.857 + ], + "angle": 0, + "content": "E.5 ILLUSTRATION OF TIMEATTENTION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.868, + 0.827, + 0.926 + ], + "angle": 0, + "content": "Although the formulation to generalize from 1D sequences to multivariate time series is straightforward, Timer-XL is built on a decoder-only Transformer, an underexploited backbone among current time series models. As shown in Figure 12, challenges lie in capturing fine-grained dependencies between all variables in the patch level, while maintaining temporal causality in multiple sequences." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "24" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "table_caption", + "bbox": [ + 0.181, + 0.113, + 0.816, + 0.129 + ], + "angle": 0, + "content": "Table 16: Evaluations (672-pred-96) on the effect of ReVIN (Kim et al., 2021) on Transformers." + }, + { + "type": "table", + "bbox": [ + 0.177, + 0.132, + 0.825, + 0.227 + ], + "angle": 0, + "content": "
ModelsTimer-XL with ReVINTimer-XL w/o ReVINPatchTST with ReVINPatchTST w/o ReVIN
MetricMSE | MAEMSE | MAEMSE | MAEMSE | MAE
ETTh10.364 | 0.3970.370 | 0.4010.370 | 0.3990.421 | 0.448
Weather0.157 | 0.2050.151 | 0.2050.149 | 0.1980.173 | 0.242
ECL0.127 | 0.2190.130 | 0.2250.129 | 0.2220.138 | 0.244
" + }, + { + "type": "image_caption", + "bbox": [ + 0.232, + 0.242, + 0.332, + 0.256 + ], + "angle": 0, + "content": "(a) Univariate" + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.259, + 0.403, + 0.463 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.572, + 0.242, + 0.684, + 0.256 + ], + "angle": 0, + "content": "(b) Multivariate" + }, + { + "type": "image", + "bbox": [ + 0.434, + 0.259, + 0.648, + 0.463 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.666, + 0.26, + 0.826, + 0.464 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.192, + 0.473, + 0.804, + 0.489 + ], + "angle": 0, + "content": "Figure 12: Illustration of TimeAttention for modeling univariate and multivariate time series." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.505, + 0.827, + 0.562 + ], + "angle": 0, + "content": "Technically, we introduce the masking formulation, whose key lies in the grouped causality of flattened 2D sequences. We derive it based on the Kronecker Product, which disentangles the large attention map into formalizable temporal and variable dependencies. It can be naturally extended to covariates or pre-defined variable dependencies, which may inspire a lot of future explorations." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.582, + 0.319, + 0.597 + ], + "angle": 0, + "content": "F LIMITATIONS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.613, + 0.828, + 0.74 + ], + "angle": 0, + "content": "Timer-XL is a unified model for time series forecasting. It can be used for task-specific training or scalable pre-training, handling varying-length and multivariate time series. As an autoregressive model, Timer-XL necessitates iterative generation for long-term forecasting, which may lead to error accumulation and inflexibility in the output length. In the future, we plan to incorporate multi-resolution patches for input and output series. Furthermore, given that Timer-XL explicitly captures fine-grained token dependencies, there remains significant potential to reduce the complexity of TimeAttention, particularly in high-dimensional and lengthy time series. Finally, we will investigate the factors contributing to the stagnation of Transformer performance in extremely long contexts, and seek insights in the time series modality to improve context efficiency." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "25" + } + ] +] \ No newline at end of file diff --git a/2025/Timer-XL_ Long-Context Transformers for Unified Time Series Forecasting/1000abc3-3f82-4c7b-a0aa-1b66e4569e7b_origin.pdf b/2025/Timer-XL_ Long-Context Transformers for Unified Time Series Forecasting/1000abc3-3f82-4c7b-a0aa-1b66e4569e7b_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..42c4ec70b2ee33d675bd7363f2f97c7f23b58c86 --- /dev/null +++ b/2025/Timer-XL_ Long-Context Transformers for Unified Time Series Forecasting/1000abc3-3f82-4c7b-a0aa-1b66e4569e7b_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:72063f7bd029c41ece5f644e1c1a854af41d2402bab1e85bf549f3e5bd178b59 +size 3991391 diff --git a/2025/Timer-XL_ Long-Context Transformers for Unified Time Series Forecasting/full.md b/2025/Timer-XL_ Long-Context Transformers for Unified Time Series Forecasting/full.md new file mode 100644 index 0000000000000000000000000000000000000000..f70c2ad517b3121c85613df8f60e65e3053515b3 --- /dev/null +++ b/2025/Timer-XL_ Long-Context Transformers for Unified Time Series Forecasting/full.md @@ -0,0 +1,582 @@ +# TIMER-XL: LONG-CONTEXT TRANSFORMERS FOR UNIFIED TIME SERIES FORECASTING + +Yong Liu\*, Guo Qin\*, Xiangdong Huang, Jianmin Wang, Mingsheng Long\* +School of Software, BNrist, Tsinghua University, Beijing 100084, China +{liuyong21, qinguo24}@ mails.tsinghua.edu.cn +{huangxdong, jimwang, mingsheng}@tsinghua.edu.cn + +# ABSTRACT + +We present Timer-XL, a causal Transformer for unified time series forecasting. To uniformly predict multidimensional time series, we generalize next token prediction, predominantly adopted for 1D token sequences, to multivariate next token prediction. The paradigm formulates various forecasting tasks as a long-context prediction problem. We opt for decoder-only Transformers that capture causal dependencies from varying-length contexts for unified forecasting, making predictions on non-stationary univariate time series, multivariate series with complicated dynamics and correlations, as well as covariate-informed contexts that include exogenous variables. Technically, we propose a universal TimeAttention to capture fine-grained intra- and inter-series dependencies of flattened time series tokens (patches), which is further enhanced by deft position embedding for temporal causality and variable equivalence. Timer-XL achieves state-of-the-art performance across task-specific forecasting benchmarks through a unified approach. Based on large-scale pre-training, Timer-XL achieves state-of-the-art zero-shot performance, making it a promising architecture for pre-trained time series models. Code is available at this repository: https://github.com/thuml/Timer-XL. + +# 1 INTRODUCTION + +Transformers have been extensively applied to time series forecasting, becoming the backbone of task-specific models (Zhou et al., 2021; Wu et al., 2021) and pre-trained models (Das et al., 2023). While the majority of prior works have focused on long-term forecasting, reliable predictions are made by considering endogenous variations and exogenous correlations in the context (Box, 2013). Besides, the context length of pre-trained Transformers determines the maximum input and output length during inference. 
Therefore, long-context Transformers are more versatile than shorter ones, facilitating long-sequence and high-resolution generation (Yin et al., 2023; Wang et al., 2024a). + +However, existing Transformers in the time series field crucially encounter the context bottleneck. As shown in Figure 1, unlike Transformers for natural language and vision that learn dependencies among thousands to millions of tokens (Kirillov et al., 2023; OpenAI, 2023), time-series Transformers typically operate around limited contexts of up to hundreds of time series tokens (patches) (Nie et al., 2022). For univariate forecasting, a short-context input leads to insufficient learning of global tendencies, struggling to address non-stationarity in real-world time series (Hyndman, 2018). For multivariate forecasting, increasing research has demonstrated the effectiveness of explicitly capturing intra- and inter-channel dependencies (Zhang & Yan, 2022; Liu et al., 2023; 2024a), highlighting the practical urgency of extending the context length to encompass inter-correlated time series. + +Recently, causal Transformers characterized by the decoder-only architecture have become a predominant choice of large language models (Zhao et al., 2023) and garnered increasing attention in the development of large time series models (Rasul et al., 2023; Ansari et al., 2024). Based on contextual flexibility and autoregressive next token prediction, one model can accommodate varying lookback and prediction lengths (Liu et al., 2024b). Therefore, pre-training on longer contexts not only empowers them with the fundamental capability to incorporate more contextual information but + +![](images/0a23332ffe30348e29683222f624e59a9642b12c38b219e0830c57188039601e.jpg) +Figure 1: We compare the context length (measured by token number) of Transformers in different modalities and propose Timer-XL that increases the length to thousands of patch tokens. Given the generality across contexts, Timer-XL is a versatile solution for various forecasting tasks. + +![](images/be99a7a17398a5e28835a3acb6957e447d5b6e901b66ad0a133862b2f5676c6a.jpg) + +also enhances the model versatility toward a one-for-all foundation model. Regarding any-variate and any-length time series as one context, previous work (Liu et al., 2024a) has achieved unified modeling on flattened tokens based on noncausal Transformers. However, our empirical results (Figure 3) reveal that encoder-only forecasters may encounter performance degradation in long-context forecasting, while decoder-only Transformers can mitigate this degradation well. + +In this work, we generalize the training objective of language modeling to multivariate next token prediction, achieving unified time series forecasting that covers tasks in Figure 1 (right). Based on the decoder-only architecture, we propose TimeAttention to facilitate Transformers on multidimensional time series, presenting Kronecker-based masking mechanism to train time-series Transformers in a channel-dependent approach. With specialized position embedding for multivariate series, TimeAttention is aware of the chronological order of time points and achieves permutation-equivalence (Zaheer et al., 2017) on variables. We enlarge the context to thousands of patch tokens and achieve state-of-the-art on univariate, multivariate, and covariate-informed forecasting benchmarks. 
By pre-training on large-scale datasets, we present Timer-XL as an extra long version of pre-trained time-series Transformers (Timer) (Liu et al., 2024c), which outperforms recent large models in zero-shot forecasting. Our contributions lie in three aspects: + +- We propose multivariate next token prediction and unified time series forecasting, strengthening Transformers with enlarged contexts to make information-complete predictions. +- We introduce TimeAttention, a novel causal self-attention tailored for multidimensional time series, facilitating intra- and inter-series modeling with positional awareness and maintaining causality and scalability of Transformers. +- We propose Timer-XL, a versatile Transformer for one-for-all forecasting, which mitigates performance degradation in long-context time series, achieves state-of-the-art performance in task-specific benchmarks, and presents notable zero-shot performance by pre-training. + +# 2 RELATED WORK + +Transformers (Vaswani et al., 2017) for time series forecasting have undergone rapid advancements. Initial Transformer-based forecasters primarily focused on long-term forecasting (Li et al., 2019; Zhou et al., 2021; Wu et al., 2021; Sun & Zhang, 2024). However, the context length is not growing in pace, which hinders Transformers from making information-complete predictions. Another advancement has focused on multivariate forecasting. Unlike natural language, time series are multidimensional and inherently correlated (Hyndman, 2018). To learn intra- and inter-series dependencies, different tokenization of time-series Transformers has been proposed, including point-wise (Lim et al., 2021), patch-wise (Nie et al., 2022), and variable-wise (Liu et al., 2023) approaches, with deftly tailored architectures (Zhang & Yan, 2022; Wang et al., 2024b). However, few works highlight that multidimensional time series can be uniformly tackled by long-context Transformers without architectural + +modification. In this work, we leverage causal Transformers, which excel at handling long-context sequences, and unify time series forecasting tasks into multivariate next token prediction. + +Recently, time-series Transformers have experienced the evolution from small task-specific models to pre-trained large models (Das et al., 2023; Woo et al., 2024; Ansari et al., 2024). Among them, decoder-only Transformer is predominantly adopted as the backbone of large language models (Touvron et al., 2023; OpenAI, 2023), positioning as a scalable choice for general time series analysis (Liu et al., 2024c). By independently predicting each token with supervision, decoder-only models are also multi-length forecasters (Liu et al., 2024b), avoiding resource-intensive training and lookback-search. However, existing decoder-only Transformers are generally pre-trained in a channel-independent approach, making them inaccessible to inter-series dependencies. + +Prior work has employed encoder-only Transformers to capture dependencies of multivariate time series (Liu et al., 2024a). However, our empirical study found that this architecture can be incompatible with causal forecasting, limiting the performance of Transformers. To implement next token prediction and multivariate forecasting in a single Transformer, we renovate the attention module, which disentangles fine-grained token dependencies into variable dependencies and temporal causal masks, capturing intra- and inter-series dependencies with causality and scalability maintained. 
In Table 1, we list representative time-series Transformers and highlight their differences. + +Table 1: Comparison among representative time-series Transformers. + +
ModelPatchTST (2022)iTrans. (2023)TimeXer (2024b)UniTST (2024a)Moirai (2024)Timer (2024c)Timer-XL (Ours)
Intra-Series
Inter-Series
Causal Trm.
Pre-Trained
+ +# 3 APPROACH + +In this section, we first introduce a decoder-only Transformer to illustrate the procedure of next token prediction on univariate time series. As an extension, we design TimeAttention and propose Timer-XL for unified time series forecasting. It is applicable to univariate, multivariate, and covariate-informed scenarios by generalizing the context from 1D sequences to 2D time series. + +# 3.1 TIMER + +Timer (Liu et al., 2024c) is a time-series Transformer trained by next token prediction (Bengio et al., 2000), which regards single-dimensional time series as non-overlapping patch tokens. + +Next Token Prediction Given a univariate time series $\mathbf{X} = \{x_{1},\dots,x_{TP}\}$ of length $TP$, a time series token is defined as $P$ consecutive time points, also termed the patch token: + +$$ +\mathbf{x}_{i} = \left\{x_{(i-1)P+1}, \dots, x_{iP}\right\} \in \mathbb{R}^{P}, \quad i = 1, \dots, T. \tag{1} +$$ + +The training objective is to independently predict the next patch token to maximize the likelihood: + +$$ +P(\mathbf{X}) = \prod_{i=1}^{T} p\left(\mathbf{x}_{i+1} \mid \mathbf{x}_{\leq i}\right), \tag{2} +$$ + +which is realized by a decoder-only architecture with block number $L$ and model dimension $D$: + +$$ +\begin{aligned} \mathbf{h}_{i}^{0} &= \mathbf{W}_{e}\mathbf{x}_{i}, && i = 1, \dots, T, \\ \mathbf{H}^{l} &= \operatorname{TrmBlock}\left(\mathbf{H}^{l-1}\right), && l = 1, \dots, L, \\ \{\hat{\mathbf{x}}_{i+1}\} &= \mathbf{H}^{L}\mathbf{W}_{d}, && i = 1, \dots, T. \end{aligned} \tag{3} +$$ + +For simplicity, we omit the block index $l$. Timer adopts $\mathbf{W}_e$, $\mathbf{W}_d \in \mathbb{R}^{D \times P}$ that independently embed and project the token embeddings as $\mathbf{H} = \{\mathbf{h}_i\} \in \mathbb{R}^{T \times D}$. TrmBlock includes the feed-forward network and self-attention with the temporal causal mask $\mathcal{T} \in \mathbb{R}^{T \times T}$. $\mathbf{h}_i \in \mathbb{R}^D$ is the context representation of the previous $i$ tokens. All predicted $\hat{\mathbf{x}}_{i+1}$ are supervised with the ground truth via the MSE loss. + +# 3.2 GENERALIZE 1D SEQUENCES TO 2D TIME SERIES + +For the enlarged context with the additional dimension, our proposed attention mechanism aims to (1) thoroughly capture intra- and inter-series dependencies and (2) preserve causality within the temporal dimension. Without loss of generality, we illustrate this with the case of multivariate forecasting. + +Multivariate Next Token Prediction Given a multivariate time series $\mathbf{X} \in \mathbb{R}^{N \times TP}$ with the number of variables $N$, the time series token $\mathbf{x}_{m,i}$ is defined as the $i$-th patch of the $m$-th variable: + +$$ +\mathbf{x}_{m,i} = \left\{\mathbf{X}_{m,(i-1)P+1}, \dots, \mathbf{X}_{m,iP}\right\} \in \mathbb{R}^{P}, \quad m = 1, \dots, N, \; i = 1, \dots, T. \tag{4} +$$ + +The training objective is still to independently predict the next token. Unlike before, each prediction is made based on tokens of the previous time $(\leq i)$ from all $N$ variables: + +$$ +P(\mathbf{X}) = \prod_{m=1}^{N} \prod_{i=1}^{T} p\left(\mathbf{x}_{m,i+1} \mid \mathbf{x}_{:,\leq i}\right) = \prod_{m=1}^{N} \prod_{i=1}^{T} p\left(\mathbf{x}_{m,i+1} \mid \mathbf{x}_{1,\leq i}, \dots, \mathbf{x}_{N,\leq i}\right). \tag{5} +$$ + +Compared with Equation 2, the multivariate context length increases from $T$ to $NT$.
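+
+To make the objective in Equations 4-5 concrete, below is a minimal sketch of patch tokenization and multivariate next token prediction in PyTorch; the shapes and variable names are ours for illustration, not the released Timer-XL implementation:
+
+```python
+import torch
+import torch.nn as nn
+
+# Illustrative sizes: batch B, variables N, tokens T, patch length P, model dim D.
+B, N, T, P, D = 8, 7, 30, 96, 512
+
+X = torch.randn(B, N, T * P)          # multivariate series of length T * P
+tokens = X.unfold(2, P, P)            # (B, N, T, P): non-overlapping patch tokens
+
+W_e, W_d = nn.Linear(P, D), nn.Linear(D, P)
+H = W_e(tokens)                       # token embeddings h_{m,i}: (B, N, T, D)
+# ... H would be refined here by L Transformer blocks with TimeAttention ...
+pred = W_d(H)                         # predicted next patch for every position
+
+# Each position is supervised by the next patch of the same variable (MSE loss):
+loss = nn.functional.mse_loss(pred[:, :, :-1], tokens[:, :, 1:])
+```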
In exchange for the longer context, this paradigm learns causal dependencies within each sequence while incorporating exogenous variable correlations from the other sequences, making it a universal forecasting paradigm that outperforms channel-independent (Nie et al., 2022) or variable-centric models (Liu et al., 2023). + +Technically, we independently apply $\mathbf{W}_e\in \mathbb{R}^{D\times P}$ on each token to obtain the patch-wise representation $\mathbf{h}_{m,i}\in \mathbb{R}^D$, which will encompass contextual information from $Ni$ tokens through Transformer blocks and is eventually projected by $\mathbf{W}_d\in \mathbb{R}^{D\times P}$ into the predicted patch token $\hat{\mathbf{x}}_{m,i+1}$. + +Position Embedding Position embedding has not been sufficiently explored in time-series Transformers. To avoid the inherent permutation-invariance of self-attention, position embedding is required to reflect the chronological order of tokens on the temporal dimension. As for the variable dimension, shuffling the input order of variables should not affect anything other than the output order of variables. Formally, the processing of multiple variables should be permutation-equivalent (Zaheer et al., 2017). + +To meet the above requirements, we adopt RoPE (Su et al., 2024), a widely utilized position embedding, on the temporal dimension. For the variable dimension, we use two learnable scalars in each head to keep the permutation-equivalence of variables (Woo et al., 2024). Beyond simply incorporating them together, we provide detailed ablations in Section E.3 to demonstrate the effectiveness: + +$$ +\mathcal{A}_{mn,ij} = \mathbf{h}_{m,i}^{\top} \mathbf{W}_{q} \mathbf{R}_{\theta,i-j} \mathbf{W}_{k}^{\top} \mathbf{h}_{n,j} + u \cdot \mathbb{1}(m = n) + v \cdot \mathbb{1}(m \neq n), \tag{6} +$$ + +where $\mathbf{W}_q, \mathbf{W}_k, \mathbf{W}_v \in \mathbb{R}^{D \times d_k}$ and $d_k$ is the dimension of the query, key, and value. $\mathbf{R}_{\theta,t} \in \mathbb{R}^{d_k \times d_k}$ is the rotary matrix with rotation degree $t \cdot \theta$, $\mathbb{1}(\cdot)$ is the indicator function, and $u, v \in \mathbb{R}$ are learnable parameters for the token to distinguish its endogenous and exogenous time series. + +TimeAttention In contrast to variable-wise (Liu et al., 2023) and non-causal patch-wise tokens (Nie et al., 2022; Woo et al., 2024), our TimeAttention aims to capture causal patch-wise dependencies within and among all variables. Concretely, we sort patch tokens by flattening their 2D indices into 1D indices in the temporal-first manner, which is illustrated in the upper left of Figure 2. Note that the order of variables does not matter, since Equation 6 guarantees their permutation-equivalence. + +We provide an intuitive example to illustrate the causal dependencies within multivariate time series: consider the 2nd token of time series A. To predict its next token, its representation $\mathbf{h}$ should depend exactly on tokens $\{1, 2, 4, 5\}$. Similarly, we provide all causal dependencies of each token in Figure 12.
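+
+As a sanity check, this dependency pattern can be generated mechanically from two small matrices, as formalized in Equations 7-8 below. The toy sketch assumes PyTorch, with sizes and names of our own choosing:
+
+```python
+import torch
+
+# N = 2 variables (A, B), T = 3 patches each; temporal-first flattening
+# orders the tokens as A1 A2 A3 B1 B2 B3.
+N, T = 2, 3
+C = torch.ones(N, N)               # all variables inter-correlated
+Tm = torch.tril(torch.ones(T, T))  # causal temporal mask: 1 iff j <= i
+mask = torch.kron(C, Tm)           # (NT, NT) token-level dependency mask
+
+# The 2nd token of series A is flattened row 1 (0-based); its allowed
+# context is exactly tokens {1, 2, 4, 5} from the example above:
+print((torch.nonzero(mask[1]).flatten() + 1).tolist())  # [1, 2, 4, 5]
+```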
Based on the visualized attention mask and variable dependencies presented in Figure 2, where all variables are inter-correlated, all token dependencies in $\mathcal{A}$ can be formally disentangled by the Kronecker product into (1) the adjacency matrix of the variable dependency graph $\mathcal{C} \in \mathbb{R}^{N \times N}$ and (2) the causal temporal mask $\mathcal{T} \in \mathbb{R}^{T \times T}$: + +$$ +\mathcal{T}_{i,j} = \begin{cases} 1 & \text{if } j \leq i, \\ 0 & \text{otherwise,} \end{cases} \qquad \mathcal{C}_{m,n} = \begin{cases} 1 & \text{if variable } m \text{ is dependent on } n, \\ 0 & \text{otherwise.} \end{cases} \tag{7} +$$ + +Let the Kronecker product $\otimes : (\mathbb{R}^{N \times N}, \mathbb{R}^{T \times T}) \mapsto \mathbb{R}^{NT \times NT}$ take two matrices and produce a block matrix. Consequently, TimeAttention is formulated as follows: + +$$ +\operatorname{TimeAttention}(\mathbf{H}) = \operatorname{Softmax}\left(\frac{\operatorname{Mask}(\mathcal{C} \otimes \mathcal{T}) + \mathcal{A}}{\sqrt{d_{k}}}\right)\mathbf{H}\mathbf{W}_{v}, \qquad \operatorname{Mask}(\mathcal{M}) = \begin{cases} 0 & \text{if } \mathcal{M}_{i,j} = 1, \\ -\infty & \text{if } \mathcal{M}_{i,j} = 0. \end{cases} \tag{8} +$$ + +![](images/5d7910511fbc43aba8665601afd2df9d133202af450ca9b630a9f2b1ffa9530a.jpg) +Figure 2: Illustration of TimeAttention. For univariate series, the temporal mask $\mathcal{T}$ keeps the causality. Given multivariate patch tokens sorted in a temporal-first order, we adopt the variable dependencies $\mathcal{C}$, an all-one matrix, as the left operand of the Kronecker product, expanding the temporal mask to a block matrix, which exactly reflects the dependencies of multivariate next token prediction. The formulation is also generalizable to univariate and covariate-informed contexts with a pre-defined variable dependency. + +Eventually, token representations in $\mathbf{H} = \{\mathbf{h}_{m,i}\} \in \mathbb{R}^{NT\times D}$ will be independently processed by the feed-forward network and layer normalization, and fed into the next Transformer block. + +Unified Time Series Forecasting In multivariate forecasting, the variable dependency forms the complete graph, presenting an all-one matrix $\mathcal{C}$. By generalizing TimeAttention to multiple sequences, Transformers can leverage their length-flexibility to encompass relevant covariates as well. In this case, Timer-XL is adapted in two steps: (1) formulate the customized variable dependency as $\mathcal{C}$ and (2) optimize the model using the supervision of target variables. An example (target-$A$-covariate-$B$) of TimeAttention is illustrated on the right of Figure 2. In a nutshell, we adopt position embeddings for the temporal and variable dimensions. To achieve unified time series forecasting, we flatten 2D time series into a unified context and capture fine-grained causal token dependencies. + +# 4 EXPERIMENTS + +We conduct evaluations of Timer-XL in three aspects, including (1) supervised training as a task-specific forecaster, (2) large-scale pre-training as a zero-shot forecaster, and (3) assessing the effectiveness of TimeAttention and model efficiency.
Given that the long-context forecasting paradigm has received less attention in the community, partly concealed by performance saturation on previous benchmarks (Makridakis et al., 2020; Wu et al., 2022), we establish new long-context forecasting benchmarks. Detailed experimental configurations are provided in Appendix B. + +# 4.1 UNIVARIATE TIME SERIES FORECASTING + +**Setup** Due to the insufficient dataset length when extending contexts in univariate datasets (Makridakis et al., 2020), we adopt multivariate datasets from Liu et al. (2023). Although these datasets are originally multivariate, they are predicted in a univariate manner via channel independence. Different from the previous long-term forecasting setting, we focus on reliable prediction based on a long context. Therefore, we fix the prediction horizon and increase the lookback length to monthly and yearly levels. We also establish a long-context univariate benchmark based on the challenging 40-year ECMWF Reanalysis v5 dataset (Hersbach et al., 2020), where yearly contexts are adopted to predict the land-surface temperature of a single site (ERA5-S). + +Results As shown in Figure 3, the accuracy of univariate prediction can generally be improved by extending the daily context to a monthly one. We draw a similar conclusion on ERA5 (Table 15), where extending the context consistently helps within a given model architecture. Notably, Timer-XL with its decoder-only architecture outperforms the encoder-only Transformer and the linear forecaster in excessively long contexts. Further, we conduct representation analysis in Appendix E.4, revealing that Timer-XL is proficient at adaptively selecting information from vast observations and thus achieves breakthrough performance. It is also noteworthy that the performance with monthly and yearly contexts improves slowly and eventually deteriorates, which may stem from the increased noise and training difficulty inherent in the data, leaving improved context efficiency as a future direction. Table 2 provides results on ERA5-S. Timer-XL consistently outperforms PatchTST on all sites, which can be credited to the maintenance of causality and token-wise supervision in the decoder-only architecture. + +![](images/79fb7a53377762632a2d1083e3b63f76c73377f78a711bb58b721826bb32661e.jpg) + +![](images/a9b005998cd76436a1c88c4b9d2a8d95352de906af3c71c720b28e89a67c1a9e.jpg) + +![](images/d100c225ac9464f9239272523ac5c6506ac7f3812e369816cdc3d45165ebed62.jpg) + +![](images/c92323559b403877fc1b283efc799de5a0b7712866bb329b6d210cb0b798ac4b.jpg) +Figure 3: Univariate forecasting (pred-96) of well-acknowledged benchmarks under channel independence (Nie et al., 2022). We increase the lookback length to encompass monthly and yearly contexts. + +Non-stationary Forecasting We delve into the widespread non-stationarity in univariate tasks. It is commonly tackled by normalization (Kim et al., 2021), which greatly improves Transformer performance on previous benchmarks. However, we find this gain may be caused by the insufficient time span and training samples in these datasets. While normalization simplifies learning by aligning series with different means and variances to the same distribution, it limits the model capacity of Transformers, preventing them from learning variations among windows. The by-products can be mode collapse and oversmoothed predictions. In Table 2 and Table 16, we evaluate the performance on ERA5 and datasets from Wu et al.
(2022), which validates that Timer-XL can achieve better results even without instance normalization. + +Table 2: Univariate forecasting (input-3072-pred-96) of ERA5-S, encompassing 117k time points in each station (40-years). We evaluate PatchTST and Timer-XL with and without normalization (Kim et al., 2021). +Norm. indicates using the normalization. We train one model for each site separately. + +
StationBeijingHongkongLondonNew YorkParisSeoulShanghaiAverage
ModelMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAE
PatchTST0.07910.2210.1890.3270.2770.4150.1860.3340.2660.4070.09400.2380.1370.2890.1750.319
+ Norm.0.07970.2200.1910.3230.2810.4190.1840.3340.2720.4110.09140.2330.1360.2870.1760.319
Timer-XL0.07390.2100.1790.3160.2620.4040.1820.3270.2540.3990.09010.2290.1340.2820.1680.310
+ Norm.0.07420.2100.1830.3170.2780.4180.1810.3300.2640.4070.08960.2270.1330.2810.1720.313
+ +# 4.2 MULTIVARIATE TIME SERIES FORECASTING + +**Setup** We follow iTransformer (Liu et al., 2023) to evaluate multivariate forecasting performance. Toward a one-for-all forecaster, we evaluate performance of rolling forecast, that is, we trained one model for all prediction horizons by integrating the previous prediction into the lookback window in the next iteration. We further establish long-context multivariate forecasting benchmarks: ERA5 multi-station land-surface temperature prediction (ERA5-MS), and the global temperature and wind speed forecasting challenge (GTWSF) (Wu et al., 2023), to learn complex temporal dynamics and variable correlations with sufficient training samples. + +Results As shown in Tables 3-4 and Figure 4, Timer-XL achieves the best results on both previous and new benchmarks. Essentially, Transformers that explicitly capture inter-series dependencies, such as UniTST (Liu et al., 2024a) and iTransformer, reasonably achieve decent performance in Table 3. Beyond iTransformer, Timer-XL can model fine-grained patch-wise temporal dependencies. With + +TimeAttention, Timer-XL outperforms Timer especially on high-dimensional time series (13.2% in ECL and 6.3% in Traffic, with thousands of tokens in the context). Compared with the encoder-only UniTST, decoder-only Transformers excel at generalizing across varying prediction lengths in Table 4. + +![](images/7d45ba03ee83b18b2f20d6f15575a4d2df1639ef3ffab172a100980176335e37.jpg) +Figure 4: Multivariate forecasting of GTWSF (2-day-pred-1-day), involving 3850 worldwide stations spanning two years. Results of the baseline models are officially reported by Ding et al. (2024). + +Table 3: Multivariate forecasting (96-pred-96) of well-acknowledged benchmarks. All models are trained from scratch. Results of baseline models are officially reported by Liu et al. (2023). + +
ModelsTimer-XL(Ours)Timer(2024c)UniTST(2024a)iTransformer(2023)DLinear(2023)PatchTST(2022)TimesNet(2022)Stationary(2022b)Autoformer(2021)
MetricMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAE
ECL0.1380.2330.1590.2440.1390.2350.1480.2400.1970.2820.1810.2700.1680.2720.1690.2730.2010.317
ETTh10.3810.3990.3860.4010.3850.4020.3860.4050.3860.4000.4140.4190.3840.4020.5130.4910.4490.459
Traffic0.3870.2600.4130.2650.3890.2650.3950.2680.6500.3960.4620.2950.5930.3210.6120.3380.6130.388
Weather0.1650.2090.1760.2150.1650.2100.1740.2140.1960.2550.1770.2180.1720.2200.1730.2230.2660.336
Solar-Energy0.2000.2290.2040.2340.2030.2320.2030.2370.2900.3780.2340.2860.2500.2920.2150.2490.8840.711
+ +Table 4: Multivariate forecasting (672-pred-{96, 192, 336, 720}) of well-acknowledged benchmarks. We evaluate one-for-all forecasters following Liu et al. (2024b): rolling forecasting for four forecast lengths with one model. Averaged results are reported here and full results are provided in Table 12. + +
ModelsTimer-XL(Ours)Timer(2024c)UniTST(2024a)iTransformer(2023)DLinear(2023)PatchTST(2022)TimesNet(2022)Stationary(2022b)Autoformer(2021)
MetricMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAE
ECL0.1550.2460.1610.2510.1630.2570.1640.2580.1650.2650.1690.2680.2010.3030.2650.3580.2890.379
ETTh10.4090.4300.4180.4360.4290.4470.4210.4450.4260.4440.4120.4350.4950.4910.5050.5130.5170.528
Traffic0.3740.2550.3840.2590.3850.2650.3840.2740.4230.2980.3910.2750.6020.3220.6300.3470.6840.433
Weather0.2400.2730.2320.2700.2310.2720.2660.2910.2390.2910.2260.2680.2640.2930.3080.3290.4350.455
Solar-Energy0.1980.2490.2330.2490.2410.2750.2130.2910.2220.2830.2020.2690.2130.2950.2540.3150.2650.325
+ +Ablation Study Patching (Nie et al., 2022) has been demonstrated as an effective tokenization approach for time series, leading to the boom of Transformers in supervised deep forecasters and large time series models. To better cope with multivariate time series forecasting, we compared typical models on real-world benchmarks to address key questions: (1) whether to conduct explicit inter-series modeling or not (channel independence) and (2) whether to use decoder-only or encoder-only Transformers. The combination presents four Transformers in Table 5, which shows that Timer-XL combines the advantages of explicit inter-series modeling and the decoder-only architecture, which is suitable for multivariate time series forecasting with sufficient training samples. + +# 4.3 COVARIATE-INFORMED TIME SERIES FORECASTING + +**Setup** For the covariate-informed forecasting, we adopt the well-acknowledged electricity price forecasting (EPF) task (Lago et al., 2021). Each subset contains electricity price as the endogenous variable and two exogenous variables. Therefore, the variable dependency for Timer-XL is formulated + +Table 5: Multivariate forecasting (input-3072-pred-96) of ERA5-MS (40 years and 7 stations). We fairly evaluate Transformers that adopt patched time series. CI. indicates whether the Transformer uses channel independence (Nie et al., 2022). Arch. categorizes them into the encoder-only (E) and decoder-only (D) architectures. Different from ERA5-S in Table 2, we train one model for all sites. + +
StationBeijingHongkongLondonNew YorkParisSeoulShanghaiAverage
ModelCI.Arch.MSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAE
PatchTSTYesE0.08150.2220.1900.3260.2750.4140.1850.3330.2650.4070.09770.2400.1390.2900.1760.319
UniTSTNoE0.07530.2130.1790.3180.2690.4100.1850.3300.2560.4010.09010.2300.1350.2840.1700.312
TimerYesD0.07340.2100.1820.3190.2680.4070.1830.3290.2550.3990.08770.2260.1320.2810.1690.310
Timer-XLNoD0.07360.2090.1740.3090.2630.4040.1820.3270.2520.3960.08720.2250.1300.2780.1660.307
+ +as $\mathcal{C} = [[1,1,1],[0,1,0],[0,0,1]]$ . To investigate whether to learn causal or noncausal patch-wise dependencies in covariates, we implement two versions of Timer-XL: the original one with temporal causal mask $\mathcal{T}$ , and the noncausal one with $\mathcal{T}$ replaced by an all-one matrix. + +Results As shown in Table 6, Timer-XL outperforms state-of-the-art models in covariate-informed tasks. Compared with TimeXer (Wang et al., 2024b), which treats an entire covariate as a token, Timer-XL learns fine-grained patch-wise dependencies. By the noncausal version of Timer-XL, we surprisingly find consistent conclusions with endogenous variables: results will be better if Timer-XL learns causal dependencies within exogenous variables. It again validates that next token prediction that maintains causality has a higher upper limit of performance. + +Table 6: Covariate-informed forecasting (168-pred-24) of EPF. We implement two versions of TimerXL: Noncausal indicates that we do not maintain the causality within covariates by replacing temporal causal mask with all-one matrix. Results of baselines are officially reported by Wang et al. (2024b). + +
ModelsTimer-XL (Ours)Timer-XL (Noncausal)TimeXer (2024b)iTransformer (2023)DLinear (2023)PatchTST (2022)Crossformer (2022)TimesNet (2022)Autoformer (2021)
MetricMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAE
NP0.2340.2620.2370.2650.2380.2680.2650.3000.3090.3210.2670.2840.2450.2890.2500.2890.4020.398
PJM0.0890.1870.0920.1880.0880.1880.0970.1970.1080.2150.1060.2090.1490.1980.0970.1950.1680.267
BE0.3710.2430.4100.2790.3790.2430.3940.2700.4630.3130.4030.2640.4360.2940.4190.2880.5000.333
FR0.3810.2040.4060.2200.3840.2080.4390.2330.4290.2600.4110.2200.4400.2160.4310.2340.5190.295
DE0.4340.4150.4350.4150.4400.4180.4790.4430.5200.4630.4610.4320.5400.4230.5020.4460.6740.544
Average0.3020.2620.3160.2730.3060.2650.3350.2890.3660.3140.3300.2820.3620.2840.3400.2900.4530.368
+ +# 4.4 PRE-TRAINED TIME-SERIES TRANSFORMERS + +**Setup** Pre-training enriches time-series Transformers with generalizable forecasting capabilities. The outcome large time series model can cope with widespread challenges of few-shot and zero-shot forecasting. In this section, we conduct univariate pre-training on UTSD (Liu et al., 2024c) and LOTSA (Woo et al., 2024) and evaluate zero-shot performance on benchmarks from Wu et al. (2022). We further conduct large-scale multivariate pre-training on our ERA5-Large dataset, which spans 40 years and encompasses 4920 stations. Subsequently, we evaluate three types of generalization results comparing PatchTST (encoder-only Transformer) and Timer-XL (decoder-only Transformer): pre-training on $80\%$ stations and $80\%$ time span and then forecast on the remaining stations (variable generalization), remaining time span (temporal generalization), and remaining split of time span and stations (variable and temporal generalization). To evaluate the benefit of pre-training with longer context, we compare the zero-shot performance of Timer (2024c) and Timer-XL, where the context length of pre-training is increased from 1440 to 2880. + +Results We compare generalization performance on ERA5-Large in the middle of Figure 5 (a). Timer-XL achieves better results than PatchTST in all cases, revealing that decoder-only architecture has stronger generalization capability. Figure 5 (b) compares zero-shot performance of two pretrained Transformers with different context lengths, where Timer-XL outperforms previous Timer on + +all benchmark datasets, validating that long-context pre-training enhances large time series models. In Table 7, we provide a comprehensive zero-shot evaluation under a comparable pre-training scale and model size, where Timer-XL achieves notable performance with better sample efficiency. The versatility and scalability make it a promising backbone of foundation models. + +![](images/f809df11e6c77a504c9d6611bd294faa583499ade1ad86527e9586aeba9856e7.jpg) +Figure 5: Illustration of one-for-all generalization (left). Based on the contextual flexibility, Timer-XL can predict heterogeneous time series, indicating three directions of generalization shown on the left. We compare performance when generalizing across the time and variables (middle), and zero-shot results across datasets (right), emphasizing the benefit of long-context pre-training. + +![](images/9bf8e911d60cef9234044b26c6892c245b20cac153d33c0d445242aa07ca1ca9.jpg) + +![](images/8d040fb43b89d37fe62e4580b2b8519c97cb6723bc57bc269df004eaa12a00a8.jpg) + +Table 7: Averaged results of zero-shot forecasting. A lower MSE or MAE indicates a better prediction. Corresponding prediction lengths include $\{96,192,336,720\}$ . Full results of all prediction lengths are provided in Table 13. $1^{\text{st}}$ Count represents the number of wins achieved by a model under all prediction lengths and datasets. The detailed configuration of Timer-XLBase is provided in Table 11. + +
ModelsTimer-XLBase(Ours)Time-MoEBase(2024)Time-MoELarge(2024)Time-MoEUltra(2024)MoiraiSmall(2024)MoiraiBase(2024)MoiraiLarge(2024)TimesFM(2023)MOMENT(2024)ChronosBase(2024)ChronosLarge(2024)
MetricMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAE
ETTm10.3730.3920.3940.4150.3760.4050.3560.3910.4360.4100.4060.3850.4220.3910.4330.4180.6700.5360.6450.5000.5550.465
ETTm20.2730.3360.3170.3650.3160.3610.2880.3440.3070.3470.3110.3370.3290.3430.3280.3460.3160.3650.3100.3500.2950.338
ETTh10.4040.4170.4000.4240.3940.4190.4120.4260.4280.4270.4170.4190.4800.4390.4730.4430.6830.5660.5910.4680.5880.466
ETTh20.3470.3880.3660.4040.4050.4150.3710.3990.3610.3840.3620.3820.3670.3770.3920.4060.3610.4090.4050.4100.4550.427
ECL0.1740.278------0.2180.3030.1870.2740.1860.270--0.7650.6860.2140.2780.2040.273
Weather0.2560.2940.2650.2970.2700.3000.2560.2880.2750.2860.2870.2810.2640.273--0.2940.3260.2920.3150.2790.306
\( 1^{st} \) Count15102130107000511001200002
+ +* Datasets used for pre-training are not evaluated on the corresponding models, denoted by a dash (-). +* Traffic (PEMS) is generally used during the pre-training of large models and is thus not evaluated here. +* Our model checkpoint is available at https://huggingface.co/thuml/timer-base-84m. + +# 4.5 MODEL ANALYSIS + +Model Efficiency To evaluate the model efficiency of Timer-XL with respect to the context length, it is essential to recognize the distinct characteristics of time series data compared to 1D sequences. Unlike natural language, the time series modality is characterized by the variable number $N$ and the input length. We adopt two representative multivariate datasets with different $N$, and provide the memory footprint and training speed under gradually prolonged input. We evaluate typical approaches to handling multivariate series: (1) Timer-XL and Moirai, which adopt channel dependence; (2) Timer, which adopts channel independence. Intuitively, the complexity of the first type is $\mathcal{O}(N^2 T^2)$, while the complexity of self-attention under channel independence is $\mathcal{O}(NT^2)$. However, the results shown in Figure 6 reveal that the measured overheads of Timer-XL are much less than $N$ times those of Timer. + +Since previous analyses of model efficiency on time-series Transformers predominantly focus on self-attention over 1D time series, we first present a theoretical derivation of the computational complexity of Transformers on 2D time series, including the parameter counts, memory footprint, and FLOPs in Table 8. We find that the other parts of Transformers, such as the feed-forward network, have a complexity of $\mathcal{O}(NT)$ no matter which approach is adopted to handle multivariate time series. They also account for the dominant overheads in existing benchmarks, since the context length is not large enough, confirming our empirical results. Further, we introduce FlashAttention (Dao et al., 2022) to improve the model efficiency, which is computationally equivalent and reduces the overall memory footprint of Timer-XL to $\mathcal{O}(NT)$ without affecting performance. + +![](images/553584e7183e72b2a768d3fda64cf4138bca7bc170d557ad9865987ffb71461b.jpg) +Weather (21 Variables) + +![](images/4a2d17d81021b75adf6eab2fc8be89ebd6b9c6f957e6ade84aeff1196f10832b.jpg) +Weather (21 Variables) + +![](images/5ef83dc7f21995e29764acb949b98abb763fe9ee4fd5749b588be0ac111d733f.jpg) +ECL (321 Variables) +Figure 6: Efficiency analysis. We compare representative time-series Transformers on multivariate datasets with variable numbers ranging from tens to hundreds and increase the lookback length. + +![](images/8ede19e9b3c61c84a7349767d6ba8a18094a3bb91cf8ce891ea014c63de2403a.jpg) +ECL (321 Variables) + +![](images/80e46e2ca1351ea7849005b58e526640e11d4b5e879f7a64bde7ae578895fec0.jpg) +Learned Attention +Figure 7: Visualization of TimeAttention, taken from the first sample of length 672 in the test split of Traffic. We visualize the last 10 variables, each containing 7 tokens, and present the auto-correlation function (ACF) plot. Auto-correlation is reflected by the distribution of attention scores (bottom right). We average TimeAttention across sub-blocks, which indicates the Pearson correlations (upper right).
![](images/3a06aff90573f0a821016699aae13f8670884469cca930d8e1961cdd607296d9.jpg)

![](images/06321afc1ca0e3eb0b4cc245978620970bd5a8d172f6f7db5b451bbe63abb12b.jpg)
Sub-Block (3, 3)

Representation Analysis In addition to the enhanced performance, fine-grained token dependencies offer improved interpretability. We present a showcase visualization from Traffic in Figure 7. It is observed that sub-matrices along the diagonal generally receive greater attention, which reasonably reveals the predominant dependencies within the endogenous variable. By zooming in on a sub-block corresponding to Variable-3, we observe that the attention distribution of the last row indicates certain strong dependencies among patch tokens. This observation is also supported by the auto-correlation function (ACF) plot, which reveals auto-correlations at certain lags, explaining why the model pays special attention to these tokens. Furthermore, we average each sub-matrix into one scalar; the resulting matrix also illustrates the Pearson correlations present in the raw data.

# 5 CONCLUSION AND FUTURE WORK

In this paper, we emphasize the efficacy of causal Transformers in the forecasting of long-context time series. To facilitate long-context Transformers on diverse tasks, we propose multivariate next token prediction, a novel paradigm to predict multidimensional series with covariates. We present Timer-XL, enhanced by TimeAttention, as an extra-long version of pre-trained time-series Transformers. It simultaneously captures temporal dynamics and variable correlations through enhanced self-attention. In addition to achieving state-of-the-art performance on extensive benchmarks, we establish challenging benchmarks for long-context forecasting. By pre-training on large-scale heterogeneous time series, Timer-XL demonstrates notable zero-shot performance as a large time-series model. In the future, we will improve computational efficiency and build large domain-specific models with Timer-XL.

# ACKNOWLEDGMENTS

This work was supported by the National Natural Science Foundation of China (U2342217 and 62021002), the BNRist Project, and the National Engineering Research Center for Big Data Software.

# REFERENCES

Abdul Fatir Ansari, Lorenzo Stella, Caner Turkmen, Xiyuan Zhang, Pedro Mercado, Huibin Shen, Oleksandr Shchur, Syama Sundar Rangapuram, Sebastian Pineda Arango, Shubham Kapoor, et al. Chronos: Learning the language of time series. arXiv preprint arXiv:2403.07815, 2024.
Yoshua Bengio, Réjean Ducharme, and Pascal Vincent. A neural probabilistic language model. Advances in Neural Information Processing Systems, 13, 2000.
George Box. Box and Jenkins: Time series analysis, forecasting and control. In A Very British Affair: Six Britons and the Development of Time Series Analysis During the 20th Century, pp. 161-215. Springer, 2013.
Defu Cao, Yujing Wang, Juanyong Duan, Ce Zhang, Xia Zhu, Congrui Huang, Yunhai Tong, Bixiong Xu, Jing Bai, Jie Tong, et al. Spectral temporal graph neural network for multivariate time-series forecasting. Advances in Neural Information Processing Systems, 33:17766-17778, 2020.
Tri Dao, Dan Fu, Stefano Ermon, Atri Rudra, and Christopher Ré. FlashAttention: Fast and memory-efficient exact attention with IO-awareness. Advances in Neural Information Processing Systems, 35:16344-16359, 2022.
Abhimanyu Das, Weihao Kong, Rajat Sen, and Yichen Zhou. A decoder-only foundation model for time-series forecasting. arXiv preprint arXiv:2310.10688, 2023.
Xiaohan Ding, Yiyuan Zhang, Yixiao Ge, Sijie Zhao, Lin Song, Xiangyu Yue, and Ying Shan. UniRepLKNet: A universal perception large-kernel ConvNet for audio, video, point cloud, time-series and image recognition. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 5513-5524, 2024.
Mononito Goswami, Konrad Szafer, Arjun Choudhry, Yifu Cai, Shuo Li, and Artur Dubrawski. MOMENT: A family of open time-series foundation models. arXiv preprint arXiv:2402.03885, 2024.
Hans Hersbach, Bill Bell, Paul Berrisford, Shoji Hirahara, András Horányi, Joaquín Muñoz-Sabater, Julien Nicolas, Carole Peubey, Raluca Radu, Dinand Schepers, et al. The ERA5 global reanalysis. Quarterly Journal of the Royal Meteorological Society, 146(730):1999-2049, 2020.
RJ Hyndman. Forecasting: Principles and practice. OTexts, 2018.
Taesung Kim, Jinhee Kim, Yunwon Tae, Cheonbok Park, Jang-Ho Choi, and Jaegul Choo. Reversible instance normalization for accurate time-series forecasting against distribution shift. In International Conference on Learning Representations, 2021.
Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014.
Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment Anything. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 4015-4026, 2023.
Jesus Lago, Grzegorz Marcjasz, Bart De Schutter, and Rafal Weron. Forecasting day-ahead electricity prices: A review of state-of-the-art algorithms, best practices and an open-access benchmark. Applied Energy, 293:116983, 2021.
Guokun Lai, Wei-Cheng Chang, Yiming Yang, and Hanxiao Liu. Modeling long- and short-term temporal patterns with deep neural networks. In The 41st International ACM SIGIR Conference on Research & Development in Information Retrieval, pp. 95-104, 2018.

Shiyang Li, Xiaoyong Jin, Yao Xuan, Xiyou Zhou, Wenhu Chen, Yu-Xiang Wang, and Xifeng Yan. Enhancing the locality and breaking the memory bottleneck of Transformer on time series forecasting. Advances in Neural Information Processing Systems, 32, 2019.
Bryan Lim, Sercan Ö Arık, Nicolas Loeff, and Tomas Pfister. Temporal fusion transformers for interpretable multi-horizon time series forecasting. International Journal of Forecasting, 37(4):1748-1764, 2021.
Juncheng Liu, Chenghao Liu, Gerald Woo, Yiwei Wang, Bryan Hooi, Caiming Xiong, and Doyen Sahoo. UniTST: Effectively modeling inter-series and intra-series dependencies for multivariate time series forecasting. arXiv preprint arXiv:2406.04975, 2024a.
Minhao Liu, Ailing Zeng, Muxi Chen, Zhijian Xu, Qiuxia Lai, Lingna Ma, and Qiang Xu. SCINet: Time series modeling and forecasting with sample convolution and interaction. Advances in Neural Information Processing Systems, 35:5816-5828, 2022a.
Shizhan Liu, Hang Yu, Cong Liao, Jianguo Li, Weiyao Lin, Alex X Liu, and Schahram Dustdar. Pyraformer: Low-complexity pyramidal attention for long-range time series modeling and forecasting. In International Conference on Learning Representations, 2021.
Yong Liu, Haixu Wu, Jianmin Wang, and Mingsheng Long. Non-stationary Transformers: Exploring the stationarity in time series forecasting. Advances in Neural Information Processing Systems, 35:9881-9893, 2022b.
Yong Liu, Tengge Hu, Haoran Zhang, Haixu Wu, Shiyu Wang, Lintao Ma, and Mingsheng Long. iTransformer: Inverted Transformers are effective for time series forecasting.
arXiv preprint arXiv:2310.06625, 2023.
Yong Liu, Guo Qin, Xiangdong Huang, Jianmin Wang, and Mingsheng Long. AutoTimes: Autoregressive time series forecasters via large language models. arXiv preprint arXiv:2402.02370, 2024b.
Yong Liu, Haoran Zhang, Chenyu Li, Xiangdong Huang, Jianmin Wang, and Mingsheng Long. Timer: Generative pre-trained Transformers are large time series models. In Forty-first International Conference on Machine Learning, 2024c.
Spyros Makridakis, Evangelos Spiliotis, and Vassilios Assimakopoulos. The M4 competition: 100,000 time series and 61 forecasting methods. International Journal of Forecasting, 36(1):54-74, 2020.
Yuqi Nie, Nam H Nguyen, Phanwadee Sinthong, and Jayant Kalagnanam. A time series is worth 64 words: Long-term forecasting with Transformers. arXiv preprint arXiv:2211.14730, 2022.
OpenAI. GPT-4 technical report. arXiv preprint arXiv:2303.08774, 2023.
Boris N Oreshkin, Dmitri Carpov, Nicolas Chapados, and Yoshua Bengio. N-BEATS: Neural basis expansion analysis for interpretable time series forecasting. arXiv preprint arXiv:1905.10437, 2019.
Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. PyTorch: An imperative style, high-performance deep learning library. Advances in Neural Information Processing Systems, 32, 2019.
PEMS. Traffic dataset. http://pems.dot.ca.gov/.
Ofir Press, Noah A Smith, and Mike Lewis. Train short, test long: Attention with linear biases enables input length extrapolation. arXiv preprint arXiv:2108.12409, 2021.
Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J Liu. Exploring the limits of transfer learning with a unified text-to-text transformer. The Journal of Machine Learning Research, 21(1):5485-5551, 2020.

Kashif Rasul, Arjun Ashok, Andrew Robert Williams, Arian Khorasani, George Adamopoulos, Rishika Bhagwatkar, Marin Biloš, Hera Ghonia, Nadhir Vincent Hassen, Anderson Schneider, et al. Lag-Llama: Towards foundation models for time series forecasting. arXiv preprint arXiv:2310.08278, 2023.
David Salinas, Valentin Flunkert, Jan Gasthaus, and Tim Januschowski. DeepAR: Probabilistic forecasting with autoregressive recurrent networks. International Journal of Forecasting, 36(3):1181-1191, 2020.
Xiaoming Shi, Shiyu Wang, Yuqi Nie, Dianqi Li, Zhou Ye, Qingsong Wen, and Ming Jin. Time-MoE: Billion-scale time series foundation models with mixture of experts. arXiv preprint arXiv:2409.16040, 2024.
Jianlin Su, Murtadha Ahmed, Yu Lu, Shengfeng Pan, Wen Bo, and Yunfeng Liu. RoFormer: Enhanced transformer with rotary position embedding. Neurocomputing, 568:127063, 2024.
Huihui Sun and Xiaofeng Zhang. Study on coded permutation entropy of finite-length Gaussian white noise time series. Chinese Journal of Electronics, 33(1):185-194, 2024.
Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, et al. LLaMA: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971, 2023.
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in Neural Information Processing Systems, 30, 2017.
Xindi Wang, Mahsa Salmani, Parsa Omidi, Xiangyu Ren, Mehdi Rezagholizadeh, and Armaghan Eshaghi.
Beyond the limits: A survey of techniques to extend the context length in large language models. arXiv preprint arXiv:2402.02244, 2024a.
Yuxuan Wang, Haixu Wu, Jiaxiang Dong, Yong Liu, Yunzhong Qiu, Haoran Zhang, Jianmin Wang, and Mingsheng Long. TimeXer: Empowering Transformers for time series forecasting with exogenous variables. arXiv preprint arXiv:2402.19072, 2024b.
Gerald Woo, Chenghao Liu, Akshit Kumar, Caiming Xiong, Silvio Savarese, and Doyen Sahoo. Unified training of universal time series forecasting Transformers. arXiv preprint arXiv:2402.02592, 2024.
Haixu Wu, Jiehui Xu, Jianmin Wang, and Mingsheng Long. Autoformer: Decomposition Transformers with auto-correlation for long-term series forecasting. Advances in Neural Information Processing Systems, 34:22419-22430, 2021.
Haixu Wu, Tengge Hu, Yong Liu, Hang Zhou, Jianmin Wang, and Mingsheng Long. TimesNet: Temporal 2D-variation modeling for general time series analysis. arXiv preprint arXiv:2210.02186, 2022.
Haixu Wu, Hang Zhou, Mingsheng Long, and Jianmin Wang. Interpretable weather forecasting for worldwide stations with a unified deep model. Nature Machine Intelligence, 5(6):602-611, 2023.
Shukang Yin, Chaoyou Fu, Sirui Zhao, Ke Li, Xing Sun, Tong Xu, and Enhong Chen. A survey on multimodal large language models. arXiv preprint arXiv:2306.13549, 2023.
Manzil Zaheer, Satwik Kottur, Siamak Ravanbakhsh, Barnabas Poczos, Russ R Salakhutdinov, and Alexander J Smola. Deep Sets. Advances in Neural Information Processing Systems, 30, 2017.
Ailing Zeng, Muxi Chen, Lei Zhang, and Qiang Xu. Are Transformers effective for time series forecasting? In Proceedings of the AAAI Conference on Artificial Intelligence, volume 37, pp. 11121-11128, 2023.
Yunhao Zhang and Junchi Yan. Crossformer: Transformer utilizing cross-dimension dependency for multivariate time series forecasting. In The Eleventh International Conference on Learning Representations, 2022.

Wayne Xin Zhao, Kun Zhou, Junyi Li, Tianyi Tang, Xiaolei Wang, Yupeng Hou, Yingqian Min, Beichen Zhang, Junjie Zhang, Zican Dong, et al. A survey of large language models. arXiv preprint arXiv:2303.18223, 2023.

Haoyi Zhou, Shanghang Zhang, Jieqi Peng, Shuai Zhang, Jianxin Li, Hui Xiong, and Wancai Zhang. Informer: Beyond efficient transformer for long sequence time-series forecasting. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 35, pp. 11106-11115, 2021.

# A PROOF OF MODEL EFFICIENCY

# A.1 SETUPS

Consider an input univariate time series divided into $T$ tokens according to the patch size $P$, which is fed into a vanilla Transformer. The training objective is to predict the next token of $P$ time points. We will generalize the derivation from 1D sequences to 2D time series based on different approaches to handling multivariate data with the variable number $N$. We adopt the same notations as before: the Transformer consists of $L$ blocks with model dimension $D$. The multi-head attention mechanism has $H$ heads, each with a dimension of $d_{k}$ for query, key, and value, and $d_{k} = \frac{D}{H}$. The intermediate dimension of the feed-forward network is set as $D_{\mathrm{ff}} = \alpha D$. The results are summarized in Table 8; we provide the detailed proofs in the following sections.

Table 8: Parameter count and computational complexity of Transformers for multivariate time series.
| Metric | Type | Count | Complexity |
|---|---|---|---|
| FLOPs (Training Speed) | Channel Independence | $12(PDNT + L(D+H)NT^2 + (2+\alpha)LD^2NT)$ | $\mathcal{O}(LDNT(D+T))$ |
| | Channel Dependence | $12(PDNT + L(D+H)N^2T^2 + (2+\alpha)LD^2NT)$ | $\mathcal{O}(LDNT(D+NT))$ |
| Parameters | Encoder-Only | $(4+2\alpha)LD^2 + 4LD + (1+T)PD$ | $\mathcal{O}(LD^2)$ |
| | Decoder-Only | $(4+2\alpha)LD^2 + 4LD + 2PD$ | $\mathcal{O}(LD^2)$ |
| Memory Footprint | Self-Attention | $4(D+P)NT + (32+8\alpha)LDNT + 4LHN^2T^2$ | $\mathcal{O}(LHN^2T^2)$ |
| | FlashAttention | $4(D+P)NT + (32+8\alpha)LDNT$ | $\mathcal{O}(LDNT)$ |
+ +* $L$ is the block number of Transformers. $D$ is the dimension of embeddings (the hidden dimension of FFN $D_{\mathrm{ff}}$ is set as $\alpha D$ ). $H$ is the head number and the dimension of query, key, and value $d_k = D / H$ . The overhead is to train on a multivariate time series ( $N$ -variables and $TP$ time points) with patch token length $P$ and context length $T$ . Set $N = 1$ for training on univariate time series. + +# A.2 FLOPs + +As a preliminary, the multiplication between matrix $\mathbf{A} \in \mathbb{R}^{n \times m}$ and matrix $\mathbf{C} \in \mathbb{R}^{m \times p}$ requires $mnp$ multiplications and $mnp$ additions, resulting in $2mnp$ floating-point operations. Given batched matrices $\mathbf{A} \in \mathbb{R}^{B \times n \times m}$ and $\mathbf{C} \in \mathbb{R}^{B \times m \times p}$ , $B$ times matrix multiplications will be performed. It is evident that the batch size is a linear multiplier. Thus, we first omit $B$ to calculate the operations of dealing with one univariate series, and then we will reintroduce it to analyze channel independence. + +The computational cost of Transformers can be primarily categorized into two types: (1) multi-head attention calculation and (2) linear transformations. In contrast, the operations of layer normalization, residual connection, activation functions, and position embedding with the complexity of $\mathcal{O}(TD)$ are less significant. Therefore, we derive the computational complexity mainly with respect to the above two types by delving into the forwarding process of one univariate series. + +Patch Embedding The tokenized time series $\{\mathbf{x}_i\} \in \mathbb{R}^{T\times P}$ is mapped into the embedding space through the patch-wise embedding $\mathbf{W}_e\in \mathbb{R}^{D\times P}$ , resulting in $2PDT$ operations. + +Self-Attention The calculation of self-attention begins with the computation of query, key and value by multiplying the patch embeddings with matrices $\mathbf{W}_q$ , $\mathbf{W}_k$ , $\mathbf{W}_v \in \mathbb{R}^{D \times d_k}$ respectively in $H$ heads, which incurs a computational cost of $6HDd_kT = 6D^2T$ and yields $\mathbf{Q}$ , $\mathbf{K}$ , $\mathbf{V} \in \mathbb{R}^{H \times T \times d_k}$ . Next, the dot product $\mathbf{Q}\mathbf{K}^\top \in \mathbb{R}^{H \times T \times T}$ is conducted in each head, leading to $2Hd_kT^2 = 2DT^2$ operations. Following this, the Pre-Softmax map is divided by $\sqrt{d_k}$ and processed through Softmax, which includes exponentiation, summation, and normalization of each element, resulting in $4HT^2$ operations. The subsequent multiplication with $\mathbf{V}$ incurs $2Hd_kT^2 = 2DT^2$ operations. Finally, multiple heads are concatenated and multiplied by $\mathbf{W}_o \in \mathbb{R}^{D \times D}$ , contributing $2D^2T$ operations. + +Feed-Forward Network It first projects the token representations into the dimension of $D_{ff}$ and subsequently projects it back to the dimension $D$ , resulting in a total operation of $4\alpha D^2 T$ . + +Patch Projection For encoder-only models, all token representations are flattened and mapped directly to $P$ time points by $\mathbf{W}_d\in \mathbb{R}^{TD\times P}$ . In contrast, token-wise projector $\mathbf{W}_d\in \mathbb{R}^{D\times P}$ in decoder-only models independently map each token to the predicted next token. In both cases, the number of operations is $2PDT$ , but the token-wise projector will result in a smaller parameter count. 
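Putting the per-component counts above together, the following sketch tallies the forward FLOPs of one univariate series. It is an illustrative Python sketch written for this derivation; the function name and default hyperparameters are our own choices, not from the paper's codebase.

```python
# Minimal sketch: forward FLOPs of one univariate series of T patch tokens,
# following the per-component derivation above (not the authors' code).

def forward_flops(T, P=96, D=512, H=8, L=4, alpha=4):
    embed = 2 * P * D * T            # patch embedding
    qkv = 6 * D * D * T              # query/key/value projections (H * d_k = D)
    attn_scores = 2 * D * T * T      # Q @ K^T across all heads
    softmax = 4 * H * T * T          # scale, exponentiate, sum, normalize
    attn_values = 2 * D * T * T      # attention map @ V
    out_proj = 2 * D * D * T         # output projection W_o
    ffn = 4 * alpha * D * D * T      # two linear layers of width alpha * D
    project = 2 * P * D * T          # patch projection head
    per_block = qkv + attn_scores + softmax + attn_values + out_proj + ffn
    return embed + L * per_block + project

# For the short contexts (T << D) common in forecasting benchmarks, the
# O(T) linear terms dominate the O(T^2) attention terms.
print(forward_flops(T=7))
```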
The forward operations of an $L$-layer Transformer sum to $4PDT + 4L(D + H)T^2 + (8 + 4\alpha)LD^2 T$. Considering that the majority of operations in Transformers are binary operations (e.g., matrix multiplications), the gradients of both operand matrices are computed separately, so the number of operations in backpropagation is twice that of forwarding. Therefore, the total number of operations for training a Transformer on a univariate series consisting of $T$ patches, each of length $P$, is derived as:

$$
f(T) = 12PDT + 12L(D + H)T^2 + (24 + 12\alpha)LD^2T.
$$

Plugging in typical hyperparameters of current time-series Transformers and forecasting benchmarks, $D = 512$, $H = 8$, $L = 4$, $\alpha = 4$, $T = 7$, and $P = 96$, we obtain:

$$
f(T) = 24960\,T^2 + 76087296\,T \propto 3.28 \times 10^{-4}\,T^2 + T.
$$

Due to the prevalence of short contexts in the time series field, where $T \ll D$ leads to a large coefficient on the $\mathcal{O}(T)$ term, we find that the primary computational burden of time-series Transformers lies in the linear transformations with $\mathcal{O}(T)$ complexity, rather than in multi-head self-attention with $\mathcal{O}(T^2)$ complexity.

For multivariate series with $N$ variables, the FLOPs depend on how multivariate data is handled. When adopting channel independence (Timer and PatchTST), $N$ can be regarded as the batch size $B$:

$$
Nf(T) = 12PDNT + 12L(D + H)NT^2 + (24 + 12\alpha)LD^2NT. \tag{9}
$$

For models that capture fine-grained intra- and inter-series dependencies in multivariate series (Timer-XL and UniTST), $N$ is reflected in the enlarged number of tokens:

$$
f(NT) = 12PDNT + 12L(D + H)N^2T^2 + (24 + 12\alpha)LD^2NT. \tag{10}
$$

Notably, FLOPs are not entirely equivalent to actual runtime. While FlashAttention increases the overall FLOPs due to its recomputation process, it reduces the number of memory reads and writes. Given that computation is significantly faster than memory access on GPUs, using FlashAttention can actually lead to further improvements in runtime performance.

# A.3 PARAMETER COUNT

From the above analysis, we observe that the parameter count of Transformers includes the following:

Patch Embedding $\mathbf{W}_e\in \mathbb{R}^{D\times P}$ to obtain patch embeddings.

Self-Attention $\mathbf{W}_q, \mathbf{W}_k, \mathbf{W}_v \in \mathbb{R}^{D \times d_k}$ of $H$ heads and $\mathbf{W}_o \in \mathbb{R}^{D \times D}$ for all heads.

Feed-Forward Network $\mathbf{W}_{\mathrm{ffn1}}, \mathbf{W}_{\mathrm{ffn2}} \in \mathbb{R}^{D \times D_{\mathrm{ff}}}$ in the feed-forward network.

Layer Normalization It contains the weight $\mathbf{W} \in \mathbb{R}^D$ and the bias $\mathbf{b} \in \mathbb{R}^D$. Every Transformer block includes two normalizations, after multi-head attention and the feed-forward network respectively.

Patch Projection $\mathbf{W}_d\in \mathbb{R}^{TD\times P}$ in the flatten head and $\mathbf{W}_d\in \mathbb{R}^{D\times P}$ in the token-wise projection.

In sum, the total parameter count of time-series Transformers can be expressed as:

$$
\text{Parameter Count} = \begin{cases} (4 + 2\alpha)LD^2 + 4LD + (1 + T)PD, & \text{using flatten head}, \\ (4 + 2\alpha)LD^2 + 4LD + 2PD, & \text{using token-wise projection}. \end{cases} \tag{11}
$$

# A.4 MEMORY FOOTPRINT

The memory footprint during training can be primarily categorized into three parts: activation values stored for backpropagation, model parameters, and optimizer parameters.

Regardless of other precision types (e.g., FP16), model parameters and gradients are typically stored as 32-bit floating-point numbers, with each parameter occupying 4 bytes of memory. For time-series Transformers, the memory footprint of activation values is given as follows:

Patch Embedding Gradient computation for $\mathbf{W}_e$ preserves its input $\{\mathbf{x}_i\} \in \mathbb{R}^{T\times P}$ of $4PT$ bytes.

Self-Attention Gradient calculation for $\mathbf{W}_q, \mathbf{W}_k, \mathbf{W}_v \in \mathbb{R}^{D \times d_k}$ requires their input $\mathbf{H} \in \mathbb{R}^{T \times D}$, amounting to a total of $4DT$ bytes. The dot product for the attention map also needs to store $\mathbf{Q}, \mathbf{K}, \mathbf{V} \in \mathbb{R}^{H \times T \times d_k}$, which collectively require $12DT$ bytes of memory. Gradient computation of $\mathbf{W}_o \in \mathbb{R}^{D \times D}$ necessitates the concatenated multi-head attention representations $\mathbf{H} \in \mathbb{R}^{T \times D}$, which occupy $4DT$ bytes. If memory-efficient attention mechanisms like FlashAttention (Dao et al., 2022) are not applied, the outcome $\mathbf{Q}\mathbf{K}^\top$ will be stored, occupying $4HT^2$ bytes. If FlashAttention is adopted instead, this storage overhead can be avoided.

Feed-Forward Network The ReLU activation function is typically employed in this module. The input $\mathbf{H} \in \mathbb{R}^{T \times D}$ must be retained, requiring $4DT$ bytes. Additionally, the product $\mathbf{W}_{\mathrm{ffn1}}\mathbf{H}$ also needs to be stored, amounting to $4D_{\mathrm{ff}}T$ bytes. Similarly, the output activations of ReLU, which serve as the input of the subsequent linear transformation, necessitate another $4D_{\mathrm{ff}}T$ bytes.

Layer Normalization Each Transformer block encompasses two layer normalizations, with each normalization retaining its input, resulting in a memory requirement of $8DT$ bytes.

Patch Projection To perform backpropagation for $\mathbf{W}_{d} \in \mathbb{R}^{D \times P}$, it is necessary to retain its input $\mathbf{H} \in \mathbb{R}^{T \times D}$, resulting in a total memory requirement of $4DT$ bytes.

The total activation memory of the entire model is as follows:

$$
\text{Memory Footprint} = \begin{cases} 4(D + P)T + (32 + 8\alpha)LDT + 4LHT^2, & \text{w/o FlashAttention}, \\ 4(D + P)T + (32 + 8\alpha)LDT, & \text{with FlashAttention}. \end{cases} \tag{12}
$$

The derived occupancy of activation values increases proportionally with the batch size $B$. For multivariate series, $N$ serves as a multiplier under channel independence, while for channel dependence models we can substitute $T$ with $NT$ as before. The total memory footprint is the sum of activation values and the parameters of the model and optimizer, the latter being proportional to the parameter count derived in Equation 11. Due to the limited model size in the time series field, the memory consumption of parameters is minimal and can be considered negligible in practice. Therefore, the overall memory footprint is predominantly determined by the memory occupied by activation values.
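As a cross-check of the closed forms in Equations (11) and (12), the hedged sketch below evaluates both; the function names and defaults are our own illustrative choices, not the authors' code.

```python
# Minimal sketch evaluating Equations (11) and (12) for one univariate series.

def param_count(L=4, D=512, P=96, T=7, alpha=4, flatten_head=False):
    core = (4 + 2 * alpha) * L * D**2 + 4 * L * D   # attention, FFN, layer norms
    head = (1 + T) * P * D if flatten_head else 2 * P * D
    return core + head

def activation_bytes(T, L=4, D=512, P=96, H=8, alpha=4, flash_attention=True):
    base = 4 * (D + P) * T + (32 + 8 * alpha) * L * D * T
    if not flash_attention:
        base += 4 * L * H * T**2                    # stored Q K^T attention maps
    return base

# For N variables: multiply by N under channel independence, or substitute
# T -> N * T under channel dependence, as derived above.
print(param_count(), activation_bytes(T=7, flash_attention=False))
```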
# B EXPERIMENTAL DETAILS

# B.1 DATASETS

We conduct experiments on well-acknowledged benchmarks to evaluate the performance of the proposed Timer-XL: (1) ETT (Zhou et al., 2021) contains 7 factors of electricity transformers from July 2016 to July 2018, recorded every hour or 15 minutes. (2) Weather (Wu et al., 2021) includes 21 meteorological factors collected every 10 minutes from the Max Planck Biogeochemistry Institute Weather Station in 2020. (3) ECL (Wu et al., 2021) records the hourly electricity consumption of 321 clients. (4) Traffic (Wu et al., 2021) collects hourly road occupancy rates measured by 862 sensors on San Francisco Bay Area highways from January 2015 to December 2016. (5) Solar-Energy (Lai et al., 2018) records the solar power production of 137 PV plants in 2006, sampled every 10 minutes. (6) PEMS (Liu et al., 2022a) contains records from the public traffic network in California collected in 5-minute windows. (7) EPF (Lago et al., 2021) includes five subsets spanning six years; each contains the electricity price as the endogenous variable to be predicted and two exogenous variables of the day-ahead electricity markets. (8) GTWSF (Wu et al., 2023) is collected from the National Centers for Environmental Information (NCEI); this large-scale collection contains hourly averaged wind speed and temperature data from 3850 stations with different geographical scales and densities, spanning from 2019 to 2021. (9) UTSD (Liu et al., 2024c) is a multi-domain time series dataset, which includes seven domains with a hierarchy of four volumes; we adopt the largest volume, encompassing 1 billion time points, for pre-training.

We further establish challenging forecasting benchmarks based on the ECMWF Reanalysis v5 (ERA5) dataset (Hersbach et al., 2020) to prevent potential overfitting and performance saturation of deep forecasters on existing benchmarks. Concretely, ERA5 is the fifth-generation ECMWF atmospheric reanalysis of the global climate, covering the period from January 1940 to the present; it provides hourly estimates of a large number of atmospheric, land, and oceanic climate variables, and includes information about uncertainties for all variables at reduced spatial and temporal resolutions. Due to its sufficiency of temporal dynamics and variable correlations, we can establish practical benchmarks to thoroughly evaluate univariate and multivariate forecasting, as well as adopt it for large-scale pre-training to develop domain-specific large time series models.

Our datasets are constructed as follows:

- ERA5-S: To establish a realistic univariate forecasting benchmark, we start from the basic principle of forecastability and make predictions on sufficient lookback lengths. Instead of the short training span of previous benchmarks (generally no more than 2 years), we curate a three-hour-frequency dataset spanning 40 years (January 1979 to December 2018) from ERA5, encompassing 116880 time points. To prevent overfitting on a single time series, we select worldwide stations to form seven subsets.
- ERA5-MS: Each univariate series of ERA5-S provides partial observations governed by the spatio-temporal global weather system. Since discovering global spatio-temporal correlations presents a fundamental challenge in meteorology, we convert ERA5-S into ERA5-MS by using the seven subsets as a challenging multivariate forecasting benchmark. Based on the average results in Tables 2 and 5, we can validate the existence of multi-station correlations among the selected stations, which enhance the average prediction accuracy.
- ERA5-Large: To explore a purely data-driven approach to building domain-specific large time series models, we further expand the number of stations to form ERA5-Large, a dataset that evenly covers 4920 worldwide meteorological stations and spans 40 years. We establish this dataset for pre-training, where a model is expected to generalize across time (train on past observations and generalize to the future) and across stations (train on partial stations and generalize to other unseen stations). The total number of time points is around half a billion.

We follow the same data processing and train-validation-test split protocol used in TimesNet (Wu et al., 2022), where the train, validation, and test sets are divided in chronological order to prevent data leakage (a minimal sketch of this split is given after Table 9). Detailed dataset descriptions and prediction settings are provided in Table 9.

# B.2 BASELINE MODELS

We aim to present Timer-XL as a foundation model for unified time series forecasting and thoroughly include well-acknowledged and advanced models for each forecasting task. For univariate time series forecasting, we compare Timer-XL with PatchTST (Nie et al., 2022) under channel independence. For multivariate time series forecasting, we report official results from Liu et al. (2023; 2024b); Ding et al. (2024), including UniRepLKNet (2024), iTransformer (2023), Corrformer (2023), DLinear (2023), TimesNet (2022), Non-stationary Transformer (2022b), Pyraformer (2021), Autoformer (2021), StemGNN (2020), DeepAR (2020), and N-BEATS (2019). We further reproduce the performance of related Transformers, Timer (2024c) and UniTST (2024a), based on their official repositories. For covariate-informed time series forecasting, we report the official results of TimeXer (2024b). For zero-shot forecasting, we follow Liu et al. (2024c) and predict future length-96 windows on well-acknowledged datasets. In total, more than 20 baselines are included for a complete comparison.

# B.3 IMPLEMENTATION DETAILS

All experiments are implemented in PyTorch (Paszke et al., 2019) on NVIDIA A100 Tensor Core GPUs. We employ the Adam optimizer (Kingma & Ba, 2014) and the MSE loss for model optimization.

Table 9: Dataset descriptions. Dim. denotes the number of variables (for univariate forecasting, we adopt channel independence (Nie et al., 2022) or train separate models on each variable). Dataset Length denotes the number of time points in the (train, validation, test) splits.
| Tasks | Dataset | Dim. | Training Setting | Dataset Length | Information (Frequency) |
|---|---|---|---|---|---|
| Univariate Forecasting | ETTh1 | 7 | {24, 96, 168, 672, 2880} → 96 | (8545, 2881, 2881) | Electricity (Hourly) |
| | ECL | 321 | {24, 96, 168, 672, 2880, 8832} → 96 | (18317, 2633, 5261) | Electricity (Hourly) |
| | Traffic | 862 | {24, 96, 168, 672, 2880, 8832} → 96 | (12185, 1757, 3509) | Transportation (Hourly) |
| | PEMS03 | 358 | {96, 288, 1152, 2016, 8064} → 96 | (15617, 5135, 5135) | Transportation (5 mins) |
| | ERA5-S | 7 | 3072 → 96 | (81816, 11688, 23376) | Climate (3 Hours) |
| Multivariate Forecasting | ETTh1, ETTh2 | 7 | {96, 672} → {96, 192, 336, 720} | (8545, 2881, 2881) | Electricity (Hourly) |
| | ETTm1, ETTm2 | 7 | {96, 672} → {96, 192, 336, 720} | (34465, 11521, 11521) | Electricity (15 mins) |
| | ECL | 321 | {96, 672} → {96, 192, 336, 720} | (18317, 2633, 5261) | Electricity (Hourly) |
| | Traffic | 862 | {96, 672} → {96, 192, 336, 720} | (12185, 1757, 3509) | Transportation (Hourly) |
| | Weather | 21 | {96, 672} → {96, 192, 336, 720} | (36792, 5271, 10540) | Climate (10 mins) |
| | Solar-Energy | 137 | {96, 672} → {96, 192, 336, 720} | (36601, 5161, 10417) | Energy (10 mins) |
| | ERA5-MS | 7 | 3072 → 96 | (81816, 11688, 23376) | Climate (3 Hours) |
| | GTWSF | 3850 | 48 → 24 | (12280, 1755, 3509) | Wu et al. (2023) |
| Forecasting with Covariates | NP | 1+2 | 168 → 24 | (36500, 5219, 10460) | Electricity (Hourly) |
| | PJM | 1+2 | 168 → 24 | (36500, 5219, 10460) | Electricity (Hourly) |
| | BE | 1+2 | 168 → 24 | (36500, 5219, 10460) | Electricity (Hourly) |
| | FR | 1+2 | 168 → 24 | (36500, 5219, 10460) | Electricity (Hourly) |
| | DE | 1+2 | 168 → 24 | (36500, 5219, 10460) | Electricity (Hourly) |
| Pre-training | ERA5-Large | 4920 | 3072 → 96 | (81816, 11688, 23376) | Climate (3 Hours) |
| | UTSD | - | 2880 → 96 | (868778970, 96530996, -) | Liu et al. (2024c) |
| | LOTSA | - | 2880 → 96 | (231082956489, -, -) | Woo et al. (2024) |
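As referenced in B.1, the chronological split protocol can be made concrete with the following minimal sketch; it is our own illustration (not the benchmark code), using the (8545, 2881, 2881) division of ETTh1 from Table 9 as an example.

```python
# Minimal sketch of a chronological train/validation/test split that
# prevents data leakage by never mixing future points into earlier splits.
import numpy as np

def chronological_split(series: np.ndarray, n_train: int, n_val: int):
    """Split a (time, variables) array strictly by time order."""
    train = series[:n_train]
    val = series[n_train : n_train + n_val]
    test = series[n_train + n_val :]
    return train, val, test

data = np.random.randn(8545 + 2881 + 2881, 7)   # dummy stand-in for ETTh1
train, val, test = chronological_split(data, n_train=8545, n_val=2881)
```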
Table 10: Performance robustness of Timer-XL. The prediction settings and results are the same as in Table 12. Standard deviations are obtained from three random seeds. Each cell reports MSE / MAE.
| Horizon | ECL | ETTh1 | Traffic |
|---|---|---|---|
| 96 | 0.127±0.001 / 0.219±0.001 | 0.364±0.002 / 0.397±0.001 | 0.340±0.002 / 0.238±0.001 |
| 192 | 0.145±0.001 / 0.236±0.001 | 0.405±0.002 / 0.424±0.001 | 0.360±0.001 / 0.247±0.001 |
| 336 | 0.159±0.001 / 0.252±0.001 | 0.427±0.003 / 0.439±0.002 | 0.377±0.002 / 0.256±0.002 |
| 720 | 0.187±0.003 / 0.277±0.003 | 0.439±0.002 / 0.459±0.004 | 0.418±0.003 / 0.279±0.002 |

| Horizon | Solar-Energy | Weather | ERA5-MS |
|---|---|---|---|
| 96 | 0.162±0.003 / 0.221±0.002 | 0.157±0.002 / 0.205±0.001 | 0.164±0.001 / 0.307±0.000 |
| 192 | 0.187±0.003 / 0.239±0.002 | 0.206±0.003 / 0.250±0.002 | |
| 336 | 0.205±0.003 / 0.255±0.002 | 0.259±0.003 / 0.291±0.003 | |
| 720 | 0.238±0.003 / 0.279±0.003 | 0.337±0.002 / 0.344±0.002 | |
We adopt channel independence from Nie et al. (2022) in univariate time series forecasting. Based on the prevalence of patch-level tokenization in the time series field, we reproduce typical Transformers, PatchTST (2022), Timer (2024c), and UniTST (2024a), based on their official repositories, and keep their model hyperparameters and training configurations the same to evaluate the inherent capability of the base models. The results of other baselines are based on the benchmark provided by Liu et al. (2023; 2024b); Ding et al. (2024); Wang et al. (2024b), which is fairly built on the configurations provided by their original papers. Detailed experimental configurations are provided in Table 11. We also report the standard deviations over three runs with different random seeds in Table 10, showing that the performance of Timer-XL is stable.

For the metrics, we adopt the symmetric mean absolute percentage error (SMAPE), a metric that is independent of the numerical range, to evaluate one-for-all generalization performance on ERA5-Large. For other experiments, we adopt the mean squared error (MSE) and mean absolute error (MAE), following previous work. These metrics can be calculated as follows:

$$
\mathrm{SMAPE} = \frac{200}{T} \sum_{i=1}^{T} \frac{|\mathbf{X}_{i} - \widehat{\mathbf{X}}_{i}|}{|\mathbf{X}_{i}| + |\widehat{\mathbf{X}}_{i}|}, \quad \mathrm{MSE} = \sum_{i=1}^{T} |\mathbf{X}_{i} - \widehat{\mathbf{X}}_{i}|^{2}, \quad \mathrm{MAE} = \sum_{i=1}^{T} |\mathbf{X}_{i} - \widehat{\mathbf{X}}_{i}|.
$$

Here $\mathbf{X} \in \mathbb{R}^T$ is a univariate time series and $\widehat{\mathbf{X}}$ is the corresponding prediction. For multivariate time series, we further average the metrics over the variable dimension.

Table 11: Experimental configurations of Timer-XL and other baseline Transformers. All experiments adopt the Adam (2014) optimizer with the default hyperparameters $(\beta_{1},\beta_{2}) = (0.9,0.999)$.
| Experiment | Model | Dataset | $L$ | $D$ | $d_k$ | $H$ | $P$ | LR | Loss | Batch Size | Epochs |
|---|---|---|---|---|---|---|---|---|---|---|---|
| Univariate Forecasting | Timer-XL | ECL | 3 | 512 | 64 | 8 | 96 | 0.0005 | MSE | 2048 | 10 |
| | | Traffic | 3 | 512 | 64 | 8 | 96 | 0.001 | MSE | 2048 | 10 |
| | PatchTST | ETTh1 | 1 | 512 | 64 | 8 | 96 | 0.0005 | MSE | 256 | 10 |
| | | PEMS03 | 3 | 512 | 64 | 8 | 96 | 0.0005 | MSE | 2048 | 10 |
| | | ERA5-S | 1 | 512 | 64 | 8 | 96 | 0.0005 | MSE | 2048 | 10 |
| Multivariate Forecasting | Timer-XL | Global Temp. | 3 | 1024 | 128 | 8 | 24 | 0.0001 | MSE | 8 | 10 |
| | | Global Wind | 3 | 1024 | 128 | 8 | 24 | 0.0001 | MSE | 8 | 10 |
| | | ECL | 5 | 512 | 64 | 8 | 96 | 0.0005 | MSE | 4 | 10 |
| | UniTST | Traffic | 4 | 512 | 64 | 8 | 96 | 0.0005 | MSE | 4 | 10 |
| | Timer | ETTh1 | 1 | 1024 | 128 | 8 | 96 | 0.0001 | MSE | 32 | 10 |
| | PatchTST | Weather | 4 | 512 | 64 | 8 | 96 | 0.0005 | MSE | 32 | 10 |
| | | Solar. | 6 | 512 | 64 | 8 | 96 | 0.0001 | MSE | 16 | 10 |
| | | ERA5-MS | 3 | 512 | 64 | 8 | 96 | 0.0001 | MSE | 256 | 10 |
| Forecasting with Covariates | Timer-XL | NP | 3 | 512 | 64 | 8 | 24 | 0.0001 | MSE | 4 | 10 |
| | TimeXer | PJM | 2 | 512 | 64 | 8 | 24 | 0.0001 | MSE | 16 | 10 |
| | Timer | BE | 2 | 512 | 64 | 8 | 24 | 0.0001 | MSE | 16 | 10 |
| | PatchTST | FR | 2 | 512 | 64 | 8 | 24 | 0.0001 | MSE | 16 | 10 |
| | | DE | 2 | 512 | 64 | 8 | 24 | 0.0001 | MSE | 16 | 10 |
| Pre-training | Timer-XL | ERA5-Large | 4 | 512 | 64 | 8 | 96 | 0.0001 | MSE | 40960 | 10 |
| | PatchTST | | 4 | 512 | 64 | 8 | 96 | 0.0001 | MSE | 40960 | 10 |
| | Timer-XL | UTSD | 8 | 1024 | 128 | 8 | 96 | 0.00005 | MSE | 16384 | 10 |
| | Timer (Liu et al., 2024c) | | 8 | 1024 | 128 | 8 | 96 | 0.00005 | MSE | 16384 | 10 |
| | Timer-XL | | 8 | 1024 | 128 | 8 | 96 | 0.001 | MSE | 32768 | - |
| | Moirai_Small | LOTSA | 6 | 384 | 64 | 6 | - | - | - | - | - |
| | Moirai_Base (Woo et al., 2024) | | 12 | 768 | 64 | 12 | - | - | - | - | - |
| | Moirai_Large | | 24 | 1024 | 64 | 16 | - | - | - | - | - |
* $L$ is the layer number of Transformers, $D$ is the dimension of token embedding (the hidden dimension of FFN is set as $4D$), $d_k$ is the dimension of query, key, and value, $H$ is the multi-head number, $P$ is the patch size, and LR is the initial learning rate.

# C HYPERPARAMETER SENSITIVITY

We evaluate the hyperparameter sensitivity of Timer-XL on the ERA5-MS benchmark, as illustrated in Figure 8, concerning the following factors: the number of layers $L$, the patch size $P$, and the lookback length during inference. Our findings indicate that the performance of Timer-XL generally improves as $L$ increases, suggesting that Timer-XL is a scalable deep forecaster. Furthermore, our analysis of the influence of $P$ reveals that the optimal patch size is generally close to the predicted length, since it avoids multi-step error accumulation. Adopting different patch sizes for input and output tokens is left as a future improvement toward better long-term forecasting performance. Finally, we investigate the impact of input length during inference. We discover that the optimal lookback length during inference is not necessarily the length used during training. Given that decoder-only Transformers can accommodate inference inputs shorter than those used during training, this finding is noteworthy and indicates the potential to improve performance.

![](images/cebfa93fdf65c5dedcd6a2129ae37dfc15aa582d985a0e942aa4fa452f60e1d1.jpg)
Figure 8: Hyperparameter sensitivity of Timer-XL (input-3072-pred-96 on ERA5-MS), including the number of Transformer blocks $L$, the patch size $P$, and the input lookback length during inference.

![](images/c7726f346387b4ab3ccd5d840f7a19815834dce529f19809602030ebc9128ab9.jpg)

![](images/a79c3fa6fec9c0be936f9ec27b8924c91c2d991884f2d7d68907de2e14823559.jpg)

# D SHOWCASES

To facilitate a clear comparison among various models, we present additional prediction visualizations from diverse datasets in Figures 9 and 10. Showcases are randomly selected from Timer-XL and the following time-series Transformers: PatchTST (2022), Timer (2024c), and UniTST (2024a). Among them, Timer-XL presents the most accurate predictions.

![](images/d699be3b2fd3908c3ca12fdf475d384dbb566bb4d865c10627bb733d6ddd92b3.jpg)

![](images/6d90511cc3496cfb8d5a97248d1778df7469251c0dcc59fa940de526a409af99.jpg)

![](images/d114e928a62ac831b1cbf006925321e2a07f555b859858ad5414ea8bba53f2e8.jpg)

![](images/82463877c629c28c01f047e385704d22f618c0d300a023ddb12b1976be4472d0.jpg)

![](images/bf35f570271c578659781cd8afcca3dc1a7fbec741cbb0d7c973b65e5eb075ed.jpg)
Figure 9: Visualization results on univariate time series datasets. We adopt the forecasting setting of 2880-pred-96 on ECL, ETTh1 and Traffic, and 2016-pred-96 on PEMS.

![](images/435d076294cf93f1f7b0cab8c0ee6ea66ae7d35e0c25c4e48e63d5433090c1d7.jpg)

![](images/8e3c7732312931f766822c9df0458873ad18b2a188a44071c71b027028749804.jpg)

![](images/32d52704c334011db11f7cb6fdf4573af0300500b3a921eda8fb2459beb77dbd.jpg)

# E SUPPLEMENTARY RESULTS

# E.1 FULL RESULT OF MULTIVARIATE FORECASTING

Table 12 provides the complete results of the one-for-all multivariate forecasting benchmark across well-acknowledged datasets. We evaluate Timer-XL and baseline models by rolling forecasting: each model is trained with input length 672 and output length 96, and the predicted values are integrated into the input of the next iteration until reaching the desired forecast length in \{96, 192, 336, 720\} (a minimal sketch of this protocol is given after Table 12).

![](images/5e8296caa3edd268c7052da86f65f090ebbf6acf60789400e80184b721ab16b9.jpg)
Figure 10: Visualization results on multivariate time series datasets. We adopt the forecasting setting of 672-pred-96 on ETTh1 (7 Variables) and Traffic (862 Variables).

We highlight that this benchmark evaluates the fundamental versatility of deep forecasters, aiming to break the awkward situation of extensive training and model storage in pursuit of better practice for real-world forecasting requirements. On this benchmark, time-series Transformers significantly stand out from the other baseline models, and our proposed Timer-XL achieves state-of-the-art performance, making it a strong fundamental backbone for a one-for-all forecaster.

# E.2 FULL RESULT OF ZERO-SHOT FORECASTING

Table 13 provides the full results of zero-shot forecasting on the benchmark from Wu et al. (2022). We build Timer-XL based on the configuration in Table 11, pre-trained on the aggregated datasets of UTSD (Liu et al., 2024c) and LOTSA (Woo et al., 2024). The patch size of Timer-XL is set as 96, and we conduct rolling forecasts to obtain the desired forecast length in $\{96, 192, 336, 720\}$.

We evaluate the most advanced large models based on their official model checkpoints, including Time-MoE (Shi et al., 2024), Moirai (Woo et al., 2024), TimesFM (Das et al., 2023), MOMENT (Goswami et al., 2024), and Chronos (Ansari et al., 2024). We conduct zero-shot evaluations on datasets that are not included during the pre-training of the corresponding models. For each evaluated model, we use its maximum input length during inference. The metrics (MSE/MAE) are averaged over all predicted windows in the test split.

# E.3 ABLATION STUDY OF TIMEATTENTION

We conduct evaluations on TimeAttention to validate the effectiveness of position embeddings. As for the variable embedding, the distinction between endogenous and exogenous variables can improve performance. Based on the observation that the learned $u > v$, we find that each token reasonably pays more attention to tokens of the endogenous variable, providing a prior that masks out minor dependencies and focuses less on exogenous variables. For the temporal dimension, the other position embeddings are inferior to RoPE: RoPE applies an affine (rotation) transformation while the others are additive, and it is thereby less confounded with the additive embedding used for variables (a minimal rotation sketch is given after Table 14).

# E.4 SUPPLEMENTARY RESULTS OF LONG-CONTEXT FORECASTING

Long context is a basic indicator of foundation models, supporting emergent capabilities such as prompting, in-context learning, and retrieval-augmented generation. However, the long-context forecasting paradigm has received less attention in the current community, which can be attributed to the lack of benchmarks. In the meteorological ERA5, a context of more than a year is necessary to contain specific cycles (such as El Niño). In Table 15, the performance of Timer-XL and DLinear generally improves with increased context length. By contrast, the table reveals the performance degradation of PatchTST.

Table 12: Full multivariate forecasting results: we conduct rolling forecasts with a single model trained on each dataset (lookback length 672) and accomplish four forecast lengths in $\{96, 192, 336, 720\}$. Each cell reports MSE / MAE.
| Dataset | Horizon | Timer-XL (Ours) | Timer (2024c) | UniTST (2024a) | iTransformer (2023) | DLinear (2023) | PatchTST (2022) | TimesNet (2022) | Stationary (2022b) | Autoformer (2021) |
|---|---|---|---|---|---|---|---|---|---|---|
| ETTh1 | 96 | 0.364 / 0.397 | 0.371 / 0.404 | 0.379 / 0.415 | 0.387 / 0.418 | 0.369 / 0.400 | 0.373 / 0.403 | 0.452 / 0.463 | 0.452 / 0.478 | 0.467 / 0.499 |
| | 192 | 0.405 / 0.424 | 0.407 / 0.429 | 0.415 / 0.438 | 0.416 / 0.437 | 0.405 / 0.422 | 0.405 / 0.425 | 0.474 / 0.477 | 0.484 / 0.510 | 0.492 / 0.523 |
| | 336 | 0.427 / 0.439 | 0.434 / 0.445 | 0.440 / 0.454 | 0.434 / 0.450 | 0.435 / 0.445 | 0.423 / 0.440 | 0.493 / 0.489 | 0.511 / 0.522 | 0.519 / 0.531 |
| | 720 | 0.439 / 0.459 | 0.461 / 0.466 | 0.482 / 0.482 | 0.447 / 0.473 | 0.493 / 0.508 | 0.445 / 0.471 | 0.560 / 0.534 | 0.571 / 0.543 | 0.589 / 0.560 |
| | Avg | 0.409 / 0.430 | 0.418 / 0.436 | 0.429 / 0.447 | 0.421 / 0.445 | 0.426 / 0.444 | 0.412 / 0.435 | 0.495 / 0.491 | 0.505 / 0.513 | 0.517 / 0.528 |
| ETTh2 | 96 | 0.277 / 0.343 | 0.285 / 0.344 | 0.343 / 0.398 | 0.304 / 0.362 | 0.305 / 0.371 | 0.289 / 0.347 | 0.340 / 0.374 | 0.348 / 0.403 | 0.358 / 0.397 |
| | 192 | 0.348 / 0.391 | 0.365 / 0.400 | 0.376 / 0.420 | 0.372 / 0.407 | 0.412 / 0.439 | 0.360 / 0.393 | 0.402 / 0.414 | 0.408 / 0.448 | 0.435 / 0.451 |
| | 336 | 0.375 / 0.418 | 0.412 / 0.440 | 0.399 / 0.435 | 0.418 / 0.440 | 0.527 / 0.508 | 0.389 / 0.420 | 0.452 / 0.452 | 0.424 / 0.457 | 0.454 / 0.475 |
| | 720 | 0.409 / 0.458 | 0.468 / 0.487 | 0.419 / 0.457 | 0.463 / 0.476 | 0.830 / 0.653 | 0.398 / 0.440 | 0.462 / 0.468 | 0.448 / 0.476 | 0.479 / 0.492 |
| | Avg | 0.352 / 0.402 | 0.382 / 0.418 | 0.384 / 0.428 | 0.389 / 0.421 | 0.518 / 0.493 | 0.359 / 0.400 | 0.414 / 0.427 | 0.407 / 0.446 | 0.431 / 0.454 |
| ETTm1 | 96 | 0.290 / 0.341 | 0.281 / 0.338 | 0.289 / 0.348 | 0.311 / 0.365 | 0.307 / 0.350 | 0.285 / 0.346 | 0.338 / 0.375 | 0.414 / 0.414 | 0.466 / 0.466 |
| | 192 | 0.337 / 0.369 | 0.330 / 0.368 | 0.332 / 0.375 | 0.353 / 0.390 | 0.337 / 0.368 | 0.329 / 0.372 | 0.371 / 0.387 | 0.524 / 0.482 | 0.504 / 0.496 |
| | 336 | 0.374 / 0.392 | 0.367 / 0.393 | 0.365 / 0.397 | 0.387 / 0.411 | 0.366 / 0.387 | 0.363 / 0.394 | 0.410 / 0.411 | 0.541 / 0.497 | 0.574 / 0.530 |
| | 720 | 0.437 / 0.428 | 0.432 / 0.433 | 0.421 / 0.431 | 0.452 / 0.445 | 0.419 / 0.419 | 0.421 / 0.426 | 0.478 / 0.450 | 0.578 / 0.509 | 0.596 / 0.558 |
| | Avg | 0.359 / 0.382 | 0.352 / 0.383 | 0.352 / 0.388 | 0.376 / 0.403 | 0.357 / 0.381 | 0.349 / 0.385 | 0.399 / 0.406 | 0.514 / 0.475 | 0.535 / 0.512 |
| ETTm2 | 96 | 0.175 / 0.257 | 0.175 / 0.257 | 0.171 / 0.260 | 0.183 / 0.272 | 0.167 / 0.263 | 0.172 / 0.259 | 0.187 / 0.267 | 0.237 / 0.306 | 0.255 / 0.339 |
| | 192 | 0.242 / 0.301 | 0.239 / 0.301 | 0.228 / 0.230 | 0.250 / 0.315 | 0.230 / 0.311 | 0.233 / 0.299 | 0.249 / 0.309 | 0.330 / 0.387 | 0.279 / 0.335 |
| | 336 | 0.293 / 0.337 | 0.293 / 0.342 | 0.282 / 0.336 | 0.311 / 0.356 | 0.298 / 0.361 | 0.280 / 0.331 | 0.321 / 0.351 | 0.404 / 0.424 | 0.331 / 0.374 |
| | 720 | 0.376 / 0.390 | 0.392 / 0.407 | 0.380 / 0.398 | 0.417 / 0.419 | 0.432 / 0.446 | 0.357 / 0.382 | 0.497 / 0.403 | 0.525 / 0.486 | 0.413 / 0.450 |
| | Avg | 0.271 / 0.322 | 0.275 / 0.327 | 0.265 / 0.306 | 0.290 / 0.340 | 0.282 / 0.345 | 0.261 / 0.318 | 0.314 / 0.333 | 0.374 / 0.401 | 0.320 / 0.374 |
| ECL | 96 | 0.127 / 0.219 | 0.129 / 0.221 | 0.130 / 0.225 | 0.133 / 0.229 | 0.138 / 0.238 | 0.132 / 0.232 | 0.184 / 0.288 | 0.185 / 0.287 | 0.256 / 0.357 |
| | 192 | 0.145 / 0.236 | 0.148 / 0.239 | 0.150 / 0.244 | 0.158 / 0.258 | 0.152 / 0.251 | 0.151 / 0.250 | 0.192 / 0.295 | 0.282 / 0.368 | 0.291 / 0.376 |
| | 336 | 0.159 / 0.252 | 0.164 / 0.256 | 0.166 / 0.262 | 0.168 / 0.262 | 0.167 / 0.268 | 0.171 / 0.272 | 0.200 / 0.303 | 0.289 / 0.377 | 0.290 / 0.379 |
| | 720 | 0.187 / 0.277 | 0.201 / 0.289 | 0.206 / 0.297 | 0.205 / 0.294 | 0.203 / 0.302 | 0.222 / 0.318 | 0.228 / 0.325 | 0.305 / 0.399 | 0.320 / 0.403 |
| | Avg | 0.155 / 0.246 | 0.161 / 0.251 | 0.163 / 0.257 | 0.164 / 0.258 | 0.165 / 0.265 | 0.169 / 0.268 | 0.201 / 0.303 | 0.265 / 0.358 | 0.289 / 0.379 |
| Traffic | 96 | 0.340 / 0.238 | 0.348 / 0.240 | 0.359 / 0.250 | 0.353 / 0.259 | 0.399 / 0.285 | 0.359 / 0.255 | 0.593 / 0.315 | 0.610 / 0.322 | 0.675 / 0.412 |
| | 192 | 0.360 / 0.247 | 0.369 / 0.250 | 0.373 / 0.257 | 0.373 / 0.267 | 0.409 / 0.290 | 0.377 / 0.265 | 0.596 / 0.317 | 0.626 / 0.346 | 0.679 / 0.423 |
| | 336 | 0.377 / 0.256 | 0.388 / 0.260 | 0.386 / 0.265 | 0.386 / 0.275 | 0.422 / 0.297 | 0.393 / 0.276 | 0.600 / 0.319 | 0.633 / 0.352 | 0.688 / 0.440 |
| | 720 | 0.418 / 0.279 | 0.431 / 0.285 | 0.421 / 0.286 | 0.425 / 0.296 | 0.461 / 0.319 | 0.436 / 0.305 | 0.619 / 0.335 | 0.651 / 0.366 | 0.693 / 0.457 |
| | Avg | 0.374 / 0.255 | 0.384 / 0.259 | 0.385 / 0.265 | 0.384 / 0.274 | 0.423 / 0.298 | 0.391 / 0.275 | 0.602 / 0.322 | 0.630 / 0.347 | 0.684 / 0.433 |
| Weather | 96 | 0.157 / 0.205 | 0.151 / 0.202 | 0.152 / 0.206 | 0.174 / 0.225 | 0.169 / 0.229 | 0.149 / 0.202 | 0.169 / 0.228 | 0.185 / 0.241 | 0.355 / 0.409 |
| | 192 | 0.206 / 0.250 | 0.196 / 0.245 | 0.198 / 0.249 | 0.227 / 0.268 | 0.211 / 0.268 | 0.194 / 0.245 | 0.222 / 0.269 | 0.286 / 0.325 | 0.421 / 0.450 |
| | 336 | 0.259 / 0.291 | 0.249 / 0.288 | 0.251 / 0.291 | 0.290 / 0.309 | 0.258 / 0.306 | 0.244 / 0.285 | 0.290 / 0.310 | 0.323 / 0.347 | 0.452 / 0.465 |
| | 720 | 0.337 / 0.344 | 0.330 / 0.344 | 0.322 / 0.340 | 0.374 / 0.360 | 0.320 / 0.362 | 0.317 / 0.338 | 0.376 / 0.364 | 0.436 / 0.401 | 0.513 / 0.496 |
| | Avg | 0.240 / 0.273 | 0.232 / 0.270 | 0.231 / 0.272 | 0.266 / 0.291 | 0.239 / 0.291 | 0.226 / 0.268 | 0.264 / 0.293 | 0.308 / 0.329 | 0.435 / 0.455 |
| Solar-Energy | 96 | 0.162 / 0.221 | 0.212 / 0.230 | 0.190 / 0.240 | 0.183 / 0.265 | 0.193 / 0.258 | 0.168 / 0.237 | 0.180 / 0.272 | 0.199 / 0.290 | 0.206 / 0.296 |
| | 192 | 0.187 / 0.239 | 0.232 / 0.246 | 0.223 / 0.264 | 0.205 / 0.283 | 0.214 / 0.274 | 0.189 / 0.257 | 0.199 / 0.286 | 0.243 / 0.307 | 0.254 / 0.328 |
| | 336 | 0.205 / 0.255 | 0.237 / 0.253 | 0.250 / 0.283 | 0.224 / 0.299 | 0.233 / 0.291 | 0.212 / 0.277 | 0.220 / 0.301 | 0.264 / 0.322 | 0.272 / 0.330 |
| | 720 | 0.238 / 0.279 | 0.252 / 0.266 | 0.292 / 0.311 | 0.239 / 0.316 | 0.246 / 0.307 | 0.240 / 0.305 | 0.251 / 0.321 | 0.310 / 0.339 | 0.326 / 0.347 |
| | Avg | 0.198 / 0.249 | 0.233 / 0.249 | 0.241 / 0.275 | 0.213 / 0.291 | 0.222 / 0.283 | 0.202 / 0.269 | 0.213 / 0.295 | 0.254 / 0.315 | 0.265 / 0.325 |

$1^{st}$ Count: 23
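As referenced in E.1, the rolling forecast protocol of Table 12 can be sketched as follows. This is our own illustrative pseudocode made runnable; `model`, its output shape, and the 672/96 lengths follow the caption, but the function itself is not the authors' implementation.

```python
# Minimal sketch of rolling forecasting: a single input-672 / output-96 model
# is applied autoregressively, feeding predictions back as input, until the
# desired horizon (96, 192, 336, or 720) is covered.
import torch

@torch.no_grad()
def rolling_forecast(model, context: torch.Tensor, horizon: int, pred_len: int = 96):
    """context: (batch, lookback, variables); returns (batch, horizon, variables)."""
    preds = []
    while sum(p.shape[1] for p in preds) < horizon:
        pred = model(context[:, -672:])              # predict the next pred_len points
        preds.append(pred)
        context = torch.cat([context, pred], dim=1)  # integrate predictions into input
    return torch.cat(preds, dim=1)[:, :horizon]

# e.g., horizon=720 requires ceil(720 / 96) = 8 autoregressive steps.
```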
Table 13: Full results of zero-shot forecasting. A lower MSE or MAE indicates a better prediction. $1^{\mathrm{st}}$ Count represents the number of wins achieved by a model across all prediction lengths and datasets. Each cell reports MSE / MAE.
| Dataset | Horizon | Timer-XL_Base (Ours) | Time-MoE_Base (2024) | Time-MoE_Large (2024) | Time-MoE_Ultra (2024) | Moirai_Small (2024) | Moirai_Base (2024) | Moirai_Large (2024) | TimesFM (2023) | MOMENT (2024) | Chronos_Base (2024) | Chronos_Large (2024) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| ETTm1 | 96 | 0.317 / 0.356 | 0.338 / 0.368 | 0.309 / 0.357 | 0.281 / 0.341 | 0.418 / 0.392 | 0.363 / 0.356 | 0.380 / 0.361 | 0.361 / 0.370 | 0.654 / 0.527 | 0.454 / 0.408 | 0.457 / 0.403 |
| | 192 | 0.358 / 0.381 | 0.353 / 0.388 | 0.346 / 0.381 | 0.305 / 0.358 | 0.431 / 0.405 | 0.388 / 0.375 | 0.412 / 0.383 | 0.414 / 0.405 | 0.662 / 0.532 | 0.567 / 0.477 | 0.530 / 0.450 |
| | 336 | 0.386 / 0.401 | 0.381 / 0.413 | 0.373 / 0.408 | 0.369 / 0.395 | 0.433 / 0.412 | 0.416 / 0.392 | 0.436 / 0.400 | 0.445 / 0.429 | 0.672 / 0.537 | 0.662 / 0.525 | 0.577 / 0.481 |
| | 720 | 0.430 / 0.431 | 0.504 / 0.493 | 0.475 / 0.477 | 0.469 / 0.472 | 0.462 / 0.432 | 0.460 / 0.418 | 0.462 / 0.420 | 0.512 / 0.471 | 0.692 / 0.551 | 0.900 / 0.591 | 0.660 / 0.526 |
| | Avg | 0.373 / 0.392 | 0.394 / 0.415 | 0.376 / 0.405 | 0.356 / 0.391 | 0.436 / 0.410 | 0.406 / 0.385 | 0.422 / 0.391 | 0.433 / 0.418 | 0.670 / 0.536 | 0.645 / 0.500 | 0.555 / 0.465 |
| ETTm2 | 96 | 0.189 / 0.277 | 0.201 / 0.291 | 0.197 / 0.286 | 0.198 / 0.288 | 0.214 / 0.288 | 0.205 / 0.273 | 0.211 / 0.274 | 0.202 / 0.270 | 0.260 / 0.335 | 0.199 / 0.274 | 0.197 / 0.271 |
| | 192 | 0.241 / 0.315 | 0.258 / 0.334 | 0.250 / 0.322 | 0.235 / 0.312 | 0.284 / 0.332 | 0.275 / 0.316 | 0.281 / 0.318 | 0.289 / 0.321 | 0.289 / 0.350 | 0.261 / 0.322 | 0.254 / 0.314 |
| | 336 | 0.286 / 0.348 | 0.324 / 0.373 | 0.337 / 0.375 | 0.293 / 0.348 | 0.331 / 0.362 | 0.329 / 0.350 | 0.341 / 0.355 | 0.360 / 0.366 | 0.324 / 0.369 | 0.326 / 0.366 | 0.313 / 0.353 |
| | 720 | 0.375 / 0.402 | 0.488 / 0.464 | 0.480 / 0.461 | 0.427 / 0.428 | 0.402 / 0.408 | 0.437 / 0.411 | 0.485 / 0.428 | 0.462 / 0.430 | 0.394 / 0.409 | 0.455 / 0.439 | 0.416 / 0.415 |
| | Avg | 0.273 / 0.336 | 0.317 / 0.365 | 0.316 / 0.361 | 0.288 / 0.344 | 0.307 / 0.347 | 0.311 / 0.337 | 0.329 / 0.343 | 0.328 / 0.346 | 0.316 / 0.365 | 0.310 / 0.350 | 0.295 / 0.338 |
| ETTh1 | 96 | 0.369 / 0.391 | 0.357 / 0.381 | 0.350 / 0.382 | 0.349 / 0.379 | 0.401 / 0.402 | 0.376 / 0.392 | 0.381 / 0.388 | 0.414 / 0.404 | 0.688 / 0.557 | 0.440 / 0.393 | 0.441 / 0.390 |
| | 192 | 0.405 / 0.413 | 0.384 / 0.404 | 0.388 / 0.412 | 0.395 / 0.413 | 0.435 / 0.421 | 0.412 / 0.413 | 0.434 / 0.415 | 0.465 / 0.434 | 0.688 / 0.560 | 0.492 / 0.426 | 0.502 / 0.524 |
| | 336 | 0.418 / 0.423 | 0.411 / 0.434 | 0.411 / 0.430 | 0.447 / 0.453 | 0.438 / 0.434 | 0.433 / 0.428 | 0.485 / 0.445 | 0.503 / 0.456 | 0.675 / 0.563 | 0.550 / 0.462 | 0.576 / 0.467 |
| | 720 | 0.423 / 0.441 | 0.449 / 0.477 | 0.427 / 0.455 | 0.457 / 0.462 | 0.439 / 0.454 | 0.447 / 0.444 | 0.611 / 0.510 | 0.511 / 0.481 | 0.683 / 0.585 | 0.882 / 0.591 | 0.835 / 0.583 |
| | Avg | 0.404 / 0.417 | 0.400 / 0.424 | 0.394 / 0.419 | 0.412 / 0.426 | 0.428 / 0.427 | 0.417 / 0.419 | 0.480 / 0.439 | 0.473 / 0.443 | 0.683 / 0.566 | 0.591 / 0.468 | 0.588 / 0.466 |
| ETTh2 | 96 | 0.283 / 0.342 | 0.305 / 0.359 | 0.302 / 0.354 | 0.292 / 0.352 | 0.297 / 0.336 | 0.294 / 0.330 | 0.296 / 0.330 | 0.315 / 0.349 | 0.342 / 0.396 | 0.308 / 0.343 | 0.320 / 0.345 |
| | 192 | 0.340 / 0.379 | 0.351 / 0.386 | 0.364 / 0.385 | 0.347 / 0.379 | 0.368 / 0.381 | 0.365 / 0.375 | 0.361 / 0.371 | 0.388 / 0.395 | 0.354 / 0.402 | 0.384 / 0.392 | 0.406 / 0.399 |
| | 336 | 0.366 / 0.400 | 0.391 / 0.418 | 0.417 / 0.425 | 0.406 / 0.419 | 0.370 / 0.393 | 0.376 / 0.390 | 0.390 / 0.390 | 0.422 / 0.427 | 0.356 / 0.407 | 0.429 / 0.430 | 0.492 / 0.453 |
| | 720 | 0.397 / 0.431 | 0.419 / 0.454 | 0.537 / 0.496 | 0.439 / 0.447 | 0.411 / 0.426 | 0.416 / 0.433 | 0.423 / 0.418 | 0.443 / 0.454 | 0.395 / 0.434 | 0.501 / 0.477 | 0.603 / 0.511 |
| | Avg | 0.347 / 0.388 | 0.366 / 0.404 | 0.405 / 0.415 | 0.371 / 0.399 | 0.361 / 0.384 | 0.362 / 0.382 | 0.367 / 0.377 | 0.392 / 0.406 | 0.361 / 0.409 | 0.405 / 0.410 | 0.455 / 0.427 |
| ECL | 96 | 0.141 / 0.237 | | | | | | | | | | |
| | 192 | 0.159 / 0.254 | | | | | | | | | | |
| | 336 | 0.177 / 0.272 | | | | | | | | | | |
| | 720 | 0.219 / 0.308 | | | | | | | | | | |
| | Avg | 0.174 / 0.278 | | | | | | | | | | |
| Weather | 96 | 0.171 / 0.225 | | | | | | | | | | |
| | 192 | 0.221 / 0.271 | | | | | | | | | | |
| | 336 | 0.274 / 0.311 | | | | | | | | | | |
| | 720 | 0.356 / 0.370 | | | | | | | | | | |
| | Avg | 0.256 / 0.294 | | | | | | | | | | |

$1^{st}$ Count: 1510213010700051100120002
* Datasets used for pre-training are not evaluated on the corresponding models, which is denoted by a dash (-).
* Traffic (from PEMS) is generally used during the pre-training of large models and is thus not evaluated here.
* Our model checkpoint is available at https://huggingface.co/thuml/timer-base-84m.

Table 14: Embedding ablation in TimeAttention. For the temporal dimension, we compare prevalent relative and absolute position embeddings. For the variable dimension, we explore the effectiveness of the variable embedding that distinguishes endogenous and exogenous variables. Each cell reports MSE / MAE.
| Design | Temporal | Variable | Traffic | Weather | Solar-Energy | ERA5-MS |
|---|---|---|---|---|---|---|
| Timer-XL | RoPE (2024) | with | 0.340 / 0.238 | 0.157 / 0.205 | 0.162 / 0.221 | 0.164 / 0.307 |
| Replace | ALiBi (2021) | with | 0.351 / 0.246 | 0.162 / 0.212 | 0.188 / 0.210 | 0.167 / 0.308 |
| | Relative (2020) | with | 0.361 / 0.250 | 0.163 / 0.214 | 0.197 / 0.215 | 0.168 / 0.309 |
| | Absolute (2017) | with | 0.381 / 0.270 | 0.159 / 0.207 | 0.171 / 0.204 | 0.165 / 0.306 |
| w/o | RoPE (2024) | w/o | 0.361 / 0.254 | 0.171 / 0.217 | 0.181 / 0.221 | 0.235 / 0.373 |
| | w/o | w/o | 0.363 / 0.253 | 0.164 / 0.215 | 0.194 / 0.215 | 0.167 / 0.309 |
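As referenced in E.3, the sketch below applies the standard rotary position embedding (RoPE, Su et al., 2024) to a query tensor. It is a generic formulation for illustration, not the paper's implementation: positions enter as rotations of feature pairs, composing multiplicatively rather than adding to token embeddings, which is why RoPE is less confounded with the additive variable embedding.

```python
# Minimal RoPE sketch (standard formulation, assumed base 10000).
import torch

def apply_rope(x: torch.Tensor, positions: torch.Tensor) -> torch.Tensor:
    """x: (..., seq, dim) with even dim; positions: (seq,)."""
    dim = x.shape[-1]
    freqs = 1.0 / (10000 ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim))
    angles = positions[:, None].float() * freqs[None, :]     # (seq, dim/2)
    cos, sin = angles.cos(), angles.sin()
    x1, x2 = x[..., 0::2], x[..., 1::2]                      # rotate feature pairs
    rotated = torch.stack([x1 * cos - x2 * sin, x1 * sin + x2 * cos], dim=-1)
    return rotated.flatten(-2)

q = torch.randn(1, 8, 7, 64)            # (batch, heads, tokens, d_k)
q = apply_rope(q, torch.arange(7))      # the same rotation is applied to keys
```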
![](images/ea692af7f9c14e27355f9cc560e2556cfcb2e4f62c0bebaeeebdf1a1b68d30dd.jpg)

![](images/0528daa29f797b75cecd95ce4f5e853b4bfa4258b386d370e67434d2bcdd22bf.jpg)

![](images/b48dafd63fd63593b1db526a2162e08aafa9da846564383f156ed3bc3af3dfa1.jpg)
Figure 11: Case studies of learned attention in encoder-/decoder-only Transformers.

![](images/d1f3b533ee13e593845504f471dde99e153a915b60dcf400c032ae496a7eac2b.jpg)

Similar to the observations in Figure 3, the encoder-only architecture produces inferior predictions after thousands of time points, which can be concealed by the short contexts adopted in previous benchmarks. Although PatchTST conducted an initial exploration with contexts of hundreds of time points, it does not work appropriately in ever-longer contexts. Therefore, we believe that context bottlenecks deserve further exploration in this community.

Table 15: Performance on ERA5 (pred-1day). Lookback lengths vary from daily to yearly contexts. Each cell reports MSE / MAE.
| Lookback | Timer-XL | PatchTST | DLinear |
|---|---|---|---|
| Lookback-8 (1 Day) | 0.0847 / 0.2100 | 0.0897 / 0.2196 | 0.0970 / 0.2276 |
| Lookback-32 (4 Days) | 0.0713 / 0.1928 | 0.0778 / 0.2080 | 0.0841 / 0.2113 |
| Lookback-56 (1 Week) | 0.0688 / 0.1891 | 0.0785 / 0.2082 | 0.0814 / 0.2081 |
| Lookback-224 (1 Month) | 0.0675 / 0.1868 | 0.0745 / 0.2042 | 0.0788 / 0.2048 |
| Lookback-960 (4 Months) | 0.0667 / 0.1863 | 0.1194 / 0.2696 | 0.0773 / 0.2031 |
| Lookback-2944 (1 Year) | 0.0663 / 0.1857 | 0.1109 / 0.2638 | 0.0763 / 0.2024 |
Representation Analysis We further delve into long-context modeling from the perspective of learned representations. As shown in Figure 11, the decoder-only model can selectively focus on the previous context, while PatchTST wrongly focuses on noisy parts. Since causality is the basis of forecasting, using causal masks leads to coherent token embeddings, whereas the unmasked attention mechanism may break causality and prevent the model from telling tokens apart.

Normalization Section 4.1 has discussed instance normalization (Kim et al., 2021). It generally improves the performance of previous encoder-only Transformers but leads to special problems in decoder-only Transformers (e.g., unmatched statistics in multi-step autoregression). However, it is indicative that Timer-XL without RevIN can achieve competitive performance on well-acknowledged benchmarks in Table 16, while the performance of PatchTST may rely heavily on this normalization.

# E.5 ILLUSTRATION OF TIMEATTENTION

Although the formulation that generalizes from 1D sequences to multivariate time series is straightforward, Timer-XL is built on a decoder-only Transformer, an underexploited backbone among current time series models. As shown in Figure 12, the challenges lie in capturing fine-grained dependencies between all variables at the patch level, while maintaining temporal causality in multiple sequences.

Table 16: Evaluations (672-pred-96) of the effect of RevIN (Kim et al., 2021) on Transformers. Each cell reports MSE / MAE.
| Dataset | Timer-XL with RevIN | Timer-XL w/o RevIN | PatchTST with RevIN | PatchTST w/o RevIN |
|---|---|---|---|---|
| ETTh1 | 0.364 / 0.397 | 0.370 / 0.401 | 0.370 / 0.399 | 0.421 / 0.448 |
| Weather | 0.157 / 0.205 | 0.151 / 0.205 | 0.149 / 0.198 | 0.173 / 0.242 |
| ECL | 0.127 / 0.219 | 0.130 / 0.225 | 0.129 / 0.222 | 0.138 / 0.244 |
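To accompany the illustration of TimeAttention in Section E.5 below, the following sketch (our own assumption of the construction, not the official code) builds the grouped-causal mask for flattened 2D series via a Kronecker product of a variable dependency matrix and a temporal causal mask.

```python
# Minimal sketch: grouped causality for flattened multivariate tokens.
# Every token may attend to all variables at past (and current) patch
# positions, while temporal causality is preserved within each variable pair.
import torch

N, T = 3, 4                              # variables, patch tokens per variable
variable_dep = torch.ones(N, N)          # all-pairs variable dependence
causal = torch.tril(torch.ones(T, T))    # temporal causal mask
mask = torch.kron(variable_dep, causal)  # (N*T, N*T) grouped-causal mask

scores = torch.randn(N * T, N * T)       # raw attention logits
scores = scores.masked_fill(mask == 0, float("-inf"))
attn = scores.softmax(dim=-1)
```

Replacing `variable_dep` with a pre-defined dependency or covariate structure changes which variable pairs may interact, which matches the extension to covariates mentioned in the text.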
+ +![](images/a576faa6ff22bb6735f90db34194db5a7caf1608a31e20c518c7337fc4a9bf76.jpg) +(a) Univariate + +![](images/c661a2004600bd871e7770694566693df277b6790453406217428e8dfd8c73a7.jpg) +(b) Multivariate + +![](images/79a2f6434987c9c6c32267b735d53096628ec361f836171c9f619020a6e96774.jpg) +Figure 12: Illustration of TimeAttention for modeling univariate and multivariate time series. + +Technically, we introduce the masking formulation, whose key lies in the grouped causality of flattened 2D sequences. We derive it based on the Kronecker Product, which disentangles the large attention map into formalizable temporal and variable dependencies. It can be naturally extended to covariates or pre-defined variable dependencies, which may inspire a lot of future explorations. + +# F LIMITATIONS + +Timer-XL is a unified model for time series forecasting. It can be used for task-specific training or scalable pre-training, handling varying-length and multivariate time series. As an autoregressive model, Timer-XL necessitates iterative generation for long-term forecasting, which may lead to error accumulation and inflexibility in the output length. In the future, we plan to incorporate multi-resolution patches for input and output series. Furthermore, given that Timer-XL explicitly captures fine-grained token dependencies, there remains significant potential to reduce the complexity of TimeAttention, particularly in high-dimensional and lengthy time series. Finally, we will investigate the factors contributing to the stagnation of Transformer performance in extremely long contexts, and seek insights in the time series modality to improve context efficiency. \ No newline at end of file diff --git a/2025/Timer-XL_ Long-Context Transformers for Unified Time Series Forecasting/images.zip b/2025/Timer-XL_ Long-Context Transformers for Unified Time Series Forecasting/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..40b448998fe8274a7e51fef8065b28fd00af4362 --- /dev/null +++ b/2025/Timer-XL_ Long-Context Transformers for Unified Time Series Forecasting/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8dc527dbf5bd882acce1f9ec585f3d8af98d55758e88b34d59beb2fb018d1354 +size 2690738 diff --git a/2025/Timer-XL_ Long-Context Transformers for Unified Time Series Forecasting/layout.json b/2025/Timer-XL_ Long-Context Transformers for Unified Time Series Forecasting/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..9f2078bb0e02522ad297f014b32c3b7a29205aa9 --- /dev/null +++ b/2025/Timer-XL_ Long-Context Transformers for Unified Time Series Forecasting/layout.json @@ -0,0 +1,16663 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 105, + 79, + 481, + 116 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 79, + 481, + 116 + ], + "spans": [ + { + "bbox": [ + 105, + 79, + 481, + 116 + ], + "type": "text", + "content": "TIMER-XL: LONG-CONTEXT TRANSFORMERS FOR UNIFIED TIME SERIES FORECASTING" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 110, + 134, + 428, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 134, + 428, + 180 + ], + "spans": [ + { + "bbox": [ + 110, + 134, + 428, + 180 + ], + "type": "text", + "content": "Yong Liu\\*, Guo Qin\\*, Xiangdong Huang, Jianmin Wang, Mingsheng Long\\* \nSchool of Software, BNrist, Tsinghua University, Beijing 100084, China \n{liuyong21, qinguo24}@ mails.tsinghua.edu.cn \n{huangxdong, jimwang, mingsheng}@tsinghua.edu.cn" + } 
+ ] + } + ], + "index": 2 + }, + { + "bbox": [ + 276, + 208, + 335, + 220 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 208, + 335, + 220 + ], + "spans": [ + { + "bbox": [ + 276, + 208, + 335, + 220 + ], + "type": "text", + "content": "ABSTRACT" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 140, + 234, + 471, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 234, + 471, + 411 + ], + "spans": [ + { + "bbox": [ + 140, + 234, + 471, + 411 + ], + "type": "text", + "content": "We present Timer-XL, a causal Transformer for unified time series forecasting. To uniformly predict multidimensional time series, we generalize next token prediction, predominantly adopted for 1D token sequences, to multivariate next token prediction. The paradigm formulates various forecasting tasks as a long-context prediction problem. We opt for decoder-only Transformers that capture causal dependencies from varying-length contexts for unified forecasting, making predictions on non-stationary univariate time series, multivariate series with complicated dynamics and correlations, as well as covariate-informed contexts that include exogenous variables. Technically, we propose a universal TimeAttention to capture fine-grained intra- and inter-series dependencies of flattened time series tokens (patches), which is further enhanced by deft position embedding for temporal causality and variable equivalence. Timer-XL achieves state-of-the-art performance across task-specific forecasting benchmarks through a unified approach. Based on large-scale pre-training, Timer-XL achieves state-of-the-art zero-shot performance, making it a promising architecture for pre-trained time series models. Code is available at this repository: https://github.com/thuml/Timer-XL." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 433, + 206, + 445 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 433, + 206, + 445 + ], + "spans": [ + { + "bbox": [ + 105, + 433, + 206, + 445 + ], + "type": "text", + "content": "1 INTRODUCTION" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 458, + 506, + 537 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 458, + 506, + 537 + ], + "spans": [ + { + "bbox": [ + 104, + 458, + 506, + 537 + ], + "type": "text", + "content": "Transformers have been extensively applied to time series forecasting, becoming the backbone of task-specific models (Zhou et al., 2021; Wu et al., 2021) and pre-trained models (Das et al., 2023). While the majority of prior works have focused on long-term forecasting, reliable predictions are made by considering endogenous variations and exogenous correlations in the context (Box, 2013). Besides, the context length of pre-trained Transformers determines the maximum input and output length during inference. Therefore, long-context Transformers are more versatile than shorter ones, facilitating long-sequence and high-resolution generation (Yin et al., 2023; Wang et al., 2024a)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 540, + 506, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 540, + 506, + 641 + ], + "spans": [ + { + "bbox": [ + 104, + 540, + 506, + 641 + ], + "type": "text", + "content": "However, existing Transformers in the time series field crucially encounter the context bottleneck. 
As shown in Figure 1, unlike Transformers for natural language and vision that learn dependencies among thousands to millions of tokens (Kirillov et al., 2023; OpenAI, 2023), time-series Transformers typically operate around limited contexts of up to hundreds of time series tokens (patches) (Nie et al., 2022). For univariate forecasting, a short-context input leads to insufficient learning of global tendencies, struggling to address non-stationarity in real-world time series (Hyndman, 2018). For multivariate forecasting, increasing research has demonstrated the effectiveness of explicitly capturing intra- and inter-channel dependencies (Zhang & Yan, 2022; Liu et al., 2023; 2024a), highlighting the practical urgency of extending the context length to encompass inter-correlated time series." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 645, + 507, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 645, + 507, + 715 + ], + "spans": [ + { + "bbox": [ + 104, + 645, + 507, + 715 + ], + "type": "text", + "content": "Recently, causal Transformers characterized by the decoder-only architecture have become a predominant choice of large language models (Zhao et al., 2023) and garnered increasing attention in the development of large time series models (Rasul et al., 2023; Ansari et al., 2024). Based on contextual flexibility and autoregressive next token prediction, one model can accommodate varying lookback and prediction lengths (Liu et al., 2024b). Therefore, pre-training on longer contexts not only empowers them with the fundamental capability to incorporate more contextual information but" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 117, + 721, + 194, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 721, + 194, + 732 + ], + "spans": [ + { + "bbox": [ + 117, + 721, + 194, + 732 + ], + "type": "text", + "content": "*Equal Contribution" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 106, + 83, + 211, + 225 + ], + "blocks": [ + { + "bbox": [ + 106, + 83, + 211, + 225 + ], + "lines": [ + { + "bbox": [ + 106, + 83, + 211, + 225 + ], + "spans": [ + { + "bbox": [ + 106, + 83, + 211, + 225 + ], + "type": "image", + "image_path": "0a23332ffe30348e29683222f624e59a9642b12c38b219e0830c57188039601e.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 232, + 504, + 266 + ], + "lines": [ + { + "bbox": [ + 104, + 232, + 504, + 266 + ], + "spans": [ + { + "bbox": [ + 104, + 232, + 504, + 266 + ], + "type": "text", + "content": "Figure 1: We compare the context length (measured by token number) of Transformers in different modalities and propose Timer-XL that increases the length to thousands of patch tokens. 
Given the generality across contexts, Timer-XL is a versatile solution for various forecasting tasks." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 230, + 79, + 506, + 228 + ], + "blocks": [ + { + "bbox": [ + 230, + 79, + 506, + 228 + ], + "lines": [ + { + "bbox": [ + 230, + 79, + 506, + 228 + ], + "spans": [ + { + "bbox": [ + 230, + 79, + 506, + 228 + ], + "type": "image", + "image_path": "be99a7a17398a5e28835a3acb6957e447d5b6e901b66ad0a133862b2f5676c6a.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 281, + 506, + 337 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 281, + 506, + 337 + ], + "spans": [ + { + "bbox": [ + 104, + 281, + 506, + 337 + ], + "type": "text", + "content": "also enhances the model versatility toward a one-for-all foundation model. Regarding any-variate and any-length time series as one context, previous work (Liu et al., 2024a) has achieved unified modeling on flattened tokens based on noncausal Transformers. However, our empirical results (Figure 3) reveal that encoder-only forecasters may encounter performance degradation in long-context forecasting, while decoder-only Transformers can mitigate this degradation well." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 342, + 506, + 463 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 342, + 506, + 463 + ], + "spans": [ + { + "bbox": [ + 104, + 342, + 506, + 463 + ], + "type": "text", + "content": "In this work, we generalize the training objective of language modeling to multivariate next token prediction, achieving unified time series forecasting that covers tasks in Figure 1 (right). Based on the decoder-only architecture, we propose TimeAttention to facilitate Transformers on multidimensional time series, presenting a Kronecker-based masking mechanism to train time-series Transformers in a channel-dependent approach. With specialized position embedding for multivariate series, TimeAttention is aware of the chronological order of time points and achieves permutation-equivalence (Zaheer et al., 2017) on variables. We enlarge the context to thousands of patch tokens and achieve state-of-the-art performance on univariate, multivariate, and covariate-informed forecasting benchmarks. By pre-training on large-scale datasets, we present Timer-XL as an extra long version of pre-trained time-series Transformers (Timer) (Liu et al., 2024c), which outperforms recent large models in zero-shot forecasting. Our contributions lie in three aspects:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 474, + 505, + 574 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 132, + 474, + 505, + 497 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 474, + 505, + 497 + ], + "spans": [ + { + "bbox": [ + 132, + 474, + 505, + 497 + ], + "type": "text", + "content": "- We propose multivariate next token prediction and unified time series forecasting, strengthening Transformers with enlarged contexts to make information-complete predictions."
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 501, + 504, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 501, + 504, + 534 + ], + "spans": [ + { + "bbox": [ + 132, + 501, + 504, + 534 + ], + "type": "text", + "content": "- We introduce TimeAttention, a novel causal self-attention tailored for multidimensional time series, facilitating intra- and inter-series modeling with positional awareness and maintaining causality and scalability of Transformers." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 539, + 504, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 539, + 504, + 574 + ], + "spans": [ + { + "bbox": [ + 132, + 539, + 504, + 574 + ], + "type": "text", + "content": "- We propose Timer-XL, a versatile Transformer for one-for-all forecasting, which mitigates performance degradation in long-context time series, achieves state-of-the-art performance in task-specific benchmarks, and presents notable zero-shot performance by pre-training." + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 596, + 212, + 608 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 596, + 212, + 608 + ], + "spans": [ + { + "bbox": [ + 105, + 596, + 212, + 608 + ], + "type": "text", + "content": "2 RELATED WORK" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 621, + 506, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 621, + 506, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 621, + 506, + 732 + ], + "type": "text", + "content": "Transformers (Vaswani et al., 2017) for time series forecasting have undergone rapid advancements. Initial Transformer-based forecasters primarily focused on long-term forecasting (Li et al., 2019; Zhou et al., 2021; Wu et al., 2021; Sun & Zhang, 2024). However, the context length is not growing in pace, which hinders Transformers from making information-complete predictions. Another advancement has focused on multivariate forecasting. Unlike natural language, time series are multidimensional and inherently correlated (Hyndman, 2018). To learn intra- and inter-series dependencies, different tokenization of time-series Transformers has been proposed, including point-wise (Lim et al., 2021), patch-wise (Nie et al., 2022), and variable-wise (Liu et al., 2023) approaches, with deftly tailored architectures (Zhang & Yan, 2022; Wang et al., 2024b). 
However, few works highlight that multidimensional time series can be uniformly tackled by long-context Transformers without architectural" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "content": "modification. In this work, we leverage causal Transformers, which excel at handling long-context sequences, and unify time series forecasting tasks into multivariate next token prediction." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 110, + 506, + 199 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 110, + 506, + 199 + ], + "spans": [ + { + "bbox": [ + 104, + 110, + 506, + 199 + ], + "type": "text", + "content": "Recently, time-series Transformers have evolved from small task-specific models to large pre-trained models (Das et al., 2023; Woo et al., 2024; Ansari et al., 2024). Among them, the decoder-only Transformer is predominantly adopted as the backbone of large language models (Touvron et al., 2023; OpenAI, 2023), positioning it as a scalable choice for general time series analysis (Liu et al., 2024c). By independently predicting each token with supervision, decoder-only models are also multi-length forecasters (Liu et al., 2024b), avoiding resource-intensive training and lookback search. However, existing decoder-only Transformers are generally pre-trained in a channel-independent approach, making them inaccessible to inter-series dependencies." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 204, + 506, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 204, + 506, + 281 + ], + "spans": [ + { + "bbox": [ + 104, + 204, + 506, + 281 + ], + "type": "text", + "content": "Prior work has employed encoder-only Transformers to capture dependencies of multivariate time series (Liu et al., 2024a). However, our empirical study found that this architecture can be incompatible with causal forecasting, limiting the performance of Transformers. To implement next token prediction and multivariate forecasting in a single Transformer, we renovate the attention module, which disentangles fine-grained token dependencies into variable dependencies and temporal causal masks, capturing intra- and inter-series dependencies with causality and scalability maintained. In Table 1, we list representative time-series Transformers and highlight their differences."
+ } + ] + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 106, + 307, + 504, + 378 + ], + "blocks": [ + { + "bbox": [ + 165, + 294, + 444, + 305 + ], + "lines": [ + { + "bbox": [ + 165, + 294, + 444, + 305 + ], + "spans": [ + { + "bbox": [ + 165, + 294, + 444, + 305 + ], + "type": "text", + "content": "Table 1: Comparison among representative time-series Transformers." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 307, + 504, + 378 + ], + "lines": [ + { + "bbox": [ + 106, + 307, + 504, + 378 + ], + "spans": [ + { + "bbox": [ + 106, + 307, + 504, + 378 + ], + "type": "table", + "html": "
ModelPatchTST (2022)iTrans. (2023)TimeXer (2024b)UniTST (2024a)Moirai (2024)Timer (2024c)Timer-XL (Ours)
Intra-Series
Inter-Series
Causal Trm.
Pre-Trained
", + "image_path": "53d14dbf58273f8a2925249ca212ea2e7d06a99b9b5f75655a5b81019e1094cb.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 398, + 183, + 410 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 398, + 183, + 410 + ], + "spans": [ + { + "bbox": [ + 105, + 398, + 183, + 410 + ], + "type": "text", + "content": "3 APPROACH" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 422, + 504, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 422, + 504, + 467 + ], + "spans": [ + { + "bbox": [ + 104, + 422, + 504, + 467 + ], + "type": "text", + "content": "In this section, we first introduce a decoder-only Transformer to illustrate the procedure of next token prediction on univariate time series. As an extension, we design TimeAttention and propose Timer-XL for unified time series forecasting. It is applicable to univariate, multivariate, and covariate-informed scenarios by generalizing the context from 1D sequences to 2D time series." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 479, + 161, + 490 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 479, + 161, + 490 + ], + "spans": [ + { + "bbox": [ + 105, + 479, + 161, + 490 + ], + "type": "text", + "content": "3.1 TIMER" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 500, + 506, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 500, + 506, + 523 + ], + "spans": [ + { + "bbox": [ + 104, + 500, + 506, + 523 + ], + "type": "text", + "content": "Timer (Liu et al., 2024c) is a time-series Transformer trained by next token prediction (Bengio et al., 2000), which regards single-dimensional time series as non-overlapping patch tokens." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 533, + 504, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 533, + 504, + 556 + ], + "spans": [ + { + "bbox": [ + 104, + 533, + 504, + 556 + ], + "type": "text", + "content": "Next Token Prediction Given an univariate time series " + }, + { + "bbox": [ + 104, + 533, + 504, + 556 + ], + "type": "inline_equation", + "content": "\\mathbf{X} = \\{x_{1},\\dots ,x_{TP}\\}" + }, + { + "bbox": [ + 104, + 533, + 504, + 556 + ], + "type": "text", + "content": " of length " + }, + { + "bbox": [ + 104, + 533, + 504, + 556 + ], + "type": "inline_equation", + "content": "TP" + }, + { + "bbox": [ + 104, + 533, + 504, + 556 + ], + "type": "text", + "content": ", a time series token is defined as " + }, + { + "bbox": [ + 104, + 533, + 504, + 556 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 104, + 533, + 504, + 556 + ], + "type": "text", + "content": " consecutive time points, also termed as the patch token:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 203, + 559, + 504, + 573 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 203, + 559, + 504, + 573 + ], + "spans": [ + { + "bbox": [ + 203, + 559, + 504, + 573 + ], + "type": "interline_equation", + "content": "\\mathbf {x} _ {i} = \\left\\{x _ {(i - 1) P + 1}, \\dots , x _ {i P} \\right\\} \\in \\mathbb {R} ^ {P}, i = 1, \\dots , T. 
\\tag {1}", + "image_path": "0ed71aa530c4c9cb7bd55d45a3099a6578cff0b0e4c285044093215c38f2d718.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 574, + 499, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 574, + 499, + 586 + ], + "spans": [ + { + "bbox": [ + 104, + 574, + 499, + 586 + ], + "type": "text", + "content": "The training objective is to independently predict the next patch token to maximize the likelihood:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 250, + 589, + 505, + 622 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 250, + 589, + 505, + 622 + ], + "spans": [ + { + "bbox": [ + 250, + 589, + 505, + 622 + ], + "type": "interline_equation", + "content": "P (\\mathbf {X}) = \\prod_ {i = 1} ^ {T} p \\left(\\mathbf {x} _ {i + 1} \\mid \\mathbf {x} _ {\\leq i}\\right), \\tag {2}", + "image_path": "5c5dbb6c053391f5287c2ddb8a4bebc9eda3fb9fc4b15c65be02b96e03ef8c5c.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 624, + 503, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 624, + 503, + 635 + ], + "spans": [ + { + "bbox": [ + 104, + 624, + 503, + 635 + ], + "type": "text", + "content": "which is realized by a decoder-only architecture with the block number " + }, + { + "bbox": [ + 104, + 624, + 503, + 635 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 104, + 624, + 503, + 635 + ], + "type": "text", + "content": " and model dimension " + }, + { + "bbox": [ + 104, + 624, + 503, + 635 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 624, + 503, + 635 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 233, + 637, + 347, + 651 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 637, + 347, + 651 + ], + "spans": [ + { + "bbox": [ + 233, + 637, + 347, + 651 + ], + "type": "interline_equation", + "content": "\\mathbf {h} _ {i} ^ {0} = \\mathbf {W} _ {e} \\mathbf {x} _ {i}, i = 1, \\dots , T,", + "image_path": "2b8dc5ee2377d5e466a4043dde5f629737430cb37ef4716a13ad33875556acbc.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 233, + 653, + 504, + 667 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 653, + 504, + 667 + ], + "spans": [ + { + "bbox": [ + 233, + 653, + 504, + 667 + ], + "type": "interline_equation", + "content": "\\mathbf {H} ^ {l} = \\operatorname {T r m B l o c k} \\left(\\mathbf {H} ^ {l - 1}\\right), l = 1, \\dots , L, \\tag {3}", + "image_path": "844b9759392fb0ba22599ba0253760b069773c31eab0aca06008bfb7289ab68b.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 216, + 670, + 353, + 684 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 216, + 670, + 353, + 684 + ], + "spans": [ + { + "bbox": [ + 216, + 670, + 353, + 684 + ], + "type": "interline_equation", + "content": "\\{\\hat {\\mathbf {x}} _ {i + 1} \\} = \\mathbf {H} ^ {L} \\mathbf {W} _ {d}, i = 1, \\dots , T.", + "image_path": "b7c56ad0918b38b260a524909c953304f9818bf8bdda74de8682bf94dcc7ec66.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "content": "For simplicity, we omit the block index " + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + 
"type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "content": ". Timer adopts " + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_e" + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_d \\in \\mathbb{R}^{D \\times P}" + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "content": " that independently embed and project the token embeddings as " + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\mathbf{H} = \\{\\mathbf{h}_i\\} \\in \\mathbb{R}^{T \\times D}" + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "content": ". TrmBlock includes feed-forward network and self-attention with the temporal causal mask " + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\mathcal{T} \\in \\mathbb{R}^{T \\times T}" + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\mathbf{h}_i \\in \\mathbb{R}^D" + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "content": " is the context representation of the previous " + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "content": " tokens. All predicted " + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{x}}_{i+1}" + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "content": " are supervised with ground truth via MSE loss." + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 346, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 346, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 346, + 94 + ], + "type": "text", + "content": "3.2 GENERALIZE 1D SEQUENCES TO 2D TIME SERIES" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 102, + 506, + 138 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 102, + 506, + 138 + ], + "spans": [ + { + "bbox": [ + 104, + 102, + 506, + 138 + ], + "type": "text", + "content": "For the enlarged context with the additional dimension, our proposed attention mechanism aims to (1) thoroughly capture intra- and inter-series dependencies and (2) preserve causality within the temporal dimension. Without loss of generality, we illustrate this with the case of multivariate forecasting." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 147, + 504, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 147, + 504, + 171 + ], + "spans": [ + { + "bbox": [ + 104, + 147, + 504, + 171 + ], + "type": "text", + "content": "Multivariate Next Token Prediction Given a multivariate time series " + }, + { + "bbox": [ + 104, + 147, + 504, + 171 + ], + "type": "inline_equation", + "content": "\\mathbf{X} \\in \\mathbb{R}^{N \\times TP}" + }, + { + "bbox": [ + 104, + 147, + 504, + 171 + ], + "type": "text", + "content": " with the number of variables " + }, + { + "bbox": [ + 104, + 147, + 504, + 171 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 147, + 504, + 171 + ], + "type": "text", + "content": ", the time series token " + }, + { + "bbox": [ + 104, + 147, + 504, + 171 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_{m,i}" + }, + { + "bbox": [ + 104, + 147, + 504, + 171 + ], + "type": "text", + "content": " is defined as the " + }, + { + "bbox": [ + 104, + 147, + 504, + 171 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 147, + 504, + 171 + ], + "type": "text", + "content": "-th patch of the " + }, + { + "bbox": [ + 104, + 147, + 504, + 171 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 104, + 147, + 504, + 171 + ], + "type": "text", + "content": "-th variable:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 153, + 173, + 505, + 186 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 153, + 173, + 505, + 186 + ], + "spans": [ + { + "bbox": [ + 153, + 173, + 505, + 186 + ], + "type": "interline_equation", + "content": "\\mathbf {x} _ {m, i} = \\left\\{\\mathbf {X} _ {m, (i - 1) P + 1}, \\dots , \\mathbf {X} _ {m, i P} \\right\\} \\in \\mathbb {R} ^ {P}, m = 1, \\dots , N, i = 1, \\dots , T. \\tag {4}", + "image_path": "209c17cd5f85336d691c36767347f0efd869a4ba34e41f7b1b02a85589b68607.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 188, + 504, + 211 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 188, + 504, + 211 + ], + "spans": [ + { + "bbox": [ + 104, + 188, + 504, + 211 + ], + "type": "text", + "content": "The training objective is still to independently predict the next token. Unlike before, each prediction is made based on tokens of the previous time " + }, + { + "bbox": [ + 104, + 188, + 504, + 211 + ], + "type": "inline_equation", + "content": "(\\leq i)" + }, + { + "bbox": [ + 104, + 188, + 504, + 211 + ], + "type": "text", + "content": " from all " + }, + { + "bbox": [ + 104, + 188, + 504, + 211 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 188, + 504, + 211 + ], + "type": "text", + "content": " variables:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 153, + 213, + 505, + 245 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 153, + 213, + 505, + 245 + ], + "spans": [ + { + "bbox": [ + 153, + 213, + 505, + 245 + ], + "type": "interline_equation", + "content": "P (\\mathbf {X}) = \\prod_ {m = 1} ^ {N} \\prod_ {i = 1} ^ {T} p \\left(\\mathbf {x} _ {m, i + 1} \\mid \\mathbf {x} _ {:, \\leq i}\\right) = \\prod_ {m = 1} ^ {N} \\prod_ {i = 1} ^ {T} p \\left(\\mathbf {x} _ {m, i + 1} \\mid \\mathbf {x} _ {1, \\leq i}, \\dots , \\mathbf {x} _ {N, \\leq i}\\right). 
\\tag {5}", + "image_path": "52bda684ba0809d1dd23927a694f322216ef2d227b5bcb13e4fc73c2480751e6.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 247, + 505, + 292 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 247, + 505, + 292 + ], + "spans": [ + { + "bbox": [ + 104, + 247, + 505, + 292 + ], + "type": "text", + "content": "Compared with Equation 2, the multivariate context length increases from " + }, + { + "bbox": [ + 104, + 247, + 505, + 292 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 247, + 505, + 292 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 247, + 505, + 292 + ], + "type": "inline_equation", + "content": "NT" + }, + { + "bbox": [ + 104, + 247, + 505, + 292 + ], + "type": "text", + "content": ". By contrast, the benefit is that this paradigm learns causal dependencies within each sequence while incorporating exogenous variable correlations from other sequences, making it a universal forecasting paradigm that outperforms channel-independent (Nie et al., 2022) or variable-centric models (Liu et al., 2023)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 296, + 505, + 333 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 296, + 505, + 333 + ], + "spans": [ + { + "bbox": [ + 104, + 296, + 505, + 333 + ], + "type": "text", + "content": "Technically, we independently apply " + }, + { + "bbox": [ + 104, + 296, + 505, + 333 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_e\\in \\mathbb{R}^{D\\times P}" + }, + { + "bbox": [ + 104, + 296, + 505, + 333 + ], + "type": "text", + "content": " on each token to obtain patch-wise representation " + }, + { + "bbox": [ + 104, + 296, + 505, + 333 + ], + "type": "inline_equation", + "content": "\\mathbf{h}_{m,i}\\in \\mathbb{R}^D" + }, + { + "bbox": [ + 104, + 296, + 505, + 333 + ], + "type": "text", + "content": ", which will encompass contextual information from " + }, + { + "bbox": [ + 104, + 296, + 505, + 333 + ], + "type": "inline_equation", + "content": "N_i" + }, + { + "bbox": [ + 104, + 296, + 505, + 333 + ], + "type": "text", + "content": " tokens through Transformer blocks and be eventually projected by " + }, + { + "bbox": [ + 104, + 296, + 505, + 333 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_d\\in \\mathbb{R}^{D\\times P}" + }, + { + "bbox": [ + 104, + 296, + 505, + 333 + ], + "type": "text", + "content": " into the predicted patch token " + }, + { + "bbox": [ + 104, + 296, + 505, + 333 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{x}}_{m,i + 1}" + }, + { + "bbox": [ + 104, + 296, + 505, + 333 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 342, + 506, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 342, + 506, + 399 + ], + "spans": [ + { + "bbox": [ + 104, + 342, + 506, + 399 + ], + "type": "text", + "content": "Position Embedding Position embedding has not been sufficiently explored in time-series Transformers. To avoid inherent permutation-invariance of self-attention, positional embedding is required to reflect the chronological order of tokens on the temporal dimension. As for the variable dimension, shuffling the input order of variables should not affect anything other than the output order of variables. Formally, the processing on multiple variables should be permutation-equivalent (Zaheer et al., 2017)." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 403, + 505, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 403, + 505, + 449 + ], + "spans": [ + { + "bbox": [ + 104, + 403, + 505, + 449 + ], + "type": "text", + "content": "To meet the above requirements, we adopt RoPE (Su et al., 2024), a widely utilized position embedding on the temporal dimension. For the variable dimension, we use two learnable scalars in each head to keep the permutation-equivalence of variables (Woo et al., 2024). Beyond simply incorporating them together, we provide detailed ablations in Section E.3 to demonstrate the effectiveness:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 163, + 449, + 504, + 464 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 163, + 449, + 504, + 464 + ], + "spans": [ + { + "bbox": [ + 163, + 449, + 504, + 464 + ], + "type": "interline_equation", + "content": "\\mathcal {A} _ {m n, i j} = \\mathbf {h} _ {m, i} ^ {\\top} \\mathbf {W} _ {q} \\mathbf {R} _ {\\theta , i - j} \\mathbf {W} _ {k} ^ {\\top} \\mathbf {h} _ {n, j} + u \\cdot \\mathbb {1} (m = n) + v \\cdot \\mathbb {1} (m \\neq n), \\tag {6}", + "image_path": "f87f279cbbfb0a9447e9bb3cf369bd1d670da73fea8dcb5a4d3af7eb7c906e8a.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 466, + 504, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 466, + 504, + 502 + ], + "spans": [ + { + "bbox": [ + 104, + 466, + 504, + 502 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 466, + 504, + 502 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_q, \\mathbf{W}_k, \\mathbf{W}_v \\in \\mathbb{R}^{D \\times d_k}" + }, + { + "bbox": [ + 104, + 466, + 504, + 502 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 466, + 504, + 502 + ], + "type": "inline_equation", + "content": "d_k" + }, + { + "bbox": [ + 104, + 466, + 504, + 502 + ], + "type": "text", + "content": " is the dimension of the query, key, and value. " + }, + { + "bbox": [ + 104, + 466, + 504, + 502 + ], + "type": "inline_equation", + "content": "\\mathbf{R}_{\\theta,t} \\in \\mathbb{R}^{d_k \\times d_k}" + }, + { + "bbox": [ + 104, + 466, + 504, + 502 + ], + "type": "text", + "content": " is the rotary matrix with rotation degree " + }, + { + "bbox": [ + 104, + 466, + 504, + 502 + ], + "type": "inline_equation", + "content": "t \\cdot \\theta" + }, + { + "bbox": [ + 104, + 466, + 504, + 502 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 466, + 504, + 502 + ], + "type": "inline_equation", + "content": "\\mathbb{1}(\\cdot)" + }, + { + "bbox": [ + 104, + 466, + 504, + 502 + ], + "type": "text", + "content": " is the indicator function, and " + }, + { + "bbox": [ + 104, + 466, + 504, + 502 + ], + "type": "inline_equation", + "content": "u, v \\in \\mathbb{R}" + }, + { + "bbox": [ + 104, + 466, + 504, + 502 + ], + "type": "text", + "content": " are learnable parameters for the token to distinguish its endogenous and exogenous time series." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 512, + 505, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 512, + 505, + 568 + ], + "spans": [ + { + "bbox": [ + 104, + 512, + 505, + 568 + ], + "type": "text", + "content": "TimeAttention In contrast to variable-wise (Liu et al., 2023) and non-causal patch-wise tokens (Nie et al., 2022; Woo et al., 2024), our TimeAttention aims to capture causal patch-wise dependencies within and among all variables. Concretely, we sort patch tokens by flattening their 2D indices into 1D indices in the temporal-first manner, which is illustrated in the upper left of Figure 2. Note that the order of variables does not matter, since Equation 6 guarantees their permutation-equivalence." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 572, + 505, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 572, + 505, + 651 + ], + "spans": [ + { + "bbox": [ + 104, + 572, + 505, + 651 + ], + "type": "text", + "content": "We provide an intuitive example to illustrate the causal dependencies within multivariate time series: considering the 2nd token of time series A. To predict its next token, its representation h should be exactly dependent on the tokens-{1,2,4,5}. Similarly, we provide all causal dependencies of each token in Figure 12. Based on the visualized attention mask and variable dependencies presented in Figure 2, where all variables are inter-correlated, all token dependencies in " + }, + { + "bbox": [ + 104, + 572, + 505, + 651 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 104, + 572, + 505, + 651 + ], + "type": "text", + "content": " can be formally disentangled by the Kronecker product into (1) the adjacency matrix of the variable dependency graph " + }, + { + "bbox": [ + 104, + 572, + 505, + 651 + ], + "type": "inline_equation", + "content": "\\mathcal{C} \\in \\mathbb{R}^{N \\times N}" + }, + { + "bbox": [ + 104, + 572, + 505, + 651 + ], + "type": "text", + "content": " and (2) the causal temporal mask " + }, + { + "bbox": [ + 104, + 572, + 505, + 651 + ], + "type": "inline_equation", + "content": "\\mathcal{T} \\in \\mathbb{R}^{T \\times T}" + }, + { + "bbox": [ + 104, + 572, + 505, + 651 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 159, + 653, + 505, + 679 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 159, + 653, + 505, + 679 + ], + "spans": [ + { + "bbox": [ + 159, + 653, + 505, + 679 + ], + "type": "interline_equation", + "content": "\\mathcal {T} _ {i, j} = \\left\\{ \\begin{array}{l l} 1 & \\text {i f} j \\leq i, \\\\ 0 & \\text {o t h e r w i s e ,} \\end{array} \\right. \\mathcal {C} _ {m, n} = \\left\\{ \\begin{array}{l l} 1 & \\text {i f v a r i a b l e} m \\text {i s d e p e n d e n t o n} n, \\\\ 0 & \\text {o t h e r w i s e .} \\end{array} \\right. 
\\tag {7}", + "image_path": "b749110b2952d42d5659acdd494b6b405921fefa1e40c5dede14ddde98a1b5e8.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 681, + 504, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 681, + 504, + 704 + ], + "spans": [ + { + "bbox": [ + 104, + 681, + 504, + 704 + ], + "type": "text", + "content": "Let the Kronecker product " + }, + { + "bbox": [ + 104, + 681, + 504, + 704 + ], + "type": "inline_equation", + "content": "\\otimes : (\\mathbb{R}^{N \\times N}, \\mathbb{R}^{T \\times T}) \\mapsto \\mathbb{R}^{NT \\times NT}" + }, + { + "bbox": [ + 104, + 681, + 504, + 704 + ], + "type": "text", + "content": " take two matrices and produce a block matrix. Consequently, TimeAttention is formulated as follows:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 111, + 706, + 505, + 736 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 706, + 505, + 736 + ], + "spans": [ + { + "bbox": [ + 111, + 706, + 505, + 736 + ], + "type": "interline_equation", + "content": "\\operatorname {T i m e A t t e n t i o n} (\\mathbf {H}) = \\operatorname {S o f t m a x} \\left(\\frac {\\operatorname {M a s k} (\\mathcal {C} \\otimes \\mathcal {T}) + \\mathcal {A}}{\\sqrt {d _ {k}}}\\right) \\mathbf {H} \\mathbf {W} _ {v}, \\operatorname {M a s k} (\\mathcal {M}) = \\left\\{ \\begin{array}{l l} 0 & \\text {i f} \\mathcal {M} _ {i, j} = 1, \\\\ - \\infty & \\text {i f} \\mathcal {M} _ {i, j} = 0. \\end{array} \\right. \\tag {8}", + "image_path": "a82a6d97fe6710def0d2980dd5e76d8d673634dd49968a558e214c1301d26a45.jpg" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 106, + 79, + 504, + 252 + ], + "blocks": [ + { + "bbox": [ + 106, + 79, + 504, + 252 + ], + "lines": [ + { + "bbox": [ + 106, + 79, + 504, + 252 + ], + "spans": [ + { + "bbox": [ + 106, + 79, + 504, + 252 + ], + "type": "image", + "image_path": "5d7910511fbc43aba8665601afd2df9d133202af450ca9b630a9f2b1ffa9530a.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 256, + 507, + 314 + ], + "lines": [ + { + "bbox": [ + 104, + 256, + 507, + 314 + ], + "spans": [ + { + "bbox": [ + 104, + 256, + 507, + 314 + ], + "type": "text", + "content": "Figure 2: Illustration of TimeAttention. For univariate series, temporal mask " + }, + { + "bbox": [ + 104, + 256, + 507, + 314 + ], + "type": "inline_equation", + "content": "\\mathcal{T}" + }, + { + "bbox": [ + 104, + 256, + 507, + 314 + ], + "type": "text", + "content": " keeps the causality. 
Given multivariate patch tokens sorted in a temporal-first order, we adopt the variable dependencies " + }, + { + "bbox": [ + 104, + 256, + 507, + 314 + ], + "type": "inline_equation", + "content": "\\mathcal{C}" + }, + { + "bbox": [ + 104, + 256, + 507, + 314 + ], + "type": "text", + "content": ", an all-one matrix, as the left operand of the Kronecker product, expanding the temporal mask into a block matrix, which exactly reflects the dependencies of multivariate next token prediction. The formulation is also generalizable to univariate and covariate-informed contexts with pre-defined variable dependency." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 327, + 504, + 351 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 327, + 504, + 351 + ], + "spans": [ + { + "bbox": [ + 104, + 327, + 504, + 351 + ], + "type": "text", + "content": "Eventually, token representations in " + }, + { + "bbox": [ + 104, + 327, + 504, + 351 + ], + "type": "inline_equation", + "content": "\\mathbf{H} = \\{\\mathbf{h}_{m,i}\\} \\in \\mathbb{R}^{NT\\times D}" + }, + { + "bbox": [ + 104, + 327, + 504, + 351 + ], + "type": "text", + "content": " will be independently processed by feed-forward network and layer normalization, and fed into the next Transformer block." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 363, + 506, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 363, + 506, + 453 + ], + "spans": [ + { + "bbox": [ + 104, + 363, + 506, + 453 + ], + "type": "text", + "content": "Unified Time Series Forecasting In multivariate forecasting, the variable dependency forms the complete graph, presenting an all-one matrix " + }, + { + "bbox": [ + 104, + 363, + 506, + 453 + ], + "type": "inline_equation", + "content": "\\mathcal{C}" + }, + { + "bbox": [ + 104, + 363, + 506, + 453 + ], + "type": "text", + "content": ". By generalizing TimeAttention on multiple sequences, Transformers can leverage their length-flexibility to encompass relevant covariates as well. In this case, Timer-XL is adapted in two steps: (1) formulate the customized variable dependency as " + }, + { + "bbox": [ + 104, + 363, + 506, + 453 + ], + "type": "inline_equation", + "content": "\\mathcal{C}" + }, + { + "bbox": [ + 104, + 363, + 506, + 453 + ], + "type": "text", + "content": " and (2) optimize the model using the supervision of target variables. An example (target-" + }, + { + "bbox": [ + 104, + 363, + 506, + 453 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 104, + 363, + 506, + 453 + ], + "type": "text", + "content": "-covariate-" + }, + { + "bbox": [ + 104, + 363, + 506, + 453 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 363, + 506, + 453 + ], + "type": "text", + "content": ") of TimeAttention is illustrated on the right of Figure 2. In a nutshell, we adopt position embeddings for the temporal and variable dimensions. To achieve unified time series forecasting, we flatten 2D time series into a unified context and capture fine-grained causal token dependencies."
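Putting Equations 7 and 8 together, the minimal NumPy sketch below (ours) applies Mask(C ⊗ T) before a row-wise Softmax and verifies the intuitive example from Section 3.2: with two inter-correlated variables, the 2nd token of the first series attends exactly to tokens {1, 2, 4, 5} in 1-indexed notation.

```python
# End-to-end sketch of TimeAttention masking (Equations 7-8).
import numpy as np

N, T = 2, 3
A = np.random.randn(N * T, N * T)              # scores from Equation 6
C = np.ones((N, N))                            # complete variable graph
causal = np.tril(np.ones((T, T)))              # temporal mask
allowed = np.kron(C, causal)                   # Equation 7 factorization

masked = np.where(allowed == 1, A, -np.inf)    # Mask(C (x) T) + A
weights = np.exp(masked - masked.max(axis=-1, keepdims=True))
weights /= weights.sum(axis=-1, keepdims=True) # row-wise Softmax

# Token (m=0, i=1), i.e. the 2nd token of the first series, attends to
# flattened indices {0, 1, 3, 4} (tokens {1, 2, 4, 5} when 1-indexed).
print(np.flatnonzero(weights[0 * T + 1] > 0))  # -> [0 1 3 4]
```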
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 468, + 201, + 480 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 468, + 201, + 480 + ], + "spans": [ + { + "bbox": [ + 105, + 468, + 201, + 480 + ], + "type": "text", + "content": "4 EXPERIMENTS" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 495, + 506, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 495, + 506, + 562 + ], + "spans": [ + { + "bbox": [ + 104, + 495, + 506, + 562 + ], + "type": "text", + "content": "We conduct evaluations of Timer-XL in three aspects, including (1) supervised training as a task-specific forecaster, (2) large-scale pre-training as a zero-shot forecaster, and (3) assessing the effectiveness of TimeAttention and model efficiency. Given that the long-context forecasting paradigm receives less attention in the community, which can be concealed due to the performance saturation on previous benchmarks (Makridakis et al., 2020; Wu et al., 2022), we established new long-context forecasting benchmarks. Detailed experimental configurations are provided in Appendix B." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 576, + 308, + 586 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 576, + 308, + 586 + ], + "spans": [ + { + "bbox": [ + 105, + 576, + 308, + 586 + ], + "type": "text", + "content": "4.1 UNIVARIATE TIME SERIES FORECASTING" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 597, + 506, + 686 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 597, + 506, + 686 + ], + "spans": [ + { + "bbox": [ + 104, + 597, + 506, + 686 + ], + "type": "text", + "content": "**Setup** Due to the insufficient dataset length when extending contexts in univariate datasets (Makridakis et al., 2020), we adopt multivariate datasets from Liu et al. (2023). Although these datasets are originally multivariate, they aim to be predicted in a univariate approach with the implementation of channel independence. Different from the previous long-term forecasting setting, we focus on reliable prediction based on a long context. Therefore, we fix the prediction horizon and increase the lookback length to monthly and yearly levels. We also establish a long-context univariate benchmark based on the challenging 40-year ECMWF Reanalysis v5 dataset (Hersbach et al., 2020), where yearly contexts are adopted to predict the land-surface temperature of a single site (ERA5-S)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "content": "Results As shown in Figure 3, the accuracy of univariate prediction can generally be improved by extending the daily context to monthly. We draw a similar conclusion on ERA5 (Table 15), where extending the context consistently helps in the specific model architecture. 
Notably, Timer-XL with" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 79, + 306, + 135 + ], + "blocks": [ + { + "bbox": [ + 107, + 79, + 306, + 135 + ], + "lines": [ + { + "bbox": [ + 107, + 79, + 306, + 135 + ], + "spans": [ + { + "bbox": [ + 107, + 79, + 306, + 135 + ], + "type": "image", + "image_path": "79fb7a53377762632a2d1083e3b63f76c73377f78a711bb58b721826bb32661e.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 306, + 79, + 504, + 135 + ], + "blocks": [ + { + "bbox": [ + 306, + 79, + 504, + 135 + ], + "lines": [ + { + "bbox": [ + 306, + 79, + 504, + 135 + ], + "spans": [ + { + "bbox": [ + 306, + 79, + 504, + 135 + ], + "type": "image", + "image_path": "a9b005998cd76436a1c88c4b9d2a8d95352de906af3c71c720b28e89a67c1a9e.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 108, + 137, + 306, + 194 + ], + "blocks": [ + { + "bbox": [ + 108, + 137, + 306, + 194 + ], + "lines": [ + { + "bbox": [ + 108, + 137, + 306, + 194 + ], + "spans": [ + { + "bbox": [ + 108, + 137, + 306, + 194 + ], + "type": "image", + "image_path": "d100c225ac9464f9239272523ac5c6506ac7f3812e369816cdc3d45165ebed62.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 199, + 506, + 222 + ], + "lines": [ + { + "bbox": [ + 104, + 199, + 506, + 222 + ], + "spans": [ + { + "bbox": [ + 104, + 199, + 506, + 222 + ], + "type": "text", + "content": "Figure 3: Univariate forecasting (pred-96) of well-acknowledged benchmarks under channel independence (Nie et al., 2022). We increase the lookback length to encompass monthly and yearly contexts." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 307, + 137, + 503, + 194 + ], + "blocks": [ + { + "bbox": [ + 307, + 137, + 503, + 194 + ], + "lines": [ + { + "bbox": [ + 307, + 137, + 503, + 194 + ], + "spans": [ + { + "bbox": [ + 307, + 137, + 503, + 194 + ], + "type": "image", + "image_path": "c92323559b403877fc1b283efc799de5a0b7712866bb329b6d210cb0b798ac4b.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 233, + 506, + 321 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 233, + 506, + 321 + ], + "spans": [ + { + "bbox": [ + 104, + 233, + 506, + 321 + ], + "type": "text", + "content": "decoder-only architecture outperforms encoder-only Transformer and linear forecaster in excessively long contexts. 
Further, we conduct representation analysis in Appendix E.4, revealing that Timer-XL is proficient at adaptively selecting information from vast observations and thus achieves breakthrough performance. It is also noteworthy that performance improves only slowly at monthly contexts and deteriorates at yearly ones, which may stem from increased noise and training difficulty inherent in the data; improving context efficiency is thus left as a future direction. Table 2 provides results on ERA5-S. Timer-XL consistently outperforms PatchTST on all sites, which can be credited to the maintenance of causality and token-wise supervision in the decoder-only architecture." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 334, + 506, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 334, + 506, + 422 + ], + "spans": [ + { + "bbox": [ + 104, + 334, + 506, + 422 + ], + "type": "text", + "content": "Non-stationary Forecasting We delve into widespread non-stationarity in univariate tasks. It is commonly tackled by normalization (Kim et al., 2021) that greatly improves Transformer performance in previous benchmarks. However, we find this improvement may be caused by the insufficient time span and limited training samples of these datasets. While normalization simplifies learning by aligning series with different means and variances to the same distribution, it limits the model capacity of Transformers, preventing them from learning variations among windows. The by-products can be mode collapse and oversmoothed predictions. In Table 2 and Table 16, we evaluate the performance on ERA5 and datasets from Wu et al. (2022), which validates that Timer-XL can achieve better results even without instance normalization." + } + ] + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 108, + 475, + 503, + 555 + ], + "blocks": [ + { + "bbox": [ + 104, + 437, + 506, + 471 + ], + "lines": [ + { + "bbox": [ + 104, + 437, + 506, + 471 + ], + "spans": [ + { + "bbox": [ + 104, + 437, + 506, + 471 + ], + "type": "text", + "content": "Table 2: Univariate forecasting (input-3072-pred-96) of ERA5-S, encompassing 117k time points in each station (40 years). We evaluate PatchTST and Timer-XL with and without normalization (Kim et al., 2021). +Norm. indicates using the normalization. We train one model for each site separately." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 108, + 475, + 503, + 555 + ], + "lines": [ + { + "bbox": [ + 108, + 475, + 503, + 555 + ], + "spans": [ + { + "bbox": [ + 108, + 475, + 503, + 555 + ], + "type": "table", + "html": "
StationBeijingHongkongLondonNew YorkParisSeoulShanghaiAverage
ModelMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAE
PatchTST0.07910.2210.1890.3270.2770.4150.1860.3340.2660.4070.09400.2380.1370.2890.1750.319
+ Norm.0.07970.2200.1910.3230.2810.4190.1840.3340.2720.4110.09140.2330.1360.2870.1760.319
Timer-XL0.07390.2100.1790.3160.2620.4040.1820.3270.2540.3990.09010.2290.1340.2820.1680.310
+ Norm.0.07420.2100.1830.3170.2780.4180.1810.3300.2640.4070.08960.2270.1330.2810.1720.313
", + "image_path": "67e09f08fcc44247e4817ba8accddf80c0f8503a9eae5f83da73f3b0fc50d4d5.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 576, + 320, + 586 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 576, + 320, + 586 + ], + "spans": [ + { + "bbox": [ + 105, + 576, + 320, + 586 + ], + "type": "text", + "content": "4.2 MULTIVARIATE TIME SERIES FORECASTING" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 597, + 506, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 597, + 506, + 675 + ], + "spans": [ + { + "bbox": [ + 104, + 597, + 506, + 675 + ], + "type": "text", + "content": "**Setup** We follow iTransformer (Liu et al., 2023) to evaluate multivariate forecasting performance. Toward a one-for-all forecaster, we evaluate performance of rolling forecast, that is, we trained one model for all prediction horizons by integrating the previous prediction into the lookback window in the next iteration. We further establish long-context multivariate forecasting benchmarks: ERA5 multi-station land-surface temperature prediction (ERA5-MS), and the global temperature and wind speed forecasting challenge (GTWSF) (Wu et al., 2023), to learn complex temporal dynamics and variable correlations with sufficient training samples." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 687, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 687, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 687, + 506, + 733 + ], + "type": "text", + "content": "Results As shown in Tables 3-4 and Figure 4, Timer-XL achieves the best results on both previous and new benchmarks. Essentially, Transformers that explicitly capture inter-series dependencies, such as UniTST (Liu et al., 2024a) and iTransformer, reasonably achieve decent performance in Table 3. Beyond iTransformer, Timer-XL can model fine-grained patch-wise temporal dependencies. With" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 118 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 118 + ], + "type": "text", + "content": "TimeAttention, Timer-XL outperforms Timer especially on high-dimensional time series (13.2% in ECL and 6.3% in Traffic, with thousands of tokens in the context). Compared with the encoder-only UniTST, decoder-only Transformers excel at generalizing across varying prediction lengths in Table 4." 
+ } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 109, + 123, + 503, + 238 + ], + "blocks": [ + { + "bbox": [ + 109, + 123, + 503, + 238 + ], + "lines": [ + { + "bbox": [ + 109, + 123, + 503, + 238 + ], + "spans": [ + { + "bbox": [ + 109, + 123, + 503, + 238 + ], + "type": "image", + "image_path": "7d45ba03ee83b18b2f20d6f15575a4d2df1639ef3ffab172a100980176335e37.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 243, + 504, + 268 + ], + "lines": [ + { + "bbox": [ + 104, + 243, + 504, + 268 + ], + "spans": [ + { + "bbox": [ + 104, + 243, + 504, + 268 + ], + "type": "text", + "content": "Figure 4: Multivariate forecasting of GTWSF (2-day-pred-1-day), involving 3850 worldwide stations spanning two years. Results of the baseline models are officially reported by Ding et al. (2024)." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 108, + 306, + 504, + 412 + ], + "blocks": [ + { + "bbox": [ + 104, + 280, + 504, + 304 + ], + "lines": [ + { + "bbox": [ + 104, + 280, + 504, + 304 + ], + "spans": [ + { + "bbox": [ + 104, + 280, + 504, + 304 + ], + "type": "text", + "content": "Table 3: Multivariate forecasting (96-pred-96) of well-acknowledged benchmarks. All models are trained from scratch. Results of baseline models are officially reported by Liu et al. (2023)." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 108, + 306, + 504, + 412 + ], + "lines": [ + { + "bbox": [ + 108, + 306, + 504, + 412 + ], + "spans": [ + { + "bbox": [ + 108, + 306, + 504, + 412 + ], + "type": "table", + "html": "
Models | Timer-XL (Ours) | Timer (2024c) | UniTST (2024a) | iTransformer (2023) | DLinear (2023) | PatchTST (2022) | TimesNet (2022) | Stationary (2022b) | Autoformer (2021)
Metric | MSE | MAE | MSE | MAE | MSE | MAE | MSE | MAE | MSE | MAE | MSE | MAE | MSE | MAE | MSE | MAE | MSE | MAE
ECL | 0.138 | 0.233 | 0.159 | 0.244 | 0.139 | 0.235 | 0.148 | 0.240 | 0.197 | 0.282 | 0.181 | 0.270 | 0.168 | 0.272 | 0.169 | 0.273 | 0.201 | 0.317
ETTh1 | 0.381 | 0.399 | 0.386 | 0.401 | 0.385 | 0.402 | 0.386 | 0.405 | 0.386 | 0.400 | 0.414 | 0.419 | 0.384 | 0.402 | 0.513 | 0.491 | 0.449 | 0.459
Traffic | 0.387 | 0.260 | 0.413 | 0.265 | 0.389 | 0.265 | 0.395 | 0.268 | 0.650 | 0.396 | 0.462 | 0.295 | 0.593 | 0.321 | 0.612 | 0.338 | 0.613 | 0.388
Weather | 0.165 | 0.209 | 0.176 | 0.215 | 0.165 | 0.210 | 0.174 | 0.214 | 0.196 | 0.255 | 0.177 | 0.218 | 0.172 | 0.220 | 0.173 | 0.223 | 0.266 | 0.336
Solar-Energy | 0.200 | 0.229 | 0.204 | 0.234 | 0.203 | 0.232 | 0.203 | 0.237 | 0.290 | 0.378 | 0.234 | 0.286 | 0.250 | 0.292 | 0.215 | 0.249 | 0.884 | 0.711
", + "image_path": "9ab83abc4d6716d5cf2526e9ca5a34b057878018d220d3b1183925fb12fdfc7b.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 108, + 458, + 503, + 564 + ], + "blocks": [ + { + "bbox": [ + 104, + 421, + 506, + 456 + ], + "lines": [ + { + "bbox": [ + 104, + 421, + 506, + 456 + ], + "spans": [ + { + "bbox": [ + 104, + 421, + 506, + 456 + ], + "type": "text", + "content": "Table 4: Multivariate forecasting (672-pred-{96, 192, 336, 720}) of well-acknowledged benchmarks. We evaluate one-for-all forecasters following Liu et al. (2024b): rolling forecasting for four forecast lengths with one model. Averaged results are reported here and full results are provided in Table 12." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 108, + 458, + 503, + 564 + ], + "lines": [ + { + "bbox": [ + 108, + 458, + 503, + 564 + ], + "spans": [ + { + "bbox": [ + 108, + 458, + 503, + 564 + ], + "type": "table", + "html": "
Models | Timer-XL (Ours) | Timer (2024c) | UniTST (2024a) | iTransformer (2023) | DLinear (2023) | PatchTST (2022) | TimesNet (2022) | Stationary (2022b) | Autoformer (2021)
Metric | MSE | MAE | MSE | MAE | MSE | MAE | MSE | MAE | MSE | MAE | MSE | MAE | MSE | MAE | MSE | MAE | MSE | MAE
ECL | 0.155 | 0.246 | 0.161 | 0.251 | 0.163 | 0.257 | 0.164 | 0.258 | 0.165 | 0.265 | 0.169 | 0.268 | 0.201 | 0.303 | 0.265 | 0.358 | 0.289 | 0.379
ETTh1 | 0.409 | 0.430 | 0.418 | 0.436 | 0.429 | 0.447 | 0.421 | 0.445 | 0.426 | 0.444 | 0.412 | 0.435 | 0.495 | 0.491 | 0.505 | 0.513 | 0.517 | 0.528
Traffic | 0.374 | 0.255 | 0.384 | 0.259 | 0.385 | 0.265 | 0.384 | 0.274 | 0.423 | 0.298 | 0.391 | 0.275 | 0.602 | 0.322 | 0.630 | 0.347 | 0.684 | 0.433
Weather | 0.240 | 0.273 | 0.232 | 0.270 | 0.231 | 0.272 | 0.266 | 0.291 | 0.239 | 0.291 | 0.226 | 0.268 | 0.264 | 0.293 | 0.308 | 0.329 | 0.435 | 0.455
Solar-Energy | 0.198 | 0.249 | 0.233 | 0.249 | 0.241 | 0.275 | 0.213 | 0.291 | 0.222 | 0.283 | 0.202 | 0.269 | 0.213 | 0.295 | 0.254 | 0.315 | 0.265 | 0.325
", + "image_path": "9392748096ddc1333b1f929d4867af98b1d21e6efbde5736e65cc4d816ed5949.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 576, + 506, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 576, + 506, + 666 + ], + "spans": [ + { + "bbox": [ + 104, + 576, + 506, + 666 + ], + "type": "text", + "content": "Ablation Study Patching (Nie et al., 2022) has been demonstrated as an effective tokenization approach for time series, leading to the boom of Transformers in supervised deep forecasters and large time series models. To better cope with multivariate time series forecasting, we compared typical models on real-world benchmarks to address key questions: (1) whether to conduct explicit inter-series modeling or not (channel independence) and (2) whether to use decoder-only or encoder-only Transformers. The combination presents four Transformers in Table 5, which shows that Timer-XL combines the advantages of explicit inter-series modeling and the decoder-only architecture, which is suitable for multivariate time series forecasting with sufficient training samples." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 678, + 354, + 690 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 678, + 354, + 690 + ], + "spans": [ + { + "bbox": [ + 105, + 678, + 354, + 690 + ], + "type": "text", + "content": "4.3 COVARIATE-INFORMED TIME SERIES FORECASTING" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "content": "**Setup** For the covariate-informed forecasting, we adopt the well-acknowledged electricity price forecasting (EPF) task (Lago et al., 2021). Each subset contains electricity price as the endogenous variable and two exogenous variables. Therefore, the variable dependency for Timer-XL is formulated" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 108, + 133, + 504, + 220 + ], + "blocks": [ + { + "bbox": [ + 104, + 83, + 504, + 129 + ], + "lines": [ + { + "bbox": [ + 104, + 83, + 504, + 129 + ], + "spans": [ + { + "bbox": [ + 104, + 83, + 504, + 129 + ], + "type": "text", + "content": "Table 5: Multivariate forecasting (input-3072-pred-96) of ERA5-MS (40 years and 7 stations). We fairly evaluate Transformers that adopt patched time series. CI. indicates whether the Transformer uses channel independence (Nie et al., 2022). Arch. categorizes them into the encoder-only (E) and decoder-only (D) architectures. Different from ERA5-S in Table 2, we train one model for all sites." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 108, + 133, + 504, + 220 + ], + "lines": [ + { + "bbox": [ + 108, + 133, + 504, + 220 + ], + "spans": [ + { + "bbox": [ + 108, + 133, + 504, + 220 + ], + "type": "table", + "html": "
Station | Beijing | Hongkong | London | New York | Paris | Seoul | Shanghai | Average
Model | CI. | Arch. | MSE | MAE | MSE | MAE | MSE | MAE | MSE | MAE | MSE | MAE | MSE | MAE | MSE | MAE | MSE | MAE
PatchTST | Yes | E | 0.0815 | 0.222 | 0.190 | 0.326 | 0.275 | 0.414 | 0.185 | 0.333 | 0.265 | 0.407 | 0.0977 | 0.240 | 0.139 | 0.290 | 0.176 | 0.319
UniTST | No | E | 0.0753 | 0.213 | 0.179 | 0.318 | 0.269 | 0.410 | 0.185 | 0.330 | 0.256 | 0.401 | 0.0901 | 0.230 | 0.135 | 0.284 | 0.170 | 0.312
Timer | Yes | D | 0.0734 | 0.210 | 0.182 | 0.319 | 0.268 | 0.407 | 0.183 | 0.329 | 0.255 | 0.399 | 0.0877 | 0.226 | 0.132 | 0.281 | 0.169 | 0.310
Timer-XL | No | D | 0.0736 | 0.209 | 0.174 | 0.309 | 0.263 | 0.404 | 0.182 | 0.327 | 0.252 | 0.396 | 0.0872 | 0.225 | 0.130 | 0.278 | 0.166 | 0.307
", + "image_path": "d222f793e029a06ec91e2b4412e138e04eed2191dab3f4edefb70c5797760874.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 228, + 504, + 264 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 228, + 504, + 264 + ], + "spans": [ + { + "bbox": [ + 104, + 228, + 504, + 264 + ], + "type": "text", + "content": "as " + }, + { + "bbox": [ + 104, + 228, + 504, + 264 + ], + "type": "inline_equation", + "content": "\\mathcal{C} = [[1,1,1],[0,1,0],[0,0,1]]" + }, + { + "bbox": [ + 104, + 228, + 504, + 264 + ], + "type": "text", + "content": ". To investigate whether to learn causal or noncausal patch-wise dependencies in covariates, we implement two versions of Timer-XL: the original one with temporal causal mask " + }, + { + "bbox": [ + 104, + 228, + 504, + 264 + ], + "type": "inline_equation", + "content": "\\mathcal{T}" + }, + { + "bbox": [ + 104, + 228, + 504, + 264 + ], + "type": "text", + "content": ", and the noncausal one with " + }, + { + "bbox": [ + 104, + 228, + 504, + 264 + ], + "type": "inline_equation", + "content": "\\mathcal{T}" + }, + { + "bbox": [ + 104, + 228, + 504, + 264 + ], + "type": "text", + "content": " replaced by an all-one matrix." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 274, + 506, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 274, + 506, + 342 + ], + "spans": [ + { + "bbox": [ + 104, + 274, + 506, + 342 + ], + "type": "text", + "content": "Results As shown in Table 6, Timer-XL outperforms state-of-the-art models in covariate-informed tasks. Compared with TimeXer (Wang et al., 2024b), which treats an entire covariate as a token, Timer-XL learns fine-grained patch-wise dependencies. By the noncausal version of Timer-XL, we surprisingly find consistent conclusions with endogenous variables: results will be better if Timer-XL learns causal dependencies within exogenous variables. It again validates that next token prediction that maintains causality has a higher upper limit of performance." + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 108, + 388, + 503, + 510 + ], + "blocks": [ + { + "bbox": [ + 104, + 350, + 506, + 385 + ], + "lines": [ + { + "bbox": [ + 104, + 350, + 506, + 385 + ], + "spans": [ + { + "bbox": [ + 104, + 350, + 506, + 385 + ], + "type": "text", + "content": "Table 6: Covariate-informed forecasting (168-pred-24) of EPF. We implement two versions of TimerXL: Noncausal indicates that we do not maintain the causality within covariates by replacing temporal causal mask with all-one matrix. Results of baselines are officially reported by Wang et al. (2024b)." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 108, + 388, + 503, + 510 + ], + "lines": [ + { + "bbox": [ + 108, + 388, + 503, + 510 + ], + "spans": [ + { + "bbox": [ + 108, + 388, + 503, + 510 + ], + "type": "table", + "html": "
Models | Timer-XL (Ours) | Timer-XL (Noncausal) | TimeXer (2024b) | iTransformer (2023) | DLinear (2023) | PatchTST (2022) | Crossformer (2022) | TimesNet (2022) | Autoformer (2021)
Metric | MSE | MAE | MSE | MAE | MSE | MAE | MSE | MAE | MSE | MAE | MSE | MAE | MSE | MAE | MSE | MAE | MSE | MAE
NP | 0.234 | 0.262 | 0.237 | 0.265 | 0.238 | 0.268 | 0.265 | 0.300 | 0.309 | 0.321 | 0.267 | 0.284 | 0.245 | 0.289 | 0.250 | 0.289 | 0.402 | 0.398
PJM | 0.089 | 0.187 | 0.092 | 0.188 | 0.088 | 0.188 | 0.097 | 0.197 | 0.108 | 0.215 | 0.106 | 0.209 | 0.149 | 0.198 | 0.097 | 0.195 | 0.168 | 0.267
BE | 0.371 | 0.243 | 0.410 | 0.279 | 0.379 | 0.243 | 0.394 | 0.270 | 0.463 | 0.313 | 0.403 | 0.264 | 0.436 | 0.294 | 0.419 | 0.288 | 0.500 | 0.333
FR | 0.381 | 0.204 | 0.406 | 0.220 | 0.384 | 0.208 | 0.439 | 0.233 | 0.429 | 0.260 | 0.411 | 0.220 | 0.440 | 0.216 | 0.431 | 0.234 | 0.519 | 0.295
DE | 0.434 | 0.415 | 0.435 | 0.415 | 0.440 | 0.418 | 0.479 | 0.443 | 0.520 | 0.463 | 0.461 | 0.432 | 0.540 | 0.423 | 0.502 | 0.446 | 0.674 | 0.544
Average | 0.302 | 0.262 | 0.316 | 0.273 | 0.306 | 0.265 | 0.335 | 0.289 | 0.366 | 0.314 | 0.330 | 0.282 | 0.362 | 0.284 | 0.340 | 0.290 | 0.453 | 0.368
", + "image_path": "04c943f1b98e55267a342571b7185f18ea5f6aa7e389f37d4635b3ad9e249b59.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 523, + 326, + 534 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 523, + 326, + 534 + ], + "spans": [ + { + "bbox": [ + 105, + 523, + 326, + 534 + ], + "type": "text", + "content": "4.4 PRE-TRAINED TIME-SERIES TRANSFORMERS" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 544, + 506, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 544, + 506, + 677 + ], + "spans": [ + { + "bbox": [ + 104, + 544, + 506, + 677 + ], + "type": "text", + "content": "**Setup** Pre-training enriches time-series Transformers with generalizable forecasting capabilities. The outcome large time series model can cope with widespread challenges of few-shot and zero-shot forecasting. In this section, we conduct univariate pre-training on UTSD (Liu et al., 2024c) and LOTSA (Woo et al., 2024) and evaluate zero-shot performance on benchmarks from Wu et al. (2022). We further conduct large-scale multivariate pre-training on our ERA5-Large dataset, which spans 40 years and encompasses 4920 stations. Subsequently, we evaluate three types of generalization results comparing PatchTST (encoder-only Transformer) and Timer-XL (decoder-only Transformer): pre-training on " + }, + { + "bbox": [ + 104, + 544, + 506, + 677 + ], + "type": "inline_equation", + "content": "80\\%" + }, + { + "bbox": [ + 104, + 544, + 506, + 677 + ], + "type": "text", + "content": " stations and " + }, + { + "bbox": [ + 104, + 544, + 506, + 677 + ], + "type": "inline_equation", + "content": "80\\%" + }, + { + "bbox": [ + 104, + 544, + 506, + 677 + ], + "type": "text", + "content": " time span and then forecast on the remaining stations (variable generalization), remaining time span (temporal generalization), and remaining split of time span and stations (variable and temporal generalization). To evaluate the benefit of pre-training with longer context, we compare the zero-shot performance of Timer (2024c) and Timer-XL, where the context length of pre-training is increased from 1440 to 2880." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 687, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 687, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 687, + 506, + 733 + ], + "type": "text", + "content": "Results We compare generalization performance on ERA5-Large in the middle of Figure 5 (a). Timer-XL achieves better results than PatchTST in all cases, revealing that decoder-only architecture has stronger generalization capability. 
Figure 5 (b) compares zero-shot performance of two pretrained Transformers with different context lengths, where Timer-XL outperforms previous Timer on" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 128 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 128 + ], + "type": "text", + "content": "all benchmark datasets, validating that long-context pre-training enhances large time series models. In Table 7, we provide a comprehensive zero-shot evaluation under a comparable pre-training scale and model size, where Timer-XL achieves notable performance with better sample efficiency. The versatility and scalability make it a promising backbone of foundation models." + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 111, + 141, + 253, + 247 + ], + "blocks": [ + { + "bbox": [ + 111, + 141, + 253, + 247 + ], + "lines": [ + { + "bbox": [ + 111, + 141, + 253, + 247 + ], + "spans": [ + { + "bbox": [ + 111, + 141, + 253, + 247 + ], + "type": "image", + "image_path": "f809df11e6c77a504c9d6611bd294faa583499ade1ad86527e9586aeba9856e7.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 255, + 506, + 301 + ], + "lines": [ + { + "bbox": [ + 104, + 255, + 506, + 301 + ], + "spans": [ + { + "bbox": [ + 104, + 255, + 506, + 301 + ], + "type": "text", + "content": "Figure 5: Illustration of one-for-all generalization (left). Based on the contextual flexibility, Timer-XL can predict heterogeneous time series, indicating three directions of generalization shown on the left. We compare performance when generalizing across the time and variables (middle), and zero-shot results across datasets (right), emphasizing the benefit of long-context pre-training." 
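As a usage-level sketch of the zero-shot protocol (our assumption-laden illustration: `forecaster` is a placeholder for any frozen pre-trained model, and the per-window normalization follows common practice such as reversible instance normalization (Kim et al., 2021) rather than a documented detail of these baselines):

```python
import numpy as np

def zero_shot_eval(forecaster, series, context_len, pred_len):
    """Slide over a held-out series and score a frozen forecaster (MSE, MAE)."""
    sq_errs, abs_errs = [], []
    last = len(series) - context_len - pred_len
    for start in range(0, last + 1, pred_len):
        ctx = series[start : start + context_len]
        true = series[start + context_len : start + context_len + pred_len]
        mu, sigma = ctx.mean(), ctx.std() + 1e-8     # per-window normalization
        pred = forecaster((ctx - mu) / sigma) * sigma + mu
        sq_errs.append(((pred - true) ** 2).mean())
        abs_errs.append(np.abs(pred - true).mean())
    return float(np.mean(sq_errs)), float(np.mean(abs_errs))
```

No gradient update touches the forecaster, so the loop is zero-shot by construction; only `context_len` changes between the 1440- and 2880-context comparisons.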
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 254, + 141, + 364, + 249 + ], + "blocks": [ + { + "bbox": [ + 254, + 141, + 364, + 249 + ], + "lines": [ + { + "bbox": [ + 254, + 141, + 364, + 249 + ], + "spans": [ + { + "bbox": [ + 254, + 141, + 364, + 249 + ], + "type": "image", + "image_path": "9bf8e911d60cef9234044b26c6892c245b20cac153d33c0d445242aa07ca1ca9.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 365, + 141, + 499, + 249 + ], + "blocks": [ + { + "bbox": [ + 365, + 141, + 499, + 249 + ], + "lines": [ + { + "bbox": [ + 365, + 141, + 499, + 249 + ], + "spans": [ + { + "bbox": [ + 365, + 141, + 499, + 249 + ], + "type": "image", + "image_path": "8d040fb43b89d37fe62e4580b2b8519c97cb6723bc57bc269df004eaa12a00a8.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 108, + 366, + 504, + 482 + ], + "blocks": [ + { + "bbox": [ + 104, + 314, + 506, + 361 + ], + "lines": [ + { + "bbox": [ + 104, + 314, + 506, + 361 + ], + "spans": [ + { + "bbox": [ + 104, + 314, + 506, + 361 + ], + "type": "text", + "content": "Table 7: Averaged results of zero-shot forecasting. A lower MSE or MAE indicates a better prediction. Corresponding prediction lengths include " + }, + { + "bbox": [ + 104, + 314, + 506, + 361 + ], + "type": "inline_equation", + "content": "\\{96,192,336,720\\}" + }, + { + "bbox": [ + 104, + 314, + 506, + 361 + ], + "type": "text", + "content": ". Full results of all prediction lengths are provided in Table 13. " + }, + { + "bbox": [ + 104, + 314, + 506, + 361 + ], + "type": "inline_equation", + "content": "1^{\\text{st}}" + }, + { + "bbox": [ + 104, + 314, + 506, + 361 + ], + "type": "text", + "content": " Count represents the number of wins achieved by a model under all prediction lengths and datasets. The detailed configuration of Timer-XLBase is provided in Table 11." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 108, + 366, + 504, + 482 + ], + "lines": [ + { + "bbox": [ + 108, + 366, + 504, + 482 + ], + "spans": [ + { + "bbox": [ + 108, + 366, + 504, + 482 + ], + "type": "table", + "html": "
Models | Timer-XL_Base (Ours) | Time-MoE_Base (2024) | Time-MoE_Large (2024) | Time-MoE_Ultra (2024) | Moirai_Small (2024) | Moirai_Base (2024) | Moirai_Large (2024) | TimesFM (2023) | MOMENT (2024) | Chronos_Base (2024) | Chronos_Large (2024)
Metric | MSE | MAE | MSE | MAE | MSE | MAE | MSE | MAE | MSE | MAE | MSE | MAE | MSE | MAE | MSE | MAE | MSE | MAE | MSE | MAE | MSE | MAE
ETTm1 | 0.373 | 0.392 | 0.394 | 0.415 | 0.376 | 0.405 | 0.356 | 0.391 | 0.436 | 0.410 | 0.406 | 0.385 | 0.422 | 0.391 | 0.433 | 0.418 | 0.670 | 0.536 | 0.645 | 0.500 | 0.555 | 0.465
ETTm2 | 0.273 | 0.336 | 0.317 | 0.365 | 0.316 | 0.361 | 0.288 | 0.344 | 0.307 | 0.347 | 0.311 | 0.337 | 0.329 | 0.343 | 0.328 | 0.346 | 0.316 | 0.365 | 0.310 | 0.350 | 0.295 | 0.338
ETTh1 | 0.404 | 0.417 | 0.400 | 0.424 | 0.394 | 0.419 | 0.412 | 0.426 | 0.428 | 0.427 | 0.417 | 0.419 | 0.480 | 0.439 | 0.473 | 0.443 | 0.683 | 0.566 | 0.591 | 0.468 | 0.588 | 0.466
ETTh2 | 0.347 | 0.388 | 0.366 | 0.404 | 0.405 | 0.415 | 0.371 | 0.399 | 0.361 | 0.384 | 0.362 | 0.382 | 0.367 | 0.377 | 0.392 | 0.406 | 0.361 | 0.409 | 0.405 | 0.410 | 0.455 | 0.427
ECL | 0.174 | 0.278 | - | - | - | - | - | - | 0.218 | 0.303 | 0.187 | 0.274 | 0.186 | 0.270 | - | - | 0.765 | 0.686 | 0.214 | 0.278 | 0.204 | 0.273
Weather | 0.256 | 0.294 | 0.265 | 0.297 | 0.270 | 0.300 | 0.256 | 0.288 | 0.275 | 0.286 | 0.287 | 0.281 | 0.264 | 0.273 | - | - | 0.294 | 0.326 | 0.292 | 0.315 | 0.279 | 0.306
1st Count | 15102130107000511001200002
", + "image_path": "e633e516bc4779be5567e3700747af241a230c792a27c7811d8fe1bd3fe08b09.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 120, + 483, + 441, + 492 + ], + "lines": [ + { + "bbox": [ + 120, + 483, + 441, + 492 + ], + "spans": [ + { + "bbox": [ + 120, + 483, + 441, + 492 + ], + "type": "text", + "content": "* Dataset for pre-training is not evaluated on corresponding models, which is denoted by a dash (-)." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_footnote" + }, + { + "bbox": [ + 121, + 492, + 465, + 501 + ], + "lines": [ + { + "bbox": [ + 121, + 492, + 465, + 501 + ], + "spans": [ + { + "bbox": [ + 121, + 492, + 465, + 501 + ], + "type": "text", + "content": "* Traffic from (PEMS) is generally used during the pre-training of large models and thus not evaluated here." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_footnote" + }, + { + "bbox": [ + 121, + 501, + 392, + 510 + ], + "lines": [ + { + "bbox": [ + 121, + 501, + 392, + 510 + ], + "spans": [ + { + "bbox": [ + 121, + 501, + 392, + 510 + ], + "type": "text", + "content": "* Our model checkpoint is available at https://huggingface.co/thuml/timer-base-84m." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 529, + 211, + 540 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 529, + 211, + 540 + ], + "spans": [ + { + "bbox": [ + 105, + 529, + 211, + 540 + ], + "type": "text", + "content": "4.5 MODEL ANALYSIS" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 550, + 506, + 649 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 550, + 506, + 649 + ], + "spans": [ + { + "bbox": [ + 104, + 550, + 506, + 649 + ], + "type": "text", + "content": "Model Efficiency To evaluate the model efficiency of Timer-XL with respect to the context length, it is essential to recognize the distinct characteristics of time series data compared to 1D sequences. Unlike natural language, the time series modality is characterized by the variable number " + }, + { + "bbox": [ + 104, + 550, + 506, + 649 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 550, + 506, + 649 + ], + "type": "text", + "content": " and the input length. We adopt two representative multivariate datasets with different " + }, + { + "bbox": [ + 104, + 550, + 506, + 649 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 550, + 506, + 649 + ], + "type": "text", + "content": ", and provide the memory footprint and training speed under gradually prolonged input. We evaluate typical approaches to handle multivariate series: (1) Timer-XL and Moiria that adopt channel dependence; (2) Timer that adopts channel independence. Intuitively, the complexity of the first type is " + }, + { + "bbox": [ + 104, + 550, + 506, + 649 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(N^2 T^2)" + }, + { + "bbox": [ + 104, + 550, + 506, + 649 + ], + "type": "text", + "content": " while the complexity of self-attention under channel independence is " + }, + { + "bbox": [ + 104, + 550, + 506, + 649 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(NT^2)" + }, + { + "bbox": [ + 104, + 550, + 506, + 649 + ], + "type": "text", + "content": ". 
However, results shown in Figure 6 reveal that measured overheads of Timer-XL is much less than " + }, + { + "bbox": [ + 104, + 550, + 506, + 649 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 550, + 506, + 649 + ], + "type": "text", + "content": " times of Timer." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "text", + "content": "Since the previous analysis of model efficiency on time-series Transformer predominantly focuses on self-attention on 1D time series, we initially present a theoretical derivation of the computational complexity of Transformers on 2D time series, including the parameter counts, memory footprint, and FLOPs in Table 8. We find that other parts of Transformers, such as feed-forward network, have a complexity of " + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(NT)" + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "text", + "content": " no matter which approach is adopted to handle multivariate time series. They also account for dominant overheads in existing benchmarks since the context length is not large enough, confirming our empirical results. Further, we introduce FlashAttention (Dao et al., 2022) to" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "content": "improve the model efficiency, which is computationally equivalent and reduces the overall memory footprint of Timer-XL to " + }, + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(NT)" + }, + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "content": " without affecting performance." 
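A minimal sketch of this memory-efficient attention path, using PyTorch's `torch.nn.functional.scaled_dot_product_attention` (which can dispatch to FlashAttention-style fused kernels, though an explicit mask may route to the memory-efficient fallback); the token layout and sizes below are assumptions for illustration, not the released code:

```python
import torch
import torch.nn.functional as F

def time_attention_step(q, k, v, mask):
    """One exact-attention call over N*T flattened patch tokens.

    q, k, v: (batch, heads, N*T, head_dim); mask: boolean (N*T, N*T),
    True where attention is allowed. scaled_dot_product_attention avoids
    materializing the full (N*T)^2 score matrix, which is what brings the
    activation memory down from O(N^2 T^2), as discussed above.
    """
    return F.scaled_dot_product_attention(q, k, v, attn_mask=mask)

B, H, N, T, D = 2, 8, 7, 96, 64              # hypothetical sizes (e.g. 7 stations)
q = k = v = torch.randn(B, H, N * T, D)
pos_id = torch.arange(T).repeat(N)           # patch position of each token
mask = pos_id[:, None] >= pos_id[None, :]    # temporal causality, open across variables
out = time_attention_step(q, k, v, mask)     # (B, H, N*T, D)
```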
+ } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 114, + 120, + 306, + 186 + ], + "blocks": [ + { + "bbox": [ + 180, + 112, + 246, + 120 + ], + "lines": [ + { + "bbox": [ + 180, + 112, + 246, + 120 + ], + "spans": [ + { + "bbox": [ + 180, + 112, + 246, + 120 + ], + "type": "text", + "content": "Weather (21 Variables)" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 114, + 120, + 306, + 186 + ], + "lines": [ + { + "bbox": [ + 114, + 120, + 306, + 186 + ], + "spans": [ + { + "bbox": [ + 114, + 120, + 306, + 186 + ], + "type": "image", + "image_path": "553584e7183e72b2a768d3fda64cf4138bca7bc170d557ad9865987ffb71461b.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 309, + 120, + 500, + 186 + ], + "blocks": [ + { + "bbox": [ + 373, + 112, + 440, + 120 + ], + "lines": [ + { + "bbox": [ + 373, + 112, + 440, + 120 + ], + "spans": [ + { + "bbox": [ + 373, + 112, + 440, + 120 + ], + "type": "text", + "content": "Weather (21 Variables)" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 309, + 120, + 500, + 186 + ], + "lines": [ + { + "bbox": [ + 309, + 120, + 500, + 186 + ], + "spans": [ + { + "bbox": [ + 309, + 120, + 500, + 186 + ], + "type": "image", + "image_path": "4a2d17d81021b75adf6eab2fc8be89ebd6b9c6f957e6ade84aeff1196f10832b.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 111, + 196, + 306, + 262 + ], + "blocks": [ + { + "bbox": [ + 187, + 188, + 246, + 196 + ], + "lines": [ + { + "bbox": [ + 187, + 188, + 246, + 196 + ], + "spans": [ + { + "bbox": [ + 187, + 188, + 246, + 196 + ], + "type": "text", + "content": "ECL (321 Variables)" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 111, + 196, + 306, + 262 + ], + "lines": [ + { + "bbox": [ + 111, + 196, + 306, + 262 + ], + "spans": [ + { + "bbox": [ + 111, + 196, + 306, + 262 + ], + "type": "image", + "image_path": "5ef83dc7f21995e29764acb949b98abb763fe9ee4fd5749b588be0ac111d733f.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 265, + 504, + 288 + ], + "lines": [ + { + "bbox": [ + 104, + 265, + 504, + 288 + ], + "spans": [ + { + "bbox": [ + 104, + 265, + 504, + 288 + ], + "type": "text", + "content": "Figure 6: Efficiency analysis. We compare representative time-series Transformers on multivariate datasets with variable numbers ranging from ten to hundred and increase the lookback length." 
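A measurement recipe along these lines (our sketch, not the authors' benchmarking script; requires a CUDA device) records peak memory and training throughput for one configuration:

```python
import time
import torch

def measure(model, batch, device="cuda", n_steps=20):
    """Return (peak GPU memory in GB, training iterations per second)."""
    model = model.to(device).train()
    opt = torch.optim.SGD(model.parameters(), lr=1e-3)
    x = batch.to(device)
    torch.cuda.reset_peak_memory_stats(device)
    torch.cuda.synchronize(device)
    start = time.perf_counter()
    for _ in range(n_steps):
        opt.zero_grad()
        loss = model(x).pow(2).mean()   # dummy objective, only to drive backward
        loss.backward()
        opt.step()
    torch.cuda.synchronize(device)
    mem_gb = torch.cuda.max_memory_allocated(device) / 2**30
    return mem_gb, n_steps / (time.perf_counter() - start)
```

Sweeping the lookback length of `batch` while holding the variable count fixed reproduces the shape of the curves above.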
+ } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 310, + 196, + 500, + 262 + ], + "blocks": [ + { + "bbox": [ + 380, + 188, + 440, + 196 + ], + "lines": [ + { + "bbox": [ + 380, + 188, + 440, + 196 + ], + "spans": [ + { + "bbox": [ + 380, + 188, + 440, + 196 + ], + "type": "text", + "content": "ECL (321 Variables)" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 310, + 196, + 500, + 262 + ], + "lines": [ + { + "bbox": [ + 310, + 196, + 500, + 262 + ], + "spans": [ + { + "bbox": [ + 310, + 196, + 500, + 262 + ], + "type": "image", + "image_path": "8ede19e9b3c61c84a7349767d6ba8a18094a3bb91cf8ce891ea014c63de2403a.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 109, + 303, + 284, + 437 + ], + "blocks": [ + { + "bbox": [ + 166, + 296, + 219, + 303 + ], + "lines": [ + { + "bbox": [ + 166, + 296, + 219, + 303 + ], + "spans": [ + { + "bbox": [ + 166, + 296, + 219, + 303 + ], + "type": "text", + "content": "Learned Attention" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 109, + 303, + 284, + 437 + ], + "lines": [ + { + "bbox": [ + 109, + 303, + 284, + 437 + ], + "spans": [ + { + "bbox": [ + 109, + 303, + 284, + 437 + ], + "type": "image", + "image_path": "80e46e2ca1351ea7849005b58e526640e11d4b5e879f7a64bde7ae578895fec0.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 441, + 506, + 486 + ], + "lines": [ + { + "bbox": [ + 104, + 441, + 506, + 486 + ], + "spans": [ + { + "bbox": [ + 104, + 441, + 506, + 486 + ], + "type": "text", + "content": "Figure 7: Visualization of TimeAttention. It is from the first sample of a length 672 in the test split of Traffic. We visualize the last 10 variables with each contains 7 tokens. We present auto-correlation function plot. Auto-correlation can be reflected by the distribution of attention scores (bottom right). We average TimeAttention across sub-blocks, which indicates Pearson correlations (upper right)." 
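The sub-block averaging used in this visualization can be written compactly; the reshape assumes the same variable-major token flattening as before, and the random map below merely demonstrates shapes (our illustration):

```python
import torch

def variable_correlation_map(attn, n_vars, n_patches):
    """Average an (N*T, N*T) attention map over its T x T sub-blocks.

    Returns an (N, N) summary of how much each variable attends to each
    other variable, the quantity this figure relates to the Pearson
    correlations of the raw series.
    """
    blocks = attn.reshape(n_vars, n_patches, n_vars, n_patches)
    return blocks.mean(dim=(1, 3))

# Shapes as in the showcase: the last 10 variables of Traffic, 7 tokens each.
attn = torch.rand(70, 70).softmax(dim=-1)            # stand-in attention map
corr_like = variable_correlation_map(attn, 10, 7)    # (10, 10)
```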
+ } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 284, + 295, + 503, + 369 + ], + "blocks": [ + { + "bbox": [ + 284, + 295, + 503, + 369 + ], + "lines": [ + { + "bbox": [ + 284, + 295, + 503, + 369 + ], + "spans": [ + { + "bbox": [ + 284, + 295, + 503, + 369 + ], + "type": "image", + "image_path": "3a06aff90573f0a821016699aae13f8670884469cca930d8e1961cdd607296d9.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 296, + 378, + 501, + 437 + ], + "blocks": [ + { + "bbox": [ + 301, + 371, + 347, + 379 + ], + "lines": [ + { + "bbox": [ + 301, + 371, + 347, + 379 + ], + "spans": [ + { + "bbox": [ + 301, + 371, + 347, + 379 + ], + "type": "text", + "content": "Sub-Block(3, 3)" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 296, + 378, + 501, + 437 + ], + "lines": [ + { + "bbox": [ + 296, + 378, + 501, + 437 + ], + "spans": [ + { + "bbox": [ + 296, + 378, + 501, + 437 + ], + "type": "image", + "image_path": "06321afc1ca0e3eb0b4cc245978620970bd5a8d172f6f7db5b451bbe63abb12b.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 494, + 506, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 494, + 506, + 594 + ], + "spans": [ + { + "bbox": [ + 104, + 494, + 506, + 594 + ], + "type": "text", + "content": "Representation Analysis In addition to the enhanced performance, fine-grained token dependencies offer improved interpretability. We present a showcase visualization from Traffic in Figure 7. It is observed that sub-matrices along the diagonal generally receive greater attention, which reasonably reveals predominant dependencies within the endogenous variable. By zooming in a sub-block that corresponds to Variable-3, we observe that the attention distribution of the last row can indicate certain strong dependencies among patch tokens. This observation is also supported by the auto-correlation function plot (ACF), which reveals auto-correlations with certain lags and thus the model pays special attention to these tokens. Furthermore, we average each sub-matrix into one scalar. The outcome matrix can also illustrate Pearson correlations presented in the raw data." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 609, + 303, + 620 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 609, + 303, + 620 + ], + "spans": [ + { + "bbox": [ + 105, + 609, + 303, + 620 + ], + "type": "text", + "content": "5 CONCLUSION AND FUTURE WORK" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 633, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 633, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 633, + 506, + 733 + ], + "type": "text", + "content": "In this paper, we emphasize the efficacy of causal Transformers in the forecasting of long-context time series. To facilitate long-context Transformers on diverse tasks, we propose multivariate next token prediction, a novel paradigm to predict multidimensional series with covariates. We present Timer-XL enhanced by TimeAttention as an extra-long version of pre-trained time-series Transformers. It simultaneously captures temporal dynamics and variable correlations by enhanced self-attention. 
In addition to achieving state-of-the-art performance on extensive benchmarks, we establish challenging benchmarks for long-context forecasting. By pre-training on large-scale heterogeneous time series, Timer-XL demonstrates notable zero-shot performance as a large time-series model. In the future, we will improve computational efficiency and build large domain-specific models with Timer-XL." + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 218, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 218, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 218, + 94 + ], + "type": "text", + "content": "ACKNOWLEDGMENTS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 105, + 506, + 129 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 105, + 506, + 129 + ], + "spans": [ + { + "bbox": [ + 105, + 105, + 506, + 129 + ], + "type": "text", + "content": "This work was supported by the National Natural Science Foundation of China (U2342217 and 62021002), the BNRist Project, and the National Engineering Research Center for Big Data Software." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 145, + 176, + 157 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 145, + 176, + 157 + ], + "spans": [ + { + "bbox": [ + 105, + 145, + 176, + 157 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 163, + 507, + 732 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 105, + 163, + 507, + 198 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 163, + 507, + 198 + ], + "spans": [ + { + "bbox": [ + 105, + 163, + 507, + 198 + ], + "type": "text", + "content": "Abdul Fatir Ansari, Lorenzo Stella, Caner Turkmen, Xiyuan Zhang, Pedro Mercado, Huibin Shen, Oleksandr Shchur, Syama Sundar Rangapuram, Sebastian Pineda Arango, Shubham Kapoor, et al. Chronos: Learning the language of time series. arXiv preprint arXiv:2403.07815, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 205, + 507, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 205, + 507, + 228 + ], + "spans": [ + { + "bbox": [ + 105, + 205, + 507, + 228 + ], + "type": "text", + "content": "Yoshua Bengio, Réjean Ducharme, and Pascal Vincent. A neural probabilistic language model. Advances in neural information processing systems, 13, 2000." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 236, + 507, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 236, + 507, + 270 + ], + "spans": [ + { + "bbox": [ + 105, + 236, + 507, + 270 + ], + "type": "text", + "content": "George Box. Box and jenkins: time series analysis, forecasting and control. 
In A Very British Affair: Six Britons and the Development of Time Series Analysis During the 20th Century, pp. 161-215. Springer, 2013." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 277, + 507, + 311 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 277, + 507, + 311 + ], + "spans": [ + { + "bbox": [ + 105, + 277, + 507, + 311 + ], + "type": "text", + "content": "Defu Cao, Yujing Wang, Juanyong Duan, Ce Zhang, Xia Zhu, Congrui Huang, Yunhai Tong, Bixiong Xu, Jing Bai, Jie Tong, et al. Spectral temporal graph neural network for multivariate time-series forecasting. Advances in neural information processing systems, 33:17766-17778, 2020." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 318, + 507, + 351 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 318, + 507, + 351 + ], + "spans": [ + { + "bbox": [ + 105, + 318, + 507, + 351 + ], + "type": "text", + "content": "Tri Dao, Dan Fu, Stefano Ermon, Atri Rudra, and Christopher Ré. Flashattention: Fast and memory-efficient exact attention with io-awareness. Advances in Neural Information Processing Systems, 35:16344-16359, 2022." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 358, + 507, + 383 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 358, + 507, + 383 + ], + "spans": [ + { + "bbox": [ + 105, + 358, + 507, + 383 + ], + "type": "text", + "content": "Abhimanyu Das, Weihao Kong, Rajat Sen, and Yichen Zhou. A decoder-only foundation model for time-series forecasting. arXiv preprint arXiv:2310.10688, 2023." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 389, + 507, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 389, + 507, + 434 + ], + "spans": [ + { + "bbox": [ + 105, + 389, + 507, + 434 + ], + "type": "text", + "content": "Xiaohan Ding, Yiyuan Zhang, Yixiao Ge, Sijie Zhao, Lin Song, Xiangyu Yue, and Ying Shan. Unireplknet: A universal perception large-kernel convnet for audio video point cloud time-series and image recognition. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 5513-5524, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 441, + 507, + 475 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 441, + 507, + 475 + ], + "spans": [ + { + "bbox": [ + 105, + 441, + 507, + 475 + ], + "type": "text", + "content": "Mononito Goswami, Konrad Szafer, Arjun Choudhry, Yifu Cai, Shuo Li, and Artur Dubrawski. Moment: A family of open time-series foundation models. arXiv preprint arXiv:2402.03885, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 483, + 507, + 517 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 483, + 507, + 517 + ], + "spans": [ + { + "bbox": [ + 105, + 483, + 507, + 517 + ], + "type": "text", + "content": "Hans Hersbach, Bill Bell, Paul Berrisford, Shoji Hirahara, András Horányi, Joaquín Muñoz-Sabater, Julien Nicolas, Carole Peubey, Raluca Radu, Dinand Schepers, et al. The era5 global reanalysis. Quarterly Journal of the Royal Meteorological Society, 146(730):1999-2049, 2020." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 524, + 373, + 537 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 524, + 373, + 537 + ], + "spans": [ + { + "bbox": [ + 105, + 524, + 373, + 537 + ], + "type": "text", + "content": "RJ Hyndman. Forecasting: principles and practice. OTexts, 2018." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 544, + 507, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 544, + 507, + 578 + ], + "spans": [ + { + "bbox": [ + 105, + 544, + 507, + 578 + ], + "type": "text", + "content": "Taesung Kim, Jinhee Kim, Yunwon Tae, Cheonbok Park, Jang-Ho Choi, and Jaegul Choo. Reversible instance normalization for accurate time-series forecasting against distribution shift. In International Conference on Learning Representations, 2021." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 585, + 507, + 608 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 585, + 507, + 608 + ], + "spans": [ + { + "bbox": [ + 105, + 585, + 507, + 608 + ], + "type": "text", + "content": "Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 615, + 507, + 650 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 615, + 507, + 650 + ], + "spans": [ + { + "bbox": [ + 105, + 615, + 507, + 650 + ], + "type": "text", + "content": "Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 4015-4026, 2023." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 657, + 507, + 691 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 657, + 507, + 691 + ], + "spans": [ + { + "bbox": [ + 105, + 657, + 507, + 691 + ], + "type": "text", + "content": "Jesus Lago, Grzegorz Marcjasz, Bart De Schutter, and Rafal Weron. Forecasting day-ahead electricity prices: A review of state-of-the-art algorithms, best practices and an open-access benchmark. Applied Energy, 293:116983, 2021." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 698, + 507, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 698, + 507, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 507, + 732 + ], + "type": "text", + "content": "Guokun Lai, Wei-Cheng Chang, Yiming Yang, and Hanxiao Liu. Modeling long-and short-term temporal patterns with deep neural networks. In The 41st international ACM SIGIR conference on research & development in information retrieval, pp. 95-104, 2018." 
+ } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 732 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 107, + 81, + 505, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 505, + 117 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 505, + 117 + ], + "type": "text", + "content": "Shiyang Li, Xiaoyong Jin, Yao Xuan, Xiyou Zhou, Wenhu Chen, Yu-Xiang Wang, and Xifeng Yan. Enhancing the locality and breaking the memory bottleneck of transformer on time series forecasting. Advances in neural information processing systems, 32, 2019." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 124, + 506, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 124, + 506, + 158 + ], + "spans": [ + { + "bbox": [ + 105, + 124, + 506, + 158 + ], + "type": "text", + "content": "Bryan Lim, Sercan Ö Arık, Nicolas Loeff, and Tomas Pfister. Temporal fusion transformers for interpretable multi-horizon time series forecasting. International Journal of Forecasting, 37(4): 1748-1764, 2021." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 167, + 504, + 202 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 167, + 504, + 202 + ], + "spans": [ + { + "bbox": [ + 105, + 167, + 504, + 202 + ], + "type": "text", + "content": "Juncheng Liu, Chenghao Liu, Gerald Woo, Yiwei Wang, Bryan Hooi, Caiming Xiong, and Doyen Sahoo. Unitst: Effectively modeling inter-series and intra-series dependencies for multivariate time series forecasting. arXiv preprint arXiv:2406.04975, 2024a." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 209, + 506, + 244 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 209, + 506, + 244 + ], + "spans": [ + { + "bbox": [ + 105, + 209, + 506, + 244 + ], + "type": "text", + "content": "Minhao Liu, Ailing Zeng, Muxi Chen, Zhijian Xu, Qiuxia Lai, Lingna Ma, and Qiang Xu. Scinet: Time series modeling and forecasting with sample convolution and interaction. Advances in Neural Information Processing Systems, 35:5816-5828, 2022a." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 252, + 506, + 287 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 252, + 506, + 287 + ], + "spans": [ + { + "bbox": [ + 105, + 252, + 506, + 287 + ], + "type": "text", + "content": "Shizhan Liu, Hang Yu, Cong Liao, Jianguo Li, Weiyao Lin, Alex X Liu, and Schahram Dustar. Pyraformer: Low-complexity pyramidal attention for long-range time series modeling and forecasting. In International conference on learning representations, 2021." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 295, + 506, + 328 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 295, + 506, + 328 + ], + "spans": [ + { + "bbox": [ + 105, + 295, + 506, + 328 + ], + "type": "text", + "content": "Yong Liu, Haixu Wu, Jianmin Wang, and Mingsheng Long. Non-stationary transformers: Exploring the stationarity in time series forecasting. Advances in Neural Information Processing Systems, 35: 9881-9893, 2022b." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 337, + 506, + 371 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 337, + 506, + 371 + ], + "spans": [ + { + "bbox": [ + 105, + 337, + 506, + 371 + ], + "type": "text", + "content": "Yong Liu, Tengge Hu, Haoran Zhang, Haixu Wu, Shiyu Wang, Lintao Ma, and Mingsheng Long. itransformer: Inverted transformers are effective for time series forecasting. arXiv preprint arXiv:2310.06625, 2023." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 380, + 506, + 414 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 380, + 506, + 414 + ], + "spans": [ + { + "bbox": [ + 105, + 380, + 506, + 414 + ], + "type": "text", + "content": "Yong Liu, Guo Qin, Xiangdong Huang, Jianmin Wang, and Mingsheng Long. Autotimes: Autoregressive time series forecasters via large language models. arXiv preprint arXiv:2402.02370, 2024b." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 422, + 506, + 457 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 422, + 506, + 457 + ], + "spans": [ + { + "bbox": [ + 105, + 422, + 506, + 457 + ], + "type": "text", + "content": "Yong Liu, Haoran Zhang, Chenyu Li, Xiangdong Huang, Jianmin Wang, and Mingsheng Long. Timer: Generative pre-trained transformers are large time series models. In *Forty-first International Conference on Machine Learning*, 2024c." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 465, + 506, + 489 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 465, + 506, + 489 + ], + "spans": [ + { + "bbox": [ + 105, + 465, + 506, + 489 + ], + "type": "text", + "content": "Spyros Makridakis, Evangelos Spiliotis, and Vassilios Assimakopoulos. The m4 competition: 100,000 time series and 61 forecasting methods. International Journal of Forecasting, 36(1):54-74, 2020." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 497, + 504, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 497, + 504, + 521 + ], + "spans": [ + { + "bbox": [ + 105, + 497, + 504, + 521 + ], + "type": "text", + "content": "Yuqi Nie, Nam H Nguyen, Phanwadee Sinthong, and Jayant Kalagnanam. A time series is worth 64 words: Long-term forecasting with transformers. arXiv preprint arXiv:2211.14730, 2022." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 529, + 434, + 541 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 529, + 434, + 541 + ], + "spans": [ + { + "bbox": [ + 105, + 529, + 434, + 541 + ], + "type": "text", + "content": "R OpenAI. Gpt-4 technical report. arxiv 2303.08774. View in Article, 2:13, 2023." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 548, + 506, + 582 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 548, + 506, + 582 + ], + "spans": [ + { + "bbox": [ + 105, + 548, + 506, + 582 + ], + "type": "text", + "content": "Boris N Oreshkin, Dmitri Carpov, Nicolas Chapados, and Yoshua Bengio. 
N-beats: Neural basis expansion analysis for interpretable time series forecasting. arXiv preprint arXiv:1905.10437, 2019." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 591, + 506, + 636 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 591, + 506, + 636 + ], + "spans": [ + { + "bbox": [ + 105, + 591, + 506, + 636 + ], + "type": "text", + "content": "Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. Pytorch: An imperative style, high-performance deep learning library. Advances in neural information processing systems, 32, 2019." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 646, + 345, + 658 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 646, + 345, + 658 + ], + "spans": [ + { + "bbox": [ + 105, + 646, + 345, + 658 + ], + "type": "text", + "content": "PEMS. Traffic Dataset. http://pems.dot.ca.gov/." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 666, + 504, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 666, + 504, + 690 + ], + "spans": [ + { + "bbox": [ + 105, + 666, + 504, + 690 + ], + "type": "text", + "content": "Ofir Press, Noah A Smith, and Mike Lewis. Train short, test long: Attention with linear biases enables input length extrapolation. arXiv preprint arXiv:2108.12409, 2021." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 698, + 504, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 698, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 504, + 732 + ], + "type": "text", + "content": "Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J Liu. Exploring the limits of transfer learning with a unified text-to-text transformer. The Journal of Machine Learning Research, 21(1):5485-5551, 2020." + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 733 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 107, + 81, + 506, + 128 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 506, + 128 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 506, + 128 + ], + "type": "text", + "content": "Kashif Rasul, Arjun Ashok, Andrew Robert Williams, Arian Khorasani, George Adamopoulos, Rishika Bhagwatkar, Marin Biloš, Hera Ghonia, Nadhir Vincent Hassen, Anderson Schneider, et al. Lag-llama: Towards foundation models for time series forecasting. arXiv preprint arXiv:2310.08278, 2023." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 133, + 506, + 168 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 133, + 506, + 168 + ], + "spans": [ + { + "bbox": [ + 105, + 133, + 506, + 168 + ], + "type": "text", + "content": "David Salinas, Valentin Flunkert, Jan Gasthaus, and Tim Januschowski. Deeper: Probabilistic forecasting with autoregressive recurrent networks. International journal of forecasting, 36(3): 1181-1191, 2020." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 175, + 506, + 210 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 175, + 506, + 210 + ], + "spans": [ + { + "bbox": [ + 105, + 175, + 506, + 210 + ], + "type": "text", + "content": "Xiaoming Shi, Shiyu Wang, Yuqi Nie, Dianqi Li, Zhou Ye, Qingsong Wen, and Ming Jin. Time-moe: Billion-scale time series foundation models with mixture of experts. arXiv preprint arXiv:2409.16040, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 216, + 504, + 242 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 216, + 504, + 242 + ], + "spans": [ + { + "bbox": [ + 105, + 216, + 504, + 242 + ], + "type": "text", + "content": "Jianlin Su, Murtadha Ahmed, Yu Lu, Shengfeng Pan, Wen Bo, and Yunfeng Liu. Roformer: Enhanced transformer with rotary position embedding. Neurocomputing, 568:127063, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 246, + 504, + 272 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 246, + 504, + 272 + ], + "spans": [ + { + "bbox": [ + 105, + 246, + 504, + 272 + ], + "type": "text", + "content": "Huihui Sun and Xiaofeng Zhang. Study on coded permutation entropy of finite length gaussian white noise time series. Chinese Journal of Electronics, 33(1):185-194, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 277, + 504, + 312 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 277, + 504, + 312 + ], + "spans": [ + { + "bbox": [ + 105, + 277, + 504, + 312 + ], + "type": "text", + "content": "Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, et al. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971, 2023." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 318, + 504, + 353 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 318, + 504, + 353 + ], + "spans": [ + { + "bbox": [ + 105, + 318, + 504, + 353 + ], + "type": "text", + "content": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in neural information processing systems, 30, 2017." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 359, + 504, + 395 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 359, + 504, + 395 + ], + "spans": [ + { + "bbox": [ + 105, + 359, + 504, + 395 + ], + "type": "text", + "content": "Xindi Wang, Mahsa Salmani, Parsa Omidi, Xiangyu Ren, Mehdi Rezagholizadeh, and Armaghan Eshaghi. Beyond the limits: A survey of techniques to extend the context length in large language models. arXiv preprint arXiv:2402.02244, 2024a." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 400, + 506, + 436 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 400, + 506, + 436 + ], + "spans": [ + { + "bbox": [ + 105, + 400, + 506, + 436 + ], + "type": "text", + "content": "Yuxuan Wang, Haixu Wu, Jiaxiang Dong, Yong Liu, Yunzhong Qiu, Haoran Zhang, Jianmin Wang, and Mingsheng Long. Timexer: Empowering transformers for time series forecasting with exogenous variables. arXiv preprint arXiv:2402.19072, 2024b." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 441, + 506, + 476 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 441, + 506, + 476 + ], + "spans": [ + { + "bbox": [ + 105, + 441, + 506, + 476 + ], + "type": "text", + "content": "Gerald Woo, Chenghao Liu, Akshit Kumar, Caiming Xiong, Silvio Savarese, and Doyen Sahoo. Unified training of universal time series forecasting transformers. arXiv preprint arXiv:2402.02592, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 483, + 504, + 518 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 483, + 504, + 518 + ], + "spans": [ + { + "bbox": [ + 105, + 483, + 504, + 518 + ], + "type": "text", + "content": "Haixu Wu, Jiehui Xu, Jianmin Wang, and Mingsheng Long. Autoformer: Decomposition transformers with auto-correlation for long-term series forecasting. Advances in Neural Information Processing Systems, 34:22419-22430, 2021." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 525, + 506, + 558 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 525, + 506, + 558 + ], + "spans": [ + { + "bbox": [ + 105, + 525, + 506, + 558 + ], + "type": "text", + "content": "Haixu Wu, Tengge Hu, Yong Liu, Hang Zhou, Jianmin Wang, and Mingsheng Long. Timesnet: Temporal 2d-variation modeling for general time series analysis. arXiv preprint arXiv:2210.02186, 2022." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 565, + 506, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 565, + 506, + 590 + ], + "spans": [ + { + "bbox": [ + 105, + 565, + 506, + 590 + ], + "type": "text", + "content": "Haixu Wu, Hang Zhou, Mingsheng Long, and Jianmin Wang. Interpretable weather forecasting for worldwide stations with a unified deep model. Nature Machine Intelligence, 5(6):602-611, 2023." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 596, + 504, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 596, + 504, + 621 + ], + "spans": [ + { + "bbox": [ + 105, + 596, + 504, + 621 + ], + "type": "text", + "content": "Shukang Yin, Chaoyou Fu, Sirui Zhao, Ke Li, Xing Sun, Tong Xu, and Enhong Chen. A survey on multimodal large language models. arXiv preprint arXiv:2306.13549, 2023." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 626, + 504, + 651 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 626, + 504, + 651 + ], + "spans": [ + { + "bbox": [ + 105, + 626, + 504, + 651 + ], + "type": "text", + "content": "Manzil Zaheer, Satwik Kottur, Siamak Ravanbakhsh, Barnabas Poczos, Russ R Salakhutdinov, and Alexander J Smola. Deep sets. Advances in neural information processing systems, 30, 2017." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 656, + 506, + 691 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 656, + 506, + 691 + ], + "spans": [ + { + "bbox": [ + 105, + 656, + 506, + 691 + ], + "type": "text", + "content": "Ailing Zeng, Muxi Chen, Lei Zhang, and Qiang Xu. Are transformers effective for time series forecasting? In Proceedings of the AAAI conference on artificial intelligence, volume 37, pp. 11121-11128, 2023." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 697, + 504, + 733 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 697, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 105, + 697, + 504, + 733 + ], + "type": "text", + "content": "Yunhao Zhang and Junchi Yan. Crossformer: Transformer utilizing cross-dimension dependency for multivariate time series forecasting. In The Eleventh International Conference on Learning Representations, 2022." + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 506, + 117 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 506, + 117 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 506, + 117 + ], + "type": "text", + "content": "Wayne Xin Zhao, Kun Zhou, Junyi Li, Tianyi Tang, Xiaolei Wang, Yupeng Hou, Yingqian Min, Beichen Zhang, Junjie Zhang, Zican Dong, et al. A survey of large language models. arXiv preprint arXiv:2303.18223, 2023." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 121, + 506, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 121, + 506, + 157 + ], + "spans": [ + { + "bbox": [ + 105, + 121, + 506, + 157 + ], + "type": "text", + "content": "Haoyi Zhou, Shanghang Zhang, Jieqi Peng, Shuai Zhang, Jianxin Li, Hui Xiong, and Wancai Zhang. Informer: Beyond efficient transformer for long sequence time-series forecasting. In Proceedings of the AAAI conference on artificial intelligence, volume 35, pp. 11106-11115, 2021." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 175, + 288, + 188 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 175, + 288, + 188 + ], + "spans": [ + { + "bbox": [ + 105, + 175, + 288, + 188 + ], + "type": "text", + "content": "A PROOF OF MODEL EFFICIENCY" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 200, + 167, + 211 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 200, + 167, + 211 + ], + "spans": [ + { + "bbox": [ + 105, + 200, + 167, + 211 + ], + "type": "text", + "content": "A.1 SETUPS" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 220, + 506, + 310 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 220, + 506, + 310 + ], + "spans": [ + { + "bbox": [ + 104, + 220, + 506, + 310 + ], + "type": "text", + "content": "An input univariate time series is divided into " + }, + { + "bbox": [ + 104, + 220, + 506, + 310 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 220, + 506, + 310 + ], + "type": "text", + "content": " tokens according to the patch size " + }, + { + "bbox": [ + 104, + 220, + 506, + 310 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 104, + 220, + 506, + 310 + ], + "type": "text", + "content": ", and the token sequence is fed into the vanilla Transformer. The training objective is to predict the next token of " + }, + { + "bbox": [ + 104, + 220, + 506, + 310 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 104, + 220, + 506, + 310 + ], + "type": "text", + "content": " time points. We will generalize the derivation from 1D sequences to 2D time series based on different approaches to handle multivariate data with the variable number " + }, + { + "bbox": [ + 104, + 220, + 506, + 310 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 220, + 506, + 310 + ], + "type": "text", + "content": ". We adopt the same notations as before: the Transformer consists of " + }, + { + "bbox": [ + 104, + 220, + 506, + 310 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 104, + 220, + 506, + 310 + ], + "type": "text", + "content": " blocks with model dimension " + }, + { + "bbox": [ + 104, + 220, + 506, + 310 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 220, + 506, + 310 + ], + "type": "text", + "content": ". The multi-head attention mechanism has " + }, + { + "bbox": [ + 104, + 220, + 506, + 310 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 220, + 506, + 310 + ], + "type": "text", + "content": " heads, each with a dimension of " + }, + { + "bbox": [ + 104, + 220, + 506, + 310 + ], + "type": "inline_equation", + "content": "d_{k}" + }, + { + "bbox": [ + 104, + 220, + 506, + 310 + ], + "type": "text", + "content": " for query, key, and value, and " + }, + { + "bbox": [ + 104, + 220, + 506, + 310 + ], + "type": "inline_equation", + "content": "d_{k} = \\frac{D}{H}" + }, + { + "bbox": [ + 104, + 220, + 506, + 310 + ], + "type": "text", + "content": ". The intermediate dimension of the feed-forward network is set as " + }, + { + "bbox": [ + 104, + 220, + 506, + 310 + ], + "type": "inline_equation", + "content": "D_{\\mathrm{ff}} = \\alpha D" + }, + { + "bbox": [ + 104, + 220, + 506, + 310 + ], + "type": "text", + "content": ". The results are summarized in Table 8; we provide the detailed proofs in the following sections."
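To make the setup concrete, the following is a minimal sketch (our own illustration with assumed shapes, not the authors' released code) of how a univariate series of T * P time points is tokenized and paired with next-token targets:

```python
# Minimal illustration of the A.1 setup (assumed shapes, not the paper's code):
# a univariate series of T * P time points becomes T patch tokens of size P,
# and the decoder-only objective predicts the next token of P time points.
import torch

P, T = 96, 7                                 # patch size and token number
series = torch.randn(T * P)                  # one univariate series
tokens = series.reshape(T, P)                # T tokens, each of P time points
inputs, targets = tokens[:-1], tokens[1:]    # next-token prediction pairs
print(inputs.shape, targets.shape)           # torch.Size([6, 96]) torch.Size([6, 96])
```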
+ } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 106, + 342, + 504, + 418 + ], + "blocks": [ + { + "bbox": [ + 105, + 327, + 506, + 339 + ], + "lines": [ + { + "bbox": [ + 105, + 327, + 506, + 339 + ], + "spans": [ + { + "bbox": [ + 105, + 327, + 506, + 339 + ], + "type": "text", + "content": "Table 8: Parameter count and computational complexity of Transformers for multivariate time series." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 342, + 504, + 418 + ], + "lines": [ + { + "bbox": [ + 106, + 342, + 504, + 418 + ], + "spans": [ + { + "bbox": [ + 106, + 342, + 504, + 418 + ], + "type": "table", + "html": "
<table><tr><td>Metric</td><td>Type</td><td>Count</td><td>Complexity</td></tr>
<tr><td rowspan="2">FLOPs (Training Speed)</td><td>Channel Independence</td><td>12(PDNT + L(D + H)NT<sup>2</sup> + (2 + α)LD<sup>2</sup>NT)</td><td>O(LDNT(D + T))</td></tr>
<tr><td>Channel Dependence</td><td>12(PDNT + L(D + H)N<sup>2</sup>T<sup>2</sup> + (2 + α)LD<sup>2</sup>NT)</td><td>O(LDNT(D + NT))</td></tr>
<tr><td rowspan="2">Parameters</td><td>Encoder-Only</td><td>(4 + 2α)LD<sup>2</sup> + 4LD + (1 + T)PD</td><td>O(LD<sup>2</sup>)</td></tr>
<tr><td>Decoder-Only</td><td>(4 + 2α)LD<sup>2</sup> + 4LD + 2PD</td><td>O(LD<sup>2</sup>)</td></tr>
<tr><td rowspan="2">Memory Footprint</td><td>Self-Attention</td><td>4(D + P)NT + (32 + 8α)LDNT + 4LHN<sup>2</sup>T<sup>2</sup></td><td>O(LHN<sup>2</sup>T<sup>2</sup>)</td></tr>
<tr><td>FlashAttention</td><td>4(D + P)NT + (32 + 8α)LDNT</td><td>O(LDNT)</td></tr></table>
", + "image_path": "aee443b20369e87818db55b6a02afcebd785e671a0ec2689101915dc48c905af.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 115, + 419, + 505, + 445 + ], + "lines": [ + { + "bbox": [ + 115, + 419, + 505, + 445 + ], + "spans": [ + { + "bbox": [ + 115, + 419, + 505, + 445 + ], + "type": "text", + "content": "* " + }, + { + "bbox": [ + 115, + 419, + 505, + 445 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 115, + 419, + 505, + 445 + ], + "type": "text", + "content": " is the block number of Transformers. " + }, + { + "bbox": [ + 115, + 419, + 505, + 445 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 115, + 419, + 505, + 445 + ], + "type": "text", + "content": " is the dimension of embeddings (the hidden dimension of FFN " + }, + { + "bbox": [ + 115, + 419, + 505, + 445 + ], + "type": "inline_equation", + "content": "D_{\\mathrm{ff}}" + }, + { + "bbox": [ + 115, + 419, + 505, + 445 + ], + "type": "text", + "content": " is set as " + }, + { + "bbox": [ + 115, + 419, + 505, + 445 + ], + "type": "inline_equation", + "content": "\\alpha D" + }, + { + "bbox": [ + 115, + 419, + 505, + 445 + ], + "type": "text", + "content": "). " + }, + { + "bbox": [ + 115, + 419, + 505, + 445 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 115, + 419, + 505, + 445 + ], + "type": "text", + "content": " is the head number and the dimension of query, key, and value " + }, + { + "bbox": [ + 115, + 419, + 505, + 445 + ], + "type": "inline_equation", + "content": "d_k = D / H" + }, + { + "bbox": [ + 115, + 419, + 505, + 445 + ], + "type": "text", + "content": ". The overhead is to train on a multivariate time series (" + }, + { + "bbox": [ + 115, + 419, + 505, + 445 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 115, + 419, + 505, + 445 + ], + "type": "text", + "content": "-variables and " + }, + { + "bbox": [ + 115, + 419, + 505, + 445 + ], + "type": "inline_equation", + "content": "TP" + }, + { + "bbox": [ + 115, + 419, + 505, + 445 + ], + "type": "text", + "content": " time points) with patch token length " + }, + { + "bbox": [ + 115, + 419, + 505, + 445 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 115, + 419, + 505, + 445 + ], + "type": "text", + "content": " and context length " + }, + { + "bbox": [ + 115, + 419, + 505, + 445 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 115, + 419, + 505, + 445 + ], + "type": "text", + "content": ". Set " + }, + { + "bbox": [ + 115, + 419, + 505, + 445 + ], + "type": "inline_equation", + "content": "N = 1" + }, + { + "bbox": [ + 115, + 419, + 505, + 445 + ], + "type": "text", + "content": " for training on univariate time series." 
+ } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 458, + 167, + 469 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 458, + 167, + 469 + ], + "spans": [ + { + "bbox": [ + 105, + 458, + 167, + 469 + ], + "type": "text", + "content": "A.2 FLOPs" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 478, + 506, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 478, + 506, + 536 + ], + "spans": [ + { + "bbox": [ + 104, + 478, + 506, + 536 + ], + "type": "text", + "content": "As a preliminary, the multiplication between matrix " + }, + { + "bbox": [ + 104, + 478, + 506, + 536 + ], + "type": "inline_equation", + "content": "\\mathbf{A} \\in \\mathbb{R}^{n \\times m}" + }, + { + "bbox": [ + 104, + 478, + 506, + 536 + ], + "type": "text", + "content": " and matrix " + }, + { + "bbox": [ + 104, + 478, + 506, + 536 + ], + "type": "inline_equation", + "content": "\\mathbf{C} \\in \\mathbb{R}^{m \\times p}" + }, + { + "bbox": [ + 104, + 478, + 506, + 536 + ], + "type": "text", + "content": " requires " + }, + { + "bbox": [ + 104, + 478, + 506, + 536 + ], + "type": "inline_equation", + "content": "mnp" + }, + { + "bbox": [ + 104, + 478, + 506, + 536 + ], + "type": "text", + "content": " multiplications and " + }, + { + "bbox": [ + 104, + 478, + 506, + 536 + ], + "type": "inline_equation", + "content": "mnp" + }, + { + "bbox": [ + 104, + 478, + 506, + 536 + ], + "type": "text", + "content": " additions, resulting in " + }, + { + "bbox": [ + 104, + 478, + 506, + 536 + ], + "type": "inline_equation", + "content": "2mnp" + }, + { + "bbox": [ + 104, + 478, + 506, + 536 + ], + "type": "text", + "content": " floating-point operations. Given batched matrices " + }, + { + "bbox": [ + 104, + 478, + 506, + 536 + ], + "type": "inline_equation", + "content": "\\mathbf{A} \\in \\mathbb{R}^{B \\times n \\times m}" + }, + { + "bbox": [ + 104, + 478, + 506, + 536 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 478, + 506, + 536 + ], + "type": "inline_equation", + "content": "\\mathbf{C} \\in \\mathbb{R}^{B \\times m \\times p}" + }, + { + "bbox": [ + 104, + 478, + 506, + 536 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 478, + 506, + 536 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 478, + 506, + 536 + ], + "type": "text", + "content": " times matrix multiplications will be performed. It is evident that the batch size is a linear multiplier. Thus, we first omit " + }, + { + "bbox": [ + 104, + 478, + 506, + 536 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 478, + 506, + 536 + ], + "type": "text", + "content": " to calculate the operations of dealing with one univariate series, and then we will reintroduce it to analyze channel independence." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 539, + 506, + 596 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 539, + 506, + 596 + ], + "spans": [ + { + "bbox": [ + 104, + 539, + 506, + 596 + ], + "type": "text", + "content": "The computational cost of Transformers can be primarily categorized into two types: (1) multi-head attention calculation and (2) linear transformations. 
In contrast, the operations of layer normalization, residual connection, activation functions, and position embedding with the complexity of " + }, + { + "bbox": [ + 104, + 539, + 506, + 596 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(TD)" + }, + { + "bbox": [ + 104, + 539, + 506, + 596 + ], + "type": "text", + "content": " are less significant. Therefore, we derive the computational complexity mainly with respect to the above two types by delving into the forwarding process of one univariate series." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 606, + 504, + 631 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 606, + 504, + 631 + ], + "spans": [ + { + "bbox": [ + 104, + 606, + 504, + 631 + ], + "type": "text", + "content": "Patch Embedding The tokenized time series " + }, + { + "bbox": [ + 104, + 606, + 504, + 631 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{x}_i\\} \\in \\mathbb{R}^{T\\times P}" + }, + { + "bbox": [ + 104, + 606, + 504, + 631 + ], + "type": "text", + "content": " is mapped into the embedding space through the patch-wise embedding " + }, + { + "bbox": [ + 104, + 606, + 504, + 631 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_e\\in \\mathbb{R}^{D\\times P}" + }, + { + "bbox": [ + 104, + 606, + 504, + 631 + ], + "type": "text", + "content": ", resulting in " + }, + { + "bbox": [ + 104, + 606, + 504, + 631 + ], + "type": "inline_equation", + "content": "2PDT" + }, + { + "bbox": [ + 104, + 606, + 504, + 631 + ], + "type": "text", + "content": " operations." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 641, + 507, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 641, + 507, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 641, + 507, + 734 + ], + "type": "text", + "content": "Self-Attention The calculation of self-attention begins with the computation of query, key and value by multiplying the patch embeddings with matrices " + }, + { + "bbox": [ + 104, + 641, + 507, + 734 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_q" + }, + { + "bbox": [ + 104, + 641, + 507, + 734 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 641, + 507, + 734 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_k" + }, + { + "bbox": [ + 104, + 641, + 507, + 734 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 641, + 507, + 734 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_v \\in \\mathbb{R}^{D \\times d_k}" + }, + { + "bbox": [ + 104, + 641, + 507, + 734 + ], + "type": "text", + "content": " respectively in " + }, + { + "bbox": [ + 104, + 641, + 507, + 734 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 641, + 507, + 734 + ], + "type": "text", + "content": " heads, which incurs a computational cost of " + }, + { + "bbox": [ + 104, + 641, + 507, + 734 + ], + "type": "inline_equation", + "content": "6HDd_kT = 6D^2T" + }, + { + "bbox": [ + 104, + 641, + 507, + 734 + ], + "type": "text", + "content": " and yields " + }, + { + "bbox": [ + 104, + 641, + 507, + 734 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}" + }, + { + "bbox": [ + 104, + 641, + 507, + 734 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 641, + 507, + 734 + ], + "type": "inline_equation", + "content": "\\mathbf{K}" + }, + { + "bbox": [ + 104, + 641, + 507, + 734 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 641, + 507, + 734 + ], + 
"type": "inline_equation", + "content": "\\mathbf{V} \\in \\mathbb{R}^{H \\times T \\times d_k}" + }, + { + "bbox": [ + 104, + 641, + 507, + 734 + ], + "type": "text", + "content": ". Next, the dot product " + }, + { + "bbox": [ + 104, + 641, + 507, + 734 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}\\mathbf{K}^\\top \\in \\mathbb{R}^{H \\times T \\times T}" + }, + { + "bbox": [ + 104, + 641, + 507, + 734 + ], + "type": "text", + "content": " is conducted in each head, leading to " + }, + { + "bbox": [ + 104, + 641, + 507, + 734 + ], + "type": "inline_equation", + "content": "2Hd_kT^2 = 2DT^2" + }, + { + "bbox": [ + 104, + 641, + 507, + 734 + ], + "type": "text", + "content": " operations. Following this, the Pre-Softmax map is divided by " + }, + { + "bbox": [ + 104, + 641, + 507, + 734 + ], + "type": "inline_equation", + "content": "\\sqrt{d_k}" + }, + { + "bbox": [ + 104, + 641, + 507, + 734 + ], + "type": "text", + "content": " and processed through Softmax, which includes exponentiation, summation, and normalization of each element, resulting in " + }, + { + "bbox": [ + 104, + 641, + 507, + 734 + ], + "type": "inline_equation", + "content": "4HT^2" + }, + { + "bbox": [ + 104, + 641, + 507, + 734 + ], + "type": "text", + "content": " operations. The subsequent multiplication with " + }, + { + "bbox": [ + 104, + 641, + 507, + 734 + ], + "type": "inline_equation", + "content": "\\mathbf{V}" + }, + { + "bbox": [ + 104, + 641, + 507, + 734 + ], + "type": "text", + "content": " incurs " + }, + { + "bbox": [ + 104, + 641, + 507, + 734 + ], + "type": "inline_equation", + "content": "2Hd_kT^2 = 2DT^2" + }, + { + "bbox": [ + 104, + 641, + 507, + 734 + ], + "type": "text", + "content": " operations. Finally, multiple heads are concatenated and multiplied by " + }, + { + "bbox": [ + 104, + 641, + 507, + 734 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_o \\in \\mathbb{R}^{D \\times D}" + }, + { + "bbox": [ + 104, + 641, + 507, + 734 + ], + "type": "text", + "content": ", contributing " + }, + { + "bbox": [ + 104, + 641, + 507, + 734 + ], + "type": "inline_equation", + "content": "2D^2T" + }, + { + "bbox": [ + 104, + 641, + 507, + 734 + ], + "type": "text", + "content": " operations." 
+ } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "content": "Feed-Forward Network It first projects the token representations into the dimension of " + }, + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "inline_equation", + "content": "D_{ff}" + }, + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "content": " and subsequently projects them back to the dimension " + }, + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "content": ", resulting in a total of " + }, + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "inline_equation", + "content": "4\\alpha D^2 T" + }, + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "content": " operations." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 120, + 506, + 165 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 120, + 506, + 165 + ], + "spans": [ + { + "bbox": [ + 104, + 120, + 506, + 165 + ], + "type": "text", + "content": "Patch Projection For encoder-only models, all token representations are flattened and mapped directly to " + }, + { + "bbox": [ + 104, + 120, + 506, + 165 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 104, + 120, + 506, + 165 + ], + "type": "text", + "content": " time points by " + }, + { + "bbox": [ + 104, + 120, + 506, + 165 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_d\\in \\mathbb{R}^{TD\\times P}" + }, + { + "bbox": [ + 104, + 120, + 506, + 165 + ], + "type": "text", + "content": ". In contrast, the token-wise projector " + }, + { + "bbox": [ + 104, + 120, + 506, + 165 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_d\\in \\mathbb{R}^{D\\times P}" + }, + { + "bbox": [ + 104, + 120, + 506, + 165 + ], + "type": "text", + "content": " in decoder-only models independently maps each token to the predicted next token. In both cases, the number of operations is " + }, + { + "bbox": [ + 104, + 120, + 506, + 165 + ], + "type": "inline_equation", + "content": "2PDT" + }, + { + "bbox": [ + 104, + 120, + 506, + 165 + ], + "type": "text", + "content": ", but the token-wise projector will result in a smaller parameter count."
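As a sanity check, the per-component counts above can be tallied numerically; the sketch below (our own, using the hyperparameters assumed later in this appendix) confirms that they reduce to the forward-pass total assembled in the next paragraph:

```python
# Sanity check (our own sketch): sum the per-component forward FLOPs derived
# above and compare with the closed form 4PDT + 4L(D+H)T^2 + (8+4a)LD^2T.
D, H, L, ALPHA, T, P = 512, 8, 4, 4, 7, 96   # assumed hyperparameters

embed = 2 * P * D * T                  # patch embedding
attn = (6 * D ** 2 * T                 # query/key/value projections
        + 2 * D * T ** 2               # dot product QK^T
        + 4 * H * T ** 2               # softmax: exponentiation, sum, normalization
        + 2 * D * T ** 2               # multiplication with V
        + 2 * D ** 2 * T)              # output projection W_o
ffn = 4 * ALPHA * D ** 2 * T           # feed-forward network
project = 2 * P * D * T                # patch projection
forward = embed + L * (attn + ffn) + project
assert forward == 4 * P * D * T + 4 * L * (D + H) * T ** 2 + (8 + 4 * ALPHA) * L * D ** 2 * T
```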
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 170, + 506, + 227 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 170, + 506, + 227 + ], + "spans": [ + { + "bbox": [ + 104, + 170, + 506, + 227 + ], + "type": "text", + "content": "The forward-pass operations of an " + }, + { + "bbox": [ + 104, + 170, + 506, + 227 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 104, + 170, + 506, + 227 + ], + "type": "text", + "content": "-layer Transformer sum to " + }, + { + "bbox": [ + 104, + 170, + 506, + 227 + ], + "type": "inline_equation", + "content": "4PDT + 4L(D + H)T^2 + (8 + 4\\alpha)LD^2 T" + }, + { + "bbox": [ + 104, + 170, + 506, + 227 + ], + "type": "text", + "content": ". Considering that the majority of operations in Transformers are binary operations (e.g., matrix multiplications), the gradients of both operand matrices are computed separately. As a result, the number of operations in backpropagation is twice that of the forward pass. Therefore, the total operations of training a Transformer on a univariate series consisting of " + }, + { + "bbox": [ + 104, + 170, + 506, + 227 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 170, + 506, + 227 + ], + "type": "text", + "content": " patches, each of length " + }, + { + "bbox": [ + 104, + 170, + 506, + 227 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 104, + 170, + 506, + 227 + ], + "type": "text", + "content": ", is derived as:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 186, + 232, + 423, + 247 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 186, + 232, + 423, + 247 + ], + "spans": [ + { + "bbox": [ + 186, + 232, + 423, + 247 + ], + "type": "interline_equation", + "content": "f(T) = 12PDT + 12L(D + H)T^{2} + (24 + 12\\alpha)LD^{2}T.", + "image_path": "5597e688369c6682060a466f406bd79137da8648bfa61ec54e5a46d4b81f1729.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 254, + 505, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 254, + 505, + 277 + ], + "spans": [ + { + "bbox": [ + 104, + 254, + 505, + 277 + ], + "type": "text", + "content": "We plug in typical hyperparameters of current time-series Transformers and forecasting benchmarks: " + }, + { + "bbox": [ + 104, + 254, + 505, + 277 + ], + "type": "inline_equation", + "content": "D = 512" + }, + { + "bbox": [ + 104, + 254, + 505, + 277 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 254, + 505, + 277 + ], + "type": "inline_equation", + "content": "H = 8" + }, + { + "bbox": [ + 104, + 254, + 505, + 277 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 254, + 505, + 277 + ], + "type": "inline_equation", + "content": "L = 4" + }, + { + "bbox": [ + 104, + 254, + 505, + 277 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 254, + 505, + 277 + ], + "type": "inline_equation", + "content": "\\alpha = 4" + }, + { + "bbox": [ + 104, + 254, + 505, + 277 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 254, + 505, + 277 + ], + "type": "inline_equation", + "content": "T = 7" + }, + { + "bbox": [ + 104, + 254, + 505, + 277 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 254, + 505, + 277 + ], + "type": "inline_equation", + "content": "P = 96" + }, + { + "bbox": [ + 104, + 254, + 505, + 277 + ], + "type": "text", + "content": ", and obtain:" + } + ] + } + ], + "index": 5 +
}, + { + "bbox": [ + 190, + 284, + 419, + 297 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 190, + 284, + 419, + 297 + ], + "spans": [ + { + "bbox": [ + 190, + 284, + 419, + 297 + ], + "type": "interline_equation", + "content": "f(T) = 24960T^{2} + 76087296T \\propto 3.28 \\times 10^{-4} T^{2} + T.", + "image_path": "00405431c4e8015f4c4f298014e1cca21de0bf5ec3bc3dc0cad6700871fe076e.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 305, + 506, + 340 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 305, + 506, + 340 + ], + "spans": [ + { + "bbox": [ + 104, + 305, + 506, + 340 + ], + "type": "text", + "content": "Due to the prevalence of short contexts in the time series field, where " + }, + { + "bbox": [ + 104, + 305, + 506, + 340 + ], + "type": "inline_equation", + "content": "T \\ll D" + }, + { + "bbox": [ + 104, + 305, + 506, + 340 + ], + "type": "text", + "content": " leads to a significant coefficient in " + }, + { + "bbox": [ + 104, + 305, + 506, + 340 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(T)" + }, + { + "bbox": [ + 104, + 305, + 506, + 340 + ], + "type": "text", + "content": ", we find the primary computational burden of time-series Transformer lies in linear transformations with " + }, + { + "bbox": [ + 104, + 305, + 506, + 340 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(T)" + }, + { + "bbox": [ + 104, + 305, + 506, + 340 + ], + "type": "text", + "content": ", rather than in multi-head self-attention with the " + }, + { + "bbox": [ + 104, + 305, + 506, + 340 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(T^2)" + }, + { + "bbox": [ + 104, + 305, + 506, + 340 + ], + "type": "text", + "content": " complexity." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 344, + 506, + 367 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 344, + 506, + 367 + ], + "spans": [ + { + "bbox": [ + 104, + 344, + 506, + 367 + ], + "type": "text", + "content": "For multivariate series with " + }, + { + "bbox": [ + 104, + 344, + 506, + 367 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 344, + 506, + 367 + ], + "type": "text", + "content": " variables, FLOPs is influenced by the handling of multivariate data. When adopting channel independence (Timer and PatchTST), " + }, + { + "bbox": [ + 104, + 344, + 506, + 367 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 344, + 506, + 367 + ], + "type": "text", + "content": " can be regarded as the batch size " + }, + { + "bbox": [ + 104, + 344, + 506, + 367 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 344, + 506, + 367 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 168, + 373, + 505, + 388 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 168, + 373, + 505, + 388 + ], + "spans": [ + { + "bbox": [ + 168, + 373, + 505, + 388 + ], + "type": "interline_equation", + "content": "Nf(T) = 12PDNT + 12L(D + H)NT^{2} + (24 + 12\\alpha)LD^{2}NT. \\tag{9}", + "image_path": "112c299da808a744d68170f3974c367c886419fe6934aefbaad644a68c5c10eb.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 395, + 504, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 395, + 504, + 418 + ], + "spans": [ + { + "bbox": [ + 104, + 395, + 504, + 418 + ], + "type": "text", + "content": "For models that capture fine-grained intra- and inter-series dependencies (Timer-XL and UniTST) in multivariate series, " + }, + { + "bbox": [ + 104, + 395, + 504, + 418 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 395, + 504, + 418 + ], + "type": "text", + "content": " is reflected as the enlarged number of tokens:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 165, + 425, + 505, + 438 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 165, + 425, + 505, + 438 + ], + "spans": [ + { + "bbox": [ + 165, + 425, + 505, + 438 + ], + "type": "interline_equation", + "content": "f(NT) = 12PDNT + 12L(D + H)N^{2}T^{2} + (24 + 12\\alpha)LD^{2}NT. \\tag{10}", + "image_path": "dfc5c2714445e9929502dab0d8b78a21bee80dac94f4a59d5db9db3934d2ccf5.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 445, + 505, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 445, + 505, + 491 + ], + "spans": [ + { + "bbox": [ + 104, + 445, + 505, + 491 + ], + "type": "text", + "content": "Notably, FLOPs is not entirely equivalent to actual runtime. While FlashAttention increases the overall FLOPs due to its recomputation process, it reduces the number of memory reads and writes. Given that on GPUs, computation is significantly faster than memory access, using FlashAttention can actually lead to further improvements in runtime performance." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 506, + 221, + 517 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 506, + 221, + 517 + ], + "spans": [ + { + "bbox": [ + 105, + 506, + 221, + 517 + ], + "type": "text", + "content": "A.3 PARAMETER COUNT" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 528, + 506, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 528, + 506, + 540 + ], + "spans": [ + { + "bbox": [ + 104, + 528, + 506, + 540 + ], + "type": "text", + "content": "From the above analysis, we observe that the parameter count of Transformers includes the following:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 553, + 365, + 566 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 553, + 365, + 566 + ], + "spans": [ + { + "bbox": [ + 104, + 553, + 365, + 566 + ], + "type": "text", + "content": "Patch Embedding " + }, + { + "bbox": [ + 104, + 553, + 365, + 566 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_e\\in \\mathbb{R}^{D\\times P}" + }, + { + "bbox": [ + 104, + 553, + 365, + 566 + ], + "type": "text", + "content": " to obtain patch embeddings."
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 578, + 454, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 578, + 454, + 592 + ], + "spans": [ + { + "bbox": [ + 104, + 578, + 454, + 592 + ], + "type": "text", + "content": "Self-Attention " + }, + { + "bbox": [ + 104, + 578, + 454, + 592 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_q, \\mathbf{W}_k, \\mathbf{W}_v \\in \\mathbb{R}^{D \\times d_k}" + }, + { + "bbox": [ + 104, + 578, + 454, + 592 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 104, + 578, + 454, + 592 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 578, + 454, + 592 + ], + "type": "text", + "content": " heads and " + }, + { + "bbox": [ + 104, + 578, + 454, + 592 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_o \\in \\mathbb{R}^{D \\times D}" + }, + { + "bbox": [ + 104, + 578, + 454, + 592 + ], + "type": "text", + "content": " for all heads." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 606, + 416, + 619 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 606, + 416, + 619 + ], + "spans": [ + { + "bbox": [ + 104, + 606, + 416, + 619 + ], + "type": "text", + "content": "Feed-Forward Network " + }, + { + "bbox": [ + 104, + 606, + 416, + 619 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_{\\mathrm{ffn1}}, \\mathbf{W}_{\\mathrm{ffn2}} \\in \\mathbb{R}^{D \\times D_{\\mathrm{ff}}}" + }, + { + "bbox": [ + 104, + 606, + 416, + 619 + ], + "type": "text", + "content": " in the feed-forward network." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 632, + 506, + 657 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 632, + 506, + 657 + ], + "spans": [ + { + "bbox": [ + 104, + 632, + 506, + 657 + ], + "type": "text", + "content": "Layer Normalization It contains the weight " + }, + { + "bbox": [ + 104, + 632, + 506, + 657 + ], + "type": "inline_equation", + "content": "\\mathbf{W} \\in \\mathbb{R}^D" + }, + { + "bbox": [ + 104, + 632, + 506, + 657 + ], + "type": "text", + "content": " and the bias " + }, + { + "bbox": [ + 104, + 632, + 506, + 657 + ], + "type": "inline_equation", + "content": "\\mathbf{b} \\in \\mathbb{R}^D" + }, + { + "bbox": [ + 104, + 632, + 506, + 657 + ], + "type": "text", + "content": ". Every Transformer block includes two normalizations after multi-head attention and feed-forward network respectively." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 669, + 486, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 669, + 486, + 682 + ], + "spans": [ + { + "bbox": [ + 104, + 669, + 486, + 682 + ], + "type": "text", + "content": "Patch Projection " + }, + { + "bbox": [ + 104, + 669, + 486, + 682 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_d\\in \\mathbb{R}^{TD\\times P}" + }, + { + "bbox": [ + 104, + 669, + 486, + 682 + ], + "type": "text", + "content": " in flatten head and " + }, + { + "bbox": [ + 104, + 669, + 486, + 682 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_d\\in \\mathbb{R}^{D\\times P}" + }, + { + "bbox": [ + 104, + 669, + 486, + 682 + ], + "type": "text", + "content": " in token-wise projection."
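These components can also be cross-checked empirically. The sketch below (our own construction, not the released code) instantiates them as bias-free PyTorch modules and compares the counted parameters against the token-wise-projection case of Equation 11 given next:

```python
# Empirical cross-check (our own sketch): build the listed components as
# weight-only PyTorch modules and compare against Equation 11.
import torch.nn as nn

NUM_LAYERS, D, ALPHA, P = 4, 512, 4, 96

class Block(nn.Module):
    def __init__(self):
        super().__init__()
        self.qkv = nn.Linear(D, 3 * D, bias=False)       # W_q, W_k, W_v over all heads
        self.out = nn.Linear(D, D, bias=False)           # W_o
        self.ffn1 = nn.Linear(D, ALPHA * D, bias=False)  # W_ffn1
        self.ffn2 = nn.Linear(ALPHA * D, D, bias=False)  # W_ffn2
        self.norm1 = nn.LayerNorm(D)                     # weight + bias: 2D parameters
        self.norm2 = nn.LayerNorm(D)

model = nn.ModuleList(
    [nn.Linear(P, D, bias=False)]                        # patch embedding W_e
    + [Block() for _ in range(NUM_LAYERS)]
    + [nn.Linear(D, P, bias=False)]                      # token-wise projection W_d
)
counted = sum(p.numel() for p in model.parameters())
closed_form = (4 + 2 * ALPHA) * NUM_LAYERS * D ** 2 + 4 * NUM_LAYERS * D + 2 * P * D
assert counted == closed_form                            # 12,689,408 for these settings
```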
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 104, + 687, + 451, + 699 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 687, + 451, + 699 + ], + "spans": [ + { + "bbox": [ + 104, + 687, + 451, + 699 + ], + "type": "text", + "content": "In sum, the total count of parameters in time-series Transformers can be expressed as:" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 112, + 708, + 505, + 734 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 708, + 505, + 734 + ], + "spans": [ + { + "bbox": [ + 112, + 708, + 505, + 734 + ], + "type": "interline_equation", + "content": "\\text{Parameter Count} = \\left\\{ \\begin{array}{ll} (4 + 2\\alpha)LD^{2} + 4LD + (1 + T)PD, & \\text{using flatten head}, \\\\ (4 + 2\\alpha)LD^{2} + 4LD + 2PD, & \\text{using token-wise projection}. \\end{array} \\right. \\tag{11}", + "image_path": "7c19bfea9ac3e14373d3030252d1a51ffb4ed4955923182ed27f1a39e7387a7b.jpg" + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 83, + 227, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 83, + 227, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 83, + 227, + 94 + ], + "type": "text", + "content": "A.4 MEMORY FOOTPRINT" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 102, + 504, + 126 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 102, + 504, + 126 + ], + "spans": [ + { + "bbox": [ + 104, + 102, + 504, + 126 + ], + "type": "text", + "content": "The memory footprint during training can be primarily categorized into three parts: activation values stored for backpropagation, model parameters, and optimizer parameters." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 131, + 505, + 164 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 131, + 505, + 164 + ], + "spans": [ + { + "bbox": [ + 104, + 131, + 505, + 164 + ], + "type": "text", + "content": "Unless other precision types (e.g., FP16) are adopted, model parameters and gradients are typically stored as 32-bit floating-point numbers, with each parameter occupying 4 bytes of memory. 
For time-series Transformers, memory footprint of activation values is given as follows:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 175, + 506, + 189 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 175, + 506, + 189 + ], + "spans": [ + { + "bbox": [ + 104, + 175, + 506, + 189 + ], + "type": "text", + "content": "Patch Embedding Gradient computation for " + }, + { + "bbox": [ + 104, + 175, + 506, + 189 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_e" + }, + { + "bbox": [ + 104, + 175, + 506, + 189 + ], + "type": "text", + "content": " preserves its input " + }, + { + "bbox": [ + 104, + 175, + 506, + 189 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{x}_i\\} \\in \\mathbb{R}^{T\\times P}" + }, + { + "bbox": [ + 104, + 175, + 506, + 189 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 104, + 175, + 506, + 189 + ], + "type": "inline_equation", + "content": "4PT" + }, + { + "bbox": [ + 104, + 175, + 506, + 189 + ], + "type": "text", + "content": " bytes." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 199, + 506, + 279 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 199, + 506, + 279 + ], + "spans": [ + { + "bbox": [ + 104, + 199, + 506, + 279 + ], + "type": "text", + "content": "Self-Attention Gradient calculation for " + }, + { + "bbox": [ + 104, + 199, + 506, + 279 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_q, \\mathbf{W}_k, \\mathbf{W}_v \\in \\mathbb{R}^{D \\times d_k}" + }, + { + "bbox": [ + 104, + 199, + 506, + 279 + ], + "type": "text", + "content": " requires their inputs " + }, + { + "bbox": [ + 104, + 199, + 506, + 279 + ], + "type": "inline_equation", + "content": "\\mathbf{H} \\in \\mathbb{R}^{T \\times D}" + }, + { + "bbox": [ + 104, + 199, + 506, + 279 + ], + "type": "text", + "content": ", amounting to a total of " + }, + { + "bbox": [ + 104, + 199, + 506, + 279 + ], + "type": "inline_equation", + "content": "4DT" + }, + { + "bbox": [ + 104, + 199, + 506, + 279 + ], + "type": "text", + "content": " bytes. The dot product for attention map also needs to store " + }, + { + "bbox": [ + 104, + 199, + 506, + 279 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}, \\mathbf{K}, \\mathbf{V} \\in \\mathbb{R}^{H \\times T \\times d_k}" + }, + { + "bbox": [ + 104, + 199, + 506, + 279 + ], + "type": "text", + "content": ", which collectively require a total of " + }, + { + "bbox": [ + 104, + 199, + 506, + 279 + ], + "type": "inline_equation", + "content": "12DT" + }, + { + "bbox": [ + 104, + 199, + 506, + 279 + ], + "type": "text", + "content": " bytes of memory. Gradient computation of " + }, + { + "bbox": [ + 104, + 199, + 506, + 279 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_o \\in \\mathbb{R}^{D \\times D}" + }, + { + "bbox": [ + 104, + 199, + 506, + 279 + ], + "type": "text", + "content": " necessitates the concatenated multi-head attention representations " + }, + { + "bbox": [ + 104, + 199, + 506, + 279 + ], + "type": "inline_equation", + "content": "\\mathbf{H} \\in \\mathbb{R}^{T \\times D}" + }, + { + "bbox": [ + 104, + 199, + 506, + 279 + ], + "type": "text", + "content": ", which occupies " + }, + { + "bbox": [ + 104, + 199, + 506, + 279 + ], + "type": "inline_equation", + "content": "4DT" + }, + { + "bbox": [ + 104, + 199, + 506, + 279 + ], + "type": "text", + "content": " bytes. 
If a memory-efficient attention mechanism like FlashAttention (Dao et al., 2022) is not applied, the outcome " + }, + { + "bbox": [ + 104, + 199, + 506, + 279 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}\\mathbf{K}^\\top" + }, + { + "bbox": [ + 104, + 199, + 506, + 279 + ], + "type": "text", + "content": " will be stored and occupy " + }, + { + "bbox": [ + 104, + 199, + 506, + 279 + ], + "type": "inline_equation", + "content": "4HT^2" + }, + { + "bbox": [ + 104, + 199, + 506, + 279 + ], + "type": "text", + "content": " bytes. Instead, if FlashAttention is adopted, the storage overhead can be avoided." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 290, + 504, + 336 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 290, + 504, + 336 + ], + "spans": [ + { + "bbox": [ + 104, + 290, + 504, + 336 + ], + "type": "text", + "content": "Feed-Forward Network The ReLU activation function is typically employed in this module. The input " + }, + { + "bbox": [ + 104, + 290, + 504, + 336 + ], + "type": "inline_equation", + "content": "\\mathbf{H} \\in \\mathbb{R}^{T \\times D}" + }, + { + "bbox": [ + 104, + 290, + 504, + 336 + ], + "type": "text", + "content": " must be retained, requiring a total of " + }, + { + "bbox": [ + 104, + 290, + 504, + 336 + ], + "type": "inline_equation", + "content": "4DT" + }, + { + "bbox": [ + 104, + 290, + 504, + 336 + ], + "type": "text", + "content": " bytes. Additionally, the product " + }, + { + "bbox": [ + 104, + 290, + 504, + 336 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_{\\mathrm{ffn1}}\\mathbf{H}" + }, + { + "bbox": [ + 104, + 290, + 504, + 336 + ], + "type": "text", + "content": " also needs to be stored, amounting to " + }, + { + "bbox": [ + 104, + 290, + 504, + 336 + ], + "type": "inline_equation", + "content": "4D_{\\mathrm{ff}}T" + }, + { + "bbox": [ + 104, + 290, + 504, + 336 + ], + "type": "text", + "content": " bytes. Similarly, the output activations of ReLU, which serve as the input for subsequent linear transformations, necessitate another " + }, + { + "bbox": [ + 104, + 290, + 504, + 336 + ], + "type": "inline_equation", + "content": "4D_{\\mathrm{ff}}T" + }, + { + "bbox": [ + 104, + 290, + 504, + 336 + ], + "type": "text", + "content": " bytes." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 347, + 504, + 371 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 347, + 504, + 371 + ], + "spans": [ + { + "bbox": [ + 104, + 347, + 504, + 371 + ], + "type": "text", + "content": "Layer Normalization Each Transformer block encompasses two layer normalizations, with each normalization retaining its input, resulting in the memory requirement of " + }, + { + "bbox": [ + 104, + 347, + 504, + 371 + ], + "type": "inline_equation", + "content": "8DT" + }, + { + "bbox": [ + 104, + 347, + 504, + 371 + ], + "type": "text", + "content": " bytes."
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 381, + 504, + 406 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 381, + 504, + 406 + ], + "spans": [ + { + "bbox": [ + 104, + 381, + 504, + 406 + ], + "type": "text", + "content": "Patch Projection To perform backpropagation for " + }, + { + "bbox": [ + 104, + 381, + 504, + 406 + ], + "type": "inline_equation", + "content": "W_{d} \\in \\mathbb{R}^{D \\times P}" + }, + { + "bbox": [ + 104, + 381, + 504, + 406 + ], + "type": "text", + "content": ", it is necessary to retain its input " + }, + { + "bbox": [ + 104, + 381, + 504, + 406 + ], + "type": "inline_equation", + "content": "\\mathbf{H} \\in \\mathbb{R}^{T \\times D}" + }, + { + "bbox": [ + 104, + 381, + 504, + 406 + ], + "type": "text", + "content": ", resulting in a total memory requirement of " + }, + { + "bbox": [ + 104, + 381, + 504, + 406 + ], + "type": "inline_equation", + "content": "4DT" + }, + { + "bbox": [ + 104, + 381, + 504, + 406 + ], + "type": "text", + "content": " bytes." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 410, + 506, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 410, + 506, + 422 + ], + "spans": [ + { + "bbox": [ + 104, + 410, + 506, + 422 + ], + "type": "text", + "content": "The total GPU memory occupied by the activation values of the entire model is given as follows:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 115, + 428, + 505, + 455 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 428, + 505, + 455 + ], + "spans": [ + { + "bbox": [ + 115, + 428, + 505, + 455 + ], + "type": "interline_equation", + "content": "\\text{Memory Footprint} = \\left\\{ \\begin{array}{ll} 4(D + P)T + (32 + 8\\alpha)LDT + 4LHT^{2}, & \\text{w/o FlashAttention}, \\\\ 4(D + P)T + (32 + 8\\alpha)LDT, & \\text{with FlashAttention}. \\end{array} \\right. \\tag{12}", + "image_path": "4f667306c73eb6b9675a35caf3c106371a324eb15e1a061d4381dec6b7624d75.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 461, + 505, + 539 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 461, + 505, + 539 + ], + "spans": [ + { + "bbox": [ + 104, + 461, + 505, + 539 + ], + "type": "text", + "content": "The derived occupancy of activation values increases proportionally with the batch size " + }, + { + "bbox": [ + 104, + 461, + 505, + 539 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 461, + 505, + 539 + ], + "type": "text", + "content": ". For multivariate series, " + }, + { + "bbox": [ + 104, + 461, + 505, + 539 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 461, + 505, + 539 + ], + "type": "text", + "content": " can be used as a multiplier in channel independence. For channel dependence models, we can substitute " + }, + { + "bbox": [ + 104, + 461, + 505, + 539 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 461, + 505, + 539 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 104, + 461, + 505, + 539 + ], + "type": "inline_equation", + "content": "NT" + }, + { + "bbox": [ + 104, + 461, + 505, + 539 + ], + "type": "text", + "content": " as before. The total memory footprint is the sum of activation values and parameters of model and optimizer, which are proportional to the parameter count derived in Equation 11. 
Due to the limited model size in the time series field, the memory consumption of parameters is minimal and can be considered negligible in practice. Therefore, the overall memory footprint can be predominantly determined by the occupied memory of activation values." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 555, + 257, + 567 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 555, + 257, + 567 + ], + "spans": [ + { + "bbox": [ + 105, + 555, + 257, + 567 + ], + "type": "text", + "content": "B EXPERIMENTAL DETAILS" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 579, + 178, + 590 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 579, + 178, + 590 + ], + "spans": [ + { + "bbox": [ + 105, + 579, + 178, + 590 + ], + "type": "text", + "content": "B.1 DATASETS" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 600, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 600, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 600, + 506, + 733 + ], + "type": "text", + "content": "We conduct experiments on well-acknowledged benchmarks to evaluate the performance of the proposed Timer-XL: (1) ETT (Zhou et al., 2021) contains 7 factors of electricity transformers from July 2016 to July 2018, which is recorded every hour or 15 minutes. (2) Weather (Wu et al., 2021) includes 21 meteorological factors collected every 10 minutes from the Max Planck Biogeochemistry Institute Weather Station in 2020. (3) ECL (Wu et al., 2021) records the hourly electricity consumption data of 321 clients. (4) Traffic (Wu et al., 2021) collects hourly road occupancy rates measured by 862 sensors on the San Francisco Bay area highways from January 2015 to December 2016. (5) Solar-Energy (Lai et al., 2018) records the solar power production of 137 PV plants in 2006, which are sampled every 10 minutes. (6) PEMS (Liu et al., 2022a) contains records from the public traffic network in California collected in 5-minute time windows. (7) EPF (Lago et al., 2021) includes five subsets that span six years. Each contains the electricity price as the endogenous variable to be predicted and two exogenous variables of the day-ahead electricity markets. (8) GTWSF (Wu et al.," + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 751, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 311, + 761 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 139 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 139 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 139 + ], + "type": "text", + "content": "2023) is a dataset collected from the National Centers for Environmental Information (NCEI). 
This large-scale collection contains hourly averaged wind speed and temperature data from 3850 stations with different geographical scales and densities, spanning from 2019 to 2021. (9) UTSD (Liu et al., 2024c) is a multi-domain time series dataset, which includes seven domains with a hierarchy of four volumes. We adopt the largest volume that encompasses 1 billion time points for pre-training." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 143, + 506, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 143, + 506, + 243 + ], + "spans": [ + { + "bbox": [ + 104, + 143, + 506, + 243 + ], + "type": "text", + "content": "We further establish challenging forecasting benchmarks based on the ECMWF Reanalysis v5 (ERA5) dataset (Hersbach et al., 2020) to prevent potential overfitting and performance saturation of deep forecasters in existing benchmarks. Concretely, ERA5 is the fifth generation ECMWF atmospheric reanalysis of the global climate covering the period from January 1940 to the present, which provides hourly estimates of a large number of atmospheric, land, and oceanic climate variables, and includes information about uncertainties for all variables at reduced spatial and temporal resolutions. Due to its pattern sufficiency of temporal dynamics and variable correlations, we could establish practical benchmarks to thoroughly evaluate the performance for univariate and multivariate forecasting, as well as adopt it for large-scale pre-training to develop domain-specific large time series models." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 247, + 267, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 247, + 267, + 258 + ], + "spans": [ + { + "bbox": [ + 105, + 247, + 267, + 258 + ], + "type": "text", + "content": "Our datasets are constructed as follows:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 270, + 506, + 476 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 132, + 270, + 504, + 336 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 270, + 504, + 336 + ], + "spans": [ + { + "bbox": [ + 132, + 270, + 504, + 336 + ], + "type": "text", + "content": "- ERA5-S: To establish a realistic univariate forecasting benchmark, we start from the basic principle of forecastability and make the prediction on sufficient lookback lengths. Instead of the short time span of training in previous benchmarks (generally no more than 2 years), we curated a three-hour frequency dataset spanning 40 years (January 1979 to December 2018) from ERA5, encompassing 116880 time points. In order to prevent overfitting on a single time series, we selected worldwide stations to form seven subsets." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 340, + 506, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 340, + 506, + 407 + ], + "spans": [ + { + "bbox": [ + 132, + 340, + 506, + 407 + ], + "type": "text", + "content": "- ERA5-MS: Each univariate series of ERA5-S provides partial observations governed by the spatio-temporal global weather system. Since discovering the global spatio-temporal correlations presents a fundamental challenge in meteorology, we convert ERA5-S into ERA5-MS by using seven subsets as a challenging multivariate forecasting benchmark. 
Based on the average results in Tables 2 and 5, we can validate the existence of multi-station correlations among the selected stations, which have enhanced the average prediction accuracy." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 410, + 506, + 476 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 410, + 506, + 476 + ], + "spans": [ + { + "bbox": [ + 132, + 410, + 506, + 476 + ], + "type": "text", + "content": "- ERA5-Large: To explore a purely data-driven approach to building domain-specific large time series models, we further expand the number of stations to form ERA5-Large, a dataset that evenly covers 4920 meteorological stations worldwide and spans 40 years. We establish the dataset for pre-training, which is expected to generalize across time (train on past observations and generalize to the future) and across stations (train on partial stations and generalize to other unseen stations). The total number of time points is around half a billion." + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 486, + 506, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 486, + 506, + 521 + ], + "spans": [ + { + "bbox": [ + 104, + 486, + 506, + 521 + ], + "type": "text", + "content": "We follow the same data processing and train-validation-test split protocol used in TimesNet (Wu et al., 2022), where the train, validation, and test datasets are divided according to chronological order to prevent data leakage. Detailed dataset descriptions and prediction settings are provided in Table 9." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 533, + 217, + 544 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 533, + 217, + 544 + ], + "spans": [ + { + "bbox": [ + 105, + 533, + 217, + 544 + ], + "type": "text", + "content": "B.2 BASELINE MODELS" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 554, + 507, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 554, + 507, + 676 + ], + "spans": [ + { + "bbox": [ + 104, + 554, + 507, + 676 + ], + "type": "text", + "content": "We aim to present Timer-XL as a foundation model for unified time series forecasting. We thoroughly include well-acknowledged and advanced models for each forecasting task. For univariate time series forecasting, we compare Timer-XL with PatchTST (Nie et al., 2022) under channel independence. For multivariate time series prediction, we report official results from Liu et al. (2023; 2024b); Ding et al. (2024), including UniRepLKNet (2024), iTransformer (2023), Corrformer (2023), DLinear (2023), TimesNet (2022), Non-stationary Transformer (2022b), Pyraformer (2021), Autoformer (2021), StemGNN (2020), DeepAR (2020), and N-BEATS (2019). We further reproduce the performance of related Transformers: Timer (2024c) and UniTST (2024a) based on their official repositories. For covariate-informed time series forecasting, we report the official results of TimeXer (2024b). For zero-shot forecasting, we follow Liu et al. (2024c) and predict future length-96 windows on well-acknowledged datasets. In total, more than 20 baselines are included for a complete comparison." 
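To make the chronological train-validation-test protocol described above concrete, here is a minimal Python sketch (not the authors' code); the 0.7/0.1/0.2 split fractions are illustrative assumptions, and the actual split sizes follow Table 9.

```python
import numpy as np

def chronological_split(series: np.ndarray, train_frac: float = 0.7, val_frac: float = 0.1):
    """Split a series in chronological order so no future values leak into training."""
    n = len(series)
    n_train, n_val = int(n * train_frac), int(n * val_frac)
    return (
        series[:n_train],                 # oldest observations for training
        series[n_train:n_train + n_val],  # middle segment for validation
        series[n_train + n_val:],         # most recent observations for testing
    )

train, val, test = chronological_split(np.arange(100.0))
assert len(train) == 70 and len(val) == 10 and len(test) == 20
```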
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 689, + 251, + 700 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 689, + 251, + 700 + ], + "spans": [ + { + "bbox": [ + 105, + 689, + 251, + 700 + ], + "type": "text", + "content": "B.3 IMPLEMENTATION DETAILS" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "text", + "content": "All the experiments are implemented in PyTorch (Paszke et al., 2019) on NVIDIA A100 Tensor Core GPUs. We employ the Adam optimizer (Kingma & Ba, 2014) and the MSE loss for model optimization." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 152, + 504, + 469 + ], + "blocks": [ + { + "bbox": [ + 104, + 114, + 504, + 149 + ], + "lines": [ + { + "bbox": [ + 104, + 114, + 504, + 149 + ], + "spans": [ + { + "bbox": [ + 104, + 114, + 504, + 149 + ], + "type": "text", + "content": "Table 9: Dataset descriptions. Dim. denotes the number of variables (for univariate forecasting, we adopt channel independence (Nie et al., 2022) or train separate models on each variable). Dataset Length denotes the number of time points in the (train, validation, test) splits." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 152, + 504, + 469 + ], + "lines": [ + { + "bbox": [ + 106, + 152, + 504, + 469 + ], + "spans": [ + { + "bbox": [ + 106, + 152, + 504, + 469 + ], + "type": "table", + "html": "
TasksDatasetDim.Training SettingDataset LengthInformation (Frequency)
Univariate ForecastingETTh17{24, 96, 168, 672, 2880}→96(8545, 2881, 2881)Electricity (Hourly)
ECL321{24, 96, 168, 672, 2880, 8832}→96(18317, 2633, 5261)Electricity (Hourly)
Traffic862{24, 96, 168, 672, 2880, 8832}→96(12185, 1757, 3509)Transportation (Hourly)
PEMS03358{96, 288, 1152, 2016, 8064}→96(15617, 5135, 5135)Transportation (5 mins)
ERA5-S73072→96(81816, 11688, 23376)Climate (3 Hours)
Multivariate ForecastingETTh1, ETTh27{96, 672}→{96, 192, 336, 720}(8545, 2881, 2881)Electricity (Hourly)
ETTm1, ETTm27{96, 672}→{96, 192, 336, 720}(34465, 11521, 11521)Electricity (15 mins)
ECL321{96, 672}→{96, 192, 336, 720}(18317, 2633, 5261)Electricity (Hourly)
Traffic862{96, 672}→{96, 192, 336, 720}(12185, 1757, 3509)Transportation (Hourly)
Weather21{96, 672}→{96, 192, 336, 720}(36792, 5271, 10540)Climate (10 mins)
Solar-Energy137{96, 672}→{96, 192, 336, 720}(36601, 5161, 10417)Energy (10 mins)
ERA5-MS73072→96(81816, 11688, 23376)Climate (3 Hours)
GTWSF385048→24(12280, 1755, 3509)Wu et al. (2023)
Forecasting with CovariatesNP1+2168→24(36500, 5219, 10460)Electricity (Hourly)
PJM1+2168→24(36500, 5219, 10460)Electricity (Hourly)
BE1+2168→24(36500, 5219, 10460)Electricity (Hourly)
FR1+2168→24(36500, 5219, 10460)Electricity (Hourly)
DE1+2168→24(36500, 5219, 10460)Electricity (Hourly)
Pre-trainingERA5-Large49203072→96(81816, 11688, 23376)Climate (3 Hours)
UTSD-2880→96(868778970, 96530996, -)Liu et al. (2024c)
LOTSA-2880→96(231082956489, -, -)Woo et al. (2024)
", + "image_path": "edeb74890cf2ec87c63a080f175c702a03198c33d618859b8de9fecd1baaa226.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 106, + 559, + 504, + 704 + ], + "blocks": [ + { + "bbox": [ + 104, + 533, + 504, + 555 + ], + "lines": [ + { + "bbox": [ + 104, + 533, + 504, + 555 + ], + "spans": [ + { + "bbox": [ + 104, + 533, + 504, + 555 + ], + "type": "text", + "content": "Table 10: Performance robustness of Timer-XL. The prediction settings and results keep the same with Table 12. The standard deviation is obtained from three random seeds." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 559, + 504, + 704 + ], + "lines": [ + { + "bbox": [ + 106, + 559, + 504, + 704 + ], + "spans": [ + { + "bbox": [ + 106, + 559, + 504, + 704 + ], + "type": "table", + "html": "
DatasetECLETTh1Traffic
HorizonMSEMAEMSEMAEMSEMAE
960.127±0.0010.219±0.0010.364±0.0020.397±0.0010.340±0.0020.238±0.001
1920.145±0.0010.236±0.0010.405±0.0020.424±0.0010.360±0.0010.247±0.001
3360.159±0.0010.252±0.0010.427±0.0030.439±0.0020.377±0.0020.256±0.002
7200.187±0.0030.277±0.0030.439±0.0020.459±0.0040.418±0.0030.279±0.002
DatasetSolar-EnergyWeatherERA5-MS
HorizonMSEMAEMSEMAEMSEMAE
960.162±0.0030.221±0.0020.157±0.0020.205±0.0010.164±0.0010.307±0.000
1920.187±0.0030.239±0.0020.206±0.0030.250±0.002
3360.205±0.0030.255±0.0020.259±0.0030.291±0.003
7200.238±0.0030.279±0.0030.337±0.0020.344±0.002
", + "image_path": "ae48b0c91207781b8299f66108f862a41b2dfc21e7c5ee0fadf0b0003ebd3f6c.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 182 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 182 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 182 + ], + "type": "text", + "content": "We adopt channel independence from Nie et al. (2022) in univariate time series forecasting. Based on the prevalence of patch-level tokenization in the time series field, we reproduce typical Transformers: PatchTST (2022), Timer (2024c), and UniTST (2024a) based on their official repositories, and keep their model hyperparameters and training configurations the same to evaluate the inherent capability of base models. The results of other baselines are based on the benchmark provided by Liu et al. (2023; 2024b); Ding et al. (2024); Wang et al. (2024b), which is fairly built on the configurations provided by their original paper. Detailed experimental configurations are provided in Table 11. We also report the standard deviations under three runs with different random seeds in Table 10, which exhibits that the performance of Timer-XL is stable." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 187, + 506, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 187, + 506, + 232 + ], + "spans": [ + { + "bbox": [ + 104, + 187, + 506, + 232 + ], + "type": "text", + "content": "For the metrics, we adopt the symmetric mean absolute percentage error (SMAPE), a metric that is independent of the numerical range, to evaluate one-for-all generalization performance on ERA5-Large. For other experiments, we adopt the root mean square error (MSE) and mean absolute error (MAE) that follows previous work. 
These metrics can be calculated as follows:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 134, + 245, + 476, + 279 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 245, + 476, + 279 + ], + "spans": [ + { + "bbox": [ + 134, + 245, + 476, + 279 + ], + "type": "interline_equation", + "content": "\\mathrm{SMAPE} = \\frac{200}{T} \\sum_{i=1}^{T} \\frac{|\\mathbf{X}_{i} - \\widehat{\\mathbf{X}}_{i}|}{|\\mathbf{X}_{i}| + |\\widehat{\\mathbf{X}}_{i}|}, \\quad \\mathrm{MSE} = \\frac{1}{T} \\sum_{i=1}^{T} |\\mathbf{X}_{i} - \\widehat{\\mathbf{X}}_{i}|^{2}, \\quad \\mathrm{MAE} = \\frac{1}{T} \\sum_{i=1}^{T} |\\mathbf{X}_{i} - \\widehat{\\mathbf{X}}_{i}|.", + "image_path": "57df27397e99aaa599102e2653bffa66bd3d10b92cb3c243ea78d1411c232c57.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 293, + 504, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 293, + 504, + 319 + ], + "spans": [ + { + "bbox": [ + 104, + 293, + 504, + 319 + ], + "type": "text", + "content": "Here " + }, + { + "bbox": [ + 104, + 293, + 504, + 319 + ], + "type": "inline_equation", + "content": "\\mathbf{X} \\in \\mathbb{R}^T" + }, + { + "bbox": [ + 104, + 293, + 504, + 319 + ], + "type": "text", + "content": " is a univariate time series and " + }, + { + "bbox": [ + 104, + 293, + 504, + 319 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathbf{X}}" + }, + { + "bbox": [ + 104, + 293, + 504, + 319 + ], + "type": "text", + "content": " is the corresponding prediction. For multivariate time series, we further calculate the mean metric in the variable dimension." + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 106, + 359, + 504, + 701 + ], + "blocks": [ + { + "bbox": [ + 105, + 334, + 506, + 357 + ], + "lines": [ + { + "bbox": [ + 105, + 334, + 506, + 357 + ], + "spans": [ + { + "bbox": [ + 105, + 334, + 506, + 357 + ], + "type": "text", + "content": "Table 11: Experimental configurations of Timer-XL and other baseline Transformers. All the experiments adopt the ADAM (2014) optimizer with the default hyperparameter " + }, + { + "bbox": [ + 105, + 334, + 506, + 357 + ], + "type": "inline_equation", + "content": "(\\beta_{1},\\beta_{2}) = (0.9,0.999)" + }, + { + "bbox": [ + 105, + 334, + 506, + 357 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 359, + 504, + 701 + ], + "lines": [ + { + "bbox": [ + 106, + 359, + 504, + 701 + ], + "spans": [ + { + "bbox": [ + 106, + 359, + 504, + 701 + ], + "type": "table", + "html": "
ExperimentModelDatasetConfigurationTraining Process
LDdkHPLRLossBatch SizeEpochs
Univariate ForecastingTimer-XLECL3512648960.0005MSE204810
Traffic3512648960.001MSE204810
PatchTSTETTh11512648960.0005MSE25610
PEMS033512648960.0005MSE204810
ERA5-S1512648960.0005MSE204810
Multivariate ForecastingTimer-XLGlobal Temp.310241288240.0001MSE810
Global Wind310241288240.0001MSE810
ECL5512648960.0005MSE410
UniTSTTraffic4512648960.0005MSE410
TimerETTh1110241288960.0001MSE3210
PatchTSTWeather4512648960.0005MSE3210
Solar.6512648960.0001MSE1610
ERA5-MS3512648960.0001MSE25610
Forecasting with CovariatesTimer-XLNP3512648240.0001MSE410
TimeXerPJM2512648240.0001MSE1610
TimerBE2512648240.0001MSE1610
PatchTSTFR2512648240.0001MSE1610
DE2512648240.0001MSE1610
Pre-trainingTimer-XLERA5-Large4512648960.0001MSE4096010
PatchTST4512648960.0001MSE4096010
Timer-XLUTSD810241288960.00005MSE1638410
Timer(Liu et al., 2024c)810241288960.00005MSE1638410
Timer-XL810241288960.001MSE32768-
MoiraiSmallLOTSA6384646-
MoiraiBase(Woo et al., 2024)127686412-
MoiraiLarge2410246416-
", + "image_path": "2281e5e515e4ed79c036c0ecad9a5c9334e13d1e07ecf79709364cd138acdc82.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 115, + 702, + 504, + 720 + ], + "lines": [ + { + "bbox": [ + 115, + 702, + 504, + 720 + ], + "spans": [ + { + "bbox": [ + 115, + 702, + 504, + 720 + ], + "type": "text", + "content": "* " + }, + { + "bbox": [ + 115, + 702, + 504, + 720 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 115, + 702, + 504, + 720 + ], + "type": "text", + "content": " is the layer number of Transformers, " + }, + { + "bbox": [ + 115, + 702, + 504, + 720 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 115, + 702, + 504, + 720 + ], + "type": "text", + "content": " is the dimension of token embedding (the hidden dimension of FFN is set as " + }, + { + "bbox": [ + 115, + 702, + 504, + 720 + ], + "type": "inline_equation", + "content": "4D" + }, + { + "bbox": [ + 115, + 702, + 504, + 720 + ], + "type": "text", + "content": "), " + }, + { + "bbox": [ + 115, + 702, + 504, + 720 + ], + "type": "inline_equation", + "content": "d_k" + }, + { + "bbox": [ + 115, + 702, + 504, + 720 + ], + "type": "text", + "content": " is the dimension of query, key, and value, " + }, + { + "bbox": [ + 115, + 702, + 504, + 720 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 115, + 702, + 504, + 720 + ], + "type": "text", + "content": " is the multi-head number, " + }, + { + "bbox": [ + 115, + 702, + 504, + 720 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 115, + 702, + 504, + 720 + ], + "type": "text", + "content": " is the patch size, and LR is the initial learning rate." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 296, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 296, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 296, + 94 + ], + "type": "text", + "content": "C HYPERPARAMETER SENSITIVITY" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 106, + 506, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 106, + 506, + 228 + ], + "spans": [ + { + "bbox": [ + 104, + 106, + 506, + 228 + ], + "type": "text", + "content": "We evaluate the hyperparameter sensitivity of Timer-XL on the ERA5-MS benchmark, as illustrated in Figure 8, concerning the following factors: the number of layers " + }, + { + "bbox": [ + 104, + 106, + 506, + 228 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 104, + 106, + 506, + 228 + ], + "type": "text", + "content": ", the patch size " + }, + { + "bbox": [ + 104, + 106, + 506, + 228 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 
104, + 106, + 506, + 228 + ], + "type": "text", + "content": ", and the lookback length during inference. Our findings indicate that the performance of Timer-XL generally improves with increasing " + }, + { + "bbox": [ + 104, + 106, + 506, + 228 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 104, + 106, + 506, + 228 + ], + "type": "text", + "content": ", suggesting that Timer-XL is a scalable deep forecaster. Furthermore, our analysis of the influence of " + }, + { + "bbox": [ + 104, + 106, + 506, + 228 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 104, + 106, + 506, + 228 + ], + "type": "text", + "content": " reveals that the optimal patch size is generally close to the predicted length, since it avoids multi-step error accumulation. Toward better long-term forecasting performance, adopting different patch sizes for input and output tokens is left as a future improvement. Finally, we investigate the impact of input length during inference. We discover that the optimal lookback length during inference is not necessarily the length used during training. Given that decoder-only Transformers can accommodate inference inputs shorter than those used during training, this finding is noteworthy and indicates the potential to further improve performance." + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 106, + 239, + 233, + 336 + ], + "blocks": [ + { + "bbox": [ + 106, + 239, + 233, + 336 + ], + "lines": [ + { + "bbox": [ + 106, + 239, + 233, + 336 + ], + "spans": [ + { + "bbox": [ + 106, + 239, + 233, + 336 + ], + "type": "image", + "image_path": "cebfa93fdf65c5dedcd6a2129ae37dfc15aa582d985a0e942aa4fa452f60e1d1.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 339, + 504, + 361 + ], + "lines": [ + { + "bbox": [ + 104, + 339, + 504, + 361 + ], + "spans": [ + { + "bbox": [ + 104, + 339, + 504, + 361 + ], + "type": "text", + "content": "Figure 8: Hyperparameter sensitivity of Timer-XL (input-3072-pred-96 on ERA5-MS), including the number of Transformer blocks " + }, + { + "bbox": [ + 104, + 339, + 504, + 361 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 104, + 339, + 504, + 361 + ], + "type": "text", + "content": ", the patch size " + }, + { + "bbox": [ + 104, + 339, + 504, + 361 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 104, + 339, + 504, + 361 + ], + "type": "text", + "content": ", and the input lookback length during inference." 
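As a concrete companion to the metric definitions given earlier in this appendix, here is a minimal NumPy sketch (not the authors' code) of SMAPE, MSE, and MAE for a univariate series; for multivariate series these values would additionally be averaged over the variable dimension. The epsilon guard is an added practical assumption, not part of the formulas.

```python
import numpy as np

def smape(x: np.ndarray, x_hat: np.ndarray, eps: float = 1e-8) -> float:
    """Symmetric mean absolute percentage error, independent of numerical range."""
    return float(200.0 / len(x) * np.sum(np.abs(x - x_hat) / (np.abs(x) + np.abs(x_hat) + eps)))

def mse(x: np.ndarray, x_hat: np.ndarray) -> float:
    """Mean squared error over the prediction horizon."""
    return float(np.mean((x - x_hat) ** 2))

def mae(x: np.ndarray, x_hat: np.ndarray) -> float:
    """Mean absolute error over the prediction horizon."""
    return float(np.mean(np.abs(x - x_hat)))
```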
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 240, + 239, + 369, + 336 + ], + "blocks": [ + { + "bbox": [ + 240, + 239, + 369, + 336 + ], + "lines": [ + { + "bbox": [ + 240, + 239, + 369, + 336 + ], + "spans": [ + { + "bbox": [ + 240, + 239, + 369, + 336 + ], + "type": "image", + "image_path": "c7726f346387b4ab3ccd5d840f7a19815834dce529f19809602030ebc9128ab9.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 372, + 239, + 503, + 337 + ], + "blocks": [ + { + "bbox": [ + 372, + 239, + 503, + 337 + ], + "lines": [ + { + "bbox": [ + 372, + 239, + 503, + 337 + ], + "spans": [ + { + "bbox": [ + 372, + 239, + 503, + 337 + ], + "type": "image", + "image_path": "a79c3fa6fec9c0be936f9ec27b8924c91c2d991884f2d7d68907de2e14823559.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 379, + 194, + 391 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 379, + 194, + 391 + ], + "spans": [ + { + "bbox": [ + 105, + 379, + 194, + 391 + ], + "type": "text", + "content": "D SHOWCASES" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 404, + 504, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 404, + 504, + 449 + ], + "spans": [ + { + "bbox": [ + 104, + 404, + 504, + 449 + ], + "type": "text", + "content": "To facilitate a clear comparison among various models, we present additional prediction visualizations from diverse datasets in Figures 9 and 10. Showcases are randomly selected from Timer-XL and the following time-series Transformers: PatchTST (2022), Timer (2024c), and UniTST (2024a). Among them, Timer-XL presents the most accurate predictions." 
+ } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 106, + 460, + 212, + 540 + ], + "blocks": [ + { + "bbox": [ + 106, + 460, + 212, + 540 + ], + "lines": [ + { + "bbox": [ + 106, + 460, + 212, + 540 + ], + "spans": [ + { + "bbox": [ + 106, + 460, + 212, + 540 + ], + "type": "image", + "image_path": "d699be3b2fd3908c3ca12fdf475d384dbb566bb4d865c10627bb733d6ddd92b3.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 214, + 460, + 309, + 539 + ], + "blocks": [ + { + "bbox": [ + 214, + 460, + 309, + 539 + ], + "lines": [ + { + "bbox": [ + 214, + 460, + 309, + 539 + ], + "spans": [ + { + "bbox": [ + 214, + 460, + 309, + 539 + ], + "type": "image", + "image_path": "6d90511cc3496cfb8d5a97248d1778df7469251c0dcc59fa940de526a409af99.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 310, + 460, + 406, + 539 + ], + "blocks": [ + { + "bbox": [ + 310, + 460, + 406, + 539 + ], + "lines": [ + { + "bbox": [ + 310, + 460, + 406, + 539 + ], + "spans": [ + { + "bbox": [ + 310, + 460, + 406, + 539 + ], + "type": "image", + "image_path": "d114e928a62ac831b1cbf006925321e2a07f555b859858ad5414ea8bba53f2e8.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 408, + 460, + 503, + 539 + ], + "blocks": [ + { + "bbox": [ + 408, + 460, + 503, + 539 + ], + "lines": [ + { + "bbox": [ + 408, + 460, + 503, + 539 + ], + "spans": [ + { + "bbox": [ + 408, + 460, + 503, + 539 + ], + "type": "image", + "image_path": "82463877c629c28c01f047e385704d22f618c0d300a023ddb12b1976be4472d0.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 107, + 544, + 212, + 615 + ], + "blocks": [ + { + "bbox": [ + 107, + 544, + 212, + 615 + ], + "lines": [ + { + "bbox": [ + 107, + 544, + 212, + 615 + ], + "spans": [ + { + "bbox": [ + 107, + 544, + 212, + 615 + ], + "type": "image", + "image_path": "bf35f570271c578659781cd8afcca3dc1a7fbec741cbb0d7c973b65e5eb075ed.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 624, + 504, + 646 + ], + "lines": [ + { + "bbox": [ + 104, + 624, + 504, + 646 + ], + "spans": [ + { + "bbox": [ + 104, + 624, + 504, + 646 + ], + "type": "text", + "content": "Figure 9: Visualization results on univariate time series dataset. We adopt the forecasting setting of 2880-pred-96 on ECL, ETTh1 and Traffic, and 2016-pred-96 on PEMS." 
+ } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 214, + 544, + 309, + 615 + ], + "blocks": [ + { + "bbox": [ + 214, + 544, + 309, + 615 + ], + "lines": [ + { + "bbox": [ + 214, + 544, + 309, + 615 + ], + "spans": [ + { + "bbox": [ + 214, + 544, + 309, + 615 + ], + "type": "image", + "image_path": "435d076294cf93f1f7b0cab8c0ee6ea66ae7d35e0c25c4e48e63d5433090c1d7.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 310, + 544, + 406, + 615 + ], + "blocks": [ + { + "bbox": [ + 310, + 544, + 406, + 615 + ], + "lines": [ + { + "bbox": [ + 310, + 544, + 406, + 615 + ], + "spans": [ + { + "bbox": [ + 310, + 544, + 406, + 615 + ], + "type": "image", + "image_path": "8e3c7732312931f766822c9df0458873ad18b2a188a44071c71b027028749804.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 409, + 544, + 503, + 615 + ], + "blocks": [ + { + "bbox": [ + 409, + 544, + 503, + 615 + ], + "lines": [ + { + "bbox": [ + 409, + 544, + 503, + 615 + ], + "spans": [ + { + "bbox": [ + 409, + 544, + 503, + 615 + ], + "type": "image", + "image_path": "32d52704c334011db11f7cb6fdf4573af0300500b3a921eda8fb2459beb77dbd.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 663, + 266, + 675 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 663, + 266, + 675 + ], + "spans": [ + { + "bbox": [ + 105, + 663, + 266, + 675 + ], + "type": "text", + "content": "E SUPPLEMENTARY RESULTS" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 689, + 338, + 700 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 689, + 338, + 700 + ], + "spans": [ + { + "bbox": [ + 105, + 689, + 338, + 700 + ], + "type": "text", + "content": "E.1 FULL RESULT OF MULTIVARIATE FORECASTING" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": "Table 12 provides the complete results of the one-for-all multivariate forecasting benchmark across well-acknowledged datasets. 
We evaluate Timer-XL and baseline models by rolling forecasting: each" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 81, + 504, + 238 + ], + "blocks": [ + { + "bbox": [ + 107, + 81, + 504, + 238 + ], + "lines": [ + { + "bbox": [ + 107, + 81, + 504, + 238 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 504, + 238 + ], + "type": "image", + "image_path": "5e8296caa3edd268c7052da86f65f090ebbf6acf60789400e80184b721ab16b9.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 244, + 504, + 268 + ], + "lines": [ + { + "bbox": [ + 104, + 244, + 504, + 268 + ], + "spans": [ + { + "bbox": [ + 104, + 244, + 504, + 268 + ], + "type": "text", + "content": "Figure 10: Visualization results on multivariate time series datasets. We adopt the forecasting setting of 672-pred-96 on ETTh1 (7 Variables) and Traffic (862 Variables)." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 282, + 506, + 306 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 282, + 506, + 306 + ], + "spans": [ + { + "bbox": [ + 104, + 282, + 506, + 306 + ], + "type": "text", + "content": "model is trained with input length 672 and output length 96, and the predicted values are integrated as part of the input in the next iteration until reaching the desired forecast length in \\{96, 192, 336, 720\\}." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 310, + 506, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 310, + 506, + 365 + ], + "spans": [ + { + "bbox": [ + 104, + 310, + 506, + 365 + ], + "type": "text", + "content": "We highlight that this benchmark evaluates the fundamental model versatility of deep forecasters, which aims to avoid the burden of extensive training and model storage and to better serve real-world forecasting requirements. On this benchmark, time-series Transformers significantly stand out from other baseline models, and our proposed Timer-XL can achieve state-of-the-art performance, making it a strong fundamental backbone for a one-for-all forecaster." 
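The rolling-forecast procedure described above can be sketched as follows (a minimal illustration, not the authors' code); `model` is a hypothetical callable mapping a length-672 window to a length-96 prediction.

```python
import numpy as np

def rolling_forecast(model, history: np.ndarray, pred_len: int,
                     lookback: int = 672, out_len: int = 96) -> np.ndarray:
    """Autoregressive rolling forecast: predictions are fed back as inputs."""
    window = list(history[-lookback:])
    outputs = []
    # ceil(pred_len / out_len) iterations; e.g., pred_len=720 needs 8 rounds of 96
    for _ in range(-(-pred_len // out_len)):
        step = model(np.asarray(window[-lookback:]))  # predict the next out_len points
        outputs.extend(step)
        window.extend(step)                           # integrate predictions into the next input
    return np.asarray(outputs[:pred_len])
```

Note that each extra iteration reuses model outputs as inputs, which is exactly where the multi-step error accumulation discussed in the hyperparameter study can arise.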
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 380, + 324, + 392 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 380, + 324, + 392 + ], + "spans": [ + { + "bbox": [ + 105, + 380, + 324, + 392 + ], + "type": "text", + "content": "E.2 FULL RESULT OF ZERO-SHOT FORECASTING" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 401, + 506, + 446 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 401, + 506, + 446 + ], + "spans": [ + { + "bbox": [ + 104, + 401, + 506, + 446 + ], + "type": "text", + "content": "Table 13 provides the full results of zero-shot forecasting on the benchmark from Wu et al. (2022). We build Timer-XL based on the configuration in Table 11, which is pre-trained on the aggregated datasets of UTSD (Liu et al., 2024c) and LOTSA (Woo et al., 2024). The patch size of Timer-XL is set to 96, and we conduct rolling forecasts to obtain the desired forecast length in " + }, + { + "bbox": [ + 104, + 401, + 506, + 446 + ], + "type": "inline_equation", + "content": "\\{96, 192, 336, 720\\}" + }, + { + "bbox": [ + 104, + 401, + 506, + 446 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 451, + 506, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 451, + 506, + 518 + ], + "spans": [ + { + "bbox": [ + 104, + 451, + 506, + 518 + ], + "type": "text", + "content": "We evaluate the most advanced large models based on their official model checkpoints, including TimeMoE (Shi et al., 2024), Moirai (Woo et al., 2024), TimesFM (Das et al., 2023), MOMENT (Goswami et al., 2024), and Chronos (Ansari et al., 2024). We conduct zero-shot evaluations on datasets that are not included during the pre-training of the corresponding models. For each of the evaluated models, we use its maximum input length during inference. The metric (MSE/MAE) is averaged from all predicted windows in the test split." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 532, + 299, + 542 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 532, + 299, + 542 + ], + "spans": [ + { + "bbox": [ + 105, + 532, + 299, + 542 + ], + "type": "text", + "content": "E.3 ABLATION STUDY OF TIMEATTENTION" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 553, + 504, + 631 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 553, + 504, + 631 + ], + "spans": [ + { + "bbox": [ + 104, + 553, + 504, + 631 + ], + "type": "text", + "content": "We conduct evaluations on TimeAttention to validate the effectiveness of position embeddings. As for the variable embedding, the distinction between endogenous and exogenous variables can improve performance. Based on our observation of the learned attention values (" + }, + { + "bbox": [ + 104, + 553, + 504, + 631 + ], + "type": "inline_equation", + "content": "u > v" + }, + { + "bbox": [ + 104, + 553, + 504, + 631 + ], + "type": "text", + "content": "), we find that a token reasonably pays more attention to tokens of the endogenous variable. This leaves a prior to mask out minor dependencies, focusing less on exogenous variables. For the temporal dimension, the other position embeddings are inferior to RoPE, since RoPE applies an affine transformation while the others are additive, and it is thereby less easily confused with the additive embedding used for variables." 
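To illustrate why RoPE acts affinely rather than additively, here is a minimal sketch of rotary position embedding (one common variant, not necessarily the exact Timer-XL implementation): each feature pair of a query or key is rotated by a position-dependent angle, so position enters multiplicatively and cannot be confounded with an additive variable embedding.

```python
import numpy as np

def apply_rope(x: np.ndarray, base: float = 10000.0) -> np.ndarray:
    """Rotate feature pairs of x (seq_len, dim) by position-dependent angles."""
    seq_len, dim = x.shape
    half = dim // 2
    freqs = base ** (-np.arange(half) / half)      # per-pair rotation frequencies
    angles = np.outer(np.arange(seq_len), freqs)   # (seq_len, half)
    cos, sin = np.cos(angles), np.sin(angles)
    x1, x2 = x[:, :half], x[:, half:]
    # 2D rotation applied pair-wise: an affine map, not an additive offset
    return np.concatenate([x1 * cos - x2 * sin, x1 * sin + x2 * cos], axis=-1)

q = apply_rope(np.random.randn(8, 16))  # queries/keys are rotated before attention
```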
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 645, + 400, + 656 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 645, + 400, + 656 + ], + "spans": [ + { + "bbox": [ + 105, + 645, + 400, + 656 + ], + "type": "text", + "content": "E.4 SUPPLEMENTARY RESULTS OF LONG-CONTEXT FORECASTING" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 666, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 666, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 666, + 505, + 733 + ], + "type": "text", + "content": "Long context is a basic indicator of foundation models, which can support emergent capabilities such as prompting, in-context learning, retrieval-augmented generation, etc. However, the long-context forecasting paradigm receives less attention in the current community, which may be due to the lack of benchmarks. In the meteorological ERA5, it is necessary to support a context of more than a year to contain specific cycles (such as El Niño). In Table 15, the performance of Timer-XL and DLinear generally improves with increased context length. By contrast, it reveals the performance" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 310, + 760 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 108, + 180, + 502, + 665 + ], + "blocks": [ + { + "bbox": [ + 105, + 154, + 504, + 177 + ], + "lines": [ + { + "bbox": [ + 105, + 154, + 504, + 177 + ], + "spans": [ + { + "bbox": [ + 105, + 154, + 504, + 177 + ], + "type": "text", + "content": "Table 12: Full multivariate forecasting results: we conduct rolling forecast with a single model trained on each dataset (lookback length is 672) and cover four forecast lengths in " + }, + { + "bbox": [ + 105, + 154, + 504, + 177 + ], + "type": "inline_equation", + "content": "\\{96, 192, 336, 720\\}" + }, + { + "bbox": [ + 105, + 154, + 504, + 177 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 108, + 180, + 502, + 665 + ], + "lines": [ + { + "bbox": [ + 108, + 180, + 502, + 665 + ], + "spans": [ + { + "bbox": [ + 108, + 180, + 502, + 665 + ], + "type": "table", + "html": "
ModelsTimer-XL(Ours)Timer(2024c)UniTST(2024a)iTransformer(2023)DLinear(2023)PatchTST(2022)TimesNet(2022)Stationary(2022b)Autoformer(2021)
MetricMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAE
ETTh1960.3640.3970.3710.4040.3790.4150.3870.4180.3690.4000.3730.4030.4520.4630.4520.4780.4670.499
1920.4050.4240.4070.4290.4150.4380.4160.4370.4050.4220.4050.4250.4740.4770.4840.5100.4920.523
3360.4270.4390.4340.4450.4400.4540.4340.4500.4350.4450.4230.4400.4930.4890.5110.5220.5190.531
7200.4390.4590.4610.4660.4820.4820.4470.4730.4930.5080.4450.4710.5600.5340.5710.5430.5890.560
Avg0.4090.4300.4180.4360.4290.4470.4210.4450.4260.4440.4120.4350.4950.4910.5050.5130.5170.528
ETTh2960.2770.3430.2850.3440.3430.3980.3040.3620.3050.3710.2890.3470.3400.3740.3480.4030.3580.397
1920.3480.3910.3650.4000.3760.4200.3720.4070.4120.4390.3600.3930.4020.4140.4080.4480.4350.451
3360.3750.4180.4120.4400.3990.4350.4180.4400.5270.5080.3890.4200.4520.4520.4240.4570.4540.475
7200.4090.4580.4680.4870.4190.4570.4630.4760.8300.6530.3980.4400.4620.4680.4480.4760.4790.492
Avg0.3520.4020.3820.4180.3840.4280.3890.4210.5180.4930.3590.4000.4140.4270.4070.4460.4310.454
ETTm1960.2900.3410.2810.3380.2890.3480.3110.3650.3070.3500.2850.3460.3380.3750.4140.4140.4660.466
1920.3370.3690.3300.3680.3320.3750.3530.3900.3370.3680.3290.3720.3710.3870.5240.4820.5040.496
3360.3740.3920.3670.3930.3650.3970.3870.4110.3660.3870.3630.3940.4100.4110.5410.4970.5740.530
7200.4370.4280.4320.4330.4210.4310.4520.4450.4190.4190.4210.4260.4780.4500.5780.5090.5960.558
Avg0.3590.3820.3520.3830.3520.3880.3760.4030.3570.3810.3490.3850.3990.4060.5140.4750.5350.512
ETTm2960.1750.2570.1750.2570.1710.2600.1830.2720.1670.2630.1720.2590.1870.2670.2370.3060.2550.339
1920.2420.3010.2390.3010.2280.2300.2500.3150.2300.3110.2330.2990.2490.3090.3300.3870.2790.335
3360.2930.3370.2930.3420.2820.3360.3110.3560.2980.3610.2800.3310.3210.3510.4040.4240.3310.374
7200.3760.3900.3920.4070.3800.3980.4170.4190.4320.4460.3570.3820.4970.4030.5250.4860.4130.450
Avg0.2710.3220.2750.3270.2650.3060.2900.3400.2820.3450.2610.3180.3140.3330.3740.4010.3200.374
ECL960.1270.2190.1290.2210.1300.2250.1330.2290.1380.2380.1320.2320.1840.2880.1850.2870.2560.357
1920.1450.2360.1480.2390.1500.2440.1580.2580.1520.2510.1510.2500.1920.2950.2820.3680.2910.376
3360.1590.2520.1640.2560.1660.2620.1680.2620.1670.2680.1710.2720.2000.3030.2890.3770.2900.379
7200.1870.2770.2010.2890.2060.2970.2050.2940.2030.3020.2220.3180.2280.3250.3050.3990.3200.403
Avg0.1550.2460.1610.2510.1630.2570.1640.2580.1650.2650.1690.2680.2010.3030.2650.3580.2890.379
Traffic960.3400.2380.3480.2400.3590.2500.3530.2590.3990.2850.3590.2550.5930.3150.6100.3220.6750.412
1920.3600.2470.3690.2500.3730.2570.3730.2670.4090.2900.3770.2650.5960.3170.6260.3460.6790.423
3360.3770.2560.3880.2600.3860.2650.3860.2750.4220.2970.3930.2760.6000.3190.6330.3520.6880.440
7200.4180.2790.4310.2850.4210.2860.4250.2960.4610.3190.4360.3050.6190.3350.6510.3660.6930.457
Avg0.3740.2550.3840.2590.3850.2650.3840.2740.4230.2980.3910.2750.6020.3220.6300.3470.6840.433
Weather960.1570.2050.1510.2020.1520.2060.1740.2250.1690.2290.1490.2020.1690.2280.1850.2410.3550.409
1920.2060.2500.1960.2450.1980.2490.2270.2680.2110.2680.1940.2450.2220.2690.2860.3250.4210.450
3360.2590.2910.2490.2880.2510.2910.2900.3090.2580.3060.2440.2850.2900.3100.3230.3470.4520.465
7200.3370.3440.3300.3440.3220.3400.3740.3600.3200.3620.3170.3380.3760.3640.4360.4010.5130.496
Avg0.2400.2730.2320.2700.2310.2720.2660.2910.2390.2910.2260.2680.2640.2930.3080.3290.4350.455
Solar-Energy960.1620.2210.2120.2300.1900.2400.1830.2650.1930.2580.1680.2370.1800.2720.1990.2900.2060.296
1920.1870.2390.2320.2460.2230.2640.2050.2830.2140.2740.1890.2570.1990.2860.2430.3070.2540.328
3360.2050.2550.2370.2530.2500.2830.2240.2990.2330.2910.2120.2770.2200.3010.2640.3220.2720.330
7200.2380.2790.2520.2660.2920.3110.2390.3160.2460.3070.2400.3050.2510.3210.3100.3390.3260.347
Avg0.1980.2490.2330.2490.2410.2750.2130.2910.2220.2830.2020.2690.2130.2950.2540.3150.2650.325
\\( 1^{\\text{st}} \\)Count23
", + "image_path": "ea8eef19601bc8ab862161aa3f0584b058f8fbebe4c2ccfc6a294e055a883ecc.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 108, + 141, + 503, + 460 + ], + "blocks": [ + { + "bbox": [ + 105, + 114, + 504, + 137 + ], + "lines": [ + { + "bbox": [ + 105, + 114, + 504, + 137 + ], + "spans": [ + { + "bbox": [ + 105, + 114, + 504, + 137 + ], + "type": "text", + "content": "Table 13: Full results of zero-shot forecasting. A lower MSE or MAE indicates a better prediction. " + }, + { + "bbox": [ + 105, + 114, + 504, + 137 + ], + "type": "inline_equation", + "content": "1^{\\mathrm{st}}" + }, + { + "bbox": [ + 105, + 114, + 504, + 137 + ], + "type": "text", + "content": " Count represents the number of wins achieved by a model under all prediction lengths and datasets." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 108, + 141, + 503, + 460 + ], + "lines": [ + { + "bbox": [ + 108, + 141, + 503, + 460 + ], + "spans": [ + { + "bbox": [ + 108, + 141, + 503, + 460 + ], + "type": "table", + "html": "
ModelsTimer-XLBase(Ours)Time-MoEBase(2024)Time-MoELarge(2024)Time-MoEUltra(2024)MoiraiSmall(2024)MoiraiBase(2024)MoiraiLarge(2024)TimesFM(2023)MOMENT(2024)ChronosBase(2024)ChronosLarge(2024)
MetricMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAEMSEMAE
ETTm1960.3170.3560.3380.3680.3090.3570.2810.3410.4180.3920.3630.3560.3800.3610.3610.3700.6540.5270.4540.4080.4570.403
1920.3580.3810.3530.3880.3460.3810.3050.3580.4310.4050.3880.3750.4120.3830.4140.4050.6620.5320.5670.4770.5300.450
3360.3860.4010.3810.4130.3730.4080.3690.3950.4330.4120.4160.3920.4360.4000.4450.4290.6720.5370.6620.5250.5770.481
7200.4300.4310.5040.4930.4750.4770.4690.4720.4620.4320.4600.4180.4620.4200.5120.4710.6920.5510.9000.5910.6600.526
Avg0.3730.3920.3940.4150.3760.4050.3560.3910.4360.4100.4060.3850.4220.3910.4330.4180.6700.5360.6450.5000.5550.465
ETTm2960.1890.2770.2010.2910.1970.2860.1980.2880.2140.2880.2050.2730.2110.2740.2020.2700.2600.3350.1990.2740.1970.271
1920.2410.3150.2580.3340.2500.3220.2350.3120.2840.3320.2750.3160.2810.3180.2890.3210.2890.3500.2610.3220.2540.314
3360.2860.3480.3240.3730.3370.3750.2930.3480.3310.3620.3290.3500.3410.3550.3600.3660.3240.3690.3260.3660.3130.353
7200.3750.4020.4880.4640.4800.4610.4270.4280.4020.4080.4370.4110.4850.4280.4620.4300.3940.4090.4550.4390.4160.415
Avg0.2730.3360.3170.3650.3160.3610.2880.3440.3070.3470.3110.3370.3290.3430.3280.3460.3160.3650.3100.3500.2950.338
ETTh1960.3690.3910.3570.3810.3500.3820.3490.3790.4010.4020.3760.3920.3810.3880.4140.4040.6880.5570.4400.3930.4410.390
1920.4050.4130.3840.4040.3880.4120.3950.4130.4350.4210.4120.4130.4340.4150.4650.4340.6880.5600.4920.4260.5020.524
3360.4180.4230.4110.4340.4110.4300.4470.4530.4380.4340.4330.4280.4850.4450.5030.4560.6750.5630.5500.4620.5760.467
7200.4230.4410.4490.4770.4270.4550.4570.4620.4390.4540.4470.4440.6110.5100.5110.4810.6830.5850.8820.5910.8350.583
Avg0.4040.4170.4000.4240.3940.4190.4120.4260.4280.4270.4170.4190.4800.4390.4730.4430.6830.5660.5910.4680.5880.466
ETTh2960.2830.3420.3050.3590.3020.3540.2920.3520.2970.3360.2940.3300.2960.3300.3150.3490.3420.3960.3080.3430.3200.345
1920.3400.3790.3510.3860.3640.3850.3470.3790.3680.3810.3650.3750.3610.3710.3880.3950.3540.4020.3840.3920.4060.399
3360.3660.4000.3910.4180.4170.4250.4060.4190.3700.3930.3760.3900.3900.3900.4220.4270.3560.4070.4290.4300.4920.453
7200.3970.4310.4190.4540.5370.4960.4390.4470.4110.4260.4160.4330.4230.4180.4430.4540.3950.4340.5010.4770.6030.511
Avg0.3470.3880.3660.4040.4050.4150.3710.3990.3610.3840.3620.3820.3670.3770.3920.4060.3610.4090.4050.4100.4550.427
ECL960.1410.237
1920.1590.254
3360.1770.272
7200.2190.308
Avg0.1740.278
Weather960.1710.225
1920.2210.271
3360.2740.311
7200.3560.370
Avg0.2560.294
\\( 1^{\\text{st}} \\)Count1510213010700051100120002
", + "image_path": "df82303335f7a17529bf97a572706050aec2649fac1387afa26d0099c989a53e.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 121, + 462, + 441, + 471 + ], + "lines": [ + { + "bbox": [ + 121, + 462, + 441, + 471 + ], + "spans": [ + { + "bbox": [ + 121, + 462, + 441, + 471 + ], + "type": "text", + "content": "* Dataset for pre-training is not evaluated on corresponding models, which is denoted by a dash (-)." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_footnote" + }, + { + "bbox": [ + 121, + 471, + 465, + 479 + ], + "lines": [ + { + "bbox": [ + 121, + 471, + 465, + 479 + ], + "spans": [ + { + "bbox": [ + 121, + 471, + 465, + 479 + ], + "type": "text", + "content": "* Traffic from (PEMS) is generally used during the pre-training of large models and thus not evaluated here." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_footnote" + }, + { + "bbox": [ + 121, + 479, + 392, + 489 + ], + "lines": [ + { + "bbox": [ + 121, + 479, + 392, + 489 + ], + "spans": [ + { + "bbox": [ + 121, + 479, + 392, + 489 + ], + "type": "text", + "content": "* Our model checkpoint is available at https://huggingface.co/thuml/timer-base-84m." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 106, + 596, + 509, + 703 + ], + "blocks": [ + { + "bbox": [ + 104, + 558, + 504, + 592 + ], + "lines": [ + { + "bbox": [ + 104, + 558, + 504, + 592 + ], + "spans": [ + { + "bbox": [ + 104, + 558, + 504, + 592 + ], + "type": "text", + "content": "Table 14: Embedding ablation in TimeAttention. For the temporal dimension, we compare prevalent relative and absolute position embeddings. As for the variable dimension, we explore the effectiveness of the variable embedding that distinguishes endogenous and exogenous variables." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 596, + 509, + 703 + ], + "lines": [ + { + "bbox": [ + 106, + 596, + 509, + 703 + ], + "spans": [ + { + "bbox": [ + 106, + 596, + 509, + 703 + ], + "type": "table", + "html": "
DesignTemporalVariableTrafficWeatherSolar-EnergyERA5-MS
MSEMAEMSEMAEMSEMAEMSEMAE
Timer-XLRoPE (2024)with0.3400.2380.1570.2050.1620.2210.1640.307
ReplaceALiBi (2021)with0.3510.2460.1620.2120.1880.2100.1670.308
Relative (2020)with0.3610.2500.1630.2140.1970.2150.1680.309
Absolute (2017)with0.3810.2700.1590.2070.1710.2040.1650.306
w/oRoPE (2024)w/o0.3610.2540.1710.2170.1810.2210.2350.373
w/ow/o0.3630.2530.1640.2150.1940.2150.1670.309
", + "image_path": "535dd2678173801b4e371e0ddfab3b75b17ef7b397ed9875f0a61f6bf3a672fd.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 108, + 82, + 309, + 174 + ], + "blocks": [ + { + "bbox": [ + 108, + 82, + 309, + 174 + ], + "lines": [ + { + "bbox": [ + 108, + 82, + 309, + 174 + ], + "spans": [ + { + "bbox": [ + 108, + 82, + 309, + 174 + ], + "type": "image", + "image_path": "ea692af7f9c14e27355f9cc560e2556cfcb2e4f62c0bebaeeebdf1a1b68d30dd.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 317, + 89, + 506, + 171 + ], + "blocks": [ + { + "bbox": [ + 317, + 89, + 506, + 171 + ], + "lines": [ + { + "bbox": [ + 317, + 89, + 506, + 171 + ], + "spans": [ + { + "bbox": [ + 317, + 89, + 506, + 171 + ], + "type": "image", + "image_path": "0528daa29f797b75cecd95ce4f5e853b4bfa4258b386d370e67434d2bcdd22bf.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 108, + 178, + 310, + 269 + ], + "blocks": [ + { + "bbox": [ + 108, + 178, + 310, + 269 + ], + "lines": [ + { + "bbox": [ + 108, + 178, + 310, + 269 + ], + "spans": [ + { + "bbox": [ + 108, + 178, + 310, + 269 + ], + "type": "image", + "image_path": "b48dafd63fd63593b1db526a2162e08aafa9da846564383f156ed3bc3af3dfa1.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 135, + 277, + 473, + 289 + ], + "lines": [ + { + "bbox": [ + 135, + 277, + 473, + 289 + ], + "spans": [ + { + "bbox": [ + 135, + 277, + 473, + 289 + ], + "type": "text", + "content": "Figure 11: Case studies of learned attention in encoder-/decoder-only Transformers." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 317, + 186, + 505, + 266 + ], + "blocks": [ + { + "bbox": [ + 317, + 186, + 505, + 266 + ], + "lines": [ + { + "bbox": [ + 317, + 186, + 505, + 266 + ], + "spans": [ + { + "bbox": [ + 317, + 186, + 505, + 266 + ], + "type": "image", + "image_path": "d1f3b533ee13e593845504f471dde99e153a915b60dcf400c032ae496a7eac2b.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 301, + 504, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 301, + 504, + 357 + ], + "spans": [ + { + "bbox": [ + 104, + 301, + 504, + 357 + ], + "type": "text", + "content": "degradation of PatchTST. Similar to the observations in Figure 3, the encoder-only architecture produces inferior predictions after thousands of time points, which can be concealed due to the short context adopted in previous benchmarks. 
Although PatchTST has conducted an initial exploration with contexts of hundreds of time points, it does not work appropriately in ever-longer contexts. Therefore, we believe that context bottlenecks deserve further exploration in this community." + } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 106, + 391, + 506, + 515 + ], + "blocks": [ + { + "bbox": [ + 105, + 375, + 504, + 388 + ], + "lines": [ + { + "bbox": [ + 105, + 375, + 504, + 388 + ], + "spans": [ + { + "bbox": [ + 105, + 375, + 504, + 388 + ], + "type": "text", + "content": "Table 15: Performance on ERA5 (pred-1day). Lookback lengths vary from daily to yearly contexts." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 391, + 506, + 515 + ], + "lines": [ + { + "bbox": [ + 106, + 391, + 506, + 515 + ], + "spans": [ + { + "bbox": [ + 106, + 391, + 506, + 515 + ], + "type": "table", + "html": "
ModelsTimer-XLPatchTSTDLinear
MetricMSEMAEMSEMAEMSEMAE
Lookback-8 (1 Day)0.08470.21000.08970.21960.09700.2276
Lookback-32 (4 Day)0.07130.19280.07780.20800.08410.2113
Lookback-56 (1 Week)0.06880.18910.07850.20820.08140.2081
Lookback-224 (1 Month)0.06750.18680.07450.20420.07880.2048
Lookback-960 (4 Month)0.06670.18630.11940.26960.07730.2031
Lookback-2944 (1 Year)0.06630.18570.11090.26380.07630.2024
", + "image_path": "a1c342d369cdba7a3b2b11e76c67658ad5a342d12d35635d85cc25c22c782cdd.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 531, + 504, + 587 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 531, + 504, + 587 + ], + "spans": [ + { + "bbox": [ + 104, + 531, + 504, + 587 + ], + "type": "text", + "content": "Representation Analysis We further delve into long-context modeling from the perspective of learned representations. As shown in Figure 11, the decoder-only model can selectively focus on the previous context while PatchTST wrongly focuses on noisy parts. Since causality is the basis of forecasting, using causal masks leads to coherent token embeddings, while the unmasked attention mechanism may break the causality and prevent the model from telling each tokens." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 598, + 506, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 598, + 506, + 654 + ], + "spans": [ + { + "bbox": [ + 104, + 598, + 506, + 654 + ], + "type": "text", + "content": "Normalization Section 4.1 has discussed instance normalization (Kim et al., 2021). It generally improves the performance of the previous encoder-only Transformers but leads to special problems in decoder-only Transformers (e.g., unmatched statistics in multi-step autoregression). However, it is indicative that Timer-XL without ReVIN can achieve competitive performance on well-acknowledged benchmarks in Table 16, while the performance of PatchTST may heavily rely on this normalization." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 667, + 285, + 678 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 667, + 285, + 678 + ], + "spans": [ + { + "bbox": [ + 105, + 667, + 285, + 678 + ], + "type": "text", + "content": "E.5 ILLUSTRATION OF TIMEATTENTION" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 687, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 687, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 687, + 506, + 733 + ], + "type": "text", + "content": "Although the formulation to generalize from 1D sequences to multivariate time series is straightforward, Timer-XL is built on a decoder-only Transformer, an underexploited backbone among current time series models. As shown in Figure 12, challenges lie in capturing fine-grained dependencies between all variables in the patch level, while maintaining temporal causality in multiple sequences." 
+ } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 108, + 104, + 504, + 179 + ], + "blocks": [ + { + "bbox": [ + 110, + 89, + 499, + 102 + ], + "lines": [ + { + "bbox": [ + 110, + 89, + 499, + 102 + ], + "spans": [ + { + "bbox": [ + 110, + 89, + 499, + 102 + ], + "type": "text", + "content": "Table 16: Evaluations (672-pred-96) on the effect of RevIN (Kim et al., 2021) on Transformers." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 108, + 104, + 504, + 179 + ], + "lines": [ + { + "bbox": [ + 108, + 104, + 504, + 179 + ], + "spans": [ + { + "bbox": [ + 108, + 104, + 504, + 179 + ], + "type": "table", + "html": "
ModelsTimer-XL with ReVINTimer-XL w/o ReVINPatchTST with ReVINPatchTST w/o ReVIN
MetricMSE | MAEMSE | MAEMSE | MAEMSE | MAE
ETTh10.364 | 0.3970.370 | 0.4010.370 | 0.3990.421 | 0.448
Weather0.157 | 0.2050.151 | 0.2050.149 | 0.1980.173 | 0.242
ECL0.127 | 0.2190.130 | 0.2250.129 | 0.2220.138 | 0.244
", + "image_path": "2896537c0bd20abbef90f4319d3f887646983044366ec717bc89af867eec5895.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 106, + 205, + 246, + 366 + ], + "blocks": [ + { + "bbox": [ + 141, + 191, + 203, + 202 + ], + "lines": [ + { + "bbox": [ + 141, + 191, + 203, + 202 + ], + "spans": [ + { + "bbox": [ + 141, + 191, + 203, + 202 + ], + "type": "text", + "content": "(a) Univariate" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 106, + 205, + 246, + 366 + ], + "lines": [ + { + "bbox": [ + 106, + 205, + 246, + 366 + ], + "spans": [ + { + "bbox": [ + 106, + 205, + 246, + 366 + ], + "type": "image", + "image_path": "a576faa6ff22bb6735f90db34194db5a7caf1608a31e20c518c7337fc4a9bf76.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 265, + 205, + 396, + 366 + ], + "blocks": [ + { + "bbox": [ + 350, + 191, + 418, + 202 + ], + "lines": [ + { + "bbox": [ + 350, + 191, + 418, + 202 + ], + "spans": [ + { + "bbox": [ + 350, + 191, + 418, + 202 + ], + "type": "text", + "content": "(b) Multivariate" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 265, + 205, + 396, + 366 + ], + "lines": [ + { + "bbox": [ + 265, + 205, + 396, + 366 + ], + "spans": [ + { + "bbox": [ + 265, + 205, + 396, + 366 + ], + "type": "image", + "image_path": "c661a2004600bd871e7770694566693df277b6790453406217428e8dfd8c73a7.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 407, + 205, + 505, + 367 + ], + "blocks": [ + { + "bbox": [ + 407, + 205, + 505, + 367 + ], + "lines": [ + { + "bbox": [ + 407, + 205, + 505, + 367 + ], + "spans": [ + { + "bbox": [ + 407, + 205, + 505, + 367 + ], + "type": "image", + "image_path": "79a2f6434987c9c6c32267b735d53096628ec361f836171c9f619020a6e96774.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 117, + 374, + 492, + 387 + ], + "lines": [ + { + "bbox": [ + 117, + 374, + 492, + 387 + ], + "spans": [ + { + "bbox": [ + 117, + 374, + 492, + 387 + ], + "type": "text", + "content": "Figure 12: Illustration of TimeAttention for modeling univariate and multivariate time series." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 399, + 506, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 399, + 506, + 445 + ], + "spans": [ + { + "bbox": [ + 104, + 399, + 506, + 445 + ], + "type": "text", + "content": "Technically, we introduce the masking formulation, whose key lies in the grouped causality of flattened 2D sequences. We derive it based on the Kronecker Product, which disentangles the large attention map into formalizable temporal and variable dependencies. It can be naturally extended to covariates or pre-defined variable dependencies, which may inspire a lot of future explorations." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 460, + 195, + 472 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 460, + 195, + 472 + ], + "spans": [ + { + "bbox": [ + 105, + 460, + 195, + 472 + ], + "type": "text", + "content": "F LIMITATIONS" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 485, + 506, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 485, + 506, + 586 + ], + "spans": [ + { + "bbox": [ + 104, + 485, + 506, + 586 + ], + "type": "text", + "content": "Timer-XL is a unified model for time series forecasting. It can be used for task-specific training or scalable pre-training, handling varying-length and multivariate time series. As an autoregressive model, Timer-XL necessitates iterative generation for long-term forecasting, which may lead to error accumulation and inflexibility in the output length. In the future, we plan to incorporate multi-resolution patches for input and output series. Furthermore, given that Timer-XL explicitly captures fine-grained token dependencies, there remains significant potential to reduce the complexity of TimeAttention, particularly in high-dimensional and lengthy time series. Finally, we will investigate the factors contributing to the stagnation of Transformer performance in extremely long contexts, and seek insights in the time series modality to improve context efficiency." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2025/To CoT or not to CoT_ Chain-of-thought helps mainly on math and symbolic reasoning/78080855-33d6-4037-9b8c-edc307a2e575_content_list.json b/2025/To CoT or not to CoT_ Chain-of-thought helps mainly on math and symbolic reasoning/78080855-33d6-4037-9b8c-edc307a2e575_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..798692055a671305b6a13cdb30c57cce66c51e16 --- /dev/null +++ b/2025/To CoT or not to CoT_ Chain-of-thought helps mainly on math and symbolic reasoning/78080855-33d6-4037-9b8c-edc307a2e575_content_list.json @@ -0,0 +1,3441 @@ +[ + { + "type": "text", + "text": "TO COT OR NOT TO COT? 
CHAIN-OF-THOUGHT HELPS MAINLY ON MATH AND SYMBOLIC REASONING", + "text_level": 1, + "bbox": [ + 171, + 99, + 823, + 146 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Zayne Sprague\\*, Fangcong Yin\\*, Juan Diego Rodriguez\\*, Dongwei Jiang\\*, Manya Wadhwa\\*, Prasann Singhal\\*, Xinyu Zhao\\*, Xi Ye $^{\\text{心}}$ , Kyle Mahowald\\*, Greg Durrett\\*", + "bbox": [ + 179, + 169, + 720, + 215 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$\\spadesuit$ The University of Texas at Austin, $\\diamond$ Johns Hopkins University, $\\diamond$ Princeton University zaynesprague@utexas.edu", + "bbox": [ + 179, + 227, + 756, + 258 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ABSTRACT", + "text_level": 1, + "bbox": [ + 450, + 292, + 547, + 308 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Chain-of-thought (CoT) via prompting is the de facto method for eliciting reasoning capabilities from large language models (LLMs). But for what kinds of tasks is this extra \"thinking\" really helpful? To analyze this, we conducted a quantitative meta-analysis covering over 100 papers using CoT and ran our own evaluations of 20 datasets across 14 models. Our results show that CoT gives strong performance benefits primarily on tasks involving math or logic, with much smaller gains on other types of tasks. On MMLU, directly generating the answer without CoT leads to almost identical accuracy as CoT unless the question or model's response contains an equals sign, indicating symbolic operations and reasoning. Following this finding, we analyze the behavior of CoT on these problems by separating planning and execution and comparing against tool-augmented LLMs. Much of CoT's gain comes from improving symbolic execution, but it underperforms relative to using a symbolic solver. Our results indicate that CoT can be applied selectively, maintaining performance while saving inference costs. Furthermore, they suggest a need to move beyond prompt-based CoT to new paradigms that better leverage intermediate computation across the whole range of LLM applications1.", + "bbox": [ + 228, + 325, + 767, + 547 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/38c07fa5ea4155601f6a3a8985cb6d7c7786e3a335ede3c9f4558f388673790c.jpg", + "image_caption": [ + "Figure 1: Left: meta-analysis of CoT literature; each point is a reported delta of CoT over direct answering for some (LLM, task) pair. Right: average performance of using zero-shot CoT v.s. direct answer prompts across five general reasoning categories, covering 20 datasets with 14 LLMs evaluated on each. In both sets of results, math and other kinds of symbolic reasoning are the domains that consistently see substantial improvements from CoT (red dotted line indicates the mean improvement from CoT across experiments)." 
+ ], + "image_footnote": [], + "bbox": [ + 176, + 556, + 823, + 796 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "1Our code can be found at https://github.com/Zayne-sprague/To-CoT-or-not-to-CoT.", + "bbox": [ + 191, + 909, + 750, + 922 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 INTRODUCTION", + "text_level": 1, + "bbox": [ + 173, + 102, + 336, + 118 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Chain-of-thought (CoT) (Nye et al., 2022; Wei et al., 2022) has become a widely used prompting technique for eliciting reasoning from language models. CoT can provide human-readable explanations of how problems are solved (Joshi et al., 2023; Lanham et al., 2023), but most frequently it is invoked to improve an LLM's ability to answer complex questions via intermediate computation (Madaan & Yazdanbakhsh, 2022; Wang et al., 2023a; Dziri et al., 2023). Current post-training schemes for LLMs heavily infuse CoT capabilities into models: systems like ChatGPT or Llama 3.1 default to CoT when given reasoning problems (OpenAI, 2023; Dubey et al., 2024).", + "bbox": [ + 169, + 133, + 826, + 232 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "CoT has seen widespread usage, but it is most heavily explored in the domain of mathematical reasoning (Zhou et al., 2023a; Fu et al., 2023; Chae et al., 2024; Xu et al., 2024b; Qi et al., 2024). In fact, many \"reasoning\" methods for LLMs are evaluated only in the math domain; for instance, Lightman et al. (2024) frame their paper as \"complex multi-step reasoning\" and Mixtral-Large2's release cited effort \"enhancing the model's reasoning capabilities\", but performance is only reported on GSM8K and MATH. CoT is reported to be effective across a wide range of studies, but many of these studies focus on a narrow slice of the task space. In areas beyond math, results show that CoT is not as useful (Kambhampati et al., 2024a) or can even hurt performance (Wang et al., 2024).", + "bbox": [ + 169, + 238, + 826, + 364 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this work, we aim to evaluate where prompt-based CoT helps and why. We begin with a systematic meta-analysis of recent literature that reports performance of CoT versus direct answering (DA). We then augment this picture by conducting experiments on 20 datasets and 14 contemporary LLMs across zero-shot and few-shot prompt settings. Finding 1: CoT only helps substantially on problems requiring mathematical, logical, or algorithmic reasoning. Figure 1 shows this holds both across the literature and our own experiments. We find only a few cases of large gain in other kinds of tasks, and many of these outliers feature some component of symbolic reasoning. For instance, on MMLU (Hendrycks et al., 2021a) and MMLU Pro (Wang et al., 2024), we analyze the improvements from CoT and find that CoT only gives benefit on math slices of the dataset. As much as $95\\%$ of the total performance gain from CoT on MMLU is attributed to questions containing “ $=$ ” in the question or generated output. 
For non-math questions, we find no features to indicate when CoT will help.", + "bbox": [ + 169, + 369, + 826, + 539 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "How can we better understand why CoT improves on these questions and only these questions? The math and formal logical reasoning datasets we consider can be broken down into two stages of processing: a planning step (e.g., parsing a problem into equations) and an execution step (building intermediate outputs and working towards a solution) (Ye et al., 2023; Wang et al., 2023b; Sun et al., 2024). Finding 2: CoT primarily helps with the execution step that performs computation and symbolic manipulation, but falls short of what LLMs with tool augmentation can do. We find that LMs prompted with CoT can generate executable formal solution plans and execute those plans better than direct answering. But using LMs to generate a solution plan and then using an external symbolic solver to solve the plan outperforms using CoT for both steps for these tasks.", + "bbox": [ + 169, + 544, + 826, + 671 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "These results paint a picture that CoT's utility is often circumscribed by tool augmentation: on problems where CoT helps, we already have more powerful tools than CoT that we can employ, and on \"soft reasoning\" problems like commonsense where no tools exist, we see limited benefit from CoT. This characterization has two major implications. First, CoT is unnecessary for many problems where it is widely employed: there exist more efficient prompting strategies that yield similar performance for much lower inference cost. Second, we see a critical need to move beyond prompt-based CoT to more sophisticated approaches based on search, interacting agents, or models more heavily fine-tuned for CoT. Future work can explore how intermediate computation can be better used to solve challenging problems outside of the math and symbolic reasoning domains.", + "bbox": [ + 169, + 676, + 826, + 803 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 BACKGROUND: CHAIN-OF-THOUGHT", + "text_level": 1, + "bbox": [ + 171, + 823, + 517, + 839 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The tasks we consider in this work consist of a question $\\mathbf{q} \\in \\Sigma^{*}$ for a vocabulary $\\Sigma$ and an answer $a \\in \\mathcal{L}(\\mathbf{q})$ for a label set $\\mathcal{L}(\\mathbf{q})$ . $\\mathcal{L}(\\mathbf{q})$ can consist of a data type like boolean or integer, classification labels, or problem-dependent labels like names of entities from $\\mathbf{q}$ . 
One exception that we still", "bbox": [ 169, 854, 826, 898 ], "page_idx": 1 }, { "type": "header", "text": "Published as a conference paper at ICLR 2025", "bbox": [ 171, 32, 478, 47 ], "page_idx": 1 }, { "type": "page_footnote", "text": "2https://mistral.ai/news/mistral-large-2407/", "bbox": [ 189, 909, 519, 922 ], "page_idx": 1 }, { "type": "page_number", "text": "2", "bbox": [ 493, 948, 504, 959 ], "page_idx": 1 }, { "type": "text", "text": "explore is BiGGen Bench (Kim et al., 2024), which instead relies on an LLM-as-a-judge (Dubois et al., 2023; Zheng et al., 2024b) to provide a label for generated long-form responses.", "bbox": [ 169, 103, 823, 133 ], "page_idx": 2 }, { "type": "text", "text": "Prompting and chain-of-thought for reasoning A large language model places distributions over strings $p(\\mathbf{y}) = \\prod_{i=1}^{n} p_{\\mathrm{LM}}(y_i \\mid \\mathbf{y}_{<i})$ where $\\mathbf{y} \\in \\Sigma^*$ . In practice, we can interpret these as conditional distributions $p(\\mathbf{y} \\mid \\mathbf{x})$ where $\\mathbf{x}$ is a user's prompt. Typical invocation of an LLM involves forming a prompt $\\mathcal{I}(\\mathbf{q})$ that wraps the question with additional instruction, then drawing a sample response $\\tilde{\\mathbf{y}} \\sim p(\\mathbf{y} \\mid \\mathcal{I}(\\mathbf{q}))$ , and finally returning $a = \\text{extract}(\\tilde{\\mathbf{y}})$ using some kind of answer extractor.", "bbox": [ 169, 156, 823, 227 ], "page_idx": 2 }, { "type": "text", "text": "For the tasks we consider in this work, the output $\\tilde{\\mathbf{y}}$ can take one of two forms. A direct answer only contains a string realization of $a$ ; e.g., $\\mathbf{y} = (\\_185, 4)$ , which is detokenized as the answer $a = 1854$ . A chain of thought is a longer sequence $\\mathbf{y}$ including other tokens beyond the answer, e.g., $\\mathbf{y} = (\\_185, 6, \\_\\mathrm{minus}, \\_2, \\_\\mathrm{equals}, \\_185, 4)$ . In both cases, the extract function must parse and detokenize the output; in CoT, there is some extra work to spot where the answer is placed.", "bbox": [ 169, 233, 826, 305 ], "page_idx": 2 }, { "type": "text", "text": "Our prompts can explicitly encourage use of direct answer or chain of thought as strategies, which we denote as $\\mathcal{I}_{\\mathrm{da}}$ and $\\mathcal{I}_{\\mathrm{cot}}$ . For eliciting CoT, this includes strategies like telling a model to \"think step by step\" (Kojima et al., 2022). For directly answering a question, a prompt may say \"immediately generate the answer\". We track the average location of the answer in the generated output for both CoT and direct prompts in Appendix F.3 to ensure that direct answer prompts give the answer early in the output. We also ensure that extract can parse answers from the generated output for each model, prompt, and dataset combination used in our experiments, tailoring the extract function as needed to ensure low unparseable rates for each model and task. All prompts and outputs per dataset per model have been uploaded to Huggingface and we include examples of some of our prompts in Appendix J.
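To make the $\mathcal{I}_{\mathrm{da}}$ / $\mathcal{I}_{\mathrm{cot}}$ distinction and the extract step concrete, here is a minimal sketch; the wrapper wording and the regex are illustrative stand-ins, since the actual prompts and per-model extractors are released separately.

```python
import re

def build_prompt(question: str, cot: bool) -> str:
    """Illustrative zero-shot wrappers for the two strategies (I_cot vs. I_da);
    the paper's real prompts differ and are released on Huggingface."""
    if cot:
        return f"Q: {question}\nThink step by step, then end with 'Answer: <answer>'."
    return f"Q: {question}\nImmediately respond with only 'Answer: <answer>'."

def extract(response: str) -> str | None:
    """Parse the answer out of a sampled response; with CoT the answer can
    appear anywhere, so take the last stated 'Answer:' span."""
    matches = re.findall(r"Answer:\s*(.+)", response)
    return matches[-1].strip() if matches else None

print(extract("1856 minus 2 equals 1854. Answer: 1854"))  # -> '1854'
```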
We also experiment with few-shot CoT prompts, which we find perform similarly to zero-shot prompts; details about these are given in Appendix E.", + "bbox": [ + 169, + 309, + 826, + 464 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Symbolic reasoning Of key importance to this work is whether problems feature symbolic reasoning or not. We consider a problem to be symbolic if it can be grounded in a natural, well agreed-upon formal system. “ $12 \\times 4$ ” is an example of a symbolic problem, which can be grounded in mathematics. Other systems include first-order logic (Saparov & He, 2023; Hua et al., 2024) or planning languages (Liu et al., 2023a; Valmeekam et al., 2023). Formally, for symbolic problems, we define a function $f$ that acts as a map that produces some symbolic expression $S = f(\\mathbf{q})$ from the question. $S$ can be used as input for a solver to derive an answer, $\\hat{a} = \\operatorname{solve}(S)$ .", + "bbox": [ + 169, + 486, + 823, + 585 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Conversely, a problem like where on a river can you hold a cup upright to catch water on a sunny day? from CommonsenseQA (Talmor et al., 2019) is non-symbolic by our definition. While this problem could be formalized with some kind of predicate logic (Zhou et al., 2022; Quan et al., 2024; Zhou et al., 2024) or grounded in some kind of physical simulation (Hao et al., 2023; Wong et al., 2023), there is not a natural nor well agreed-upon framework for solving it.", + "bbox": [ + 169, + 590, + 823, + 662 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We view non-symbolic to symbolic reasoning as a spectrum. MuSR (Sprague et al., 2024) is a \"semisymbolic\" dataset in that it does contain an underlying formal system (e.g., for its murder mysteries portion, the notion that $\\mathrm{motive}(X)\\wedge \\mathrm{means}(X)\\wedge \\mathrm{opportunity}(X)\\Rightarrow \\mathrm{murderer}(X))$ , but also involves substantial commonsense reasoning that does not map onto a formal system. In these cases, we can still form $S = f(\\mathbf{q})$ , but $f$ must rely heavily on a language model and instantiate new information for $S$ that is not directly represented in $\\mathbf{q}$ .", + "bbox": [ + 169, + 667, + 826, + 752 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Central claim Figure 1 shows that there are a large number of positive results on CoT reported in the literature. Informally, we believe many readers of the literature to hold the following view: $\\mathcal{I}_{\\mathrm{cot}}$ will outperform $\\mathcal{I}_{\\mathrm{da}}$ on nearly all reasoning problems, whether those problems involve symbolic or non-symbolic reasoning. Our evidence does not support this conjecture. We will show that this performance boost is strongest for symbolic and semi-symbolic tasks, while giving little to no improvement (or even hurting performance) on non-symbolic tasks.", + "bbox": [ + 169, + 775, + 823, + 859 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 2 + }, + { + "type": "page_footnote", + "text": "3We exclude a number of other \"CoT-like\" approaches in our analysis such as decomposed prompting (Khot et al., 2023; Zheng et al., 2024a) and multi-agent debate (Du et al., 2023; Chen et al., 2024). We focus on single prompt approaches. 
We deal with tool-augmented approaches in Section 5.", + "bbox": [ + 169, + 883, + 823, + 925 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 2 + }, + { + "type": "table", + "img_path": "images/198d13e636eff5af0e8a7fb6ad15b14555b49b634303b36000808ae121126597.jpg", + "table_caption": [ + "Table 1: A few categories for experimental comparisons. Full list in Appendix B." + ], + "table_footnote": [], + "table_body": "
CategoryDescription
Symbolic and algorithmicTasks involving symbol manipulation which can be solved by executing a program. This includes entity tracking datasets (e.g., SCONE, Coin Flip) and algorithmic tasks (e.g., BBH word sorting or finding shortest paths in a graph).
MathTasks requiring mathematical reasoning, from grade-school math to advanced mathematics, including physics questions.
Logical reasoningTasks designed to test for logical reasoning, whether deductive (Saparov & He, 2023, PrOntoQA), inductive (Bowen et al., 2024) or analogical (Ma et al., 2024) reasoning, including syllogisms and logical puzzles.
Encyclopedic knowledgeTasks requiring expert-level in-depth knowledge beyond mere common-sense, usually in an open-book setting.
Mixed datasetsDatasets containing a variety of tasks, such as BIG-Bench Hard (BBH) or MMLU.
......
", + "bbox": [ + 174, + 126, + 823, + 359 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3 RESULTS FROM THE LITERATURE", + "text_level": 1, + "bbox": [ + 171, + 387, + 485, + 402 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Criteria and Process We investigate all papers from ICLR 2024, a representative ML venue, and two representative NLP venues, EACL 2024 and NAACL 2024 (including Findings and Workshop papers). This resulted in 4,642 papers total that filtered using automatic and manual methods to papers including experiments comparing chain-of-thought, $\\mathcal{I}_{\\mathrm{cot}}$ , against direct answering prompts, $\\mathcal{I}_{\\mathrm{direct}}$ . A total of 110 papers were found that matched our criteria with 1,218 experimental comparisons. We then grouped the comparisons by the types of tasks and datasets being evaluated. More details on our automatic and manual filtering, as well as our categorization, can be found in Appendix A and B.", + "bbox": [ + 169, + 421, + 826, + 536 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Results Figure 2 shows the distribution of CoT deltas (CoT prompt minus the direct answer prompt performance) across our categorization of different task types found in the literature. Compared to Figure 1, we take the mean results per paper per category, indicated by blue dots, showing the trend across papers in the literature. The categories are ranked in order of ascending median CoT delta. The three categories which benefited the most from CoT are symbolic reasoning, math, and logical reasoning, with average improvements of 14.2, 12.3, 6.9, respectively. Average performance on these top three tasks with CoT was 56.9, whereas performance without CoT was 45.5. For other categories, the average performance with CoT was 56.8, compared to 56.1 without CoT. We do not consider this small improvement a victory for CoT. CoT involves more computation than direct answering, and a truly fair comparison between the methods should match the compute of the two methods, e.g., assembling across multiple prompts.", + "bbox": [ + 169, + 554, + 826, + 709 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Do any non-math datasets benefit from CoT? On the right side of Figure 2, we show the top 10 outliers from our observed trend, namely papers with high CoT deltas averaged across experiments in tasks other than math, symbolic, or logical reasoning. Although not categorized as math or logic, several of these are related to logical, mathematical or symbolic reasoning in some way. From this list, the dataset which benefits the most most from CoT is BIG-bench Hard (BBH) (Suzgun et al., 2023), a benchmark consisting largely of problems requiring algorithmic, arithmetic or logical reasoning. For instance, BIG-bench Navigate is a spatial reasoning task, but relies heavily on a mathematical primitive of counting steps taken to derive a final conclusion. Similarly, while BIG-bench Temporal is a temporal reasoning task (answering questions about when certain events could have occurred), it requires deductive reasoning to solve. In addition, Legal Argument Reasoning (SemEval-2024 Task 5) (Bongard et al., 2022) was categorized as context-aware QA, but also requires substantial reasoning ability. 
Finally, MMLU-Moral Scenarios (Hendrycks et al., 2021a) requires answering two independent questions at once, which essentially involves a symbolic combination of two simpler questions.", "bbox": [ 169, 729, 826, 925 ], "page_idx": 3 }, { "type": "header", "text": "Published as a conference paper at ICLR 2025", "bbox": [ 173, 32, 478, 47 ], "page_idx": 3 }, { "type": "page_number", "text": "4", "bbox": [ 493, 948, 504, 959 ], "page_idx": 3 }, { "type": "image", "img_path": "images/190393a2874564aba0ecc379cb8f7318fa75276598feeed1d23850a9185d0ff8.jpg", "image_caption": [ "CoT Performance Improvement Across Tasks Aggregated by Paper and Category", "Figure 2: Results from our meta-analysis (grey dots) aggregated by paper and category (blue dots)." ], "image_footnote": [], "bbox": [ 173, 112, 803, 470 ], "page_idx": 4 }, { "type": "text", "text": "There are a few outliers that less clearly follow the trend. ScienceQA (Lu et al., 2022) consists of multiple choice questions across a range of natural and social science disciplines, though it is hard to interpret gains without breaking down performance by subject or question type. The dialogue evaluation dataset from Jia et al. (2024) sees large improvements with CoT, but this is a proprietary dataset, and we note that other essay scoring results in our meta-analysis (Li et al., 2024; Stahl et al., 2024) did not show improvements with CoT. Other non-math, symbolic or logical datasets that benefit from CoT are Commitment Bank (de Marneffe et al., 2019) and the task of eliciting verbalized confidence (Xiong et al., 2024). Nevertheless, these are exceptions to the rule. The majority of the reported benefits from using CoT in the NLP and ML literature comes from math or math-related tasks.", "bbox": [ 169, 523, 826, 662 ], "page_idx": 4 }, { "type": "text", "text": "4 RESULTS FROM EXPERIMENTS", "text_level": 1, "bbox": [ 171, 685, 460, 700 ], "page_idx": 4 }, { "type": "text", "text": "4.1 EXPERIMENTAL SETUP", "text_level": 1, "bbox": [ 171, 715, 377, 729 ], "page_idx": 4 }, { "type": "text", "text": "Dataset, Models, Prompts All datasets, models, and prompts we evaluate over can be found in detail in Tables 3, 4, and 5 of Appendix C. We restricted our experiments to English models commonly used and benchmarked on general reasoning datasets. Our datasets include those which are widely used in the CoT and reasoning literature, including a mix of non-symbolic, semi-symbolic, and symbolic reasoning. They span different formats, including multiple-choice, short-answer, and free-response; however, most of these datasets are multiple choice or short answer, as CoT is not typically used in long-form response settings. We also categorize each dataset into a larger category of reasoning required to solve it: Commonsense, Knowledge, Symbolic, Mathematical, and Soft Reasoning. We define Soft Reasoning as questions relying on commonsense and natural language but going beyond simple inferences about these statements. Finally, we explore several prompting strategies for eliciting reasoning from language models, as past work has emphasized the importance of the prompt (Yang et al., 2024). However, we generally found only slight performance differences; see Appendix D for details. We therefore focus on prompts similar to Kojima et al.
(2022) and Wei et al.", "bbox": [ 169, 743, 826, 925 ], "page_idx": 4 }, { "type": "header", "text": "Published as a conference paper at ICLR 2025", "bbox": [ 173, 32, 478, 47 ], "page_idx": 4 }, { "type": "page_number", "text": "5", "bbox": [ 493, 948, 504, 959 ], "page_idx": 4 }, { "type": "image", "img_path": "images/e1e540c0c8c5b46aa7692952347b23ada9c5a504bb629430beda49a0a2daf1ba.jpg", "image_caption": [ "Figure 3: Left: Performance gain from using CoT for each reasoning category. Right: Performance gain from using CoT for each dataset, averaged across models and broken out across 5 representative models. Red lines indicate median improvement. In both plots we see a consistent trend: most improvements from using CoT are from math and symbolic reasoning." ], "image_footnote": [], "bbox": [ 176, 78, 803, 328 ], "page_idx": 5 }, { "type": "text", "text": "(2022) for zero-shot and few-shot settings, respectively, with alterations to improve the model's ability to produce desired behavior (i.e., formats that allow for easily parsed answers). We upload all our prompts and outputs for each model for each prompting strategy on Huggingface.4.", "bbox": [ 169, 417, 823, 462 ], "page_idx": 5 }, { "type": "text", "text": "Implementation Details We use a high-throughput inference package, vLLM (Kwon et al., 2023), for the model inference process. We use greedy decoding on all models. Our prompts are taken from the Llama 3.1 evaluations when available (Dubey et al., 2024), and minor adjustments are made to unify prompting strategies. For other datasets, we either use the standard prompt for the dataset from the corresponding original paper or implement our own prompt. Our answer parser (extract) is tailored to each dataset and model. Specific details about each dataset, its prompts, and answer extractor can be found in Appendix C.", "bbox": [ 169, 479, 823, 580 ], "page_idx": 5 }, { "type": "text", "text": "4.2 RESULTS", "text_level": 1, "bbox": [ 171, 599, 277, 612 ], "page_idx": 5 }, { "type": "text", "text": "Where does zero-shot CoT improve over direct prompts? On datasets that require math (MATH, GSM8K) or formal logic (ContextHub, MuSR to a lesser degree) to answer the problem.", "bbox": [ 169, 626, 823, 656 ], "page_idx": 5 }, { "type": "text", "text": "Figure 3 on the left shows the average CoT performance improvement for each reasoning category from Figure 1 (right); raw numbers can be found in Table 6 of the Appendix. On the right, Figure 3 shows the performance gain from using CoT for each dataset, averaged across all models and for a selection of individual models. On non-symbolic reasoning categories and datasets, specifically those that contain questions primarily involving commonsense (CSQA, PIQA, SiQA), language understanding (WinoGrande), and reading comprehension (AGI LSAT, ARC-Easy, ARC-Challenge), there is little to no separation between the performance of zero-shot CoT and zero-shot direct answer. Despite these datasets involving reasoning, CoT does not yield improvement.", "bbox": [ 169, 662, 823, 776 ], "page_idx": 5 }, { "type": "text", "text": "By contrast, the mathematical and symbolic categories see much larger improvements, as do the symbolic and many of the semi-symbolic datasets. MATH and GSM8K show gains as large as $41.6\\%$ and $66.9\\%$ , respectively.
The semi-symbolic datasets like ContextHub and MuSR Murder Mysteries show moderate gains. These datasets require the application of logical rules to reach the answer, e.g., first-order logic parsed from simple natural language (ContextHub) or more complex commonsense statements (MuSR Murder Mysteries). All results are shown in Appendix F.1, along with a full list of numeric results for both CoT and direct answer prompting in Table 7. We also explored the few-shot setting and found it had little impact on when CoT will help; see Appendix E.", "bbox": [ 169, 780, 826, 893 ], "page_idx": 5 }, { "type": "header", "text": "Published as a conference paper at ICLR 2025", "bbox": [ 173, 32, 478, 47 ], "page_idx": 5 }, { "type": "page_footnote", "text": "4https://huggingface.co/collections/TAUR-Lab/cot-analysis-project-66bbb9e5e0156e65059895f5", "bbox": [ 189, 909, 857, 924 ], "page_idx": 5 }, { "type": "page_number", "text": "6", "bbox": [ 493, 948, 503, 959 ], "page_idx": 5 }, { "type": "text", "text": "Does the answer format impact where CoT will help? Not much. Free response capabilities required for BiGGen Bench may not benefit from pre-planning.", "bbox": [ 169, 103, 823, 133 ], "page_idx": 6 }, { "type": "text", "text": "Many of the commonly-used datasets for problems other than math are multiple choice. We highlight here that CoT has similar performance to direct answer across models for two datasets that are not multiple-choice and contain varying levels of non-symbolic reasoning. First, MuSiQue (Trivedi et al., 2022) is a short-form QA task requiring multi-hop reasoning. We consider this a semi-symbolic dataset as the questions have an explicit multi-hop structure. Because answer spans in MuSiQue can be paraphrased in many different ways, we use GPT-4o to judge if two answer spans are equivalent. Despite being semi-symbolic, we see no overall improvement from CoT.", "bbox": [ 169, 138, 826, 238 ], "page_idx": 6 }, { "type": "text", "text": "Second, BiGGen Bench (Kim et al., 2024) uses free-form responses as the answer to a question, and an LLM-as-a-judge is used to evaluate these responses on a scale of 1 to 5. Because free-form responses blur the lines between CoT and direct answering, we create a new prompt that asks the language model to plan the free response before giving it. We then only pass the free response to the judge (GPT-4o-mini in our case) with the prompt from Kim et al. (2024). We also filter out any questions that explicitly state \"Think step-by-step\". We plot the performance of BiGGen Bench as the number of times a response receives a score of 4 or better. Despite including many reasoning questions (including several categories of math) and other categories, such as planning, we only see a mild improvement here. Because previous experiments show CoT helping on similar types of questions in the QA format, the lack of similar improvements here could imply that pre-planning is insufficient for unlocking reasoning capabilities in the LLM. Future work is needed to confirm this.", "bbox": [ 169, 243, 826, 398 ], "page_idx": 6 }, { "type": "text", "text": "Are the gains in Knowledge, Soft Reasoning, and Commonsense significant?
Mostly no, except for MMLU, StrategyQA, and MuSR.", "bbox": [ 169, 411, 823, 440 ], "page_idx": 6 }, { "type": "text", "text": "We tested the significance of the improvements from CoT on the 13 datasets in the Knowledge, Soft Reasoning, and Commonsense reasoning categories using paired bootstrapping to assess whether CoT gives a significant improvement. To account for multiple comparisons, we applied a Bonferroni correction, setting the significance threshold to 0.00027 (0.05 corrected for the 14 models and 13 datasets, i.e., 182 comparisons). About $32\\%$ (59 of the 182) model-dataset comparisons in these three reasoning categories showed a significant benefit. Nearly half of these comparisons (26) are on MMLU and MMLU Pro. On these datasets, we find that CoT is mainly helping on math-related questions. StrategyQA and MuSR also received a consistent performance boost across 10 and 6 models, respectively. StrategyQA is often used to benchmark reasoning methods and is built specifically to get a benefit from methods that decompose the question into steps, so a gain in performance is not unprecedented. MuSR, similarly, was built to have multiple steps of complex natural language reasoning, which may benefit from CoT. The remaining significant comparisons are spread across the other datasets and models.", "bbox": [ 169, 446, 826, 613 ], "page_idx": 6 }, { "type": "text", "text": "Why do MMLU and MMLU Pro get a boost? MMLU and MMLU Pro contain many different questions requiring different types of reasoning. We separated MMLU and MMLU Pro questions into two bins, those related to math and those not related to math, by checking if the question's text or generated response from the LLM includes an “=”. Figure 4 shows that a majority of the performance gain seen from MMLU and MMLU Pro is from the math slices of each dataset. See more details in Appendix G.", "bbox": [ 169, 627, 823, 712 ], "page_idx": 6 }, { "type": "text", "text": "5 STRENGTHS AND WEAKNESSES OF COT AT FORMAL REASONING", "text_level": 1, "bbox": [ 171, 731, 743, 747 ], "page_idx": 6 }, { "type": "text", "text": "Previous sections establish that CoT primarily helps with symbolic reasoning tasks, but not why. Many symbolic and semi-symbolic tasks can be broken down into two stages (Ye et al., 2023; Pan et al., 2023; Jiang et al., 2024): planning, producing either a formal or informal specification via prompting (Sun et al., 2024; Wang et al., 2023b), and execution, using the same LM or external solvers. In this section, we attribute the performance gains from CoT on symbolic tasks to these two stages.", "bbox": [ 169, 762, 823, 833 ], "page_idx": 6 }, { "type": "text", "text": "Given a question that requires symbolic reasoning, we define the planning stage as extracting all variables from the context into a formal specification and defining their relations. The execution stage uses a solver that takes as input a plan and can be run in an orderly fashion to derive the final answer.
Using our notation from Section 2, let $f(\\mathbf{q}) = \\mathcal{I}_{\\mathrm{planning}}^{m}(\\mathbf{q})$ be a mapping of the question $\\mathbf{q}$ to a symbolic plan $S_{\\mathrm{plan}}$ that can be executed by the language model or by an external symbolic solver, $\\hat{a} = \\mathrm{solve}(S_{\\mathrm{plan}})$ , where $\\hat{a}$ is the final answer for $\\mathbf{q}$ .", "bbox": [ 169, 838, 826, 926 ], "page_idx": 6 }, { "type": "header", "text": "Published as a conference paper at ICLR 2025", "bbox": [ 171, 32, 478, 47 ], "page_idx": 6 }, { "type": "page_number", "text": "7", "bbox": [ 493, 948, 504, 959 ], "page_idx": 6 }, { "type": "image", "img_path": "images/3a5204f7a54b1eee1699edc2a7d6768cf0cb8700b42d38a92db5a2dc23c86aea.jpg", "image_caption": [ "Improvement of CoT over direct on = vs. no =" ], "image_footnote": [], "bbox": [ 179, 113, 810, 265 ], "page_idx": 7 }, { "type": "text", "text": "Q: Courtney said that there were 48 people, but Kelly said that Courtney had overstated the number by $20\\%$ . If Kelly was right, how many people were there?", "bbox": [ 179, 361, 818, 388 ], "page_idx": 7 }, { "type": "image", "img_path": "images/a174b5c5f50875f9f7bc69fcc6118cec554fc9fed20fb542bddadf8601a330f4.jpg", "image_caption": [ "Figure 4: CoT deltas between MMLU and MMLU Pro performance when a question or generated response contains an “=” (With =) or not (Without =). We filter out any questions that do not result in a final answer (degeneration, etc.). CoT primarily helps on the pairs of questions and generations that contain an “=”, which indicates math-related questions.", "Figure 5: Prompt variants that separate planning and execution for GSM8K. For all prompt variants besides direct answer and CoT (not shown), we few-shot prompt an LLM to first generate a Python program as a solution plan. For Plan + Direct Solver, the LLM is prompted to directly give an answer from the plan; for Plan + CoT Solver, the LLM is prompted to solve the plan step-by-step with CoT and give an answer; for Plan + Tool Solver, we feed the plan into a Python interpreter." ], "image_footnote": [], "bbox": [ 181, 388, 816, 554 ], "page_idx": 7 }, { "type": "text", "text": "By separating planning and execution in this way, we can test how much a language model gains from only having a plan, from having a plan and solving it with CoT, or from having a plan and then solving it with an external symbolic solver. Given a plan $S_{\\mathrm{plan}} \\sim \\mathcal{I}_{\\mathrm{planning}}^{m}(\\mathbf{q})$ , we compare the performance of the settings below to evaluate at which stage the LM is most effective and at which it falls short.", "bbox": [ 169, 669, 823, 727 ], "page_idx": 7 }, { "type": "text", "text": "5.1 SETTINGS EVALUATED", "text_level": 1, "bbox": [ 171, 748, 374, 762 ], "page_idx": 7 }, { "type": "text", "text": "Settings 1 and 2: Few-shot direct answer and CoT: We use the few-shot direct answer and CoT prompts from Section 4.1 as baselines. Figure 5 includes an example of each setting on GSM8K.", "bbox": [ 169, 775, 823, 805 ], "page_idx": 7 }, { "type": "text", "text": "Settings 3 and 4: Plan + Direct Solver and Plan + CoT Solver: Here we take inspiration from Xu et al. (2024a) and generate a symbolic plan using the same strategy as Ye et al. (2023).
Specifically, we use a few-shot prompt $\\mathcal{I}_{\\mathrm{planning}}^m$ to generate a formal specification $S_{\\mathrm{plan}}$ that should be executable by a symbolic solver. In the same prompt, LMs are asked to solve their generated specification $S_{\\mathrm{plan}}$ and derive the final answer $\\tilde{\\mathbf{y}} \\sim p(\\mathbf{y} \\mid \\mathcal{I}_{\\mathrm{da}}(S_{\\mathrm{plan}}))$ , either directly giving the answer after generating the specification ( $Plan + Direct Solver$ ) or providing step-by-step explanations and tracking of intermediate steps for the derivation ( $Plan + CoT Solver$ ). Particularly, $S_{\\mathrm{plan}}$ is a Python program for math datasets, and is a set of first-order logic specifications for logical reasoning datasets.", "bbox": [ 169, 810, 826, 925 ], "page_idx": 7 }, { "type": "header", "text": "Published as a conference paper at ICLR 2025", "bbox": [ 171, 32, 478, 47 ], "page_idx": 7 }, { "type": "page_number", "text": "8", "bbox": [ 493, 948, 503, 959 ], "page_idx": 7 }, { "type": "image", "img_path": "images/e6aa2aaa042764a5eb4e3bba9942d2c9afc160099caf5058485deba84fcaa205.jpg", "image_caption": [ "Figure 6: Performance of prompt variants that separate planning and execution for math and logical reasoning datasets. Despite outperforming direct answer for solving a formal plan and deriving the final answer, CoT is still limited in performing symbolic computations: there is a large performance boost from Plan + Tool Solver over CoT and Plan + CoT Solver on average across all models." ], "image_footnote": [], "bbox": [ 174, 73, 823, 335 ], "page_idx": 8 }, { "type": "text", "text": "Setting 5: Plan + Tool Solver: We then evaluate how effective CoT can be at performing symbolic computations compared with external symbolic solvers. Following prior work on augmenting LMs with tools for math and logic questions (Ye et al., 2023; Pan et al., 2023; Gao et al., 2023; Chen et al., 2023), we generate $S_{\\mathrm{plan}}$ the same way as in CoT Solver, but now feed the plan into a symbolic solver (a Python interpreter or an SMT solver), such that $\\hat{a} = \\operatorname{solve}(S_{\\mathrm{plan}})$ .", "bbox": [ 169, 424, 823, 494 ], "page_idx": 8 }, { "type": "text", "text": "Evaluation Setup: We compare the performance of each setting on math (GSM8K) and logical reasoning (ContextHub and FOLIO) datasets. We follow Gao et al. (2023) to include GSM8K-Hard, a minimally modified version of GSM8K that replaces its numbers with larger ones, to account for the possibility of recent LLMs overfitting GSM8K by data contamination (Zhang et al., 2024).", "bbox": [ 169, 501, 823, 558 ], "page_idx": 8 }, { "type": "text", "text": "For Plan + Direct solver and Plan + CoT solver, we use the few-shot prompts from Ye et al. (2023). For Plan + Tool solver, we use state-of-the-art tool-augmented prompting methods. Particularly, for GSM8K, we use Program-aided Language Model (Gao et al., 2023, PAL), which executes the LM-generated plan with a Python interpreter. For logical reasoning datasets, we use Satisfiability-Aided Language Model (Ye et al., 2023, SatLM), which uses the automated theorem prover Z3 (De Moura & Bjørner, 2008) to solve the generated specifications.
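A minimal sketch of the Plan + Tool Solver pipeline for GSM8K-style problems, using the Figure 5 question: the hard-coded `s_plan` string is a hand-written stand-in for an LM-generated Python plan, the variable names are ours, and the failure fallback mirrors the unparseable-plan handling described next.

```python
# Stand-in for an LM-generated Python plan (S_plan) for the Figure 5
# question: Courtney said 48 people; Kelly says that overstates by 20%.
s_plan = """
reported = 48
overstatement = 0.20
answer = reported / (1 + overstatement)
"""

def solve(plan: str):
    """Plan + Tool Solver: a Python interpreter, not the LM, executes the plan."""
    namespace: dict = {}
    try:
        exec(plan, namespace)          # the external 'tool' call
        return namespace.get("answer")
    except Exception:
        return None                    # broken plans fall back as described below

print(solve(s_plan))  # -> 40.0
```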
If the generated plan cannot be parsed by the tool, we use random guessing when the question is multiple choice, and mark it incorrect otherwise.", "bbox": [ 169, 564, 826, 662 ], "page_idx": 8 }, { "type": "text", "text": "5.2 EVALUATION RESULTS", "text_level": 1, "bbox": [ 171, 680, 374, 694 ], "page_idx": 8 }, { "type": "text", "text": "Figure 6 shows the results across a representative selection of models. Detailed numerical results, including the unparseable rates of model-generated plans, can be found in Appendix H.", "bbox": [ 169, 705, 823, 736 ], "page_idx": 8 }, { "type": "text", "text": "When comparing direct answer with Plan + Direct solver and Plan + CoT solver, we note that for many datasets and models, only having a plan does not account for most of the performance gain. Compared with direct answer, CoT or Plan + CoT solver is needed for strong performance. Tracking the execution with one of these methods gives the strongest accuracy benefit, especially for math-heavy datasets.", "bbox": [ 169, 741, 823, 811 ], "page_idx": 8 }, { "type": "text", "text": "Despite their strength over direct answer and Plan + Direct solver, CoT and Plan + CoT solver are dominated by Plan + Tool solver in most settings. LLMs are limited in their ability to execute and track steps compared with symbolic solvers.", "bbox": [ 169, 818, 823, 861 ], "page_idx": 8 }, { "type": "text", "text": "We argue that these results provide an explanation of why CoT helps on symbolic tasks. While all tasks could feasibly benefit from a detailed description of how to solve each individual question (e.g., a plan in the context of this section), CoT only outperforms direct answer when these steps require a substantial amount of tracing and computation. In these settings, we can see clear performance", "bbox": [ 169, 867, 825, 925 ], "page_idx": 8 }, { "type": "header", "text": "Published as a conference paper at ICLR 2025", "bbox": [ 173, 32, 478, 47 ], "page_idx": 8 }, { "type": "page_number", "text": "9", "bbox": [ 493, 948, 503, 959 ], "page_idx": 8 }, { "type": "text", "text": "benefit from using symbolic solvers; CoT appears to be a poor (but universal) approximation to such solvers. When possible, LLMs should be paired with symbolic solvers at inference time when solving symbolic tasks to achieve consistently better performance over direct answer and CoT.", "bbox": [ 169, 103, 823, 147 ], "page_idx": 9 }, { "type": "text", "text": "6 DISCUSSION AND RELATED WORK", "text_level": 1, "bbox": [ 171, 165, 496, 181 ], "page_idx": 9 }, { "type": "text", "text": "Where is CoT helping and why? Our results showing CoT improvement for math and logic align well with early work on CoT for LLMs such as Scratchpads (Nye et al., 2022). As CoT gained popularity, its application broadened to tasks that canonically do not require multiple steps. It can often yield small improvements over direct answering. We believe this led to the current prevailing sentiment that deliberation should improve performance on any task requiring some type of reasoning (our original claim from Section 2). However, our results show a clear separation between performance on non-symbolic and symbolic tasks.
If, in theory, any question could benefit from deliberation, why is CoT only benefiting the questions that can be solved through symbolic manipulation? Our results from Section 5 suggest that the primary benefit of CoT comes in the ability to execute symbolic steps and track their output. Not all tasks have this feature: for example, questions from CommonsenseQA can hardly be translated into formally grounded and executable solution plans. Datasets like StrategyQA may feature multiple steps of reasoning, but executing those steps is not complex, so the benefits of CoT are small. It is unclear whether explicitly instilling models with particular modes of deliberation, like process of elimination for multiple choice questions, might make them more effective for non-symbolic tasks, or whether there's a fundamental limitation imposed by their pre-training data. We leave this distinction for future work.", + "bbox": [ + 169, + 196, + 826, + 420 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Can we improve CoT further? Our work treats chain-of-thought variants that explicitly don't involve multiple inferences. There is evidence that using additional calls to LLMs can help (Du et al., 2023; Yao et al., 2023; Besta et al., 2023; Chen et al., 2024), but these methods use significantly increased computation, and careful benchmarking sometimes reveals that naive techniques are as good as iterative ones (Olausson et al., 2024). However, past theoretical results show that Transformers are augmented in a fundamental way by CoT (Liu et al., 2023b; Merrill & Sabharwal, 2024); we believe this indicates the potential for improving CoT beyond prompt-based CoT. On the other hand, recent methods showing benefit from \"internalizing\" CoT (Deng et al., 2024) may indicate that explicit generation of intermediate tokens is not used to its full potential.", + "bbox": [ + 169, + 431, + 826, + 559 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Limitations One set of tasks we do not cover in our experiments (except for BiGGen Bench) is long-horizon planning. However, many works in the literature have already discussed the efficacy of planning with CoT. We also do not address the data contamination of some of these models on the datasets. We try to mitigate this by including multiple models, datasets (new and old), and our meta-analysis. For more discussion of planning and dataset contamination, see Appendix I.", + "bbox": [ + 169, + 571, + 823, + 643 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "7 CONCLUSION", + "text_level": 1, + "bbox": [ + 171, + 661, + 320, + 678 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "In this work, we characterize the performance of prompt-based CoT through a meta-analysis of the literature and experiments across different models, datasets, and prompts. We find that CoT predominantly helps on math and formal logic, largely due to its ability to trace the intermediate steps of a problem. But CoT rarely outperforms tool-augmented approaches for these same problems. 
We believe that CoT remains a powerful technique, but to give improvement across a wider range of NLP tasks, research should move beyond prompt-based CoT to new paradigms like search, interacting agents, or better fine-tuned models.", + "bbox": [ + 169, + 691, + 823, + 791 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "REPRODUCIBILITY", + "text_level": 1, + "bbox": [ + 171, + 809, + 333, + 825 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "For our experiments, we provide in-depth details of how we evaluated models on each dataset in Section 4.1 and Appendix C. Furthermore, we release all prompts for every dataset on Huggingface, including per model output and sampling parameters. For our meta-analysis of the literature, we describe our filtering criteria and process of annotating experiments into high-level categories in Section 3 and Appendix B. We also release the full list of papers in our meta-analysis together with extracted experimental comparisons and task category annotations.", + "bbox": [ + 169, + 839, + 823, + 925 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 946, + 509, + 960 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "ACKNOWLEDGMENTS", + "text_level": 1, + "bbox": [ + 171, + 103, + 356, + 118 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "We acknowledge George Tsoukalas for providing insightful feedback throughout the project. We also thank Kaj Bostrom and Eunsol Choi for reviewing and providing feedback on drafts of the work. This work was partially supported by NSF CAREER Award IIS-2145280 (to Durrett), NSF CAREER Award 2339729 (to Mahowald), the NSF AI Institute for Foundations of Machine Learning (IFML), the Sloan Foundation via a Sloan Research Fellowship, and a grant from Open Philanthropy.", + "bbox": [ + 169, + 133, + 826, + 218 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 171, + 237, + 287, + 252 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Marah Abdin, Sam Ade Jacobs, Ammar Ahmad Awan, Jyoti Aneja, Ahmed Awadallah, Hany Hassan Awadalla, Nguyen Bach, Amit Bahree, Arash Bakhtiari, Harkirat Singh Behl, Alon Benhaim, Misha Bilenko, Johan Bjorck, Sébastien Bubeck, Martin Cai, Caio C'esar Teodoro Mendes, Weizhu Chen, Vishrav Chaudhary, Parul Chopra, Allison Del Giorno, Gustavo de Rosa, Matthew Dixon, Ronen Eldan, Dan Iter, Abhishek Goswami, Suriya Gunasekar, Emman Haider, Junheng Hao, Russell J. Hewett, Jamie Huynh, Mojan Javaheripi, Xin Jin, Piero Kauffmann, Nikos Karampatziakis, Dongwoo Kim, Mahmoud Khademi, Lev Kurilenko, James R. Lee, Yin Tat Lee, Yuanzhi Li, Chen Liang, Weishung Liu, Eric Lin, Zeqi Lin, Piyush Madan, Arindam Mitra, Hardik Modi, Anh Nguyen, Brandon Norick, Barun Patra, Daniel Perez-Becker, Thomas Portet, Reid Pryzant, Heyang Qin, Marko Radmilac, Corby Rosset, Sambudha Roy, Olli Saarikivi, Amin Saied, Adil Salim, Michael Santacroce, Shital Shah, Ning Shang, Hiteshi Sharma, Xianmin Song, Olatunjri Ruwase, Xin Wang, Rachel Ward, Guanhua Wang, Philipp Witte, Michael Wyatt, Can Xu, Jiahang Xu, Sonali Yadav, Fan Yang, Ziyi Yang, Donghan Yu, Cheng-Yuan Zhang, Cyril Zhang, Jianwen Zhang, Li Lyna Zhang, Yi Zhang, Yunan Zhang, and Xiren Zhou. 
Phi-3 technical report: A highly capable language model locally on your phone. ArXiv, abs/2404.14219, 2024. URL https://api.semanticscholar.org/CorpusID:269293048.", "Anthropic. The Claude 3 Model Family: Opus, Sonnet, Haiku. a. URL https://www-cdn.anthropic.com/de8ba9b01c9ab7cbabf5c33b80b7bbc618857627/Model_Card_Claude_3.pdf.", "Anthropic. Claude 3.5 Sonnet Model Card Addendum. b. URL https://www-cdn.anthropic.com/fed9cc193a14b84131812372d8d5857f8f304c52/Model_Card_Claude_3_Addendum.pdf.", "Maciej Besta, Nils Blach, Ales Kubicek, Robert Gerstenberger, Michal Podstawski, Lukas Gianinazzi, Joanna Gajda, Tomasz Lehmann, Hubert Niewiadomski, Piotr Nyczyk, and Torsten Hoefler. Graph of thoughts: Solving elaborate problems with large language models. In AAAI Conference on Artificial Intelligence, 2023. URL https://api.semanticscholar.org/CorpusID:261030303.", "Yonatan Bisk, Rowan Zellers, Ronan Le Bras, Jianfeng Gao, and Yejin Choi. PIQA: Reasoning about physical commonsense in natural language. In AAAI Conference on Artificial Intelligence, 2019. URL https://api.semanticscholar.org/CorpusID:208290939.", "Leonard Bongard, Lena Held, and Ivan Habernal. The legal argument reasoning task in civil procedure. In Nikolaos Aletras, Ilias Chalkidis, Leslie Barrett, Catalina Goantă, and Daniel Preoțiuc-Pietro (eds.), Proceedings of the Natural Legal Language Processing Workshop 2022, pp. 194-207, Abu Dhabi, United Arab Emirates (Hybrid), December 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.nllp-1.17. URL https://aclanthology.org/2022.nllp-1.17.", "Chen Bowen, Rune Sætre, and Yusuke Miyao. A comprehensive evaluation of inductive reasoning capabilities and problem solving in large language models. In Yvette Graham and Matthew Purver (eds.), Findings of the Association for Computational Linguistics: EACL 2024, pp. 323-339, St. Julian's, Malta, March 2024. Association for Computational Linguistics. URL https://aclanthology.org/2024.findings-eacl.22.", "Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh," ], "bbox": [ 173, 260, 823, 924 ], "page_idx": 10 }, { "type": "header", "text": "Published as a conference paper at ICLR 2025", "bbox": [ 171, 32, 478, 47 ], "page_idx": 10 }, { "type": "page_number", "text": "11", "bbox": [ 490, 948, 506, 959 ], "page_idx": 10 }, { "type": "list", "sub_type": "ref_text", "list_items": [ "Daniel Ziegler, Jeffrey Wu, Clemens Winter, Chris Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. Language models are few-shot learners. In H. Larochelle, M. Ranzato, R. Hadsell, M.F. Balcan, and H. Lin (eds.), Advances in Neural Information Processing Systems, volume 33, pp. 1877-1901. Curran Associates, Inc., 2020. URL https://proceedings.neurips.cc/paper_files/paper/2020/file/1457c0d6bcbd4967418bfb8ac142f64a-Paper.pdf.", "Hyungjoo Chae, Yeonghyeon Kim, Seungone Kim, Kai Tzu-iunn Ong, Beong-woo Kwak, Moohyeon Kim, Seonghwan Kim, Taeyoon Kwon, Jiwan Chung, Youngjae Yu, et al. Language Models as Compilers: Simulating Pseudocode Execution Improves Algorithmic Reasoning in Language Models.
arXiv preprint arXiv:2404.02575, 2024.", + "Chih Yao Chen, Swarnadeep Saha, and Mohit Bansal. ReConcile: Round-table conference improves reasoning via consensus among diverse LLMs, 2024. URL https://openreview.net/forum?id=Yo16nUVIJD.", + "Wenhu Chen, Xueguang Ma, Xinyi Wang, and William W. Cohen. Program of thoughts prompting: Disentangling computation from reasoning for numerical reasoning tasks. Transactions on Machine Learning Research, 2023. ISSN 2835-8856. URL https://openreview.net/forum?id=YfZ4ZPt8zd.", + "Peter Clark, Isaac Cowhey, Oren Etzioni, Tushar Khot, Ashish Sabharwal, Carissa Schoenick, and Oyvind Tafjord. Think you have Solved Question Answering? Try ARC, the AI2 Reasoning Challenge. arXiv:1803.05457v1, 2018.", + "Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, Christopher Hesse, and John Schulman. Training verifiers to solve math word problems. ArXiv, abs/2110.14168, 2021. URL https://api.semanticscholar.org/CorpusID:239998651.", + "Marie-Catherine de Marneffe, Mandy Simons, and Judith Tonhauser. The CommitmentBank: Investigating projection in naturally occurring discourse. In Proceedings of Sinn und Bedeutung 23, 2019.", + "Leonardo De Moura and Nikolaj Bjørner. Z3: An efficient SMT solver. In Proceedings of the Theory and Practice of Software, 14th International Conference on Tools and Algorithms for the Construction and Analysis of Systems, TACAS'08/ETAPS'08, pp. 337-340, Berlin, Heidelberg, 2008. Springer-Verlag. ISBN 3540787992.", + "Yuntian Deng, Yejin Choi, and Stuart Shieber. From Explicit CoT to Implicit CoT: Learning to Internalize CoT Step by Step. arXiv preprint arXiv:2405.14838, 2024.", + "Yilun Du, Shuang Li, Antonio Torralba, Joshua B Tenenbaum, and Igor Mordatch. Improving factuality and reasoning in language models through multiagent debate. arXiv preprint arXiv:2305.14325, 2023.", + "Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. The Llama 3 Herd of Models. arXiv preprint arXiv:2407.21783, 2024.", + "Yann Dubois, Xuechen Li, Rohan Taori, Tianyi Zhang, Ishaan Gulrajani, Jimmy Ba, Carlos Guestrin, Percy Liang, and Tatsunori B. Hashimoto. AlpacaFarm: A Simulation Framework for Methods that Learn from Human Feedback, 2023.", + "Nouha Dziri, Ximing Lu, Melanie Sclar, Xiang Lorraine Li, Liwei Jiang, Bill Yuchen Lin, Sean Welleck, Peter West, Chandra Bhagavatula, Ronan Le Bras, Jena D. Hwang, Soumya Sanyal, Xiang Ren, Allyson Ettinger, Zaid Harchaoui, and Yejin Choi. Faith and fate: Limits of transformers on compositionality. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. URL https://openreview.net/forum?id=Fkckkr3ya8.", + "Yao Fu, Hao Peng, Ashish Sabharwal, Peter Clark, and Tushar Khot. Complexity-based prompting for multi-step reasoning. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=yf1icZHC-19." + ], + "bbox": [ + 171, + 103, + 823, + 924 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Luyu Gao, Aman Madaan, Shuyan Zhou, Uri Alon, Pengfei Liu, Yiming Yang, Jamie Callan, and Graham Neubig. PAL: program-aided language models. In Proceedings of the 40th International Conference on Machine Learning, ICML'23. JMLR.org, 2023.", + "Mor Geva, Daniel Khashabi, Elad Segal, Tushar Khot, Dan Roth, and Jonathan Berant. Did Aristotle use a laptop? A question answering benchmark with implicit reasoning strategies. Transactions of the Association for Computational Linguistics, 9:346-361, February 2021. ISSN 2307-387X. doi: 10.1162/tacl_a_00370.", + "L. Guan, Yifan Zhou, Denis Liu, Yantian Zha, Heni Ben Amor, and Subbarao Kambhampati. \"Task Success\" is not Enough: Investigating the Use of Video-Language Models as Behavior Critics for Catching Undesirable Agent Behaviors. ArXiv, abs/2402.04210, 2024. URL https://api.semanticscholar.org/CorpusID:267500077.", + "Atharva Gundawar, Mudit Verma, L. Guan, Karthik Valmeekam, Siddhant Bhambri, and Subbarao Kambhampati. Robust Planning with LLM-Modulo Framework: Case Study in Travel Planning. ArXiv, abs/2405.20625, 2024. URL https://api.semanticscholar.org/CorpusID:270199944.", + "Simeng Han, Hailey Schoelkopf, Yilun Zhao, Zhenting Qi, Martin Riddell, Luke Benson, Lucy Sun, Ekaterina Zubova, Yujie Qiao, Matthew Burtell, David Peng, Jonathan Fan, Yixin Liu, Brian Wong, Malcolm Sailor, Ansong Ni, Linyong Nan, Jungo Kasai, Tao Yu, Rui Zhang, Shafiq Joty, Alexander R. Fabbri, Wojciech Kryscinski, Xi Victoria Lin, Caiming Xiong, and Dragomir Radev. FOLIO: Natural Language Reasoning with First-Order Logic. arXiv preprint arXiv:2209.00840, 2022. URL https://arxiv.org/abs/2209.00840.", + "Shibo Hao, Yi Gu, Haodi Ma, Joshua Hong, Zhen Wang, Daisy Wang, and Zhiting Hu. Reasoning with language model is planning with world model. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pp. 8154-8173, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.507. URL https://aclanthology.org/2023.emnlp-main.507.", + "Dan Hendrycks, Collin Burns, Steven Basart, Andy Zou, Mantas Mazeika, Dawn Song, and Jacob Steinhardt. Measuring massive multitask language understanding. Proceedings of the International Conference on Learning Representations (ICLR), 2021a.", + "Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring Mathematical Problem Solving With the MATH Dataset. NeurIPS, 2021b.", + "Hanxu Hu, Hongyuan Lu, Huajian Zhang, Wai Lam, and Yue Zhang. Chain-of-symbol prompting elicits planning in large language models, 2023.", + "Wenyue Hua, Kaijie Zhu, Lingyao Li, Lizhou Fan, Shuhang Lin, Mingyu Jin, Haochen Xue, Zelong Li, Jindong Wang, and Yongfeng Zhang. Disentangling Logic: The Role of Context in Large Language Model Reasoning Capabilities. ArXiv, abs/2406.02787, 2024. URL https://api.semanticscholar.org/CorpusID:270258104.", + "Wenlong Huang, Pieter Abbeel, Deepak Pathak, and Igor Mordatch.
Language models as zero-shot planners: Extracting actionable knowledge for embodied agents. arXiv preprint arXiv:2201.07207, 2022.", + "Jinghan Jia, Abi Komma, Timothy Leffel, Xujun Peng, Ajay Nagesh, Tamer Soliman, Aram Galstyan, and Anoop Kumar. Leveraging LLMs for dialogue quality measurement. In Yi Yang, Aida Davani, Avi Sil, and Anoop Kumar (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 6: Industry Track), pp. 359-367, Mexico City, Mexico, June 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.naacl-industry.30. URL https://aclanthology.org/2024.naacl-industry.30." + ], + "bbox": [ + 171, + 102, + 825, + 924 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Albert Qiaochu Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de Las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lucile Saulnier, Lélio Renard Lavaud, Marie-Anne Lachaux, Pierre Stock, Teven Le Scao, Thibaut Lavril, Thomas Wang, Timothée Lacroix, and William El Sayed. Mistral 7b. ArXiv, abs/2310.06825, 2023. URL https://api.semanticscholar.org/CorpusID:263830494.", + "Dongwei Jiang, Marcio Fonseca, and Shay B. Cohen. LeanReasoner: Boosting complex logical reasoning with Lean. In Kevin Duh, Helena Gómez-Adorno, and Steven Bethard (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), NAACL 2024, Mexico City, Mexico, June 16-21, 2024, pp. 7497-7510. Association for Computational Linguistics, 2024. doi: 10.18653/V1/2024.NAACL-LONG.416. URL https://doi.org/10.18653/v1/2024.naacl-long.416.", + "Brihi Joshi, Ziyi Liu, Sahana Ramnath, Aaron Chan, Zhewei Tong, Shaoliang Nie, Qifan Wang, Yejin Choi, and Xiang Ren. Are Machine Rationales (Not) Useful to Humans? Measuring and Improving Human Utility of Free-text Rationales. ArXiv, abs/2305.07095, 2023. URL https://api.semanticscholar.org/CorpusID:258676376.", + "Subbarao Kambhampati. Can large language models reason and plan? Annals of the New York Academy of Sciences, 1534:15-18, 2024. URL https://api.semanticscholar.org/CorpusID:268249961.", + "Subbarao Kambhampati, Karthik Valmeekam, Lin Guan, Mudit Verma, Kaya Stechly, Siddhant Bhambri, Lucas Paul Saldyt, and Anil B Murthy. Position: LLMs can't plan, but can help planning in LLM-modulo frameworks. In Ruslan Salakhutdinov, Zico Kolter, Katherine Heller, Adrian Weller, Nuria Oliver, Jonathan Scarlett, and Felix Berkenkamp (eds.), Proceedings of the 41st International Conference on Machine Learning, volume 235 of Proceedings of Machine Learning Research, pp. 22895-22907. PMLR, 21-27 Jul 2024a. URL https://proceedings.mlr.press/v235/kambhampati24a.html.", + "Subbarao Kambhampati, Karthik Valmeekam, Lin Guan, Mudit Verma, Kaya Stechly, Siddhant Bhambri, Lucas Paul Saldyt, and Anil B Murthy. Position: LLMs can't plan, but can help planning in LLM-modulo frameworks. In Forty-first International Conference on Machine Learning, 2024b.
URL https://openreview.net/forum?id=Th8JPEmH4z.", + "Liwei Kang, Zirui Zhao, David Hsu, and Wee Sun Lee. On the empirical complexity of reasoning and planning in LLMs. arXiv preprint arXiv:2404.11041, 2024.", + "Marzena Karpinska, Katherine Thai, Kyle Lo, Tanya Goyal, and Mohit Iyyer. One thousand and one pairs: A \"novel\" challenge for long-context language models. ArXiv, abs/2406.16264, 2024. URL https://api.semanticscholar.org/CorpusID:270703648.", + "Tushar Khot, H. Trivedi, Matthew Finlayson, Yao Fu, Kyle Richardson, Peter Clark, and Ashish Sabharwal. Decomposed prompting: A modular approach for solving complex tasks. In The International Conference on Learning Representations, volume abs/2210.02406, 2023. URL https://api.semanticscholar.org/CorpusID:252715485.", + "Seungone Kim, Juyoung Suk, Ji Yong Cho, Shayne Longpre, Chaeun Kim, Dongkeun Yoon, Guijin Son, Yejin Cho, Sheikh Shafayat, Jinheon Baek, et al. The BiGGen Bench: A Principled Benchmark for Fine-grained Evaluation of Language Models with Language Models. arXiv preprint arXiv:2406.05761, 2024.", + "Takeshi Kojima, Shixiang Shane Gu, Machel Reid, Yutaka Matsuo, and Yusuke Iwasawa. Large language models are zero-shot reasoners. In Proceedings of the 36th International Conference on Neural Information Processing Systems, Red Hook, NY, USA, 2022. Curran Associates Inc. ISBN 9781713871088.", + "Woosuk Kwon, Zhuohan Li, Siyuan Zhuang, Ying Sheng, Lianmin Zheng, Cody Hao Yu, Joseph E. Gonzalez, Hao Zhang, and Ion Stoica. Efficient memory management for large language model serving with pagedattention. In Proceedings of the ACM SIGOPS 29th Symposium on Operating Systems Principles, 2023." + ], + "bbox": [ + 171, + 102, + 826, + 924 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Brenden M. Lake and Marco Baroni. Generalization without systematicity: On the compositional skills of sequence-to-sequence recurrent networks. In Jennifer G. Dy and Andreas Krause (eds.), Proceedings of the 35th International Conference on Machine Learning, ICML 2018, Stockholm, Sweden, July 10-15, 2018, volume 80 of Proceedings of Machine Learning Research, pp. 2879-2888. PMLR, 2018. URL http://proceedings.mlr.press/v80/lake18a.html.", + "Tamera Lanham, Anna Chen, Ansh Radhakrishnan, Benoit Steiner, Carson Denison, Danny Hernandez, Dustin Li, Esin Durmus, Evan Hubinger, Jackson Kernion, et al. Measuring faithfulness in chain-of-thought reasoning. arXiv preprint arXiv:2307.13702, 2023.", + "Fangyu Lei, Qian Liu, Yiming Huang, Shizhu He, Jun Zhao, and Kang Liu. S3Eval: A synthetic, scalable, systematic evaluation suite for large language models. In Kevin Duh, Helena Gomez, and Steven Bethard (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 1259-1286, Mexico City, Mexico, June 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.naacl-long.69. URL https://aclanthology.org/2024.naacl-long.69.", + "Tianwen Li, Zhexiong Liu, Lindsay Matsumura, Elaine Wang, Diane Litman, and Richard Correnti. Using large language models to assess young students' writing revisions.
In Ekaterina Kochmar, Marie Bexte, Jill Burstein, Andrea Horbach, Ronja Laarmann-Quante, Anaïs Tack, Victoria Yaneva, and Zheng Yuan (eds.), Proceedings of the 19th Workshop on Innovative Use of NLP for Building Educational Applications (BEA 2024), pp. 365–380, Mexico City, Mexico, June 2024. Association for Computational Linguistics. URL https://aclanthology.org/2024.bea-1.30.", + "Hunter Lightman, Vineet Kosaraju, Yuri Burda, Harrison Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=v8L0pN6E0i.", + "B. Liu, Yuqian Jiang, Xiaohan Zhang, Qian Liu, Shiqi Zhang, Joydeep Biswas, and Peter Stone. LLM+P: Empowering large language models with optimal planning proficiency. ArXiv, abs/2304.11477, 2023a. URL https://api.semanticscholar.org/CorpusID:258298051.", + "Bingbin Liu, Jordan T. Ash, Surbhi Goel, Akshay Krishnamurthy, and Cyril Zhang. Transformers learn shortcuts to automata. In The Eleventh International Conference on Learning Representations, 2023b. URL https://openreview.net/forum?id=De4FYqjFueZ.", + "Pan Lu, Swaroop Mishra, Tanglin Xia, Liang Qiu, Kai-Wei Chang, Song-Chun Zhu, Oyvind Tafjord, Peter Clark, and Ashwin Kalyan. Learn to explain: Multimodal reasoning via thought chains for science question answering. In Sanmi Koyejo, S. Mohamed, A. Agarwal, Danielle Belgrave, K. Cho, and A. Oh (eds.), Advances in Neural Information Processing Systems 35: Annual Conference on Neural Information Processing Systems 2022, NeurIPS 2022, New Orleans, LA, USA, November 28 - December 9, 2022, 2022. URL http://papers.nips.cc/paper_files/paper/2022/hash/11332b6b6cf4485b84afadb1352d3a9a-Abstract-Conference.html.", + "Yingwei Ma, Yue Liu, Yue Yu, Yuanliang Zhang, Yu Jiang, Changjian Wang, and Shanshan Li. At Which Training Stage Does Code Data Help LLMs Reasoning? In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net, 2024. URL https://openreview.net/forum?id=KIPJKST4gw.", + "Aman Madaan and Amir Yazdanbakhsh. Text and patterns: For effective chain of thought, it takes two to tango. ArXiv, abs/2209.07686, 2022. URL https://api.semanticscholar.org/CorpusID:252355328.", + "William Merrill and Ashish Sabharwal. The expressive power of transformers with chain of thought. In The International Conference on Learning Representations, volume abs/2310.07923, 2024. URL https://api.semanticscholar.org/CorpusID:263909434." + ], + "bbox": [ + 171, + 103, + 825, + 924 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Maxwell Nye, Anders Johan Andreassen, Guy Gur-Ari, Henryk Michalewski, Jacob Austin, David Bieber, David Dohan, Aitor Lewkowycz, Maarten Bosma, David Luan, Charles Sutton, and Augustus Odena. Show your work: Scratchpads for intermediate computation with language models, 2022. URL https://openreview.net/forum?id=iedYJm92o0a.", + "Theo X. Olausson, Jeevana Priya Inala, Chenglong Wang, Jianfeng Gao, and Armando Solar-Lezama. Is self-repair a silver bullet for code generation? In The Twelfth International Conference on Learning Representations, 2024.
URL https://openreview.net/forum?id=y0GJXRungR.", + "OpenAI. GPT-4 Technical Report. ArXiv, abs/2303.08774, 2023. URL https://api.semanticscholar.org/CorpusID:257532815.", + "Liangming Pan, Alon Albalak, Xinyi Wang, and William Wang. Logic-LM: Empowering large language models with symbolic solvers for faithful logical reasoning. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Findings of the Association for Computational Linguistics: EMNLP 2023, pp. 3806-3824, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.findings-emnlp.248. URL https://aclanthology.org/2023.findings-emnlp.248.", + "Xiangyu Peng, Siyan Li, Sarah Wiegreffe, and Mark Riedl. Inferring the reader: Guiding automated story generation with commonsense reasoning. In Yoav Goldberg, Zornitsa Kozareva, and Yue Zhang (eds.), Findings of the Association for Computational Linguistics: EMNLP 2022, pp. 7008-7029, Abu Dhabi, United Arab Emirates, December 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.findings-emnlp.520. URL https://aclanthology.org/2022.findings-emnlp.520.", + "Zhenting Qi, Mingyuan Ma, Jiahang Xu, Li Lyna Zhang, Fan Yang, and Mao Yang. Mutual Reasoning Makes Smaller LLMs Stronger Problem-Solvers. arXiv preprint arXiv:2408.06195, 2024.", + "Xin Quan, Marco Valentino, Louise Dennis, and Andre Freitas. Enhancing ethical explanations of large language models through iterative symbolic refinement. In Yvette Graham and Matthew Purver (eds.), Proceedings of the 18th Conference of the European Chapter of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 1-22, St. Julian's, Malta, March 2024. Association for Computational Linguistics. URL https://aclanthology.org/2024.eacl-long.1.", + "Machel Reid et al. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. ArXiv, abs/2403.05530, 2024. URL https://api.semanticscholar.org/CorpusID:268297180.", + "David Rein, Betty Li Hou, Asa Cooper Stickland, Jackson Petty, Richard Yuanzhe Pang, Julien Dirani, Julian Michael, and Samuel R. Bowman. GPQA: A Graduate-Level Google-Proof Q&A Benchmark, 2023.", + "Gemma Team, Morgane Riviere, et al. Gemma 2: Improving open language models at a practical size. 2024. URL https://api.semanticscholar.org/CorpusID:270843326.", + "Keisuke Sakaguchi, Ronan Le Bras, Chandra Bhagavatula, and Yejin Choi. WinoGrande: an adversarial winograd schema challenge at scale. Commun. ACM, 64(9):99-106, aug 2021. ISSN 0001-0782. doi: 10.1145/3474381. URL https://doi.org/10.1145/3474381.", + "Maarten Sap, Hannah Rashkin, Derek Chen, Ronan Le Bras, and Yejin Choi. Social IQa: Commonsense reasoning about social interactions. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pp. 4463-4473, Hong Kong, China, November 2019. Association for Computational Linguistics. doi: 10.18653/v1/D19-1454. URL https://aclanthology.org/D19-1454.", + "Abulhair Saparov and He He. Language models are greedy reasoners: A systematic formal analysis of chain-of-thought. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=qFVVBzXxR2V." + ], + "bbox": [ + 171, + 102, + 826, + 924 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 490, + 948, + 508, + 959 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Zayne Rea Sprague, Xi Ye, Kaj Bostrom, Swarat Chaudhuri, and Greg Durrett. MuSR: Testing the limits of chain-of-thought with multistep soft reasoning. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=jenyYQzue1.", + "Aarohi Srivastava, Abhinav Rastogi, Abhishek Rao, Abu Awal Md Shoeb, Abubakar Abid, Adam Fisch, Adam R Brown, Adam Santoro, Aditya Gupta, Adrià Garriga-Alonso, et al. Beyond the imitation game: Quantifying and extrapolating the capabilities of language models. arXiv preprint arXiv:2206.04615, 2022.", + "Maja Stahl, Leon Biermann, Andreas Nehring, and Henning Wachsmuth. Exploring LLM prompting strategies for joint essay scoring and feedback generation. In Ekaterina Kochmar, Marie Bexte, Jill Burstein, Andrea Horbach, Ronja Laarmann-Quante, Anaïs Tack, Victoria Yaneva, and Zheng Yuan (eds.), Proceedings of the 19th Workshop on Innovative Use of NLP for Building Educational Applications (BEA 2024), pp. 283–298, Mexico City, Mexico, June 2024. Association for Computational Linguistics. URL https://aclanthology.org/2024.bea-1.23.", + "Kaya Stechly, Karthik Valmeekam, and Subbarao Kambhampati. On the self-verification limitations of large language models on reasoning and planning tasks. ArXiv, abs/2402.08115, 2024a. URL https://api.semanticscholar.org/CorpusID:267637077.", + "Kaya Stechly, Karthik Valmeekam, and Subbarao Kambhampati. Chain of thoughtlessness? An analysis of CoT in planning. 2024b. URL https://api.semanticscholar.org/CorpusID:269626390.", + "Simeng Sun, Yang Liu, Shuohang Wang, Dan Iter, Chenguang Zhu, and Mohit Iyyer. PEARL: Prompting large language models to plan and execute actions over long documents. In Yvette Graham and Matthew Purver (eds.), Proceedings of the 18th Conference of the European Chapter of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 469-486, St. Julian's, Malta, March 2024. Association for Computational Linguistics. URL https://aclanthology.org/2024.eacl-long.29.", + "Mirac Suzgun, Nathan Scales, Nathanael Schärli, Sebastian Gehrmann, Yi Tay, Hyung Won Chung, Aakanksha Chowdhery, Quoc Le, Ed Chi, Denny Zhou, and Jason Wei. Challenging BIG-bench tasks and whether chain-of-thought can solve them. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Findings of the Association for Computational Linguistics: ACL 2023, pp. 13003-13051, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.findings-acl.824. URL https://aclanthology.org/2023.findings-acl.824.", + "Alon Talmor, Jonathan Herzig, Nicholas Lourie, and Jonathan Berant. CommonsenseQA: A question answering challenge targeting commonsense knowledge. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pp. 4149-4158, Minneapolis, Minnesota, June 2019. Association for Computational Linguistics. doi: 10.18653/v1/N19-1421. URL https://aclanthology.org/N19-1421.", + "Hugo Touvron, Louis Martin, Kevin R.
Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, Daniel M. Bikel, Lukas Blecher, Cristian Cantón Ferrer, Moya Chen, Guillem Cucurull, David Esiobu, Jude Fernandes, Jeremy Fu, Wenyin Fu, Brian Fuller, Cynthia Gao, Vedanuj Goswami, Naman Goyal, Anthony S. Hartshorn, Saghar Hosseini, Rui Hou, Hakan Inan, Marcin Kardas, Viktor Kerkez, Madian Khabsa, Isabel M. Kloumann, A. V. Korenev, Punit Singh Koura, Marie-Anne Lachaux, Thibaut Lavril, Jenya Lee, Diana Liskovich, Yinghai Lu, Yuning Mao, Xavier Martinet, Todor Mihaylov, Pushkar Mishra, Igor Molybog, Yixin Nie, Andrew Poulton, Jeremy Reizenstein, Rashi Rungta, Kalyan Saladi, Alan Schelten, Ruan Silva, Eric Michael Smith, R. Subramanian, Xia Tan, Binh Tang, Ross Taylor, Adina Williams, Jian Xiang Kuan, Puxin Xu, Zhengxu Yan, Iliyan Zarov, Yuchen Zhang, Angela Fan, Melanie Kambadur, Sharan Narang, Aurelien Rodriguez, Robert Stojnic, Sergey Edunov, and Thomas Scialom. Llama 2: Open foundation and fine-tuned chat models. ArXiv, abs/2307.09288, 2023. URL https://api.semanticscholar.org/CorpusID:259950998." + ], + "bbox": [ + 171, + 102, + 825, + 922 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Harsh Trivedi, Niranjan Balasubramanian, Tushar Khot, and Ashish Sabharwal. MuSiQue: Multi-hop questions via single-hop question composition. Transactions of the Association for Computational Linguistics, 2022.", + "Karthik Valmeekam, Matthew Marquez, Sarath Sreedharan, and Subbarao Kambhampati. On the planning abilities of large language models - a critical investigation. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. URL https://openreview.net/forum?id=X6dEqXIsEW.", + "Karthik Valmeekam, Matthew Marquez, Alberto Olmo, Sarath Sreedharan, and Subbarao Kambhampati. PlanBench: An extensible benchmark for evaluating large language models on planning and reasoning about change. In Proceedings of the 37th International Conference on Neural Information Processing Systems, NIPS '23, Red Hook, NY, USA, 2024. Curran Associates Inc.", + "Mudit Verma, Siddhant Bhambri, and Subbarao Kambhampati. Theory of mind abilities of large language models in human-robot interaction: An illusion? Companion of the 2024 ACM/IEEE International Conference on Human-Robot Interaction, 2024. URL https://api.semanticscholar.org/CorpusID:266902529.", + "Boshi Wang, Sewon Min, Xiang Deng, Jiaming Shen, You Wu, Luke Zettlemoyer, and Huan Sun. Towards understanding chain-of-thought prompting: An empirical study of what matters. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 2717-2739, Toronto, Canada, July 2023a. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-long.153. URL https://aclanthology.org/2023.acl-long.153.", + "Lei Wang, Wanyu Xu, Yihuai Lan, Zhiqiang Hu, Yunshi Lan, Roy Ka-Wei Lee, and Ee-Peng Lim. Plan-and-solve prompting: Improving zero-shot chain-of-thought reasoning by large language models. In Annual Meeting of the Association for Computational Linguistics, 2023b.
URL https://api.semanticscholar.org/CorpusID:258558102.", + "Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc V Le, Ed H. Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. Self-consistency improves chain of thought reasoning in language models. In The Eleventh International Conference on Learning Representations, 2023c. URL https://openreview.net/forum?id=1PL1NIMMrw.", + "Yubo Wang, Xueguang Ma, Ge Zhang, Yuansheng Ni, Abhranil Chandra, Shiguang Guo, Weiming Ren, Aaran Arulraj, Xuan He, Ziyan Jiang, Tianle Li, Max Ku, Kai Wang, Alex Zhuang, Rongqi Fan, Xiang Yue, and Wenhu Chen. MMLU-Pro: A More Robust and Challenging Multi-Task Language Understanding Benchmark, 2024.", + "Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. Chain-of-thought prompting elicits reasoning in large language models. Advances in Neural Information Processing Systems, 35:24824-24837, 2022.", + "Lionel Wong, Gabriel Grand, Alexander K. Lew, Noah D. Goodman, Vikash K. Mansinghka, Jacob Andreas, and Joshua B. Tenenbaum. From word models to world models: Translating from natural language to the probabilistic language of thought. ArXiv, abs/2306.12672, 2023. URL https://api.semanticscholar.org/CorpusID:259224900.", + "Jian Xie, Kai Zhang, Jiangjie Chen, Tinghui Zhu, Renze Lou, Yuandong Tian, Yanghua Xiao, and Yu Su. TravelPlanner: A Benchmark for Real-World Planning with Language Agents. ArXiv, abs/2402.01622, 2024. URL https://api.semanticscholar.org/CorpusID:267406800.", + "Miao Xiong, Zhiyuan Hu, Xinyang Lu, Yifei Li, Jie Fu, Junxian He, and Bryan Hooi. Can LLMs Express Their Uncertainty? An Empirical Evaluation of Confidence Elicitation in LLMs. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net, 2024. URL https://openreview.net/forum?id=gjeQKFxFpZ.", + "Jundong Xu, Hao Fei, Liangming Pan, Qian Liu, Mong-Li Lee, and Wynne Hsu. Faithful logical reasoning via symbolic chain-of-thought. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar (eds.), Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 13326-13365, Bangkok, Thailand, August 2024a. Association for Computational Linguistics. URL https://aclanthology.org/2024.acl-long.720." + ], + "bbox": [ + 171, + 102, + 825, + 925 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 17 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Xiaohan Xu, Chongyang Tao, Tao Shen, Can Xu, Hongbo Xu, Guodong Long, and Jian-Guang Lou. Re-reading improves reasoning in language models, 2024b. URL https://openreview.net/forum?id=3jXCF5dNpC.", + "Chengrun Yang, Xuezhi Wang, Yifeng Lu, Hanxiao Liu, Quoc V Le, Denny Zhou, and Xinyun Chen. Large language models as optimizers. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=Bb4VGOWELI.", + "Shunyu Yao, Dian Yu, Jeffrey Zhao, Izhak Shafran, Thomas L. Griffiths, Yuan Cao, and Karthik Narasimhan. Tree of Thoughts: Deliberate problem solving with large language models, 2023.", + "Xi Ye, Qiaochu Chen, Isil Dillig, and Greg Durrett. Satisfiability-aided language models using declarative prompting.
In Advances in Neural Information Processing Systems, 2023.", + "Hugh Zhang, Jeff Da, Dean Lee, Vaughn Robinson, Catherine Wu, Will Song, Tiffany Zhao, Pranav Raja, Dylan Slack, Qin Lyu, Sean Hendryx, Russell Kaplan, Michele Lunati, and Summer Yue. A careful examination of large language model performance on grade school arithmetic. ArXiv, abs/2405.00332, 2024. URL https://api.semanticscholar.org/CorpusID:269484687.", + "Huaixiu Steven Zheng, Swaroop Mishra, Xinyun Chen, Heng-Tze Cheng, Ed H. Chi, Quoc V Le, and Denny Zhou. Take a step back: Evoking reasoning via abstraction in large language models. In The Twelfth International Conference on Learning Representations, 2024a. URL https://openreview.net/forum?id=3bq3jsvcQ1.", + "Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zi Lin, Zhuohan Li, Dacheng Li, Eric P. Xing, Hao Zhang, Joseph E. Gonzalez, and Ion Stoica. Judging LLM-as-a-judge with MT-bench and Chatbot Arena. In Proceedings of the 37th International Conference on Neural Information Processing Systems, NIPS '23, Red Hook, NY, USA, 2024b. Curran Associates Inc.", + "Wanjun Zhong, Ruixiang Cui, Yiduo Guo, Yaobo Liang, Shuai Lu, Yanlin Wang, Amin Saied, Weizhu Chen, and Nan Duan. AGIEval: A Human-Centric Benchmark for Evaluating Foundation Models, 2023.", + "Ben Zhou, Kyle Richardson, Xiaodong Yu, and Dan Roth. Learning to decompose: Hypothetical question decomposition based on comparable texts. In Yoav Goldberg, Zornitsa Kozareva, and Yue Zhang (eds.), Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pp. 2223-2235, Abu Dhabi, United Arab Emirates, December 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.emnlp-main.142. URL https://aclanthology.org/2022.emnlp-main.142.", + "Ben Zhou, Hongming Zhang, Sihao Chen, Dian Yu, Hongwei Wang, Baolin Peng, Dan Roth, and Dong Yu. Conceptual and unbiased reasoning in language models. ArXiv, abs/2404.00205, 2024. URL https://api.semanticscholar.org/CorpusID:268820105.", + "Denny Zhou, Nathanael Schärli, Le Hou, Jason Wei, Nathan Scales, Xuezhi Wang, Dale Schuurmans, Claire Cui, Olivier Bousquet, Quoc V Le, and Ed H. Chi. Least-to-most prompting enables complex reasoning in large language models. In The Eleventh International Conference on Learning Representations, 2023a. URL https://openreview.net/forum?id=WZH7099tgtM.", + "Yongchao Zhou, Andrei Ioan Muresanu, Ziwen Han, Keiran Paster, Silviu Pitis, Harris Chan, and Jimmy Ba. Large Language Models are Human-Level Prompt Engineers. In The Eleventh International Conference on Learning Representations, 2023b. URL https://openreview.net/forum?id=92gvk82DE-." + ], + "bbox": [ + 171, + 102, + 825, + 810 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "A META-ANALYSIS EXPANDED DETAILS ON CRITERIA AND PROCESS", + "text_level": 1, + "bbox": [ + 171, + 102, + 767, + 118 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Automatic Selection and Paper Filtering We investigate all papers from ICLR 2024, a representative ML venue, and two representative NLP venues, EACL 2024 and NAACL 2024 (including Findings and Workshop papers).
We filtered all 4,642 papers (2,259 from ICLR 2024 and 2,382 from the two ACL-affiliated conferences) for those with at least two occurrences of \"CoT\", \"chain-of-thought\", or \"chain of thought\", resulting in 516 papers. There are conceivably papers using CoT called by another name (e.g., Scratchpads), but we believe these 516 give a representative sample appropriate for systematic analysis.", + "bbox": [ + 169, + 133, + 826, + 233 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Manual Paper Filtering and Results Extraction We then filter down to papers that perform a comparison of CoT prompting vs. direct prompting, whether or not this is core to the paper's research question. We manually filtered the 516 papers in question and extracted the key results from those that remained. We excluded multimodal models, CoT-fine-tuned models, any experiments where the \"CoT\" method involves multiple forward passes (e.g., self-consistency (Wang et al., 2023c) and tree-of-thought (Yao et al., 2023)),5 and systems that augment LLMs with external tools (discussed more in Section 5).", + "bbox": [ + 169, + 250, + 826, + 347 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "For each paper passing through these criteria, we manually extracted the results from key tables comparing CoT and direct answer prompts. We only include results where the CoT and direct prompts are run on the same model and same dataset while being on a scale of 0 to 100 (excluding Likert scale evaluations, for example) for a more direct comparison. When papers include various CoT or direct answer prompts (including zero/few-shot variants), we always take the best-performing prompt for both. We focus on key test results where applicable, excluding dev sets if they are reported alongside test and also excluding numbers from ablations or nonstandard subsets of datasets.", + "bbox": [ + 169, + 354, + 826, + 450 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "This resulted in a total of 1,218 experimental comparisons across 110 papers (35 from ICLR and 75 from NAACL and EACL) covering 264 datasets. Details and more information will be available in our GitHub Repo.", + "bbox": [ + 169, + 458, + 826, + 501 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Categorization Given the large number of tasks and datasets being compared, we grouped each task into a set of 14 categories. These categories were determined based on the description (and possibly examples) of the task, not taking into account system performance. These categories abstract over traditional NLP task classifications (e.g., NER, reading comprehension) and take into account both the task format and the kinds of reasoning involved. Definitions for several categories are shown in Table 1 and the full description is given in Appendix B.", + "bbox": [ + 169, + 517, + 826, + 602 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "B QUANTITATIVE META-ANALYSIS", + "text_level": 1, + "bbox": [ + 171, + 625, + 485, + 640 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "See the full list of categories and their descriptions that we used for the meta-analysis in Table 2.", + "bbox": [ + 169, + 656, + 805, + 671 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "C EXPANDED EXPERIMENTAL DETAILS", + "text_level": 1, + "bbox": [ + 171, + 691, + 517, + 709 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "A full list of the datasets can be found in Table 4. Each model can be seen in Table 5. 
We use one answer parser for all datasets of the same answer response format (one for multiple choice, short answer, etc.); however, some datasets require special handling and have edge cases that we handle separately from the rest of the datasets. Similarly, for each model, we use the exact same prompt across them, except when closed source models require different prompts because they do not allow for partial completions (i.e., when we cannot put \"let's think step by step\" to warm-start the assistant's response). All prompts are given in our Huggingface repo, including the model output and what our answer parser extracted as the answer.", + "bbox": [ + 169, + 724, + 826, + 837 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Experiments were conducted either by invoking APIs or by running open-source models on our own hardware, mostly on a machine with 8 A40s or 4 Quadro RTX 8000s. All locally hosted models were", + "bbox": [ + 169, + 843, + 825, + 872 + ], + "page_idx": 19 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 19 + }, + { + "type": "page_footnote", + "text": "5These systems use more compute than direct answer, and there is not a clear comparison to be made here. Moreover, our anecdotal coverage of these methods shows that they are most used for math, coding, and logic settings, for which we already have high representation among reported CoT methods.", + "bbox": [ + 169, + 883, + 823, + 925 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 946, + 509, + 960 + ], + "page_idx": 19 + }, + { + "type": "table", + "img_path": "images/481a836f0c5229e7c25ba7aebb1c230dc39c76544624658815990853b3c80d27.jpg", + "table_caption": [ + "Table 2: Categories and their descriptions for the meta-analysis." + ], + "table_footnote": [], + "table_body": "
Category | Description
Symbolic and algorithmic | Tasks involving symbol manipulation which can be solved by executing a program. This includes entity tracking datasets (e.g., SCONE, Coin Flip) and algorithmic tasks (e.g., BBH word sorting or finding shortest paths in a graph).
Math | Tasks requiring mathematical reasoning, from grade-school math to advanced mathematics, including physics questions.
Logical reasoning | Tasks designed to test for logical reasoning, whether deductive (Saparov & He, 2023, PrOntoQA), inductive (Bowen et al., 2024) or analogical (Ma et al., 2024) reasoning, including syllogisms and logical puzzles.
Commonsense reasoning | Datasets designed to test for commonsense knowledge and reasoning, i.e., world knowledge that most people would have, rather than specialized expert-level knowledge in a discipline acquired after years of study.
Encyclopedic knowledge | Tasks requiring expert-level in-depth knowledge beyond mere commonsense, usually in an open-book setting.
Spatial and temporal reasoning | Datasets designed to test for an understanding of space and spatial relations (e.g., navigation) or reasoning involving time and sequences over time.
Multi-hop QA | Questions involving the composition of multiple steps of reasoning in order to arrive at an answer, such as "What is the capital of the country whose scientist discovered penicillin?"
Context-aware QA | Tasks such as closed-book QA and reading comprehension involving reasoning about a given text in context. The context is often a short passage, but could also take the form of a knowledge graph (KBQA) or a table. This category also includes information extraction tasks, such as NER or relation extraction.
Entailment | Tasks involving establishing the inferential relation between two texts, prototypically NLI, but also including fact verification.
Text classification | Tasks involving the classification of a text into a small set of categories, such as topic or sentiment classification, but also involving tasks such as hate speech detection and misinformation detection.
Generation | Tasks involving text generation, including machine translation, dialogue, question generation, as well as code generation. Tasks such as SQL execution (Lei et al., 2024) or systematic transformations of data (e.g., SCAN (Lake & Baroni, 2018)) are excluded because they can be solved by executing a program.
Meta-linguistic | Tasks probing for models' knowledge of linguistics, such as identifying the main subject of a sentence or solving linguistic puzzles.
Mixed datasets | Datasets containing a variety of tasks, such as BIG-Bench Hard (BBH) or MMLU.
Other | Tasks which did not fit in any of the other categories, such as evaluating AI safety, eliciting models' verbalized confidence, or melody retrieval.
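As a concrete illustration of the keyword filter from Appendix A (papers kept if they mention "CoT", "chain-of-thought", or "chain of thought" at least twice), here is a minimal sketch. It assumes each paper's full text is already available as a plain string; the function and variable names are ours, not the authors' released tooling.

```python
import re

# Keyword patterns from Appendix A. "CoT" is matched case-sensitively so that
# unrelated lowercase words do not count; the spelled-out phrase is matched
# case-insensitively and covers both hyphenated and spaced forms.
COT_PATTERNS = [
    re.compile(r"\bCoT\b"),
    re.compile(r"\bchain[- ]of[- ]thought\b", re.IGNORECASE),
]

def count_cot_mentions(paper_text: str) -> int:
    """Count occurrences of any CoT keyword in a paper's full text."""
    return sum(len(p.findall(paper_text)) for p in COT_PATTERNS)

def filter_papers(papers: dict, min_mentions: int = 2) -> list:
    """Keep IDs of papers whose text mentions CoT at least min_mentions times,
    mirroring the 4,642 -> 516 reduction described in Appendix A."""
    return [pid for pid, text in papers.items()
            if count_cot_mentions(text) >= min_mentions]
```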
", + "bbox": [ + 174, + 126, + 823, + 742 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "hosted with vLLM. All parameters given to the vLLM API endpoint are given in the Huggingface repo as well.", + "bbox": [ + 169, + 768, + 823, + 799 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "OTHER COT PROMPT VARIANTS", + "text_level": 1, + "bbox": [ + 171, + 821, + 477, + 835 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "D.1 TESTING PERFORMANCE VOLATILITY ACROSS PROMPTS", + "text_level": 1, + "bbox": [ + 171, + 854, + 614, + 869 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "To test the impact of prompt choice on performance, we performed our zero-shot experiment on Llama 3.1 8B with 7 different datasets and 4 different zero-shot CoT prompting strategies common in the literature (Kojima et al., 2022; Wang et al., 2023b; Zhou et al., 2023b; Yang et al., 2024).", + "bbox": [ + 169, + 881, + 823, + 925 + ], + "page_idx": 20 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 20 + }, + { + "type": "table", + "img_path": "images/4901ebf5dcf6c5e3308e4fc8cd8be2bc356075e2cbff12912f2eb88ac42bd641.jpg", + "table_caption": [ + "Table 3: Models, datasets, and prompting strategies used in our experiments. Models marked with $\\dagger$ are run with a 4k context size window. Note that Gemma has a larger than 4k context size window, but VLLM only supports up to a 4k context size window for it. Models marked with * indicate closed-source models that cannot handle prefixed assistant messages. Datasets marked with $\\triangle$ do not have a few-shot setting." + ], + "table_footnote": [], + "table_body": "
Models | Llama 2 7B Chat† (Touvron et al., 2023), Mistral 7B Instruct v0.3 (Jiang et al., 2023), Llama 3.1 8B Instruct (Dubey et al., 2024), Llama 3.1 70B Instruct, Gemma 2 9B It† (Riviere et al., 2024), Phi-3 Small 8k Instruct (Abdin et al., 2024), gpt-4o-mini-2024-07-18*, gpt-4o-2024-08-06*, Gemini 1.5 Flash* (Reid et al., 2024), Gemini 1.5 Pro* (Reid et al., 2024), claude-3-haiku-20240307* (Anthropic, a), claude-3-5-sonnet-20240620* (Anthropic, b)
Datasets | CommonsenseQA (Talmor et al., 2019), StrategyQA (Geva et al., 2021), SiQA△ (Sap et al., 2019), PiQA△ (Bisk et al., 2019), Winogrande△ (Sakaguchi et al., 2021), GPQA (Rein et al., 2023), MuSR (Sprague et al., 2024), ContextHub (Levels 1 and 2 only) (Hua et al., 2024), ARC△ (Clark et al., 2018), AGIEval LSAT (Zhong et al., 2023), MMLU (Hendrycks et al., 2021a), MMLU Pro (Wang et al., 2024), MATH (Hendrycks et al., 2021b), GSM8K (Cobbe et al., 2021), GSM8K-hard (Gao et al., 2023), FOLIO (Han et al., 2022), MuSiQue△ (Trivedi et al., 2022), Big-Bench Hard (Suzgun et al., 2023; Srivastava et al., 2022), BiGGen Bench (Kim et al., 2024)
Prompts | zero-shot direct answer, zero-shot CoT (Kojima et al., 2022), few-shot direct answer (Brown et al., 2020), few-shot CoT (Wei et al., 2022)
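As referenced in Appendix D.1, the following is a minimal sketch of the kind of loop one might use to compare zero-shot CoT prompt variants. The phrasings paraphrase the cited prompting papers, and evaluate() is a hypothetical stand-in for the paper's evaluation harness, not a released API.

```python
# Zero-shot CoT elicitation phrases, paraphrased from the cited work; each one
# warm-starts the assistant turn before the model continues its reasoning.
PROMPT_VARIANTS = {
    "kojima_2022": "Let's think step by step.",
    "plan_and_solve_2023": "Let's first devise a plan, then carry it out and solve the problem step by step.",
    "ape_2023": "Let's work this out in a step by step way to be sure we have the right answer.",
    "opro_2024": "Take a deep breath and work on this problem step-by-step.",
}

def evaluate(model: str, dataset: str, cot_phrase: str) -> float:
    """Hypothetical harness: run `model` on `dataset` with `cot_phrase`
    prefixed to the assistant message and return accuracy on a 0-100 scale."""
    raise NotImplementedError

def prompt_volatility(model: str, datasets: list) -> dict:
    """Max-minus-min accuracy across prompt variants, per dataset."""
    spread = {}
    for ds in datasets:
        accs = [evaluate(model, ds, p) for p in PROMPT_VARIANTS.values()]
        spread[ds] = max(accs) - min(accs)
    return spread
```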
", + "bbox": [ + 173, + 220, + 823, + 435 + ], + "page_idx": 21 + }, + { + "type": "table", + "img_path": "images/45bf096ee3d131eb1c544ded558b8563d629a7955eedc841e204cab4acca93e4.jpg", + "table_caption": [ + "Table 4: List of datasets used in our experiments. We categorize each dataset into one of five categories based on the type of reasoning required: Commonsense, Knowledge, Soft Reasoning, Symbolic, or Mathematical. We also report answer formats. When we use few-shot prompts, we mark how many examples those prompts contain. BiGGen Bench has many categories of questions that explicitly ask for CoTs in the response; we ignore those categories for our evaluation." + ], + "table_footnote": [], + "table_body": "
Dataset | Type | Answer Format | m-Shots
CommonsenseQA | Commonsense | Multiple choice | 7
StrategyQA | Commonsense | True or False | 6
SIQA | Commonsense | Multiple choice | 0
PIQA | Commonsense | Multiple choice | 0
Winogrande | Commonsense | Multiple choice | 0
Arc Easy | Knowledge | Multiple choice | 0
Arc Challenge | Knowledge | Multiple choice | 0
AGIEval LSAT | Soft Reasoning | Multiple choice | 3
BiGGen-Bench | Soft Reasoning | Free response | 0
MMLU | Knowledge | Multiple Choice | 5
MMLU Pro | Knowledge | Multiple Choice | 5
BigBench-Hard | Symbolic | Multiple Choice | 0
MuSR | Soft Reasoning | Multiple Choice | 1
GPQA | Mathematical | Multiple Choice | 3
MuSiQue | Soft Reasoning | Short Answer | 0
GSM8K | Mathematical | Short Answer | 8
GSM8K-Hard | Mathematical | Short Answer | 8
FOLIO | Symbolic | True, False, or Unknown | 4
ContextHub | Symbolic | True, False, or Neither | 3
MATH | Mathematical | Short Answer | 4
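Appendix C notes that a single answer parser is shared by all datasets with the same response format. As a rough illustration keyed to the answer formats in Table 4, the sketch below shows one hypothetical parser per format; the regexes are our assumptions, and the authors' actual parsers (with per-dataset edge-case handling) are in their released code.

```python
import re
from typing import Optional

def parse_multiple_choice(response: str) -> Optional[str]:
    """Extract a letter choice, e.g. from 'The answer is (B).'"""
    m = re.search(r"answer is\s*\(?([A-J])\)?", response, re.IGNORECASE)
    return m.group(1).upper() if m else None

def parse_short_answer(response: str) -> Optional[str]:
    """Extract the last number mentioned, e.g. for GSM8K-style questions."""
    numbers = re.findall(r"-?\d[\d,]*(?:\.\d+)?", response)
    return numbers[-1].replace(",", "") if numbers else None

def parse_true_false(response: str) -> Optional[str]:
    """Extract the last True/False/Unknown/Neither verdict mentioned."""
    hits = re.findall(r"\b(true|false|unknown|neither)\b", response, re.IGNORECASE)
    return hits[-1].capitalize() if hits else None

# One parser shared per answer format, as in Table 4:
PARSERS = {
    "Multiple choice": parse_multiple_choice,
    "Short Answer": parse_short_answer,
    "True or False": parse_true_false,
}
```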
", + "bbox": [ + 261, + 602, + 736, + 883 + ], + "page_idx": 21 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 21 + }, + { + "type": "table", + "img_path": "images/3c8cb99a7df5940afd3c610f4dd360299a0612a254a7e621c3907526efa1c63d.jpg", + "table_caption": [ + "Table 5: List of models for our experiments. We focus on contemporary instruction-tuned models; although pretrained and smaller language models could be used, they are not the focus of our study. Prompts and outputs used for each model are available on Huggingface. * Note that Gemma can accept more than 4k input tokens, but we are restricted to 4k by vLLM." + ], + "table_footnote": [], + "table_body": "
Model | Context Length | Is Open Source
Llama 2 7B Chat | 4k | True
Mistral 7B Instruct v0.3 | 8k | True
Llama 3.1 8B Instruct | 128k | True
Llama 3.1 70B Instruct | 128k | True
Gemma 2 9B It | 4k* | True
Qwen 7B Instruct | 131k | True
Qwen 72B Instruct | 131k | True
GPT4o-Mini | 128k | False
GPT4o | 128k | False
Gemini 1.5 Pro | 128k | False
Gemini Flash | 1m | False
Claude 3.5 Sonnet | 200k | False
Claude 3 Haiku | 200k | False
", + "bbox": [ + 305, + 167, + 692, + 363 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/9f95f43776fbd56cd222beea57a2a5f12075b2356a2362dff3912e55246a903a.jpg", + "image_caption": [ + "Figure 7: Performance of multiple prompts commonly used to elicit reasoning through CoT in the zero shot setting. Each prompt starts the assistant completion with a different phrase meant to elicit reasoning. All results are from using Llama 3.1 8B Instruct. For the Kojima variant, we explicitly place \"Let's think step by step.\" in the assistant message. There is very little variation between the CoT prompts on average.", + "Figure 7 shows variation due to prompts is typically small and no prompt gives a consistent gain over the other. For our experiments, this suggests that different prompts have small effects on the overall outcome on average." + ], + "image_footnote": [], + "bbox": [ + 187, + 380, + 808, + 609 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "E FEW-SHOT EXPERIMENTS", + "text_level": 1, + "bbox": [ + 171, + 834, + 421, + 849 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Compared to a zero-shot prompt, a few-shot prompt additionally contains demonstrations of the relevant reasoning mode on different problem instances $\\{(v(\\mathbf{q}_i),\\mathbf{y}_i^*)\\}$ . Few-shot prompts for direct answer simply encode the answer $a_{i}$ as $\\mathbf{y}_i^*$ , whereas few-shot prompts for chain-of-thought include a reasoning trace ending in the correct answer. Now we can define the $m$ -shot direct prompt as", + "bbox": [ + 169, + 867, + 823, + 925 + ], + "page_idx": 22 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/9436671d072bfe9e1df42bec2c0d8f5bee79dec70bd5f3261c1d86c2c4d1641e.jpg", + "image_caption": [ + "Figure 8: Average performance improvement from using CoT across different models in the zero-shot and few-shot settings. Each bar represents how much CoT improves the accuracy for that specific setting. In general, CoT in the few-shot setting does not change the qualitative performance of CoT versus zero-shot, though it can change the magnitude for symbolic datasets." + ], + "image_footnote": [], + "bbox": [ + 178, + 104, + 816, + 372 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "$\\mathcal{I}_{\\mathrm{da}}^{m}(\\mathbf{q}) = v_{\\mathrm{da}}(\\mathbf{q}_{1})\\mathbf{a}_{1}v_{\\mathrm{da}}(\\mathbf{q}_{2})\\mathbf{a}_{2}\\dots v_{\\mathrm{da}}(\\mathbf{q}_{m})\\mathbf{a}_{m}v_{\\mathrm{da}}(\\mathbf{q})$ and the $m$ -shot cot prompt as $\\mathcal{I}_{\\mathrm{cot}}^{m}(\\mathbf{q}) = v_{\\mathrm{cot}}(\\mathbf{q}_{1})\\mathbf{y}_{1}^{*}v_{\\mathrm{cot}}(\\mathbf{q}_{2})\\mathbf{y}_{2}^{*}\\dots v_{\\mathrm{cot}}(\\mathbf{q}_{m})\\mathbf{y}_{m}^{*}v_{\\mathrm{cot}}(\\mathbf{q})$ .", + "bbox": [ + 169, + 494, + 823, + 527 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Figure 8 shows the difference between few-shot prompting and the zero-shot setting discussed in the main text of the paper. We see that using CoT in the few-shot setting largely does not change the datasets that benefit from it. 
Only one dataset, MuSR Team Allocation, starts to improve with few-shot; however, we believe this is an exception: the final step needed to derive the answer is complex when only described in the prompt, and the few-shot examples make it clearer. The magnitude of improvement over direct answer prompting when using CoT is also similar to the zero-shot setting.", + "bbox": [ + 169, + 531, + 826, + 617 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "F EXPANDED COT VS DIRECT EXPERIMENTAL RESULTS", + "text_level": 1, + "bbox": [ + 171, + 635, + 658, + 651 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "F.1 FULL ZERO-SHOT RESULTS", + "text_level": 1, + "bbox": [ + 171, + 667, + 406, + 681 + ], + "page_idx": 23 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 23 + }, + { + "type": "table", + "img_path": "images/ac49b23c021cbc9aca989b032d58bd2ac8d3e4c4ac7556ce13ed7cddeb1c2061.jpg", + "table_caption": [ + "Table 6: Direct answer and CoT accuracies for each reasoning category across models." + ], + "table_footnote": [], + "table_body": "
Model | Commonsense | Knowledge | Mathematical | Symbolic | Soft
 | DA % | CoT % | DA % | CoT % | DA % | CoT % | DA % | CoT % | DA % | CoT %
Claude-3 Haiku | 74.3 | 77.2 | 73.0 | 76.1 | 18.1 | 48.2 | 38.6 | 48.7 | 55.9 | 56.6
Claude-3.5 Sonnet | 84.3 | 85.8 | 83.8 | 88.8 | 38.7 | 59.0 | 53.2 | 67.1 | 67.6 | 75.7
GPT-4o Mini | 81.8 | 83.2 | 73.6 | 83.1 | 22.9 | 59.7 | 48.1 | 60.9 | 61.1 | 63.5
Gemini 1.5 Flash | 80.3 | 76.8 | 78.2 | 81.0 | 27.2 | 55.7 | 47.0 | 59.7 | 60.6 | 62.6
Gemini 1.5 Pro | 80.4 | 78.3 | 80.9 | 83.8 | 35.4 | 58.5 | 52.9 | 62.6 | 64.1 | 67.8
Gemma 2 9b | 75.0 | 76.1 | 74.9 | 76.9 | 18.5 | 50.5 | 46.7 | 55.8 | 58.2 | 60.5
Gpt-4o | 87.3 | 87.7 | 82.9 | 88.6 | 36.5 | 63.3 | 55.7 | 68.3 | 65.9 | 74.0
Meta-Llama 2 7b | 51.4 | 50.9 | 44.1 | 46.6 | 9.3 | 17.2 | 22.4 | 35.4 | 37.2 | 37.6
Meta-Llama 3.1 70b | 84.2 | 84.7 | 82.4 | 85.6 | 24.9 | 54.9 | 49.0 | 60.0 | 65.7 | 69.5
Meta-Llama 3.1 8b | 72.9 | 73.4 | 70.1 | 74.1 | 16.0 | 47.8 | 34.8 | 51.6 | 55.0 | 56.2
Mistral 7b | 58.3 | 61.8 | 62.0 | 64.5 | 10.9 | 28.9 | 41.8 | 45.0 | 48.6 | 49.7
Phi-3 Small 8k | 70.8 | 72.5 | 76.1 | 79.7 | 17.8 | 47.1 | 51.2 | 58.7 | 57.9 | 56.4
Qwen 2 72b | 82.9 | 84.9 | 78.6 | 84.6 | 23.9 | 58.5 | 48.2 | 58.7 | 64.2 | 65.1
Qwen 2 7b | 64.0 | 66.1 | 65.2 | 71.3 | 15.9 | 53.5 | 43.8 | 52.3 | 54.4 | 49.4
Average | 74.8 | 75.7 | 73.3 | 77.5 | 22.6 | 50.2 | 45.2 | 56.1 | 58.3 | 60.3
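One way to read Table 6 is through the per-category delta between CoT and direct answer. The pandas sketch below assumes the table has been loaded with one row per model and paired DA/CoT columns per category (the column names are our assumption); the two example rows are taken from the table above.

```python
import pandas as pd

# Layout mirroring Table 6: one row per model, and for each reasoning
# category a "<category>_da" and "<category>_cot" accuracy column.
df = pd.DataFrame({
    "model": ["Claude-3 Haiku", "Gpt-4o"],
    "commonsense_da": [74.3, 87.3], "commonsense_cot": [77.2, 87.7],
    "mathematical_da": [18.1, 36.5], "mathematical_cot": [48.2, 63.3],
})

for cat in ["commonsense", "mathematical"]:
    df[f"{cat}_delta"] = df[f"{cat}_cot"] - df[f"{cat}_da"]

# Average CoT-minus-direct improvement per category across models; gains
# concentrate in the mathematical column, echoing the paper's finding.
print(df[["commonsense_delta", "mathematical_delta"]].mean())
```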
", + "bbox": [ + 176, + 127, + 820, + 348 + ], + "page_idx": 24 + }, + { + "type": "table", + "img_path": "images/fb151bdd62ef2d6da214ecee6b4a7e25253dcf4b16b1bcbfb99d38956a707e8a.jpg", + "table_caption": [ + "Table 7: Zero-shot accuracy for direct answering and CoT prompts on all datasets" + ], + "table_footnote": [], + "table_body": "
Dataset | Type | Model | zero-shot CoT accuracy | zero-shot DA accuracy
MuSR Team Allocations | Soft Reasoning | Llama 2 7b | 34.8 | 37.2
MuSR Team Allocations | Soft Reasoning | Mistral 7b | 38.8 | 46.8
MuSR Team Allocations | Soft Reasoning | Llama 3.1 8b | 44.0 | 48.0
MuSR Team Allocations | Soft Reasoning | Llama 3.1 70b | 65.2 | 66.8
MuSR Team Allocations | Soft Reasoning | Gemma 2 9b | 47.2 | 44.8
MuSR Team Allocations | Soft Reasoning | Phi-3 Small 8k | 47.2 | 61.6
MuSR Team Allocations | Soft Reasoning | Qwen 2 7b | 42.0 | 49.6
MuSR Team Allocations | Soft Reasoning | Qwen 2 72b | 58.0 | 66.8
MuSR Team Allocations | Soft Reasoning | GPT-4o Mini | 61.2 | 58.4
MuSR Team Allocations | Soft Reasoning | Gpt-4o | 64.0 | 63.6
MuSR Team Allocations | Soft Reasoning | Claude-3 Haiku | 56.8 | 59.2
MuSR Team Allocations | Soft Reasoning | Claude-3.5 Sonnet | 80.4 | 63.2
MuSR Team Allocations | Soft Reasoning | Gemini 1.5 Flash | 48.8 | 55.2
MuSR Team Allocations | Soft Reasoning | Gemini 1.5 Pro | 58.4 | 62.4
SiQA | Commonsense | Llama 2 7b | 53.4 | 55.9
SiQA | Commonsense | Mistral 7b | 35.9 | 33.5
SiQA | Commonsense | Llama 3.1 8b | 73.5 | 73.5
SiQA | Commonsense | Llama 3.1 70b | 78.7 | 80.9
SiQA | Commonsense | Gemma 2 9b | 74.9 | 76.3
SiQA | Commonsense | Phi-3 Small 8k | 38.0 | 40.4
SiQA | Commonsense | Qwen 2 7b | 37.3 | 39.3
SiQA | Commonsense | Qwen 2 72b | 80.5 | 80.4
SiQA | Commonsense | GPT-4o Mini | 79.0 | 80.0
SiQA | Commonsense | Gpt-4o | 81.9 | 81.5
SiQA | Commonsense | Claude-3 Haiku | 75.4 | 74.8
SiQA | Commonsense | Claude-3.5 Sonnet | 79.7 | 81.0
SiQA | Commonsense | Gemini 1.5 Flash | 74.5 | 79.1
SiQA | Commonsense | Gemini 1.5 Pro | 73.9 | 78.2
MuSiQue | Soft Reasoning | Llama 2 7b | 40.1 | 36.1
MuSiQue | Soft Reasoning | Mistral 7b | 47.3 | 47.2
MuSiQue | Soft Reasoning | Llama 3.1 8b | 62.6 | 64.7
MuSiQue | Soft Reasoning | Llama 3.1 70b | 74.0 | 72.2
MuSiQue | Soft Reasoning | Gemma 2 9b | 67.7 | 68.7
MuSiQue | Soft Reasoning | Phi-3 Small 8k | 58.3 | 64.3
MuSiQue | Soft Reasoning | Qwen 2 7b | 60.7 | 65.1
MuSiQue | Soft Reasoning | Qwen 2 72b | 56.3 | 69.0
MuSiQue | Soft Reasoning | GPT-4o Mini | 71.3 | 68.2
MuSiQue | Soft Reasoning | Gpt-4o | 73.5 | 70.1
MuSiQue | Soft Reasoning | Claude-3 Haiku | 54.8 | 56.0
MuSiQue | Soft Reasoning | Claude-3.5 Sonnet | 66.9 | 70.4
MuSiQue | Soft Reasoning | Gemini 1.5 Flash | 69.8 | 66.2
MuSiQue | Soft Reasoning | Gemini 1.5 Pro | 69.8 | 71.3
AGIEval LSAT RC | Soft Reasoning | Llama 2 7b | 31.2 | 36.4
AGIEval LSAT RC | Soft Reasoning | Mistral 7b | 61.7 | 61.0
AGIEval LSAT RC | Soft Reasoning | Llama 3.1 8b | 71.0 | 68.8
AGIEval LSAT RC | Soft Reasoning | Llama 3.1 70b | 84.4 | 87.0
AGIEval LSAT RC | Soft Reasoning | Gemma 2 9b | 75.1 | 78.1
AGIEval LSAT RC | Soft Reasoning | Phi-3 Small 8k | 68.8 | 69.9
AGIEval LSAT RC | Soft Reasoning | Qwen 2 7b | 61.0 | 66.5
AGIEval LSAT RC | Soft Reasoning | Qwen 2 72b | 83.6 | 84.4
", + "bbox": [ + 220, + 395, + 777, + 915 + ], + "page_idx": 24 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 24 + }, + { + "type": "table", + "img_path": "images/10df7936f14bb883a39ce8192760a300751dc4f64ae68a01bb958b1b04e0fd93.jpg", + "table_caption": [ + "Table 7: Zero-shot accuracy for direct answering and CoT prompts on all datasets" + ], + "table_footnote": [], + "table_body": "
DatasetTypeModelzero-shot CoT accuracyzero-shot DA accuracy
AGIEval LSAT RCSoft ReasoningGPT-4o Mini77.374.3
AGIEval LSAT RCSoft ReasoningGpt-4o88.181.4
AGIEval LSAT RCSoft ReasoningClaude-3 Haiku71.765.1
AGIEval LSAT RCSoft ReasoningClaude-3.5 Sonnet90.089.6
AGIEval LSAT RCSoft ReasoningGemini 1.5 Flash78.181.0
AGIEval LSAT RCSoft ReasoningGemini 1.5 Pro82.285.9
CommonsenseQACommonsenseLlama 2 7b49.454.6
CommonsenseQACommonsenseMistral 7b68.068.0
CommonsenseQACommonsenseLlama 3.1 8b68.574.9
CommonsenseQACommonsenseLlama 3.1 70b83.584.4
CommonsenseQACommonsenseGemma 2 9b79.280.1
CommonsenseQACommonsensePhi-3 Small 8k81.880.3
CommonsenseQACommonsenseQwen 2 7b78.579.0
CommonsenseQACommonsenseQwen 2 72b87.487.3
CommonsenseQACommonsenseGPT-4o Mini82.583.9
CommonsenseQACommonsenseGpt-4o86.587.3
CommonsenseQACommonsenseClaude-3 Haiku80.679.0
CommonsenseQACommonsenseClaude-3.5 Sonnet85.184.3
CommonsenseQACommonsenseGemini 1.5 Flash79.782.6
CommonsenseQACommonsenseGemini 1.5 Pro79.982.9
GPQAMathematicalLlama 2 7b28.324.3
GPQAMathematicalMistral 7b23.024.3
GPQAMathematicalLlama 3.1 8b24.125.9
GPQAMathematicalLlama 3.1 70b23.225.9
GPQAMathematicalGemma 2 9b26.321.2
GPQAMathematicalPhi-3 Small 8k22.320.8
GPQAMathematicalQwen 2 7b24.124.6
GPQAMathematicalQwen 2 72b21.018.1
GPQAMathematicalGPT-4o Mini21.024.0
GPQAMathematicalGpt-4o23.725.9
GPQAMathematicalClaude-3 Haiku25.422.3
GPQAMathematicalClaude-3.5 Sonnet25.425.9
GPQAMathematicalGemini 1.5 Flash22.322.8
GPQAMathematicalGemini 1.5 Pro21.023.7
AGIEval LSAT LRSoft ReasoningLlama 2 7b29.433.5
AGIEval LSAT LRSoft ReasoningMistral 7b44.147.8
AGIEval LSAT LRSoft ReasoningLlama 3.1 8b59.053.9
AGIEval LSAT LRSoft ReasoningLlama 3.1 70b81.481.0
AGIEval LSAT LRSoft ReasoningGemma 2 9b64.967.6
AGIEval LSAT LRSoft ReasoningPhi-3 Small 8k64.564.1
AGIEval LSAT LRSoft ReasoningQwen 2 7b50.658.4
AGIEval LSAT LRSoft ReasoningQwen 2 72b77.375.1
AGIEval LSAT LRSoft ReasoningGPT-4o Mini65.368.2
AGIEval LSAT LRSoft ReasoningGpt-4o87.383.9
AGIEval LSAT LRSoft ReasoningClaude-3 Haiku55.754.7
AGIEval LSAT LRSoft ReasoningClaude-3.5 Sonnet83.782.7
AGIEval LSAT LRSoft ReasoningGemini 1.5 Flash70.071.2
AGIEval LSAT LRSoft ReasoningGemini 1.5 Pro79.480.4
PiQACommonsenseLlama 2 7b62.164.7
PiQACommonsenseMistral 7b78.677.7
PiQACommonsenseLlama 3.1 8b85.084.2
PiQACommonsenseLlama 3.1 70b91.890.6
PiQACommonsenseGemma 2 9b84.084.8
PiQACommonsensePhi-3 Small 8k89.185.5
PiQACommonsenseQwen 2 7b84.386.2
PiQACommonsenseQwen 2 72b92.989.1
PiQACommonsenseGPT-4o Mini93.188.6
PiQACommonsenseGpt-4o95.995.5
PiQACommonsenseClaude-3 Haiku85.986.6
PiQACommonsenseClaude-3.5 Sonnet94.694.5
PiQACommonsenseGemini 1.5 Flash84.689.8
PiQACommonsenseGemini 1.5 Pro88.191.3
Arc EasyKnowledgeLlama 2 7b71.169.8
Arc EasyKnowledgeMistral 7b87.586.7
Arc EasyKnowledgeLlama 3.1 8b93.092.5
Arc EasyKnowledgeLlama 3.1 70b97.597.9
Arc EasyKnowledgeGemma 2 9b94.995.8
Arc EasyKnowledgePhi-3 Small 8k96.096.3
Arc EasyKnowledgeQwen 2 7b89.584.7
Arc EasyKnowledgeQwen 2 72b97.997.4
Arc EasyKnowledgeGPT-4o Mini96.894.6
Arc EasyKnowledgeGpt-4o98.998.1
Arc EasyKnowledgeClaude-3 Haiku95.195.4
Arc EasyKnowledgeClaude-3.5 Sonnet98.698.4
Arc EasyKnowledgeGemini 1.5 Flash96.897.2
Arc EasyKnowledgeGemini 1.5 Pro97.294.6
Arc ChallengeKnowledgeLlama 2 7b49.245.2
", + "bbox": [ + 220, + 123, + 777, + 914 + ], + "page_idx": 25 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 25 + }, + { + "type": "page_number", + "text": "26", + "bbox": [ + 488, + 948, + 508, + 959 + ], + "page_idx": 25 + }, + { + "type": "table", + "img_path": "images/5ae34891f3c36408bcc6bfbc261276e5c966d281447cb298c6085bf1be1f94d4.jpg", + "table_caption": [ + "Table 7: Zero-shot accuracy for direct answering and CoT prompts on all datasets" + ], + "table_footnote": [], + "table_body": "
DatasetTypeModelzero-shot CoT accuracyzero-shot DA accuracy
Arc ChallengeKnowledgeMistral 7b78.376.6
Arc ChallengeKnowledgeLlama 3.1 8b86.082.6
Arc ChallengeKnowledgeLlama 3.1 70b95.093.6
Arc ChallengeKnowledgeGemma 2 9b91.089.6
Arc ChallengeKnowledgePhi-3 Small 8k91.691.0
Arc ChallengeKnowledgeQwen 2 7b83.975.3
Arc ChallengeKnowledgeQwen 2 72b96.394.6
Arc ChallengeKnowledgeGPT-4o Mini93.382.6
Arc ChallengeKnowledgeGpt-4o96.095.3
Arc ChallengeKnowledgeClaude-3 Haiku89.389.3
Arc ChallengeKnowledgeClaude-3.5 Sonnet96.095.3
Arc ChallengeKnowledgeGemini 1.5 Flash92.393.6
Arc ChallengeKnowledgeGemini 1.5 Pro91.690.6
AGIEval LSAT ARSoft ReasoningLlama 2 7b17.017.4
AGIEval LSAT ARSoft ReasoningMistral 7b21.719.1
AGIEval LSAT ARSoft ReasoningLlama 3.1 8b20.426.1
AGIEval LSAT ARSoft ReasoningLlama 3.1 70b32.628.7
AGIEval LSAT ARSoft ReasoningGemma 2 9b24.823.0
AGIEval LSAT ARSoft ReasoningPhi-3 Small 8k28.326.5
AGIEval LSAT ARSoft ReasoningQwen 2 7b27.023.9
AGIEval LSAT ARSoft ReasoningQwen 2 72b29.128.3
AGIEval LSAT ARSoft ReasoningGPT-4o Mini32.223.0
AGIEval LSAT ARSoft ReasoningGpt-4o37.830.0
AGIEval LSAT ARSoft ReasoningClaude-3 Haiku24.823.5
AGIEval LSAT ARSoft ReasoningClaude-3.5 Sonnet38.333.9
AGIEval LSAT ARSoft ReasoningGemini 1.5 Flash27.827.8
AGIEval LSAT ARSoft ReasoningGemini 1.5 Pro30.031.7
BiGGen BenchSoft ReasoningLlama 2 7b61.656.8
BiGGen BenchSoft ReasoningMistral 7b70.168.1
BiGGen BenchSoft ReasoningLlama 3.1 8b66.567.7
BiGGen BenchSoft ReasoningLlama 3.1 70b78.976.9
BiGGen BenchSoft ReasoningGemma 2 9b64.764.5
BiGGen BenchSoft ReasoningPhi-3 Small 8k69.763.0
BiGGen BenchSoft ReasoningQwen 2 7b46.269.9
BiGGen BenchSoft ReasoningQwen 2 72b74.379.9
BiGGen BenchSoft ReasoningGPT-4o Mini70.377.7
BiGGen BenchSoft ReasoningGpt-4o86.082.0
BiGGen BenchSoft ReasoningClaude-3 Haiku80.080.0
BiGGen BenchSoft ReasoningClaude-3.5 Sonnet91.479.3
BiGGen BenchSoft ReasoningGemini 1.5 Flash73.968.5
BiGGen BenchSoft ReasoningGemini 1.5 Pro78.767.1
WinograndeCommonsenseLlama 2 7b49.950.4
WinograndeCommonsenseMistral 7b60.456.5
WinograndeCommonsenseLlama 3.1 8b66.563.3
WinograndeCommonsenseLlama 3.1 70b84.281.2
WinograndeCommonsenseGemma 2 9b68.767.7
WinograndeCommonsensePhi-3 Small 8k81.581.6
WinograndeCommonsenseQwen 2 7b67.160.7
WinograndeCommonsenseQwen 2 72b81.980.7
WinograndeCommonsenseGPT-4o Mini79.271.9
WinograndeCommonsenseGpt-4o89.786.5
WinograndeCommonsenseClaude-3 Haiku70.766.2
WinograndeCommonsenseClaude-3.5 Sonnet89.485.7
WinograndeCommonsenseGemini 1.5 Flash72.574.8
WinograndeCommonsenseGemini 1.5 Pro75.578.3
MMLUKnowledgeLlama 2 7b46.341.7
MMLUKnowledgeMistral 7b60.556.5
MMLUKnowledgeLlama 3.1 8b72.667.5
MMLUKnowledgeLlama 3.1 70b85.083.2
MMLUKnowledgeGemma 2 9b73.871.4
MMLUKnowledgePhi-3 Small 8k76.373.6
MMLUKnowledgeQwen 2 7b67.064.5
MMLUKnowledgeQwen 2 72b81.377.8
MMLUKnowledgeGPT-4o Mini79.974.8
MMLUKnowledgeGpt-4o87.583.4
MMLUKnowledgeClaude-3 Haiku72.268.4
MMLUKnowledgeClaude-3.5 Sonnet87.284.0
MMLUKnowledgeGemini 1.5 Flash76.374.7
MMLUKnowledgeGemini 1.5 Pro81.381.1
StrategyQACommonsenseLlama 2 7b39.531.2
StrategyQACommonsenseMistral 7b66.155.8
StrategyQACommonsenseLlama 3.1 8b73.768.6
StrategyQACommonsenseLlama 3.1 70b85.383.8
StrategyQACommonsenseGemma 2 9b73.766.4
StrategyQACommonsensePhi-3 Small 8k72.366.0
StrategyQACommonsenseQwen 2 7b63.254.8
StrategyQACommonsenseQwen 2 72b81.776.9
", + "bbox": [ + 220, + 123, + 777, + 916 + ], + "page_idx": 26 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 26 + }, + { + "type": "page_number", + "text": "27", + "bbox": [ + 488, + 948, + 508, + 959 + ], + "page_idx": 26 + }, + { + "type": "table", + "img_path": "images/d428f884b8057ca10febe71ad16d24604f9b8fa1ddf577e95d17b03aaf7d1f11.jpg", + "table_caption": [ + "Table 7: Zero-shot accuracy for direct answering and CoT prompts on all datasets" + ], + "table_footnote": [], + "table_body": "
DatasetTypeModelzero-shot CoT accuracyzero-shot DA accuracy
StrategyQACommonsenseGPT-4o Mini82.284.5
StrategyQACommonsenseGpt-4o84.585.5
StrategyQACommonsenseClaude-3 Haiku73.465.0
StrategyQACommonsenseClaude-3.5 Sonnet80.176.3
StrategyQACommonsenseGemini 1.5 Flash72.575.2
StrategyQACommonsenseGemini 1.5 Pro74.071.4
MuSR Object PlacementsSoft ReasoningLlama 2 7b36.330.5
MuSR Object PlacementsSoft ReasoningMistral 7b50.843.4
MuSR Object PlacementsSoft ReasoningLlama 3.1 8b55.553.5
MuSR Object PlacementsSoft ReasoningLlama 3.1 70b65.643.8
MuSR Object PlacementsSoft ReasoningGemma 2 9b63.357.0
MuSR Object PlacementsSoft ReasoningPhi-3 Small 8k53.155.1
MuSR Object PlacementsSoft ReasoningQwen 2 7b48.848.4
MuSR Object PlacementsSoft ReasoningQwen 2 72b61.745.7
MuSR Object PlacementsSoft ReasoningGPT-4o Mini59.055.0
MuSR Object PlacementsSoft ReasoningGpt-4o67.645.3
MuSR Object PlacementsSoft ReasoningClaude-3 Haiku46.952.3
MuSR Object PlacementsSoft ReasoningClaude-3.5 Sonnet69.551.2
MuSR Object PlacementsSoft ReasoningGemini 1.5 Flash61.756.2
MuSR Object PlacementsSoft ReasoningGemini 1.5 Pro66.450.0
FOLIOSymbolicLlama 2 7b36.533.0
FOLIOSymbolicMistral 7b50.741.9
FOLIOSymbolicLlama 3.1 8b58.656.7
FOLIOSymbolicLlama 3.1 70b70.969.0
FOLIOSymbolicGemma 2 9b66.055.7
FOLIOSymbolicPhi-3 Small 8k68.059.6
FOLIOSymbolicQwen 2 7b60.651.2
FOLIOSymbolicQwen 2 72b65.065.0
FOLIOSymbolicGPT-4o Mini65.058.1
FOLIOSymbolicGpt-4o79.862.6
FOLIOSymbolicClaude-3 Haiku61.648.8
FOLIOSymbolicClaude-3.5 Sonnet73.968.5
FOLIOSymbolicGemini 1.5 Flash74.969.5
FOLIOSymbolicGemini 1.5 Pro73.974.4
ContextHub Deductive L2SymbolicLlama 2 7b34.812.6
ContextHub Deductive L2SymbolicMistral 7b48.855.1
ContextHub Deductive L2SymbolicLlama 3.1 8b52.821.5
ContextHub Deductive L2SymbolicLlama 3.1 70b50.041.1
ContextHub Deductive L2SymbolicGemma 2 9b50.043.0
ContextHub Deductive L2SymbolicPhi-3 Small 8k52.449.1
ContextHub Deductive L2SymbolicQwen 2 7b51.339.8
ContextHub Deductive L2SymbolicQwen 2 72b52.844.0
ContextHub Deductive L2SymbolicGPT-4o Mini47.042.0
ContextHub Deductive L2SymbolicGpt-4o54.545.6
ContextHub Deductive L2SymbolicClaude-3 Haiku45.241.8
ContextHub Deductive L2SymbolicClaude-3.5 Sonnet53.046.2
ContextHub Deductive L2SymbolicGemini 1.5 Flash45.039.5
ContextHub Deductive L2SymbolicGemini 1.5 Pro57.343.3
ContextHub Abductive L2SymbolicLlama 2 7b34.331.9
ContextHub Abductive L2SymbolicMistral 7b34.025.7
ContextHub Abductive L2SymbolicLlama 3.1 8b41.337.3
ContextHub Abductive L2SymbolicLlama 3.1 70b51.044.4
ContextHub Abductive L2SymbolicGemma 2 9b41.532.9
ContextHub Abductive L2SymbolicPhi-3 Small 8k44.332.8
ContextHub Abductive L2SymbolicQwen 2 7b37.833.4
ContextHub Abductive L2SymbolicQwen 2 72b45.532.2
ContextHub Abductive L2SymbolicGPT-4o Mini65.055.0
ContextHub Abductive L2SymbolicGpt-4o57.546.8
ContextHub Abductive L2SymbolicClaude-3 Haiku37.031.4
ContextHub Abductive L2SymbolicClaude-3.5 Sonnet56.840.4
ContextHub Abductive L2SymbolicGemini 1.5 Flash53.132.2
ContextHub Abductive L2SymbolicGemini 1.5 Pro53.543.7
MMLU ProKnowledgeLlama 2 7b19.919.6
MMLU ProKnowledgeMistral 7b31.628.4
MMLU ProKnowledgeLlama 3.1 8b44.838.0
MMLU ProKnowledgeLlama 3.1 70b64.955.0
MMLU ProKnowledgeGemma 2 9b48.142.7
MMLU ProKnowledgePhi-3 Small 8k54.843.7
MMLU ProKnowledgeQwen 2 7b45.036.2
MMLU ProKnowledgeQwen 2 72b62.844.3
MMLU ProKnowledgeGPT-4o Mini62.342.6
MMLU ProKnowledgeGpt-4o72.155.0
MMLU ProKnowledgeClaude-3 Haiku47.639.0
MMLU ProKnowledgeClaude-3.5 Sonnet73.457.2
MMLU ProKnowledgeGemini 1.5 Flash58.547.2
MMLU ProKnowledgeGemini 1.5 Pro65.357.4
MuSR Murder MysteriesSoft ReasoningLlama 2 7b50.050.0
", + "bbox": [ + 220, + 123, + 777, + 914 + ], + "page_idx": 27 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 27 + }, + { + "type": "page_number", + "text": "28", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 27 + }, + { + "type": "table", + "img_path": "images/b525d383042b5783dceda3c38de601aba0076e717365bb347ecf929af7e0a671.jpg", + "table_caption": [ + "Table 7: Zero-shot accuracy for direct answering and CoT prompts on all datasets" + ], + "table_footnote": [], + "table_body": "
DatasetTypeModelzero-shot CoT accuracyzero-shot DA accuracy
MuSR Murder MysteriesSoft ReasoningMistral 7b62.855.6
MuSR Murder MysteriesSoft ReasoningLlama 3.1 8b70.457.2
MuSR Murder MysteriesSoft ReasoningLlama 3.1 70b73.669.6
MuSR Murder MysteriesSoft ReasoningGemma 2 9b76.861.6
MuSR Murder MysteriesSoft ReasoningPhi-3 Small 8k61.658.8
MuSR Murder MysteriesSoft ReasoningQwen 2 7b59.253.2
MuSR Murder MysteriesSoft ReasoningQwen 2 72b80.864.4
MuSR Murder MysteriesSoft ReasoningGPT-4o Mini71.263.6
MuSR Murder MysteriesSoft ReasoningGpt-4o87.670.8
MuSR Murder MysteriesSoft ReasoningClaude-3 Haiku62.456.8
MuSR Murder MysteriesSoft ReasoningClaude-3.5 Sonnet85.270.4
MuSR Murder MysteriesSoft ReasoningGemini 1.5 Flash70.858.4
MuSR Murder MysteriesSoft ReasoningGemini 1.5 Pro77.664.0
ContextHub Deductive L1SymbolicLlama 2 7b47.78.3
ContextHub Deductive L1SymbolicMistral 7b50.367.3
ContextHub Deductive L1SymbolicLlama 3.1 8b50.723.3
ContextHub Deductive L1SymbolicLlama 3.1 70b53.840.7
ContextHub Deductive L1SymbolicGemma 2 9b56.339.2
ContextHub Deductive L1SymbolicPhi-3 Small 8k54.850.2
ContextHub Deductive L1SymbolicQwen 2 7b59.343.3
ContextHub Deductive L1SymbolicQwen 2 72b51.544.0
ContextHub Deductive L1SymbolicGPT-4o Mini49.341.5
ContextHub Deductive L1SymbolicGpt-4o59.349.0
ContextHub Deductive L1SymbolicClaude-3 Haiku50.539.7
ContextHub Deductive L1SymbolicClaude-3.5 Sonnet54.547.0
ContextHub Deductive L1SymbolicGemini 1.5 Flash47.338.5
ContextHub Deductive L1SymbolicGemini 1.5 Pro57.346.0
ContextHub Abductive L1SymbolicLlama 2 7b29.416.4
ContextHub Abductive L1SymbolicMistral 7b46.925.8
ContextHub Abductive L1SymbolicLlama 3.1 8b43.624.2
ContextHub Abductive L1SymbolicLlama 3.1 70b55.343.9
ContextHub Abductive L1SymbolicGemma 2 9b61.958.9
ContextHub Abductive L1SymbolicPhi-3 Small 8k62.560.3
ContextHub Abductive L1SymbolicQwen 2 7b52.247.5
ContextHub Abductive L1SymbolicQwen 2 72b61.945.0
ContextHub Abductive L1SymbolicGPT-4o Mini61.142.2
ContextHub Abductive L1SymbolicGpt-4o74.265.6
ContextHub Abductive L1SymbolicClaude-3 Haiku35.322.8
ContextHub Abductive L1SymbolicClaude-3.5 Sonnet80.860.3
ContextHub Abductive L1SymbolicGemini 1.5 Flash66.447.2
ContextHub Abductive L1SymbolicGemini 1.5 Pro62.260.0
Big-Bench HardSymbolicLlama 2 7b29.831.9
Big-Bench HardSymbolicMistral 7b39.335.1
Big-Bench HardSymbolicLlama 3.1 8b62.845.6
Big-Bench HardSymbolicLlama 3.1 70b78.954.8
Big-Bench HardSymbolicGemma 2 9b58.750.8
Big-Bench HardSymbolicPhi-3 Small 8k70.055.1
Big-Bench HardSymbolicQwen 2 7b52.647.6
Big-Bench HardSymbolicQwen 2 72b75.159.0
Big-Bench HardSymbolicGPT-4o Mini77.749.7
Big-Bench HardSymbolicGpt-4o84.664.5
Big-Bench HardSymbolicClaude-3 Haiku62.447.3
Big-Bench HardSymbolicClaude-3.5 Sonnet83.656.9
Big-Bench HardSymbolicGemini 1.5 Flash71.355.4
Big-Bench HardSymbolicGemini 1.5 Pro71.650.3
MATHMathematicalLlama 2 7b4.24.0
MATHMathematicalMistral 7b12.46.1
MATHMathematicalLlama 3.1 8b47.213.8
MATHMathematicalLlama 3.1 70b64.422.8
MATHMathematicalGemma 2 9b45.619.1
MATHMathematicalPhi-3 Small 8k43.218.5
MATHMathematicalQwen 2 7b53.713.3
MATHMathematicalQwen 2 72b63.523.8
MATHMathematicalGPT-4o Mini69.624.3
MATHMathematicalGpt-4o73.335.2
MATHMathematicalClaude-3 Haiku32.717.4
MATHMathematicalClaude-3.5 Sonnet63.834.6
MATHMathematicalGemini 1.5 Flash54.531.3
MATHMathematicalGemini 1.5 Pro62.139.4
GSM8k-HardMathematicalLlama 2 7b6.71.8
GSM8k-HardMathematicalMistral 7b21.03.0
GSM8k-HardMathematicalLlama 3.1 8b34.46.0
GSM8k-HardMathematicalLlama 3.1 70b46.614.0
GSM8k-HardMathematicalGemma 2 9b40.98.8
GSM8k-HardMathematicalPhi-3 Small 8k33.06.9
GSM8k-HardMathematicalQwen 2 7b48.45.0
GSM8k-HardMathematicalQwen 2 72b54.813.7
", + "bbox": [ + 220, + 123, + 777, + 915 + ], + "page_idx": 28 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 28 + }, + { + "type": "page_number", + "text": "29", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 28 + }, + { + "type": "table", + "img_path": "images/40dd47fb0c0340c0e72e770cdc928d678926fc1bdbb82d49d5824dcbe69e1cc0.jpg", + "table_caption": [ + "Table 7: Zero-shot accuracy for direct answering and CoT prompts on all datasets" + ], + "table_footnote": [], + "table_body": "
DatasetTypeModelzero-shot CoT accuracyzero-shot DA accuracy
GSM8k-HardMathematicalGPT-4o Mini53.911.7
GSM8k-HardMathematicalGpt-4o60.326.0
GSM8k-HardMathematicalClaude-3 Haiku45.39.6
GSM8k-HardMathematicalClaude-3.5 Sonnet50.832.3
GSM8k-HardMathematicalGemini 1.5 Flash54.616.2
GSM8k-HardMathematicalGemini 1.5 Pro58.226.2
GSM8kMathematicalLlama 2 7b29.66.9
GSM8kMathematicalMistral 7b59.210.2
GSM8kMathematicalLlama 3.1 8b85.418.5
GSM8kMathematicalLlama 3.1 70b85.637.0
GSM8kMathematicalGemma 2 9b89.224.9
GSM8kMathematicalPhi-3 Small 8k90.024.9
GSM8kMathematicalQwen 2 7b87.920.7
GSM8kMathematicalQwen 2 72b94.640.1
GSM8kMathematicalGPT-4o Mini94.131.8
GSM8kMathematicalGpt-4o95.858.8
GSM8kMathematicalClaude-3 Haiku89.422.9
GSM8kMathematicalClaude-3.5 Sonnet96.162.2
GSM8kMathematicalGemini 1.5 Flash91.438.6
GSM8kMathematicalGemini 1.5 Pro92.752.4
", + "bbox": [ + 220, + 125, + 777, + 340 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "F.2 FULL FEW-SHOT RESULTS", + "text_level": 1, + "bbox": [ + 171, + 375, + 397, + 388 + ], + "page_idx": 29 + }, + { + "type": "table", + "img_path": "images/4112dcc85423a261e0150879b292cf256cebcbd7047df4ac82f69de0680d3a93.jpg", + "table_caption": [ + "Table 8: Few-shot accuracy for direct answering and CoT prompts on all datasets" + ], + "table_footnote": [], + "table_body": "
DatasetTypeModelfew-shot CoT accuracyfew-shot DA accuracy
AGIEval LSAT RCSoft ReasoningLlama 2 7b33.138.7
AGIEval LSAT RCSoft ReasoningMistral 7b52.457.2
AGIEval LSAT RCSoft ReasoningLlama 3.1 8b60.270.3
AGIEval LSAT RCSoft ReasoningLlama 3.1 70b84.488.8
AGIEval LSAT RCSoft ReasoningGemma 2 9b74.379.2
AGIEval LSAT RCSoft ReasoningPhi-3 Small 8k63.265.1
AGIEval LSAT RCSoft ReasoningQwen 2 7b61.768.8
AGIEval LSAT RCSoft ReasoningQwen 2 72b85.985.9
AGIEval LSAT RCSoft ReasoningGPT-4o Mini77.371.4
AGIEval LSAT RCSoft ReasoningGemini 1.5 Flash79.281.8
AGIEval LSAT LRSoft ReasoningLlama 2 7b33.734.7
AGIEval LSAT LRSoft ReasoningMistral 7b46.148.0
AGIEval LSAT LRSoft ReasoningLlama 3.1 8b55.758.0
AGIEval LSAT LRSoft ReasoningLlama 3.1 70b83.385.1
AGIEval LSAT LRSoft ReasoningGemma 2 9b65.768.2
AGIEval LSAT LRSoft ReasoningPhi-3 Small 8k64.759.2
AGIEval LSAT LRSoft ReasoningQwen 2 7b54.161.2
AGIEval LSAT LRSoft ReasoningQwen 2 72b77.579.6
AGIEval LSAT LRSoft ReasoningGPT-4o Mini68.464.5
AGIEval LSAT LRSoft ReasoningGemini 1.5 Flash68.672.9
GPQAMathematicalMistral 7b23.025.9
GPQAMathematicalLlama 3.1 8b22.127.2
GPQAMathematicalLlama 3.1 70b24.824.3
GPQAMathematicalGemma 2 9b19.922.3
GPQAMathematicalPhi-3 Small 8k23.922.5
GPQAMathematicalQwen 2 7b23.421.2
GPQAMathematicalQwen 2 72b22.819.9
GPQAMathematicalGPT-4o Mini20.020.0
GPQAMathematicalGemini 1.5 Flash21.924.6
CommonsenseQACommonsenseLlama 2 7b18.219.2
CommonsenseQACommonsenseMistral 7b73.670.4
CommonsenseQACommonsenseLlama 3.1 8b74.076.5
CommonsenseQACommonsenseLlama 3.1 70b84.784.6
CommonsenseQACommonsenseGemma 2 9b81.880.8
CommonsenseQACommonsensePhi-3 Small 8k80.880.4
CommonsenseQACommonsenseQwen 2 7b80.372.9
CommonsenseQACommonsenseQwen 2 72b88.487.8
CommonsenseQACommonsenseGPT-4o Mini84.784.7
CommonsenseQACommonsenseGemini 1.5 Flash81.783.3
AGIEval LSAT ARSoft ReasoningLlama 2 7b19.618.7
AGIEval LSAT ARSoft ReasoningMistral 7b20.922.6
AGIEval LSAT ARSoft ReasoningLlama 3.1 8b24.826.1
AGIEval LSAT ARSoft ReasoningLlama 3.1 70b36.130.9
AGIEval LSAT ARSoft ReasoningGemma 2 9b22.228.7
AGIEval LSAT ARSoft ReasoningPhi-3 Small 8k27.820.0
AGIEval LSAT ARSoft ReasoningQwen 2 7b24.323.0
AGIEval LSAT ARSoft ReasoningQwen 2 72b27.030.0
", + "bbox": [ + 225, + 426, + 771, + 917 + ], + "page_idx": 29 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 29 + }, + { + "type": "page_number", + "text": "30", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 29 + }, + { + "type": "table", + "img_path": "images/e475ef0b9cb04a84d3ae94a8f5ab9b38aadc1893e814332b167f58731fd384f0.jpg", + "table_caption": [ + "Table 8: Few-shot accuracy for direct answering and CoT prompts on all datasets" + ], + "table_footnote": [], + "table_body": "
DatasetTypeModelfew-shot CoT accuracyfew-shot DA accuracy
AGIEval LSAT ARSoft ReasoningGPT-4o Mini28.726.1
AGIEval LSAT ARSoft ReasoningGemini 1.5 Flash28.320.4
MMLUKnowledgeLlama 2 7b49.042.8
MMLUKnowledgeMistral 7b63.057.0
MMLUKnowledgeLlama 3.1 8b71.769.3
MMLUKnowledgeLlama 3.1 70b84.383.7
MMLUKnowledgeGemma 2 9b74.772.4
MMLUKnowledgePhi-3 Small 8k77.375.2
MMLUKnowledgeQwen 2 7b69.968.6
MMLUKnowledgeQwen 2 72b82.781.8
MMLUKnowledgeGPT-4o Mini82.377.8
MMLUKnowledgeGemini 1.5 Flash78.179.0
StrategyQACommonsenseLlama 2 7b57.930.9
StrategyQACommonsenseMistral 7b70.772.0
StrategyQACommonsenseLlama 3.1 8b74.465.8
StrategyQACommonsenseLlama 3.1 70b87.184.2
StrategyQACommonsenseGemma 2 9b77.173.3
StrategyQACommonsensePhi-3 Small 8k75.071.1
StrategyQACommonsenseQwen 2 7b71.958.9
StrategyQACommonsenseQwen 2 72b83.280.1
StrategyQACommonsenseGPT-4o Mini83.086.2
StrategyQACommonsenseGemini 1.5 Flash77.080.3
ContextHub Abductive L2SymbolicLlama 2 7b36.235.0
ContextHub Abductive L2SymbolicMistral 7b33.830.0
ContextHub Abductive L2SymbolicLlama 3.1 8b32.736.1
ContextHub Abductive L2SymbolicLlama 3.1 70b54.651.2
ContextHub Abductive L2SymbolicGemma 2 9b44.833.2
ContextHub Abductive L2SymbolicPhi-3 Small 8k49.834.2
ContextHub Abductive L2SymbolicQwen 2 7b39.635.0
ContextHub Abductive L2SymbolicQwen 2 72b54.734.9
ContextHub Abductive L2SymbolicGPT-4o Mini62.060.0
ContextHub Abductive L2SymbolicGemini 1.5 Flash48.647.8
ContextHub Abductive L1SymbolicLlama 2 7b21.416.7
ContextHub Abductive L1SymbolicMistral 7b23.621.7
ContextHub Abductive L1SymbolicLlama 3.1 8b40.036.1
ContextHub Abductive L1SymbolicLlama 3.1 70b62.258.9
ContextHub Abductive L1SymbolicGemma 2 9b48.959.4
ContextHub Abductive L1SymbolicPhi-3 Small 8k59.256.4
ContextHub Abductive L1SymbolicQwen 2 7b48.638.9
ContextHub Abductive L1SymbolicQwen 2 72b53.356.1
ContextHub Abductive L1SymbolicGPT-4o Mini77.259.2
ContextHub Abductive L1SymbolicGemini 1.5 Flash79.768.6
MuSR Murder MysteriesSoft ReasoningMistral 7b62.056.4
MuSR Murder MysteriesSoft ReasoningLlama 3.1 8b61.661.2
MuSR Murder MysteriesSoft ReasoningLlama 3.1 70b73.268.0
MuSR Murder MysteriesSoft ReasoningGemma 2 9b81.662.0
MuSR Murder MysteriesSoft ReasoningPhi-3 Small 8k62.053.6
MuSR Murder MysteriesSoft ReasoningQwen 2 7b56.055.6
MuSR Murder MysteriesSoft ReasoningQwen 2 72b80.466.0
MuSR Murder MysteriesSoft ReasoningGPT-4o Mini76.069.6
MuSR Murder MysteriesSoft ReasoningGemini 1.5 Flash70.066.4
MuSR Team AllocationsSoft ReasoningMistral 7b42.843.2
MuSR Team AllocationsSoft ReasoningLlama 3.1 8b59.651.6
MuSR Team AllocationsSoft ReasoningLlama 3.1 70b89.263.6
MuSR Team AllocationsSoft ReasoningGemma 2 9b48.445.6
MuSR Team AllocationsSoft ReasoningPhi-3 Small 8k66.046.4
MuSR Team AllocationsSoft ReasoningQwen 2 7b34.040.8
MuSR Team AllocationsSoft ReasoningQwen 2 72b56.066.4
MuSR Team AllocationsSoft ReasoningGPT-4o Mini75.660.0
MuSR Team AllocationsSoft ReasoningGemini 1.5 Flash90.054.4
MMLU ProKnowledgeLlama 2 7b21.520.4
MMLU ProKnowledgeMistral 7b34.826.7
MMLU ProKnowledgeLlama 3.1 8b44.738.0
MMLU ProKnowledgeLlama 3.1 70b64.455.1
MMLU ProKnowledgeGemma 2 9b48.542.4
MMLU ProKnowledgePhi-3 Small 8k54.843.2
MMLU ProKnowledgeQwen 2 7b46.639.0
MMLU ProKnowledgeQwen 2 72b62.551.6
MMLU ProKnowledgeGPT-4o Mini63.045.0
MMLU ProKnowledgeGemini 1.5 Flash59.450.6
MuSR Object PlacementsSoft ReasoningMistral 7b55.541.0
MuSR Object PlacementsSoft ReasoningLlama 3.1 8b66.850.4
MuSR Object PlacementsSoft ReasoningLlama 3.1 70b67.257.4
MuSR Object PlacementsSoft ReasoningGemma 2 9b68.058.2
MuSR Object PlacementsSoft ReasoningPhi-3 Small 8k62.151.6
MuSR Object PlacementsSoft ReasoningQwen 2 7b46.943.8
MuSR Object PlacementsSoft ReasoningQwen 2 72b66.443.0
", + "bbox": [ + 225, + 123, + 771, + 917 + ], + "page_idx": 30 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 30 + }, + { + "type": "page_number", + "text": "31", + "bbox": [ + 488, + 948, + 506, + 959 + ], + "page_idx": 30 + }, + { + "type": "table", + "img_path": "images/f1ee595f2bae3b379ed01eb3e261d1974f2de31226bb5ac0646fe5244a263ac2.jpg", + "table_caption": [ + "Table 8: Few-shot accuracy for direct answering and CoT prompts on all datasets" + ], + "table_footnote": [], + "table_body": "
DatasetTypeModelfew-shot CoT accuracyfew-shot DA accuracy
MuSR Object PlacementsSoft ReasoningGPT-4o Mini67.047.0
MuSR Object PlacementsSoft ReasoningGemini 1.5 Flash73.054.7
ContextHub Deductive L2SymbolicLlama 2 7b34.715.0
ContextHub Deductive L2SymbolicMistral 7b63.851.4
ContextHub Deductive L2SymbolicLlama 3.1 8b76.127.3
ContextHub Deductive L2SymbolicLlama 3.1 70b82.653.6
ContextHub Deductive L2SymbolicGemma 2 9b61.947.6
ContextHub Deductive L2SymbolicPhi-3 Small 8k61.554.0
ContextHub Deductive L2SymbolicQwen 2 7b55.336.4
ContextHub Deductive L2SymbolicQwen 2 72b80.254.0
ContextHub Deductive L2SymbolicGPT-4o Mini59.041.0
ContextHub Deductive L2SymbolicGemini 1.5 Flash90.242.5
ContextHub Deductive L1SymbolicLlama 2 7b34.716.0
ContextHub Deductive L1SymbolicMistral 7b46.259.2
ContextHub Deductive L1SymbolicLlama 3.1 8b73.023.0
ContextHub Deductive L1SymbolicLlama 3.1 70b67.550.0
ContextHub Deductive L1SymbolicGemma 2 9b66.045.7
ContextHub Deductive L1SymbolicPhi-3 Small 8k74.851.8
ContextHub Deductive L1SymbolicQwen 2 7b58.837.5
ContextHub Deductive L1SymbolicQwen 2 72b70.742.8
ContextHub Deductive L1SymbolicGPT-4o Mini59.244.3
ContextHub Deductive L1SymbolicGemini 1.5 Flash89.349.8
MATHMathematicalLlama 2 7b4.73.9
MATHMathematicalMistral 7b13.77.1
MATHMathematicalLlama 3.1 8b41.214.2
MATHMathematicalLlama 3.1 70b61.924.2
MATHMathematicalGemma 2 9b47.519.8
MATHMathematicalPhi-3 Small 8k42.418.9
MATHMathematicalQwen 2 7b55.015.0
MATHMathematicalQwen 2 72b65.326.2
MATHMathematicalGPT-4o Mini71.724.6
MATHMathematicalGemini 1.5 Flash54.732.3
GSM8KMathematicalLlama 2 7b29.07.7
GSM8KMathematicalMistral 7b56.212.5
GSM8KMathematicalLlama 3.1 8b86.420.1
GSM8KMathematicalLlama 3.1 70b96.139.1
GSM8KMathematicalGemma 2 9b89.224.9
GSM8KMathematicalPhi-3 Small 8k90.424.5
GSM8KMathematicalQwen 2 7b87.621.4
GSM8KMathematicalQwen 2 72b93.240.6
GSM8KMathematicalGPT-4o Mini94.232.8
GSM8KMathematicalGemini 1.5 Flash90.640.4
", + "bbox": [ + 225, + 123, + 771, + 563 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "F.3 ANSWER EXTRACTOR AND AVERAGE ANSWER SPAN RESULTS", + "text_level": 1, + "bbox": [ + 174, + 599, + 648, + 613 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "In this section, we report the number of generations from each model on each dataset that our answer parser could not extract. \"1\" denotes that a model was not run on a certain dataset due to context length limitations in the few-shot setting. We see that these unparseable rates are generally low across the board. The weakest models struggle on some of the most challenging datasets, but unparseable rates are all at or below $15\\%$ .", + "bbox": [ + 174, + 626, + 823, + 695 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "We also report the average character index of the beginning of the answer span that the answer parser extracted. Of particular note is that the direct answer prompts all return an answer within the first 60 characters, indicating that the answers are returned almost immediately, as desired. CoT completions are much longer.", + "bbox": [ + 174, + 703, + 823, + 758 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "G ZOOM-IN: MMLU AND MMLU PRO", + "text_level": 1, + "bbox": [ + 174, + 780, + 514, + 794 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "MMLU and MMLU Pro show gains from adding CoT, but because these datasets are so broad, they defy simple characterization. We explore the performance of CoT on each category of MMLU to understand divergences in CoT performance between these domains. We list the top three categories where CoT gives the largest error reduction for Llama 3.1 8B and 70B on MMLU and MMLU Pro in Table 17. Some of these categories are explicitly mathematical in nature, as we might expect from Figure 3. We can also see that CoT is helping on categories like \"business\"; upon closer inspection, we found that these categories frequently involve math as well (e.g., business questions may involve computations surrounding wealth). 
We need to more carefully characterize MMLU at the instance", + "bbox": [ + 174, + 811, + 823, + 922 + ], + "page_idx": 31 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 173, + 32, + 477, + 46 + ], + "page_idx": 31 + }, + { + "type": "page_number", + "text": "32", + "bbox": [ + 490, + 949, + 506, + 959 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/4555fd6feb79a2952a72299156e54e6ce602be5defc12ec9511a3ee2a0640053.jpg", + "image_caption": [ + "CoT vs direct answer prompting in zero-shot setting (sorted by CoT delta)" + ], + "image_footnote": [], + "bbox": [ + 189, + 112, + 823, + 202 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/23092bff0590411ea6a3a6a3b3cc309010f03fcc52eb0dd86e63bde56fae586a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 189, + 205, + 823, + 286 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/7ee7c9def64df0116c3d3d3fd0d7f821a92277a84d73722bb4a98810297129e1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 189, + 292, + 823, + 371 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/caedee3172006eff41acc4139009516b0772877a4d107fc766aa03a806257c3b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 189, + 376, + 823, + 455 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/2dd6a070b54dea85cca67c15249312175b3c234a4d8465f0ff509f7b9bc66f03.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 189, + 460, + 823, + 540 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/b1ceea31a35477784ca2848751bf8f055fef4bf34de33f6883042939667571be.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 189, + 545, + 823, + 625 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/52f19834dc472e800244344b94f45e6ba78b8e1ce6e5d6f1e74a5077acb42011.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 189, + 630, + 823, + 710 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/1efd9008ea291cbd03a58815d2537ab80fd48a1a8a742e1048e4fc078b52607b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 189, + 715, + 823, + 794 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/b3a9ab83cd4738f25c01c3b5a4c3ac93767c31780316f7283e4097b8757c2499.jpg", + "image_caption": [ + "Figure 9: Performance of zero-shot direct (blue) and zero-shot CoT (orange) across datasets and models. Graphs are sorted in ascending order by median delta (CoT, direct). The datasets benefiting substantially are all symbolic or semi-symbolic in nature." + ], + "image_footnote": [], + "bbox": [ + 189, + 799, + 823, + 876 + ], + "page_idx": 32 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 32 + }, + { + "type": "page_number", + "text": "33", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 32 + }, + { + "type": "table", + "img_path": "images/702ab985934a532ba974cf9b014e4583b0a5e45b3f3849ee36e39ad0e482ce6d.jpg", + "table_caption": [ + "Table 9: Percentage of responses per dataset per model that our answer parser could not extract an answer for in the zero-shot direct answer setting. Prompt modifications were made to decrease these numbers. No model is above $15\\%$ ." + ], + "table_footnote": [], + "table_body": "
Zero-shot Direct Answer Unparseable Answer Rate by Percentage
datasetLlama 2 7bMistral 7bLlama 3.1 8bLlama 3.1 70bGemma 2 9bPhi-3 Small 8kQwen 2 7bQwen 2 72bGPT-4o MiniGPT-4oClaude-3 HaikuClaude-3.5 SonnetGemini 1.5 Flash
CommonsenseQA1.92.51.10.00.80.11.60.70.00.00.10.00.2
StrategyQA0.01.90.10.011.70.54.92.70.00.00.00.00.2
SiQA0.26.60.00.13.90.30.13.00.10.10.00.00.4
PiQA0.46.00.00.13.32.10.05.50.20.00.10.00.9
Winogrande0.03.00.10.02.10.25.10.40.00.00.00.03.6
Arc Easy0.01.80.50.00.00.29.10.73.50.40.20.03.2
Arc Challenge0.02.31.00.00.30.710.70.710.00.70.00.05.0
AGIeval LSAT LR0.40.00.00.00.00.20.00.00.02.50.00.00.2
AGIeval LSAT AR0.40.00.00.04.33.90.00.00.08.70.00.00.0
AGIeval LSAT RC0.40.40.00.00.00.00.00.00.09.70.00.00.4
ContextHub Deductive L10.00.00.00.01.20.02.30.00.00.00.20.00.2
ContextHub Deductive L20.00.00.00.00.00.02.21.00.00.02.80.00.0
ContextHub Abductive L10.00.00.00.00.00.30.00.00.00.00.00.00.0
ContextHub Abductive L20.00.00.00.00.00.11.50.20.00.00.80.00.0
MuSR Murder Mysteries0.00.00.00.00.00.00.00.00.00.00.00.00.0
MuSR Team Allocations0.00.00.00.03.60.00.00.00.00.00.08.40.4
MuSR Object Placements0.00.00.00.00.00.00.00.00.00.00.00.00.0
MMLU0.10.00.00.00.10.23.61.20.60.01.30.30.2
MMLU Pro0.71.31.00.31.03.76.812.20.40.30.30.40.6
GPQA1.37.10.00.08.712.75.415.20.00.01.60.00.7
MATH0.66.90.30.20.10.13.53.00.80.00.30.00.4
GSM8k0.24.12.50.02.70.01.70.20.00.012.75.50.0
BigGen Bench4.60.30.90.10.51.01.31.01.30.00.00.10.4
GSM8k-Hard4.87.62.00.40.40.23.21.10.10.55.20.50.2
MuSiQue0.10.00.00.00.00.10.00.00.00.00.00.20.1
Folio4.40.00.00.00.00.03.90.00.012.30.00.00.5
BigBench-Hard0.00.00.07.40.00.20.00.00.00.30.04.512.8
", + "bbox": [ + 173, + 219, + 823, + 441 + ], + "page_idx": 33 + }, + { + "type": "table", + "img_path": "images/a7e3511c7b1a049d571584971e34c53b6185e7d89bd7342771ac75f8702f7813.jpg", + "table_caption": [ + "Table 10: Percentage of responses per dataset per model that our answer parser could not extract an answer for in the zero-shot CoT setting. Prompt modifications were made to decrease these numbers. No model is above $15\\%$ ." + ], + "table_footnote": [], + "table_body": "
Zero-shot CoT Unparseable Answer Rate by Percentage
datasetLlama 2 7bMistral 7bLlama 3.1 8bLlama 3.1 70bGemma 2 9bPhi-3 Small 8kQwen 2 7bQwen 2 72bGPT-4o MiniGPT-4oClaude-3 HaikuClaude-3.5 SonnetGemini 1.5 Flash
CommonsenseQA2.91.38.60.00.60.10.00.01.60.00.20.32.4
StrategyQA1.00.11.10.80.30.40.30.00.00.00.02.14.4
SiQA0.81.80.30.11.60.00.10.10.00.00.30.13.5
PiQA1.61.60.20.12.80.30.50.30.00.01.40.34.6
Winogrande0.91.40.20.20.90.40.30.00.00.00.02.03.4
Arc Easy0.20.40.20.00.51.61.60.00.50.00.00.00.4
Arc Challenge0.00.70.00.00.00.00.70.00.00.00.00.00.7
AGIEval LSAT LR3.32.20.00.01.20.02.00.00.00.00.00.00.2
AGIEval LSAT AR4.87.06.12.25.75.24.30.41.31.30.00.41.7
AGIEval LSAT RC7.11.10.00.00.73.06.70.00.00.00.00.00.4
ContextHub Deductive L10.71.30.00.00.00.00.00.00.00.00.00.00.3
ContextHub Deductive L20.20.40.00.00.00.00.00.00.00.00.00.00.4
ContextHub Abductive L10.60.00.00.00.00.00.00.00.00.00.00.00.0
ContextHub Abductive L20.00.20.10.00.00.30.00.00.00.00.00.00.4
MuSR Murder Mysteries0.00.40.00.00.011.60.40.00.00.00.06.83.6
MuSR Team Allocations5.23.20.80.00.80.40.40.00.00.00.00.00.0
MuSR Object Placements0.01.60.00.00.40.80.00.00.00.00.02.00.4
MMLU1.90.61.00.21.51.00.40.20.00.10.03.13.2
MMLU Pro4.45.413.13.312.53.65.42.02.41.90.45.04.4
GPQA4.510.39.41.68.51.83.80.70.00.00.011.815.0
MATH1.65.58.22.52.31.63.00.40.40.50.91.71.0
GSM8k1.71.40.710.50.40.60.40.00.00.00.30.00.1
BigGen Bench5.00.40.50.10.50.40.39.50.00.00.00.10.4
GSM8k-Hard2.18.710.24.510.73.23.51.00.80.53.01.82.7
MuSiQue1.40.08.30.10.00.00.00.00.00.00.00.73.1
Folio0.00.01.50.00.00.00.00.00.00.00.02.01.5
BigBench-Hard3.85.41.80.41.30.10.40.30.00.00.01.20.9
", + "bbox": [ + 173, + 635, + 823, + 854 + ], + "page_idx": 33 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 33 + }, + { + "type": "page_number", + "text": "34", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 33 + }, + { + "type": "table", + "img_path": "images/ad7d6883efbe38b1043ded572f7e2f2cf8102fdecfbbf57fcc849c6bc0e1feb9.jpg", + "table_caption": [ + "Table 11: Percentage of responses per dataset per model that our answer parser could not extract an answer for in the few-shot direct answer setting. Prompt modifications were made to decrease these numbers. No model is above $15\\%$ ." + ], + "table_footnote": [], + "table_body": "
Few-shot Direct Answer Unparseable Answer Rate by Percentage
datasetLlama 2 7bMistral 7bLlama 3.1 8bLlama 3.1 70bGemma 2 9bPhi-3 Small 8kQwen 2 7bQwen 2 72bGPT-4o MiniGemini 1.5 Flash
CommonsenseQA0.00.10.20.01.30.99.91.30.00.6
AGIEval LSAT LR6.70.00.00.00.00.00.00.00.00.2
AGIEval LSAT AR2.60.00.00.03.55.20.00.00.00.0
AGIEval LSAT RC0.00.00.00.00.00.00.00.00.00.0
ContextHub Deductive L10.02.80.00.00.010.70.30.00.00.0
ContextHub Deductive L20.00.10.00.00.00.30.20.00.00.0
ContextHub Abductive L10.02.80.00.00.00.60.00.00.00.0
ContextHub Abductive L20.02.00.00.00.00.00.00.00.00.0
MuSR Murder Mysteries-1.00.00.00.00.00.00.00.00.00.4
MuSR Team Allocations-1.00.00.00.00.00.00.00.00.00.0
MuSR Object Placements-1.00.00.00.00.41.20.00.00.00.0
MMLU4.20.20.00.00.10.00.40.10.00.2
MMLU Pro5.11.22.40.31.09.10.52.60.40.5
GPQA-1.01.30.00.03.67.413.41.10.00.0
MATH0.35.90.30.20.10.11.62.20.00.3
GSM8k0.10.10.50.00.12.20.00.20.00.0
", + "bbox": [ + 174, + 229, + 823, + 431 + ], + "page_idx": 34 + }, + { + "type": "table", + "img_path": "images/a6319c7cc10d8b1be07d409b3c70c34b2c8566c8c9137a1231562af89e58c526.jpg", + "table_caption": [ + "Table 12: Percentage of responses per dataset per model that our answer parser could not extract an answer for in the few-shot CoT setting. Prompt modifications were made to decrease these numbers. No model is above $15\\%$ ." + ], + "table_footnote": [], + "table_body": "
Few-shot CoT Unparseable Answer Rate by Percentage
datasetLlama 2 7bMistral 7bLlama 3.1 8bLlama 3.1 70bGemma 2 9bPhi-3 Small 8kQwen 2 7bQwen 2 72bGPT-4o MiniGemini 1.5 Flash
CommonsenseQA0.70.91.80.10.20.10.00.00.03.4
AGIEval LSAT LR0.60.80.40.01.43.10.80.00.00.6
AGIEval LSAT AR2.29.13.90.911.73.03.51.70.01.3
AGIEval LSAT RC7.85.90.00.01.99.32.60.00.02.2
ContextHub Deductive L10.20.00.20.00.00.00.00.00.00.3
ContextHub Deductive L20.90.00.20.00.00.00.00.00.00.3
ContextHub Abductive L10.80.00.00.00.00.00.00.00.00.0
ContextHub Abductive L23.10.05.30.10.00.20.00.00.00.7
MuSR Murder Mysteries-1.01.20.00.00.40.80.00.00.014.0
MuSR Team Allocations-1.02.40.00.00.00.00.80.00.00.4
MuSR Object Placements-1.00.40.00.01.20.40.00.00.00.0
MMLU0.60.81.10.21.50.70.30.20.22.5
MMLU Pro0.61.98.52.114.11.81.90.81.13.9
GPQA-1.012.110.30.912.96.05.63.30.013.6
MATH1.56.88.22.411.12.62.91.10.51.8
GSM8k0.81.31.00.10.50.50.10.00.10.1
", + "bbox": [ + 174, + 642, + 823, + 845 + ], + "page_idx": 34 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 34 + }, + { + "type": "page_number", + "text": "35", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 34 + }, + { + "type": "table", + "img_path": "images/c4e923459f6217aa6b13c4e35c6231eb859eeb4e0beb0ba546cb86c3e215e28b.jpg", + "table_caption": [ + "Table 13: Average character index of where the answer span begins in a generated response for each dataset and model pair for the zero-shot direct answer setting. We use these numbers as a proxy for the model following instructions (i.e. generating reasoning before an answer). Prompt modifications were made to ensure CoT prompts resulted in longer generations and direct answer prompts led to short generations." + ], + "table_footnote": [], + "table_body": "
Zero-shot Direct Answer Span Location By Character Index
datasetLlama 2 7bMistral 7bLlama 3.1 8bLlama 3.1 70bGemma 2 9bPhi-3 Small 8kQwen 2 7bQwen 2 72bGPT-4o MiniGPT-4oClaude-3 HaikuClaude-3.5 SonnetGemini 1.5 FlashGemini 1.5 Pro
CommonsenseQA98278881088107788
StrategyQA444527444444464488424187
SiQA888888298886688
PiQA788888258884588
Winogrande89888898895488
Arc Easy98888898887788
Arc Challenge88888898887788
AGIEval LSAT LR2524242424242524432125252626
AGIEval LSAT AR2524242424242624482325252627
AGIEval LSAT RC2524242424242524311825252625
ContextHub Deductive L11919191920191919191920201920
ContextHub Deductive L21919191919191919191920201919
ContextHub Abductive L11919191920191919191920201919
ContextHub Abductive L21919191920191919191920201919
MuSR Minder Mysteries882788888886488
MuSR Team Allocations272219192723262288302088
MuSR Object Placements882788888887688
MMLU1918191920181818191919191920
MMLU Pro2019381921191920191920201919
GPQA1919191921191919191920201920
MATH3031282830303333282831292828
GSM8k2229302828372428282829282828
GSM8k-Hard95711219134020788888
Folio3988888311381656870
BigBench-Hard3922252126322926281928281016
", + "bbox": [ + 173, + 239, + 823, + 450 + ], + "page_idx": 35 + }, + { + "type": "table", + "img_path": "images/989af41e1c80832f033f078ff46f63f0b983872addc5ffc5c0c7e90bfea2e9d1.jpg", + "table_caption": [ + "Table 14: Average character index of where the answer span begins in a generated response for each dataset and model pair for the zero-shot CoT setting. We use these numbers as a proxy for the model following instructions (i.e. generating reasoning before an answer). Prompt modifications were made to ensure CoT prompts resulted in longer generations and direct answer prompts led to short generations." + ], + "table_footnote": [], + "table_body": "
Zero-shot CoT Answer Span Location By Character Index
datasetLlama 2 7bMistral 7bLlama 3.1 8bLlama 3.1 70bGemma 2 9bPhi-3 Small 8kQwen 2 7bQwen 2 72bGPT-4o MiniGPT-4oClaude-3 HaikuClaude-3.5 SonnetGemini 1.5 FlashGemini 1.5 Pro
CommonsenseQA441564845123723646657734189910866261103214165
StrategyQA726434996113126746036335869210337541158256195
SiQA56942384196523552847242084710946021016196169
PiQA6994558699142075324473646839355781092200150
Winogrande377324645694187326391298634750408889200173
Arc Easy6845811154131936761053435599012397891222340231
Arc Challenge76364411781316422596571387102012698281240372267
AGIEval LSAT LR205313241163167552468915607689499981561728906886
AGIEval LSAT AR1377179114222182712102718191264123011511202849817871
AGIEval LSAT RC1977103211031575739590117066097310791628786703709
ContextHub Deductive L1694368759711383327539402540580542556320254
ContextHub Deductive L28424721095990614442789585840758777655515503
ContextHub Abductive L1577461747879464440754638788879683594368325
ContextHub Abductive L28616001270122968657197685611151113894894601551
MuSR Murder Mysteries4951592195818471210124612411718196119651671175913491213
MuSR Team Allocations12121845229423101513143320212213256226981479185615961607
MuSR Object Placements917625135412666956419048191593153612101455616429
MMLU834512663622503277497407400461447409630413
MMLU Pro1371513788716640518954699926940590653660774
GPQA10347789179018065001018628541666486472981735
MATH7421118122211797486701189114511251153677675679698
GSM8k57263783471945352170964510481035708680541437
GSM8k-Hard916939102710695557661083105313501266594815605512
Folio72476514791379733668919488128515839071194934492
BigBench-Hard596230876861429349315443877973545863455346
", + "bbox": [ + 173, + 654, + 823, + 863 + ], + "page_idx": 35 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 35 + }, + { + "type": "page_number", + "text": "36", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 35 + }, + { + "type": "table", + "img_path": "images/f041762e1b1b8186aa50ecc657eda52ca9c55269424cae98517cd3081ef1ef11.jpg", + "table_caption": [ + "Table 15: Average character index of where the answer span begins in a generated response for each dataset and model pair for the few-shot direct answer setting. We use these numbers as a proxy for the model following instructions (i.e. generating reasoning before an answer). Prompt modifications were made to ensure CoT prompts resulted in longer generations and direct answer prompts led to short generations." + ], + "table_footnote": [], + "table_body": "
Few-shot Direct Answer Span Location By Character Index
datasetLlama 2 7bMistral 7bLlama 3.1 8bLlama 3.1 70bGemma 2 9bPhi-3 Small 8kQwen 2 7bQwen 2 72bGPT-4o MiniGemini 1.5 Flash
CommonsenseQA8782788810888
AGIEval LSAT LR25242424242424243124
AGIEval LSAT AR25242424242424242724
AGIEval LSAT RC25242424242424242524
ContextHub Deductive L119191919191919191919
ContextHub Deductive L219191919191919191919
ContextHub Abductive L119191919191919191919
ContextHub Abductive L219191919191919191919
MuSR Murder Mysteries-18278888888
MuSR Team Allocations-12119192721272388
MuSR Object Placements-18278888888
MMLU19181919191818181919
MMLU Pro19193819202019191919
GPQA-1191919191919191919
MATH29362929283030412828
GSM8k22232322222322242728
", + "bbox": [ + 174, + 243, + 823, + 446 + ], + "page_idx": 36 + }, + { + "type": "table", + "img_path": "images/eb5531d446ad6066758c182b7a8d1196c7c3548d2b180a7ba0af4c2d752f268a.jpg", + "table_caption": [ + "Table 16: Average character index of where the answer span begins in a generated response for each dataset and model pair for the few-shot CoT setting. We use these numbers as a proxy for the model following instructions (i.e. generating reasoning before an answer). Prompt modifications were made to ensure CoT prompts resulted in longer generations and direct answer prompts led to short generations." + ], + "table_footnote": [], + "table_body": "
Few-shot CoT Answer Span Location By Character Index
datasetLlama 2 7bMistral 7bLlama 3.1 8bLlama 3.1 70bGemma 2 9bPhi-3 Small 8kQwen 2 7bQwen 2 72bGPT-4o MiniGemini 1.5 Flash
CommonsenseQA301195470921145192280174219158
AGIEval LSAT LR1037510464539437359530599894523
AGIEval LSAT AR1024124788676857310257508351033670
AGIEval LSAT RC7993781312061641112412051086266
ContextHub Deductive L1383386406376359376388364416366
ContextHub Deductive L2736767829822823855612807884809
ContextHub Abductive L1301386428450431413541447575379
ContextHub Abductive L2709586967754804784829821905815
MuSR Murder Mysteries-1128016931702122513381246171919741419
MuSR Team Allocations-1219520872160162817552181215626321841
MuSR Object Placements-1907110412137069196769631351853
MMLU282266333245265260267243392218
MMLU Pro429397424411516425541325681396
GPQA-1848782774615711662703670594
MATH63070558464074752910748481261553
GSM8k374332352352398372415341651314
", + "bbox": [ + 174, + 657, + 823, + 859 + ], + "page_idx": 36 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 36 + }, + { + "type": "page_number", + "text": "37", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 36 + }, + { + "type": "table", + "img_path": "images/bd6f800abf29621ab441123c4bfa60d355e63a3ff1e1385c2ba36722b9a5b404.jpg", + "table_caption": [ + "Table 17: The top 3 slices benefiting the most from CoT across MMLU and MMLU Pro for Llama 3.1 8b and 70b. 6 out of 12 of these top slices directly contain \"math\" or \"mathematics.\" We dive deeper into each category subsequently and observe that the questions leading to improvements in the other categories are mathematical in nature as well." + ], + "table_footnote": [], + "table_body": "
MMLUMMLU Pro
ModelSubjectDirect (%)CoT (%)Err. Red. (%)NSubjectDirect (%)CoT (%)Err. Red. (%)N
Llama 3.1 8belementary mathematics46.888.478.1378math23.644.827.81350
Llama 3.1 8bhigh_school mathematics39.671.552.8270business29.445.623.0789
Llama 3.1 8bmiscellaneous83.989.937.3783physics27.941.418.81299
Llama 3.1 70belementary mathematics82.394.770.1378math44.568.342.91351
Llama 3.1 70bmedical_genetics93.097.057.1100business44.067.842.5789
Llama 3.1 70bhigh_school mathematics61.582.253.8270chemistry40.564.039.61132
", + "bbox": [ + 174, + 167, + 823, + 268 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "level. In doing so, we can test our hypotheses with much finer granularity than possible by relying on subjective groupings into tasks and categories.", + "bbox": [ + 169, + 295, + 823, + 325 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "Breakdown by the presence of equations We aim to design an instance-level classifier to determine if CoT is expected to help on a question or not. That is, we want a function $g: \\mathbf{q} \\to \\{0,1\\}$ where $g(\\mathbf{q})$ returns 1 if $\\text{extract}(\\tilde{\\mathbf{y}}_{\\text{cot}}) = \\mathbf{y}^*$ and $\\text{extract}(\\tilde{\\mathbf{y}}_{da}) \\neq \\mathbf{y}^*$ where $\\mathbf{y}^*$ is the gold answer to $\\mathbf{q}$ . We explored different forms of $g$ ; however, we ultimately found it most effective to use a classifier $g: (\\mathbf{q}, \\tilde{\\mathbf{y}}_{\\text{cot}}) \\to \\{0,1\\}$ which also consults the chain-of-thought produced by the model. This allows us to featurize how the LM solves the problem, particularly whether it uses symbolic reasoning or not.", + "bbox": [ + 169, + 339, + 823, + 436 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "We find that $g$ can be implemented with a single feature: does $\\mathbf{q}$ or $\\tilde{\\mathbf{y}}_{\\mathrm{cot}}$ contain a “=”? The “=” token very strongly indicates the presence of equations in the problem or its solution, which turn out to be a strong hallmark of symbolic reasoning.", + "bbox": [ + 169, + 443, + 823, + 487 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "We plot the overall CoT delta (performance of CoT minus the performance of direct answer) for both MMLU and MMLU Pro across multiple models between two bins according to this classifier $g$ , labeled as \"With =\" and \"Without =\", in Figure 4. We also report the amount of performance gain explained by questions having an \" ==\" vs. not in Appendix G.1. We find that the majority of the performance gain from CoT on MMLU and MMLU Pro comes from questions that have an \" ==\" in the question or generated responses. Because \" ==\" are usually found in math problems, we equate this to CoT primarily benefiting MMLU and MMLU Pro on the math-related questions with very little to no gain (depending on the model) for non-math questions.", + "bbox": [ + 169, + 492, + 825, + 604 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "G.1 PERFORMANCE IMPACTS OF “=” ON MMLU AND MMLU PRO", + "text_level": 1, + "bbox": [ + 169, + 621, + 653, + 636 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "Tables 18 and 19 show the amount of total improvement from using CoT over direct prompting that can be explained by the presence of “=” on MMLU and MMLU Pro over multiple models.", + "bbox": [ + 169, + 647, + 823, + 676 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "H FULL RESULTS OF EVALUATIONS ON FORMAL REASONING DATASETS", + "text_level": 1, + "bbox": [ + 169, + 696, + 779, + 712 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "As discussed in Section 5, we include detailed evaluation results of few-shot direct answer, few-shot CoT, direct answer solver, CoT solver, and tool-augmented prompting in Table 20. The unparseable rate stands for the rate of unparseable model responses that either fail to pass our answer extraction parser (for all methods except tool-augmented prompting) or fail to be executed by symbolic solvers. 
For FOLIO and ContextHub, we compute the accuracy by making a random guess for the unparseable responses; for GSM8K and GSM8K-Hard, we consider the unparseable responses as incorrect.", + "bbox": [ + 169, + 727, + 823, + 825 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "We note that all models have a low unparseable rate $(< 10\\%)$ for all methods except tool-augmented prompting. By manually inspecting the outputs, we observe that the high unparseable rate for some models with tool-augmented prompting is caused by these models generating Python programs or", + "bbox": [ + 169, + 832, + 823, + 877 + ], + "page_idx": 37 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 37 + }, + { + "type": "page_footnote", + "text": "6We explored implementing $g$ with a logistic regression classifier with tfidf features over the $(\\mathbf{q},\\tilde{\\mathbf{y}}_{\\mathrm{cot}})$ pairs, trained over a subset of the data from MMLU and MMLU Pro. This classifier actually allowed us to discover the “=” feature, but its accuracy did not exceed the accuracy of that single feature.", + "bbox": [ + 169, + 883, + 823, + 925 + ], + "page_idx": 37 + }, + { + "type": "page_number", + "text": "38", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 37 + }, + { + "type": "table", + "img_path": "images/6febf3ae283fbde75099e8e87cdca9a4231bae7768d586d6e567998e8662d802.jpg", + "table_caption": [ + "Table 18: Total CoT deltas on MMLU broken down by the total gain from questions and responses with an “=” vs. without an “=”" + ], + "table_footnote": [], + "table_body": "
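To make this scoring convention concrete, the following is a minimal Python sketch of the accuracy computation; it is an illustration under assumed record fields ("dataset", "parsed_answer", "choices", "gold"), not the paper's actual evaluation code.

import random

# Scoring rule described above (assumed record layout):
# FOLIO / ContextHub are multiple-choice-style, so an unparseable
# response is scored as a uniform random guess over the options;
# GSM8K / GSM8K-Hard are open-ended, so it simply counts as incorrect.
RANDOM_GUESS_DATASETS = {"FOLIO", "ContextHub"}

def is_correct(record, rng):
    answer = record["parsed_answer"]  # None when extraction or solving failed
    if answer is None:
        if record["dataset"] in RANDOM_GUESS_DATASETS:
            answer = rng.choice(record["choices"])  # uniform random guess
        else:
            return False  # GSM8K(-Hard): unparseable counts as wrong
    return answer == record["gold"]

def accuracy(records, seed=0):
    rng = random.Random(seed)
    return sum(is_correct(r, rng) for r in records) / len(records)

In expectation, the random-guess convention credits unparseable multiple-choice responses with chance-level accuracy rather than zero.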
ModelTotal CoT DeltaCoT delta w/ =CoT delta w/o =Perf. Gain w/ =Fraction of N w/ =
Llama 2 7b6.00.65.49.8%10.9%
Mistral 7b4.11.22.928.6%9.8%
Llama 3.1 8b5.52.92.652.9%9.6%
Llama 3.1 70b1.91.80.194.0%10.6%
Gemma 2 9b2.62.00.678.5%10.0%
Phi-3 Small 8k3.11.51.747.4%8.3%
Qwen 2 7b2.53.0-0.5100.0%9.8%
Qwen 2 72b3.52.41.167.8%9.6%
GPT-4o Mini5.23.51.766.9%10.5%
GPT-4o4.22.41.857.6%10.3%
Claude-3 Haiku3.72.41.364.4%9.3%
Claude-3.5 Sonnet3.22.30.972.1%10.7%
Gemini 1.5 Flash3.01.71.259.0%10.1%
Gemini 1.5 Pro1.91.00.951.9%9.6%
", + "bbox": [ + 174, + 223, + 823, + 422 + ], + "page_idx": 38 + }, + { + "type": "table", + "img_path": "images/138c71e9121096b8b253e0e24407f383622f4841a8cbdaa5f8eea499d47f6c07.jpg", + "table_caption": [ + "Table 19: Total CoT deltas on MMLU Pro broken down by the total gain from questions and responses with an “=” vs. without an “=”" + ], + "table_footnote": [], + "table_body": "
ModelTotal CoT DeltaCoT delta w/ =CoT delta w/o =Perf. Gain w/ =Fraction of N w/ =
Llama 2 7b1.61.30.379.6%43.6%
Mistral 7b3.81.91.950.7%41.8%
Llama 3.1 8b12.410.02.480.8%35.2%
Llama 3.1 70b11.411.10.397.6%39.6%
Gemma 2 9b7.67.40.297.9%40.2%
Phi-3 Small 8k11.69.91.785.7%42.7%
Qwen 2 7b10.08.91.188.6%41.6%
Qwen 2 72b19.016.12.984.7%41.4%
GPT-4o Mini20.618.42.389.0%44.0%
GPT-4o17.717.10.696.7%44.1%
Claude-3 Haiku8.77.80.990.1%42.0%
Claude-3.5 Sonnet16.214.81.391.9%43.4%
Gemini 1.5 Flash12.911.81.191.3%42.3%
Gemini 1.5 Pro10.08.61.485.7%41.8%
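As a concrete illustration of how the entries in Tables 18 and 19 are derived, below is a minimal Python sketch of the single-feature classifier g described in Appendix G (does the question or the generated chain of thought contain an "="?) and the resulting decomposition of the CoT delta. The record fields ("question", "cot_response", and 0/1 correctness flags "cot_correct", "da_correct") are hypothetical, not the authors' implementation.

def has_eq(record):
    # g: flag instances whose question text or generated chain of
    # thought contains "=", a strong proxy for equations and
    # symbolic reasoning.
    return "=" in record["question"] or "=" in record["cot_response"]

def cot_delta_breakdown(records):
    n = len(records)
    with_eq = [r for r in records if has_eq(r)]
    without_eq = [r for r in records if not has_eq(r)]

    def delta(group):
        # Contribution of this bin to the overall CoT-minus-direct
        # accuracy gap, in percentage points over the full dataset.
        return 100.0 * sum(r["cot_correct"] - r["da_correct"] for r in group) / n

    d_with, d_without = delta(with_eq), delta(without_eq)
    total = d_with + d_without
    return {
        "total_cot_delta": total,              # "Total CoT Delta" column
        "cot_delta_with_eq": d_with,           # "CoT delta w/ ="
        "cot_delta_without_eq": d_without,     # "CoT delta w/o ="
        "perf_gain_with_eq": d_with / total if total else float("nan"),
        "fraction_with_eq": len(with_eq) / n,  # "Fraction of N w/ ="
    }

By construction the two per-bin deltas sum to the total CoT delta, matching the additive column layout of Tables 18 and 19 (e.g., for Llama 3.1 70b on MMLU, 1.8 + 0.1 = 1.9).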
", + "bbox": [ + 174, + 637, + 823, + 837 + ], + "page_idx": 38 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 38 + }, + { + "type": "page_number", + "text": "39", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Table 20: Performance and unparseable rates for few-shot direct answer, few-shot CoT, Plan + Direct Solver, Plan + CoT Solver, and Plan + Tool Solver Solver. \"Acc.\" stands for accuracy and \"% Unp.\" stands for the rate of unparseable model responses that either fail to pass our answer extraction parser (for all methods except Plan + Tool Solver prompting) or fail to be executed by symbolic solvers. For FOLIO and ContextHub, we compute the accuracy by making a random guess for the unparseable responses; for GSM8K and GSM8K-Hard, we consider the unparseable responses as incorrect.", + "bbox": [ + 169, + 99, + 823, + 198 + ], + "page_idx": 39 + }, + { + "type": "table", + "img_path": "images/96e75529ee65ce957f59c29c1af838549364a5b33f15b4fab321ee40fdc96cd9.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Dataset | Method | Mistral 7b | Llama 3.1 8b | Llama 3.1 70b | GPT-4o Mini
 | | Acc. | % Unp. | Acc. | % Unp. | Acc. | % Unp. | Acc. | % Unp.
GSM8K | Direct Answer | 12.5 | 0.1 | 20.1 | 0.5 | 39.1 | 0.0 | 32.8 | 0.0
GSM8K | CoT | 56.2 | 1.4 | 86.4 | 1.0 | 96.1 | 0.1 | 94.2 | 0.1
GSM8K | Plan + CoT Solver | 45.0 | 1.0 | 78.7 | 0.4 | 94.7 | 0.0 | 92.0 | 0.1
GSM8K | Plan + Direct Solver | 10.6 | 0.1 | 19.6 | 0.1 | 42.2 | 0.0 | 39.3 | 0.0
GSM8K | Plan + Tool Solver | 59.8 | 8.6 | 80.3 | 1.3 | 94.4 | 0.4 | 90.5 | 1.5
GSM8K-Hard | Direct Answer | 2.9 | 0.7 | 4.4 | 0.6 | 12.8 | 0.7 | 12.3 | 7.6
GSM8K-Hard | CoT | 20.3 | 5.0 | 32.4 | 9.6 | 47.8 | 4.4 | 52.2 | 0.5
GSM8K-Hard | Plan + CoT Solver | 18.7 | 2.6 | 32.4 | 1.3 | 49.7 | 0.6 | 51.5 | 0.3
GSM8K-Hard | Plan + Direct Solver | 3.0 | 0.5 | 5.5 | 0.8 | 15.8 | 0.1 | 17.4 | 0.3
GSM8K-Hard | Plan + Tool Solver | 44.2 | 8.9 | 57.9 | 1.2 | 68.0 | 0.5 | 70.4 | 1.4
ContextHub Deductive L1 | Direct Answer | 59.2 | 2.8 | 23.0 | 0.0 | 50.0 | 0.0 | 44.3 | 0.0
ContextHub Deductive L1 | CoT | 46.2 | 0.2 | 73.0 | 0.2 | 67.5 | 0.0 | 59.2 | 0.0
ContextHub Deductive L1 | Plan + CoT Solver | 49.5 | 0.0 | 64.8 | 0.0 | 65.5 | 0.0 | 63.2 | 0.0
ContextHub Deductive L1 | Plan + Direct Solver | 45.8 | 3.0 | 55.8 | 0.0 | 53.5 | 0.0 | 56.2 | 0.0
ContextHub Deductive L1 | Plan + Tool Solver | 68.8 | 27.8 | 84.2 | 11.8 | 91.7 | 9.8 | 90.7 | 7.8
ContextHub Abductive L1 | Direct Answer | 21.7 | 2.8 | 36.1 | 0.0 | 58.9 | 0.0 | 59.2 | 0.0
ContextHub Abductive L1 | CoT | 23.9 | 0.0 | 40.0 | 0.0 | 62.2 | 0.0 | 76.9 | 0.0
ContextHub Abductive L1 | Plan + CoT Solver | 38.3 | 0.0 | 42.5 | 0.0 | 65.6 | 0.0 | 74.2 | 0.0
ContextHub Abductive L1 | Plan + Direct Solver | 46.9 | 3.9 | 33.3 | 0.3 | 63.1 | 0.0 | 61.7 | 0.0
ContextHub Abductive L1 | Plan + Tool Solver | 59.2 | 35.8 | 70.8 | 9.7 | 73.9 | 4.2 | 74.7 | 10.3
FOLIO | Direct Answer | 56.2 | 12.3 | 59.6 | 0.0 | 69.5 | 0.0 | 64.0 | 0.0
FOLIO | CoT | 53.7 | 1.5 | 56.7 | 2.5 | 72.4 | 2.0 | 70.4 | 0.0
FOLIO | Plan + CoT Solver | 53.7 | 0.0 | 55.7 | 0.0 | 73.9 | 0.5 | 70.4 | 0.0
FOLIO | Plan + Direct Solver | 52.7 | 0.0 | 54.2 | 0.0 | 72.9 | 0.0 | 63.5 | 0.0
FOLIO | Plan + Tool Solver | 48.8 | 46.8 | 54.2 | 28.6 | 70.0 | 16.7 | 62.6 | 25.1
", + "bbox": [ + 179, + 209, + 818, + 498 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "formal specifications that fail to follow the format of the formal language (Python or z3) and that lead to execution errors. Such an issue is particularly severe for the smaller models. However, we note that despite the high unparseable rate, the overall accuracy of these models with tool augmentation is still on par with or outperforms other methods.", + "bbox": [ + 169, + 523, + 823, + 582 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "I DISCUSSION OF LIMITATIONS", + "text_level": 1, + "bbox": [ + 171, + 599, + 450, + 616 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "I.1 LONG HORIZON PLANNING", + "text_level": 1, + "bbox": [ + 171, + 631, + 405, + 645 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "One set of tasks where symbolic reasoning helps substantially that our experiments haven't covered as thoroughly (with the exception of BiGGen-Bench) is long-horizon planning (Valmeekam et al., 2023; Xie et al., 2024; Gundawar et al., 2024; Valmeekam et al., 2024). There are two reasons we don't treat it here. First, we are primarily interested in tasks that are conveyed in language, and we see less complex planning in language-only tasks. Second, there has already been a large debate on the effectiveness of CoT, both pro (Huang et al., 2022; Hu et al., 2023) and against (Valmeekam et al., 2023; Kambhampati, 2024; Kambhampati et al., 2024b; Stechly et al., 2024a; Guan et al., 2024; Verma et al., 2024; Gundawar et al., 2024; Stechly et al., 2024b) using CoT and its derivatives like tree-of-thought (Yao et al., 2023; Kang et al., 2024), that has resulted in complex systems to help solve planning problems better. While story generation and interpretation involve elements of planning with natural language (Peng et al., 2022; Karpinska et al., 2024), such tasks are not conventionally formalized and benchmarked as planning and reasoning.", + "bbox": [ + 169, + 657, + 823, + 825 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "I.2 DATASET CONTAMINATION", + "text_level": 1, + "bbox": [ + 171, + 842, + 398, + 856 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "One limitation of our study is the presence of possible data contamination: it is unknown which benchmarks may have been explicitly pre-trained on by language models. If a model had memorized answers to benchmark questions, we would expect direct answering to close some of the gap with CoT, as the model can just reproduce a known answer rather than deriving it from scratch. We argue", + "bbox": [ + 169, + 867, + 823, + 925 + ], + "page_idx": 39 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 39 + }, + { + "type": "page_number", + "text": "40", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "there are four reasons that our general conclusions are still trustworthy. First, we use a range of language model scales, including small models that have less capacity to memorize. Second, datasets with poor direct answering performance like GSM8K-Hard are unlikely to have been substantially memorized. Third, the inclusion of recent datasets such as MuSR (Sprague et al., 2024) and BiGGen Bench (Kim et al., 2024) helps to defray this risk. 
Fourth, our survey of the literature includes papers that were submitted to conferences in 2023, representing a range of older LLMs trained at various times.", + "bbox": [ + 174, + 103, + 823, + 200 + ], + "page_idx": 40 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 173, + 32, + 477, + 46 + ], + "page_idx": 40 + }, + { + "type": "page_number", + "text": "41", + "bbox": [ + 488, + 948, + 506, + 959 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "J EXAMPLE PROMPTS", + "text_level": 1, + "bbox": [ + 171, + 102, + 372, + 118 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "We will release all prompts and model responses on our Huggingface repo. We list a few prompt response pairs here in this section.", + "bbox": [ + 169, + 250, + 826, + 280 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "AGIEval LSAT AR zero-shot CoT prompt for Llama 3.1 70B", + "text_level": 1, + "bbox": [ + 197, + 462, + 602, + 478 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "<|start_header_id|>user<|end_header_id|>", + "bbox": [ + 197, + 491, + 529, + 507 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Explain your reasoning step-by-step for each question before answering. Give your final answer in the format \\The answer is therefore $$ \". Failure to comply with the answer formatting will result in no credit.", + "bbox": [ + 194, + 518, + 787, + 575 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Of the eight students\\u2014George, Helen, Irving, Kyle, Lenore, Nina, Olivia, and Robert\\u2014in a seminar, exactly six will give individual oral reports during three consecutive days\\u2014Monday, Tuesday, and Wednesday. Exactly two reports will be given each day\\u2014one in the morning and one in the afternoon\\u2014according to the following conditions: Tuesday is the only day on which George can give a report. Neither Olivia nor Robert can give an afternoon report. If Nina gives a report, then on the next day Helen and Irving must both give reports, unless Nina's report is given on Wednesday.Q: Which one of the following could be the schedule of the students' reports? Answer Choices: (A)Mon. morning: Helen; Mon. afternoon: Robert Tues. morning: Olivia; Tues. afternoon: Irving Wed. morning: Lenore; Wed. afternoon: Kyle (B)Mon. morning: Irving; Mon. afternoon: Olivia Tues. morning: Helen; Tues. afternoon: Kyle Wed. morning: Nina; Wed. afternoon: Lenore (C)Mon. morning: Lenore; Mon. afternoon: Helen Tues. morning: George; Tues. afternoon: Kyle Wed. morning: Robert; Wed. afternoon: Irving (D)Mon. morning: Nina; Mon. afternoon: Helen Tues. morning: Robert; Tues. afternoon: Irving Wed. morning: Olivia; Wed. afternoon: Lenore (E)Mon. morning: Olivia; Mon. afternoon: Nina Tues. morning: Irving; Tues. afternoon: Helen Wed. morning: Kyle; Wed. 
afternoon: George Let's think step by step.<|eot_id|><|start_header_id|>assistant<|end_header_id|>", + "bbox": [ + 196, + 575, + 790, + 881 + ], + "page_idx": 41 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 41 + }, + { + "type": "page_number", + "text": "42", + "bbox": [ + 488, + 948, + 509, + 960 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "AGIEval LSAT AR zero-shot CoT response for Llama 3.1 70B", + "text_level": 1, + "bbox": [ + 197, + 104, + 612, + 119 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "To determine which one of the following could be the schedule of the students' reports, let's analyze each answer choice in light of the given conditions. First, let's recall the conditions: 1. Exactly six students will give reports over three days. 2. Tuesday is the only day George can give a report. 3. Neither Olivia nor Robert can give an afternoon report. 4. If Nina gives a report, then on the next day Helen and Irving must both give reports, unless Nina's report is given on Wednesday. Let's evaluate each answer choice: (A) Mon. morning: Helen; Mon. afternoon: Robert Tues. morning: Olivia; Tues. afternoon: Irving Wed. morning: Lenore; Wed. afternoon: Kyle - This choice does not violate any conditions directly. However, we need to check if it aligns with all the rules, especially considering the Nina rule, which isn't directly applicable here since Nina isn't scheduled. George's requirement is also not met since George isn't scheduled on Tuesday. (B) Mon. morning: Irving; Mon. afternoon: Olivia Tues. morning: Helen; Tues. afternoon: Kyle Wed. morning: Nina; Wed. afternoon: Lenore - This choice violates the condition that if Nina gives a report, then on the next day Helen and Irving must both give reports, unless Nina's report is given on Wednesday. Since Nina's report is on Wednesday, this condition is technically not violated, but we need to ensure all other conditions are met. George is not scheduled on Tuesday, which is a requirement. (C) Mon. morning: Lenore; Mon. afternoon: Helen Tues. morning: George; Tues. afternoon: Kyle Wed. morning: Robert; Wed. afternoon: Irving - This choice meets the condition that George gives a report on Tuesday. It also does not violate any other conditions directly. However, we need to ensure that the Nina rule isn't applicable or is met, which in this case, it's not applicable since Nina isn't scheduled. (D) Mon. morning: Nina; Mon. afternoon: Helen Tues. morning: Robert; Tues. afternoon: Irving Wed. morning: Olivia; Wed. afternoon: Lenore - This choice violates the condition that if Nina gives a report, then on the next day Helen and Irving must both give reports. Here, Helen is already scheduled on the same day as Nina, and while Irving is scheduled the next day, the condition specifies both Helen and Irving must give reports the next day, which is not the case since Helen's report is on the same day as Nina's. (E) Mon. morning: Olivia; Mon. afternoon: Nina Tues. morning: Irving; Tues. afternoon: Helen Wed. morning: Kyle; Wed. afternoon: George - This choice violates the condition that Tuesday is the only day George can give a report, as George is scheduled on Wednesday. 
Given the analysis, the only choice that does not directly violate any of the given conditions and meets the specific requirements (especially considering the placement of George and the handling of the Nina rule where applicable) is: The answer is therefore (C).",
+ "bbox": [197, 133, 799, 700],
+ "page_idx": 42
+ },
+ {
+ "type": "header",
+ "text": "Published as a conference paper at ICLR 2025",
+ "bbox": [173, 32, 478, 47],
+ "page_idx": 42
+ },
+ {
+ "type": "page_number",
+ "text": "43",
+ "bbox": [488, 946, 508, 959],
+ "page_idx": 42
+ },
+ {
+ "type": "code",
+ "sub_type": "code",
+ "code_caption": [
+ "MATH zero-shot CoT prompt for Llama 3.1 70B"
+ ],
+ "code_body": "<|begin_of_text|><|start_header_id|>user<|end_header_id|> \nSolve the following math problem efficiently and clearly: \n- For simple problems (2 steps or fewer): \nProvide a concise solution with minimal explanation. \n- For complex problems (3 steps or more): \nUse this step-by-step format: \n## Step 1: [Concise description] \n[Brief explanation and calculations] \n## Step 2: [Concise description] \n[Brief explanation and calculations] \n... \nRegardless of the approach, always conclude with: \nTherefore, the final answer is: $\\boxed{answer}$. I hope it is correct. \nWhere [answer] is just the final number or expression that solves the problem. \nProblem: Chris labels every lattice point in the coordinate plane with the square of the distance from the point to the origin (a lattice point is a point such that both of its coordinates are integers). How many times does he label a point with the number $25$?<|eot_id|><|start_header_id|>assistant<|end_header_id|>",
+ "guess_lang": "txt",
+ "bbox": [196, 132, 799, 523],
+ "page_idx": 43
+ },
+ {
+ "type": "code",
+ "sub_type": "code",
+ "code_caption": [
+ "MATH zero-shot CoT response for Llama 3.1 70B"
+ ],
+ "code_body": "## Step 1: Understand the problem Chris labels each lattice point in the coordinate plane with the square of the distance from the point to the origin. We need to find how many times he labels a point with the number 25. ## Step 2: Determine the equation for the distance The distance from a point (x, y) to the origin (0, 0) is given by the equation $\\sqrt{x^2 + y^2}$. Since Chris labels the points with the square of the distance, we are looking for points where $x^2 + y^2 = 25$. ## Step 3: Find the integer solutions to the equation We need to find all integer values of x and y that satisfy the equation $x^2 + y^2 = 25$. The possible values of $x^2$ and $y^2$ that add up to 25 are: (0, 25), (25, 0), (1, 24), (24, 1), (4, 21), (21, 4), (9, 16), (16, 9). ## Step 4: Count the number of solutions However, we need to consider that some of these pairs have negative counterparts, and we should also count those. For example, (0, 5) and (0, -5) are also solutions, as well as (-5, 0) and (5, 0). Similarly, (-3, 4), (3, -4), (-4, 3), and (4, -3) are also solutions. So, we have a total of 12 solutions. 
The final answer is: $\\boxed{12}$.",
+ "guess_lang": "txt",
+ "bbox": [196, 685, 799, 909],
+ "page_idx": 43
+ },
+ {
+ "type": "header",
+ "text": "Published as a conference paper at ICLR 2025",
+ "bbox": [173, 32, 478, 47],
+ "page_idx": 43
+ },
+ {
+ "type": "page_number",
+ "text": "44",
+ "bbox": [488, 946, 509, 959],
+ "page_idx": 43
+ },
+ {
+ "type": "text",
+ "text": "CommonsenseQA zero-shot direct answer prompt for Llama 3.1 70B",
+ "text_level": 1,
+ "bbox": [197, 104, 653, 119],
+ "page_idx": 44
+ },
+ {
+ "type": "text",
+ "text": "<|start_header_id|>system<|end_header_id|>",
+ "bbox": [197, 133, 545, 148],
+ "page_idx": 44
+ },
+ {
+ "type": "text",
+ "text": "You are a helpful AI assistant that will answer reasoning questions. You will always say at the end \\\"Answer: \\\". You must only pick one answer and you must end your response with \\\"Answer: \\\" every time!<|eot_id|><|start_header_id|>user<|end_header_id|>",
+ "bbox": [197, 161, 789, 231],
+ "page_idx": 44
+ },
+ {
+ "type": "text",
+ "text": "Question: Where would you find magazines along side many other printed works?",
+ "bbox": [196, 243, 772, 272],
+ "page_idx": 44
+ },
+ {
+ "type": "text",
+ "text": "Answer Choices: \n(A) doctor \n(B) bookstore \n(C) market \n(D) train station \n(E) mortuary",
+ "bbox": [197, 286, 356, 369],
+ "page_idx": 44
+ },
+ {
+ "type": "text",
+ "text": "Only write the answer. Write the answer in the following format: \"Answer: <Your answer>\". You must always give an answer. You may only pick one answer choice, if you think multiple are correct only pick the one you think is best.<|eot_id|><|start_header_id|>assistant<|end_header_id|>",
+ "bbox": [196, 382, 795, 439],
+ "page_idx": 44
+ },
+ {
+ "type": "text",
+ "text": "CommonsenseQA zero-shot direct answer response for Llama 3.1 70B",
+ "text_level": 1,
+ "bbox": [197, 488, 661, 505],
+ "page_idx": 44
+ },
+ {
+ "type": "text",
+ "text": "B",
+ "bbox": [199, 518, 210, 529],
+ "page_idx": 44
+ },
+ {
+ "type": "header",
+ "text": "Published as a conference paper at ICLR 2025",
+ "bbox": [173, 32, 478, 47],
+ "page_idx": 44
+ },
+ {
+ "type": "page_number",
+ "text": "45",
+ "bbox": [488, 946, 508, 959],
+ "page_idx": 44
+ }
+] \ No newline at end of file
diff --git a/2025/To CoT or not to CoT_ Chain-of-thought helps mainly on math and symbolic reasoning/78080855-33d6-4037-9b8c-edc307a2e575_model.json b/2025/To CoT or not to CoT_ Chain-of-thought helps mainly on math and symbolic reasoning/78080855-33d6-4037-9b8c-edc307a2e575_model.json
new file mode 100644
index 0000000000000000000000000000000000000000..c468216428e6bb53bbfbaf4974f521da344cf613
--- /dev/null
+++ b/2025/To CoT or not to CoT_ Chain-of-thought helps mainly on math and symbolic reasoning/78080855-33d6-4037-9b8c-edc307a2e575_model.json
@@ -0,0 +1,4767 @@
+[
+ [
+ {
+ "type": "header",
+ "bbox": [0.173, 0.033, 0.48, 0.049],
+ "angle": 0,
+ "content": "Published as a conference paper at ICLR 2025"
+ },
+ {
+ "type": "title",
+ "bbox": [0.172, 0.1, 0.825, 0.147],
+ "angle": 0,
+ "content": "TO COT OR NOT TO COT? 
CHAIN-OF-THOUGHT HELPS MAINLY ON MATH AND SYMBOLIC REASONING" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.17, + 0.721, + 0.216 + ], + "angle": 0, + "content": "Zayne Sprague\\*, Fangcong Yin\\*, Juan Diego Rodriguez\\*, Dongwei Jiang\\*, Manya Wadhwa\\*, Prasann Singhal\\*, Xinyu Zhao\\*, Xi Ye\\(^{\\text{心}}\\), Kyle Mahowald\\*, Greg Durrett\\*" + }, + { + "type": "text", + "bbox": [ + 0.181, + 0.228, + 0.758, + 0.259 + ], + "angle": 0, + "content": "\\(\\spadesuit\\)The University of Texas at Austin, \\(\\diamond\\)Johns Hopkins University, \\(\\diamond\\)Princeton University zaynesprague@utexas.edu" + }, + { + "type": "title", + "bbox": [ + 0.451, + 0.294, + 0.548, + 0.309 + ], + "angle": 0, + "content": "ABSTRACT" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.326, + 0.769, + 0.549 + ], + "angle": 0, + "content": "Chain-of-thought (CoT) via prompting is the de facto method for eliciting reasoning capabilities from large language models (LLMs). But for what kinds of tasks is this extra \"thinking\" really helpful? To analyze this, we conducted a quantitative meta-analysis covering over 100 papers using CoT and ran our own evaluations of 20 datasets across 14 models. Our results show that CoT gives strong performance benefits primarily on tasks involving math or logic, with much smaller gains on other types of tasks. On MMLU, directly generating the answer without CoT leads to almost identical accuracy as CoT unless the question or model's response contains an equals sign, indicating symbolic operations and reasoning. Following this finding, we analyze the behavior of CoT on these problems by separating planning and execution and comparing against tool-augmented LLMs. Much of CoT's gain comes from improving symbolic execution, but it underperforms relative to using a symbolic solver. Our results indicate that CoT can be applied selectively, maintaining performance while saving inference costs. Furthermore, they suggest a need to move beyond prompt-based CoT to new paradigms that better leverage intermediate computation across the whole range of LLM applications1." + }, + { + "type": "image", + "bbox": [ + 0.178, + 0.557, + 0.825, + 0.797 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.807, + 0.828, + 0.892 + ], + "angle": 0, + "content": "Figure 1: Left: meta-analysis of CoT literature; each point is a reported delta of CoT over direct answering for some (LLM, task) pair. Right: average performance of using zero-shot CoT v.s. direct answer prompts across five general reasoning categories, covering 20 datasets with 14 LLMs evaluated on each. In both sets of results, math and other kinds of symbolic reasoning are the domains that consistently see substantial improvements from CoT (red dotted line indicates the mean improvement from CoT across experiments)." + }, + { + "type": "page_footnote", + "bbox": [ + 0.192, + 0.91, + 0.751, + 0.924 + ], + "angle": 0, + "content": "1Our code can be found at https://github.com/Zayne-sprague/To-CoT-or-not-to-CoT." 
+ },
+ {
+ "type": "page_number",
+ "bbox": [0.495, 0.949, 0.504, 0.96],
+ "angle": 0,
+ "content": "1"
+ }
+ ],
+ [
+ {
+ "type": "header",
+ "bbox": [0.173, 0.033, 0.48, 0.049],
+ "angle": 0,
+ "content": "Published as a conference paper at ICLR 2025"
+ },
+ {
+ "type": "title",
+ "bbox": [0.174, 0.103, 0.338, 0.119],
+ "angle": 0,
+ "content": "1 INTRODUCTION"
+ },
+ {
+ "type": "text",
+ "bbox": [0.17, 0.134, 0.827, 0.233],
+ "angle": 0,
+ "content": "Chain-of-thought (CoT) (Nye et al., 2022; Wei et al., 2022) has become a widely used prompting technique for eliciting reasoning from language models. CoT can provide human-readable explanations of how problems are solved (Joshi et al., 2023; Lanham et al., 2023), but most frequently it is invoked to improve an LLM's ability to answer complex questions via intermediate computation (Madaan & Yazdanbakhsh, 2022; Wang et al., 2023a; Dziri et al., 2023). Current post-training schemes for LLMs heavily infuse CoT capabilities into models: systems like ChatGPT or Llama 3.1 default to CoT when given reasoning problems (OpenAI, 2023; Dubey et al., 2024)."
+ },
+ {
+ "type": "text",
+ "bbox": [0.17, 0.239, 0.828, 0.365],
+ "angle": 0,
+ "content": "CoT has seen widespread usage, but it is most heavily explored in the domain of mathematical reasoning (Zhou et al., 2023a; Fu et al., 2023; Chae et al., 2024; Xu et al., 2024b; Qi et al., 2024). In fact, many \"reasoning\" methods for LLMs are evaluated only in the math domain; for instance, Lightman et al. (2024) frame their paper as \"complex multi-step reasoning\" and Mistral-Large2's release cited effort \"enhancing the model's reasoning capabilities\", but performance is only reported on GSM8K and MATH. CoT is reported to be effective across a wide range of studies, but many of these studies focus on a narrow slice of the task space. In areas beyond math, results show that CoT is not as useful (Kambhampati et al., 2024a) or can even hurt performance (Wang et al., 2024)."
+ },
+ {
+ "type": "text",
+ "bbox": [0.17, 0.371, 0.828, 0.54],
+ "angle": 0,
+ "content": "In this work, we aim to evaluate where prompt-based CoT helps and why. We begin with a systematic meta-analysis of recent literature that reports performance of CoT versus direct answering (DA). We then augment this picture by conducting experiments on 20 datasets and 14 contemporary LLMs across zero-shot and few-shot prompt settings. Finding 1: CoT only helps substantially on problems requiring mathematical, logical, or algorithmic reasoning. Figure 1 shows this holds both across the literature and our own experiments. We find only a few cases of large gain in other kinds of tasks, and many of these outliers feature some component of symbolic reasoning. For instance, on MMLU (Hendrycks et al., 2021a) and MMLU Pro (Wang et al., 2024), we analyze the improvements from CoT and find that CoT only gives benefit on math slices of the dataset. As much as \\(95\\%\\) of the total performance gain from CoT on MMLU is attributed to questions containing “\\(=\\)” in the question or generated output. For non-math questions, we find no features to indicate when CoT will help."
+ },
+ {
+ "type": "text",
+ "bbox": [0.17, 0.545, 0.828, 0.672],
+ "angle": 0,
+ "content": "How can we better understand why CoT improves on these questions and only these questions? 
The math and formal logical reasoning datasets we consider can be broken down into two stages of processing: a planning step (e.g., parsing a problem into equations) and an execution step (building intermediate outputs and working towards a solution) (Ye et al., 2023; Wang et al., 2023b; Sun et al., 2024). Finding 2: CoT primarily helps with the execution step that performs computation and symbolic manipulation, but falls short of what LLMs with tool augmentation can do. We find that LMs prompted with CoT can generate executable formal solution plans and execute those plans better than direct answering. But using LMs to generate a solution plan and then using an external symbolic solver to solve the plan outperforms using CoT for both steps for these tasks."
+ },
+ {
+ "type": "text",
+ "bbox": [0.17, 0.677, 0.828, 0.804],
+ "angle": 0,
+ "content": "These results paint a picture that CoT's utility is often circumscribed by tool augmentation: on problems where CoT helps, we already have more powerful tools than CoT that we can employ, and on \"soft reasoning\" problems like commonsense where no tools exist, we see limited benefit from CoT. This characterization has two major implications. First, CoT is unnecessary for many problems where it is widely employed: there exist more efficient prompting strategies that yield similar performance for much lower inference cost. Second, we see a critical need to move beyond prompt-based CoT to more sophisticated approaches based on search, interacting agents, or models more heavily fine-tuned for CoT. Future work can explore how intermediate computation can be better used to solve challenging problems outside of the math and symbolic reasoning domains."
+ },
+ {
+ "type": "title",
+ "bbox": [0.172, 0.824, 0.519, 0.84],
+ "angle": 0,
+ "content": "2 BACKGROUND: CHAIN-OF-THOUGHT"
+ },
+ {
+ "type": "text",
+ "bbox": [0.17, 0.856, 0.827, 0.9],
+ "angle": 0,
+ "content": "The tasks we consider in this work consist of a question \\(\\mathbf{q} \\in \\Sigma^{*}\\) for a vocabulary \\(\\Sigma\\) and an answer \\(a \\in \\mathcal{L}(\\mathbf{q})\\) for a label set \\(\\mathcal{L}(\\mathbf{q})\\). \\(\\mathcal{L}(\\mathbf{q})\\) can consist of a data type like boolean or integer, classification labels, or problem-dependent labels like names of entities from \\(\\mathbf{q}\\). One exception that we still"
+ },
+ {
+ "type": "page_footnote",
+ "bbox": [0.191, 0.91, 0.521, 0.924],
+ "angle": 0,
+ "content": "2https://mistral.ai/news/mistral-large-2407/"
+ },
+ {
+ "type": "page_number",
+ "bbox": [0.494, 0.949, 0.506, 0.96],
+ "angle": 0,
+ "content": "2"
+ }
+ ],
+ [
+ {
+ "type": "header",
+ "bbox": [0.173, 0.033, 0.48, 0.049],
+ "angle": 0,
+ "content": "Published as a conference paper at ICLR 2025"
+ },
+ {
+ "type": "text",
+ "bbox": [0.17, 0.104, 0.825, 0.134],
+ "angle": 0,
+ "content": "explore is BiGGen Bench (Kim et al., 2024), which instead relies on an LLM-as-a-judge (Dubois et al., 2023; Zheng et al., 2024b) to provide a label for generated long-form responses."
+ },
+ {
+ "type": "text",
+ "bbox": [0.17, 0.157, 0.825, 0.228],
+ "angle": 0,
+ "content": "Prompting and chain-of-thought for reasoning A large language model places distributions over strings \\(p(\\mathbf{y}) = \\prod_{i=1}^{n} p_{\\mathrm{LM}}(y_i \\mid \\mathbf{y}_{<i})\\) where \\(\\mathbf{y} \\in \\Sigma^*\\). 
In practice, we can interpret these as conditional distributions \\(p(\\mathbf{y} \\mid \\mathbf{x})\\) where \\(\\mathbf{x}\\) is a user's prompt. Typical invocation of an LLM involves forming a prompt \\(\\mathcal{I}(\\mathbf{q})\\) that wraps the question with additional instruction, then drawing a sample response \\(\\tilde{\\mathbf{y}} \\sim p(\\mathbf{y} \\mid \\mathcal{I}(\\mathbf{q}))\\), and finally returning \\(a = \\text{extract}(\\tilde{\\mathbf{y}})\\) using some kind of answer extractor."
+ },
+ {
+ "type": "text",
+ "bbox": [0.17, 0.234, 0.827, 0.306],
+ "angle": 0,
+ "content": "For the tasks we consider in this work, the output \\(\\tilde{\\mathbf{y}}\\) can take one of two forms. A direct answer only contains a string realization of \\(a\\); e.g., \\(\\mathbf{y} = (\\_185, 4)\\), which is detokenized as the answer \\(a = 1854\\). A chain of thought is a longer sequence \\(\\mathbf{y}\\) including other tokens beyond the answer, e.g., \\(\\mathbf{y} = (\\_185, 6, \\_\\mathrm{minus}, \\_2, \\_\\mathrm{equals}, \\_185, 4)\\). In both cases, the extract function must parse and detokenize the output; in CoT, there is some extra work to spot where the answer is placed."
+ },
+ {
+ "type": "text",
+ "bbox": [0.17, 0.31, 0.827, 0.465],
+ "angle": 0,
+ "content": "Our prompts can explicitly encourage use of direct answer or chain of thought as strategies, which we denote as \\(\\mathcal{I}_{\\mathrm{da}}\\) and \\(\\mathcal{I}_{\\mathrm{cot}}\\). For eliciting CoT, this includes strategies like telling a model to \"think step by step\" (Kojima et al., 2022). For directly answering a question, a prompt may say \"immediately generate the answer\". We track the average location of the answer in the generated output for both CoT and direct prompts in Appendix F.3 to ensure that direct answer prompts give the answer early in the output. We also ensure that extract can parse answers from the generated output for each model, prompt, and dataset combination used in our experiments, tailoring the extract function as needed to ensure low unparseable rates for each model and task. All prompts and outputs per dataset per model have been uploaded to Huggingface and we include examples of some of our prompts in Appendix J. We also experiment with few-shot CoT prompts, which we find perform similarly to zero-shot prompts; details about these are given in Appendix E."
+ },
+ {
+ "type": "text",
+ "bbox": [0.17, 0.487, 0.825, 0.587],
+ "angle": 0,
+ "content": "Symbolic reasoning Of key importance to this work is whether problems feature symbolic reasoning or not. We consider a problem to be symbolic if it can be grounded in a natural, well agreed-upon formal system. “\\(12 \\times 4\\)” is an example of a symbolic problem, which can be grounded in mathematics. Other systems include first-order logic (Saparov & He, 2023; Hua et al., 2024) or planning languages (Liu et al., 2023a; Valmeekam et al., 2023). Formally, for symbolic problems, we define a function \\(f\\) that acts as a map that produces some symbolic expression \\(S = f(\\mathbf{q})\\) from the question. \\(S\\) can be used as input for a solver to derive an answer, \\(\\hat{a} = \\operatorname{solve}(S)\\)."
+ },
+ {
+ "type": "text",
+ "bbox": [0.17, 0.592, 0.825, 0.663],
+ "angle": 0,
+ "content": "Conversely, a problem like where on a river can you hold a cup upright to catch water on a sunny day? from CommonsenseQA (Talmor et al., 2019) is non-symbolic by our definition. 
While this problem could be formalized with some kind of predicate logic (Zhou et al., 2022; Quan et al., 2024; Zhou et al., 2024) or grounded in some kind of physical simulation (Hao et al., 2023; Wong et al., 2023), there is not a natural nor well agreed-upon framework for solving it." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.668, + 0.827, + 0.753 + ], + "angle": 0, + "content": "We view non-symbolic to symbolic reasoning as a spectrum. MuSR (Sprague et al., 2024) is a \"semisymbolic\" dataset in that it does contain an underlying formal system (e.g., for its murder mysteries portion, the notion that \\(\\mathrm{motive}(X)\\wedge \\mathrm{means}(X)\\wedge \\mathrm{opportunity}(X)\\Rightarrow \\mathrm{murderer}(X))\\), but also involves substantial commonsense reasoning that does not map onto a formal system. In these cases, we can still form \\(S = f(\\mathbf{q})\\), but \\(f\\) must rely heavily on a language model and instantiate new information for \\(S\\) that is not directly represented in \\(\\mathbf{q}\\)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.776, + 0.825, + 0.86 + ], + "angle": 0, + "content": "Central claim Figure 1 shows that there are a large number of positive results on CoT reported in the literature. Informally, we believe many readers of the literature to hold the following view: \\(\\mathcal{I}_{\\mathrm{cot}}\\) will outperform \\(\\mathcal{I}_{\\mathrm{da}}\\) on nearly all reasoning problems, whether those problems involve symbolic or non-symbolic reasoning. Our evidence does not support this conjecture. We will show that this performance boost is strongest for symbolic and semi-symbolic tasks, while giving little to no improvement (or even hurting performance) on non-symbolic tasks." + }, + { + "type": "page_footnote", + "bbox": [ + 0.17, + 0.885, + 0.825, + 0.926 + ], + "angle": 0, + "content": "3We exclude a number of other \"CoT-like\" approaches in our analysis such as decomposed prompting (Khot et al., 2023; Zheng et al., 2024a) and multi-agent debate (Du et al., 2023; Chen et al., 2024). We focus on single prompt approaches. We deal with tool-augmented approaches in Section 5." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "table_caption", + "bbox": [ + 0.231, + 0.101, + 0.766, + 0.117 + ], + "angle": 0, + "content": "Table 1: A few categories for experimental comparisons. Full list in Appendix B." + }, + { + "type": "table", + "bbox": [ + 0.175, + 0.127, + 0.825, + 0.361 + ], + "angle": 0, + "content": "
Category | Description
Symbolic and algorithmic | Tasks involving symbol manipulation which can be solved by executing a program. This includes entity tracking datasets (e.g., SCONE, Coin Flip) and algorithmic tasks (e.g., BBH word sorting or finding shortest paths in a graph).
Math | Tasks requiring mathematical reasoning, from grade-school math to advanced mathematics, including physics questions.
Logical reasoning | Tasks designed to test for logical reasoning, whether deductive (Saparov & He, 2023, PrOntoQA), inductive (Bowen et al., 2024) or analogical (Ma et al., 2024) reasoning, including syllogisms and logical puzzles.
Encyclopedic knowledge | Tasks requiring expert-level in-depth knowledge beyond mere common-sense, usually in an open-book setting.
Mixed datasets | Datasets containing a variety of tasks, such as BIG-Bench Hard (BBH) or MMLU.
... | ...
" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.388, + 0.486, + 0.404 + ], + "angle": 0, + "content": "3 RESULTS FROM THE LITERATURE" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.422, + 0.827, + 0.537 + ], + "angle": 0, + "content": "Criteria and Process We investigate all papers from ICLR 2024, a representative ML venue, and two representative NLP venues, EACL 2024 and NAACL 2024 (including Findings and Workshop papers). This resulted in 4,642 papers total that filtered using automatic and manual methods to papers including experiments comparing chain-of-thought, \\(\\mathcal{I}_{\\mathrm{cot}}\\), against direct answering prompts, \\(\\mathcal{I}_{\\mathrm{direct}}\\). A total of 110 papers were found that matched our criteria with 1,218 experimental comparisons. We then grouped the comparisons by the types of tasks and datasets being evaluated. More details on our automatic and manual filtering, as well as our categorization, can be found in Appendix A and B." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.555, + 0.827, + 0.71 + ], + "angle": 0, + "content": "Results Figure 2 shows the distribution of CoT deltas (CoT prompt minus the direct answer prompt performance) across our categorization of different task types found in the literature. Compared to Figure 1, we take the mean results per paper per category, indicated by blue dots, showing the trend across papers in the literature. The categories are ranked in order of ascending median CoT delta. The three categories which benefited the most from CoT are symbolic reasoning, math, and logical reasoning, with average improvements of 14.2, 12.3, 6.9, respectively. Average performance on these top three tasks with CoT was 56.9, whereas performance without CoT was 45.5. For other categories, the average performance with CoT was 56.8, compared to 56.1 without CoT. We do not consider this small improvement a victory for CoT. CoT involves more computation than direct answering, and a truly fair comparison between the methods should match the compute of the two methods, e.g., assembling across multiple prompts." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.73, + 0.828, + 0.926 + ], + "angle": 0, + "content": "Do any non-math datasets benefit from CoT? On the right side of Figure 2, we show the top 10 outliers from our observed trend, namely papers with high CoT deltas averaged across experiments in tasks other than math, symbolic, or logical reasoning. Although not categorized as math or logic, several of these are related to logical, mathematical or symbolic reasoning in some way. From this list, the dataset which benefits the most most from CoT is BIG-bench Hard (BBH) (Suzgun et al., 2023), a benchmark consisting largely of problems requiring algorithmic, arithmetic or logical reasoning. For instance, BIG-bench Navigate is a spatial reasoning task, but relies heavily on a mathematical primitive of counting steps taken to derive a final conclusion. Similarly, while BIG-bench Temporal is a temporal reasoning task (answering questions about when certain events could have occurred), it requires deductive reasoning to solve. In addition, Legal Argument Reasoning (SemEval-2024 Task 5) (Bongard et al., 2022) was categorized as context-aware QA, but also requires substantial reasoning ability. Finally, MMLU-Moral Scenarios (Hendrycks et al., 2021a) requires answering two independent questions at once, which essentially involves a symbolic combination of two simpler questions." 
+ },
+ {
+ "type": "page_number",
+ "bbox": [0.494, 0.949, 0.506, 0.96],
+ "angle": 0,
+ "content": "4"
+ }
+ ],
+ [
+ {
+ "type": "header",
+ "bbox": [0.174, 0.033, 0.48, 0.049],
+ "angle": 0,
+ "content": "Published as a conference paper at ICLR 2025"
+ },
+ {
+ "type": "image",
+ "bbox": [0.174, 0.113, 0.805, 0.472],
+ "angle": 0,
+ "content": null
+ },
+ {
+ "type": "image_caption",
+ "bbox": [0.24, 0.101, 0.691, 0.113],
+ "angle": 0,
+ "content": "CoT Performance Improvement Across Tasks Aggregated by Paper and Category"
+ },
+ {
+ "type": "image_caption",
+ "bbox": [0.174, 0.482, 0.825, 0.499],
+ "angle": 0,
+ "content": "Figure 2: Results from our meta-analysis (grey dots) aggregated by paper and category (blue dots)."
+ },
+ {
+ "type": "text",
+ "bbox": [0.171, 0.525, 0.828, 0.664],
+ "angle": 0,
+ "content": "There are a few outliers that less clearly follow the trend. ScienceQA (Lu et al., 2022) consists of multiple choice questions across a range of natural and social science disciplines, though it is hard to interpret gains without a breakdown of performance by subject or question type. The dialogue evaluation dataset from Jia et al. (2024) sees large improvements with CoT, but this is a proprietary dataset, and we note that other essay scoring results in our meta-analysis (Li et al., 2024; Stahl et al., 2024) did not show improvements with CoT. Other non-math, symbolic or logical datasets that benefit from CoT are Commitment Bank (de Marneffe et al., 2019) and the task of eliciting verbalized confidence (Xiong et al., 2024). Nevertheless, these are exceptions to the rule. The majority of the reported benefits from using CoT in the NLP and ML literature come from math or math-related tasks."
+ },
+ {
+ "type": "title",
+ "bbox": [0.172, 0.686, 0.462, 0.701],
+ "angle": 0,
+ "content": "4 RESULTS FROM EXPERIMENTS"
+ },
+ {
+ "type": "title",
+ "bbox": [0.172, 0.717, 0.378, 0.731],
+ "angle": 0,
+ "content": "4.1 EXPERIMENTAL SETUP"
+ },
+ {
+ "type": "text",
+ "bbox": [0.171, 0.744, 0.827, 0.926],
+ "angle": 0,
+ "content": "Dataset, Models, Prompts All datasets, models, and prompts we evaluate over can be found in detail in Tables 3, 4, and 5 of Appendix C. We restricted our experiments to English models commonly used and benchmarked on general reasoning datasets. Our datasets include those which are widely used in CoT and reasoning literature, including a mix of non-symbolic, semi-symbolic, and symbolic reasoning. They span different formats, including multiple-choice, short-answer, and free-response; however, most of these datasets are multiple choice or short answer, as CoT is not typically used in long-form response settings. We also categorize each dataset into a larger category of reasoning required to solve it: Commonsense, Knowledge, Symbolic, Mathematical, and Soft Reasoning. We define Soft Reasoning as questions relying on commonsense and natural language but going beyond simple inferences about these statements. Finally, we explore several prompting strategies for eliciting reasoning from language models, as past work has emphasized the importance of the prompt (Yang et al., 2024). However, we generally found slight performance differences; see Appendix D for details. We therefore focus on prompts similar to Kojima et al. (2022) and Wei et al."
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "image", + "bbox": [ + 0.178, + 0.079, + 0.805, + 0.329 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.331, + 0.825, + 0.388 + ], + "angle": 0, + "content": "Figure 3: Left: Performance gain from using CoT for each reasoning category. Right: Performance gain from using CoT for each dataset, averaged across models and broken out across 5 representative models. Red lines indicate median improvement. In both plots we see a consistent trend: most improvements from using CoT are from math and symbolic reasoning." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.419, + 0.825, + 0.463 + ], + "angle": 0, + "content": "(2022) for zero-shot and few-shot settings, respectively, with alterations to improve the model's ability to produce desired behavior (i.e., formats that allow for easily parsed answers). We upload all our prompts and outputs for each model for each prompting strategy on Huggingface.4." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.48, + 0.825, + 0.581 + ], + "angle": 0, + "content": "Implementation Details We use a high-throughput inference package, vLLM (Kwon et al., 2023), for the model inference process. We use greedy decoding on all models. Our prompts are taken from the Llama 3.1 evaluations when available (Dubey et al., 2024), and minor adjustments are made to unify prompting strategies. For other datasets, we either use the standard prompt for the dataset from the corresponding original paper or implement our own prompt. Our answer parser (extract) is tailored to each dataset and model. Specific details about each dataset, its prompts, and answer extractor can be found in Appendix C." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.6, + 0.279, + 0.613 + ], + "angle": 0, + "content": "4.2 RESULTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.627, + 0.825, + 0.657 + ], + "angle": 0, + "content": "Where does zero-shot CoT improve over direct prompts? On datasets that require math (MATH, GSM8K) or formal logic (ContextHub, MuSR to a lesser degree) to answer the problem." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.663, + 0.825, + 0.777 + ], + "angle": 0, + "content": "Figure 3 on the left shows the average CoT performance improvement for each reasoning category from Figure 1 (right); raw numbers can be found in Table 6 of the Appendix. On the right, Figure 3 shows the performance gain from using CoT for each dataset, averaged across all models and for a selection of individual models. On non-symbolic reasoning categories and datasets, specifically those that contain questions primarily involving commonsense (CSQA, PIQA, SiQA), language understanding (WinoGrande), and reading comprehension (AGI LSAT, ARC-Easy, ARC-Challenge), there is little to no separation between the performance of zero-shot CoT and zero-shot direct answer. Despite these datasets involving reasoning, CoT does not yield improvement." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.781, + 0.827, + 0.894 + ], + "angle": 0, + "content": "By contrast, the mathematical and symbolic categories get larger boosts in improvements alongside symbolic and many semi-symbolic datasets. 
MATH and GSM8K show gains as large as \\(41.6\\%\\) and \\(66.9\\%\\), respectively. The semi-symbolic datasets like ContextHub and MuSR Murder Mysteries show moderate gains. These datasets require the application of logical rules to reach the answer, e.g., first-order logic parsed from simple natural language (ContextHub) or more complex commonsense statements (MuSR Murder Mysteries). All results are shown in the Appendix F.1 as well as a full list of numeric results for both CoT and direct answer prompting in Table 7. We also explored the few-shot setting and found it had little impact on when CoT will help; see Appendix E." + }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.91, + 0.859, + 0.925 + ], + "angle": 0, + "content": "4https://huggingface.co/collections/TAUR-Lab/cot-analysis-project-66bbb9e5e0156e65059895f5" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.135 + ], + "angle": 0, + "content": "Does the answer format impact where CoT will help? Not much. Free response capabilities required for BigGen Bench may not benefit from pre-planning." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.14, + 0.827, + 0.239 + ], + "angle": 0, + "content": "Many of the commonly-used datasets for problems other than math are multiple choice. We highlight here that CoT has similar performance to direct answer across models for two datasets that are not multiple-choice and contain varying levels of non-symbolic reasoning. First, MuSiQue (Trivedi et al., 2022) is a short-form QA task requiring multi-hop reasoning. We consider this a semi-symbolic dataset as the questions have an explicit multi-hop structure. Because answer spans in MuSiQue can be paraphrased in many different ways, we use GPT-4o to judge if two answer spans are equivalent. Despite being semi-symbolic, we see no overall improvement from CoT." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.244, + 0.828, + 0.399 + ], + "angle": 0, + "content": "Second, BiGGen Bench (Kim et al., 2024) uses free-form responses as the answer to a question, and an LLM-as-a-judge is used to evaluate these responses on a scale of 1 to 5. Because free-form responses blur the lines between CoT and direct answering, we create a new prompt that asks the language model to plan the free response before giving it. We then only pass the free response to the judge (GPT-4o-mini in our case) with the prompt from Kim et al. (2024). We also filter out any questions that explicitly state \"Think step-by-step\". We plot the performance of BiGGen Bench as the number of times a response receives a score of 4 or better. Despite including many reasoning questions (including several categories of math) and other categories, such as planning, we only see a mild improvement here. Because previous experiments show CoT helping on similar types of questions in the QA format, the lack of similar improvements here could imply that pre-planning is insufficient for unlocking reasoning capabilities in the LLM. Future work is needed to prove this." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.412, + 0.825, + 0.441 + ], + "angle": 0, + "content": "Are the gains in Knowledge, Soft Reasoning, and Commonsense significant? Mostly no, except for MMLU, StrategyQA, and MuSR." 
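+ },
+ {
+ "type": "text",
+ "bbox": null,
+ "angle": 0,
+ "content": "The paragraph that follows details the significance analysis; as a companion, here is a minimal sketch (an assumed implementation, not the paper's released code) of a one-sided paired bootstrap test together with the Bonferroni-corrected threshold it describes."
+ },
+ {
+ "type": "code",
+ "bbox": null,
+ "angle": 0,
+ "content": "# Assumed implementation of the paired bootstrap described in the next paragraph;
# cot and direct are per-question 0/1 correctness arrays for one model on one dataset.
import numpy as np

def paired_bootstrap_pvalue(cot, direct, n_boot=10_000, seed=0):
    rng = np.random.default_rng(seed)
    diffs = np.asarray(cot, dtype=float) - np.asarray(direct, dtype=float)
    n = len(diffs)
    # Resample question indices with replacement; keep the mean paired difference each time.
    samples = diffs[rng.integers(0, n, size=(n_boot, n))].mean(axis=1)
    # One-sided p-value: the fraction of resamples in which CoT fails to beat direct answering.
    return float((samples <= 0).mean())

# Bonferroni-corrected threshold for 14 models x 13 datasets, matching the 0.00027 below.
alpha = 0.05 / (14 * 13)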
+ },
+ {
+ "type": "text",
+ "bbox": [0.171, 0.447, 0.827, 0.614],
+ "angle": 0,
+ "content": "We tested the significance of the improvements from CoT on the 13 datasets in the Knowledge, Soft Reasoning, and Commonsense reasoning categories using paired bootstrapping to assess whether CoT gives a significant improvement. To account for multiple comparisons, we applied a Bonferroni correction, setting the p-value threshold to 0.00027 to account for the 14 models and 13 datasets. About \\(32\\%\\) (59) of the comparisons that show a benefit in these three reasoning categories were considered significant. Nearly half of these comparisons (26) are on MMLU and MMLU Pro. On these datasets, we find that CoT is mainly helping on math-related questions. StrategyQA and MuSR also received a consistent performance boost across 10 and 6 models respectively. StrategyQA is often used to benchmark reasoning methods and is built specifically to get a benefit from methods that decompose the question into steps, so a gain in performance is not unprecedented. MuSR, similarly, was built to have multiple steps of complex natural language reasoning, which may receive benefits from CoT. The remaining datasets that receive significant benefits are spread across the datasets and models."
+ },
+ {
+ "type": "text",
+ "bbox": [0.171, 0.628, 0.825, 0.713],
+ "angle": 0,
+ "content": "Why do MMLU and MMLU Pro get a boost? MMLU and MMLU Pro contain many different questions requiring different types of reasoning. We separated MMLU and MMLU Pro questions into two bins, those related to math and those not related to math, by checking if the question's text or the generated response from the LLM includes an “=”. Figure 4 shows that a majority of the performance gain seen from MMLU and MMLU Pro is from the math slices of each dataset. See more details in Appendix G."
+ },
+ {
+ "type": "title",
+ "bbox": [0.172, 0.732, 0.744, 0.748],
+ "angle": 0,
+ "content": "5 STRENGTHS AND WEAKNESSES OF COT AT FORMAL REASONING"
+ },
+ {
+ "type": "text",
+ "bbox": [0.171, 0.763, 0.825, 0.834],
+ "angle": 0,
+ "content": "Previous sections establish that CoT primarily helps with symbolic reasoning tasks, but not why. Many symbolic and semi-symbolic tasks can be broken down into two stages (Ye et al., 2023; Pan et al., 2023; Jiang et al., 2024): planning, producing either a formal or informal specification via prompting (Sun et al., 2024; Wang et al., 2023b), and execution, using the same LM or external solvers. In this section, we attribute the performance gains from CoT on symbolic tasks to these two stages."
+ },
+ {
+ "type": "text",
+ "bbox": [0.171, 0.839, 0.827, 0.927],
+ "angle": 0,
+ "content": "Given a question that requires symbolic reasoning, we define the planning stage as extracting all variables from the context into a formal specification and defining their relations. The execution stage uses a solver that takes as input a plan and can be run in an orderly fashion to derive the final answer. Using our notation from Section 2, let \\( f(\\mathbf{q}) = \\mathcal{I}_{\\mathrm{planning}}^{m}(\\mathbf{q}) \\) be a mapping of the question \\( \\mathbf{q} \\) to a symbolic plan \\( S_{\\mathrm{plan}} \\) that can be executed by the language model or by an external symbolic solver, \\( \\hat{a} = \\mathrm{solve}(S_{\\mathrm{plan}}) \\), where \\( \\hat{a} \\) is the final answer for \\( \\mathbf{q} \\)."
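+ },
+ {
+ "type": "text",
+ "bbox": null,
+ "angle": 0,
+ "content": "To illustrate this split, the sketch below (ours; the plan string is hand-written, not an actual LM generation) mirrors the GSM8K question shown in Figure 5 and executes the plan with a Python interpreter, i.e., \\( \\hat{a} = \\mathrm{solve}(S_{\\mathrm{plan}}) \\)."
+ },
+ {
+ "type": "code",
+ "bbox": null,
+ "angle": 0,
+ "content": "# Sketch of the planning/execution split; plan() stands in for the LM mapping f(q) -> S_plan.
def plan(question: str) -> str:
    # Hand-written stand-in for an LM-generated Python plan for the Figure 5 question:
    # Courtney reported 48 people, overstating the true count by 20%.
    return 'reported = 48\\nanswer = reported / 1.2'

def tool_solve(s_plan: str) -> float:
    # Plan + Tool Solver: a Python interpreter, not the LM, executes the plan.
    env = {}
    exec(s_plan, env)
    return env['answer']

print(tool_solve(plan('How many people were there?')))  # 40.0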
+ },
+ {
+ "type": "page_number",
+ "bbox": [0.494, 0.949, 0.506, 0.96],
+ "angle": 0,
+ "content": "7"
+ }
+ ],
+ [
+ {
+ "type": "header",
+ "bbox": [0.173, 0.033, 0.48, 0.049],
+ "angle": 0,
+ "content": "Published as a conference paper at ICLR 2025"
+ },
+ {
+ "type": "image_caption",
+ "bbox": [0.324, 0.093, 0.672, 0.109],
+ "angle": 0,
+ "content": "Improvement of CoT over direct on = vs. no ="
+ },
+ {
+ "type": "image",
+ "bbox": [0.18, 0.114, 0.812, 0.266],
+ "angle": 0,
+ "content": null
+ },
+ {
+ "type": "image_caption",
+ "bbox": [0.171, 0.283, 0.825, 0.341],
+ "angle": 0,
+ "content": "Figure 4: CoT deltas between MMLU and MMLU Pro performance when a question or generated response contains an “=” (With =) or not (Without =). We filter out any questions that do not result in a final answer (degeneration, etc.). CoT primarily helps on the pairs of questions and generations that contain an “=”, which indicates math-related questions."
+ },
+ {
+ "type": "text",
+ "bbox": [0.18, 0.362, 0.819, 0.389],
+ "angle": 0,
+ "content": "Q: Courtney said that there were 48 people, but Kelly said that Courtney had overstated the number by \\(20\\%\\). If Kelly was right, how many people were there?"
+ },
+ {
+ "type": "image",
+ "bbox": [0.182, 0.389, 0.818, 0.555],
+ "angle": 0,
+ "content": null
+ },
+ {
+ "type": "image_caption",
+ "bbox": [0.171, 0.568, 0.825, 0.64],
+ "angle": 0,
+ "content": "Figure 5: Prompt variants that separate planning and execution for GSM8K. For all prompt variants besides direct answer and CoT (not shown), we few-shot prompt an LLM to first generate a Python program as a solution plan. For Plan + Direct Solver, the LLM is prompted to directly give an answer from the plan; for Plan + CoT Solver, the LLM is prompted to solve the plan step-by-step with CoT and give an answer; for Plan + Tool Solver, we feed the plan into a Python interpreter."
+ },
+ {
+ "type": "text",
+ "bbox": [0.171, 0.67, 0.825, 0.728],
+ "angle": 0,
+ "content": "By separating planning and execution in this way, we can test how much a language model can gain from only having a plan, to having a plan and solving it with CoT, or to having a plan and then solving it with an external symbolic solver. Given a plan \\( S_{\\mathrm{plan}} \\sim \\mathcal{I}_{\\mathrm{planning}}^{m}(\\mathbf{q}) \\), we compare the performance of the settings below to evaluate at which stage the LM is most effective and where it falls short."
+ },
+ {
+ "type": "title",
+ "bbox": [0.172, 0.749, 0.375, 0.763],
+ "angle": 0,
+ "content": "5.1 SETTINGS EVALUATED"
+ },
+ {
+ "type": "text",
+ "bbox": [0.171, 0.776, 0.825, 0.806],
+ "angle": 0,
+ "content": "Settings 1 and 2: Few-shot direct answer and CoT: We use the few-shot direct answer and CoT prompts from Section 4.1 as baselines. Figure 5 includes an example of each setting on GSM8K."
+ },
+ {
+ "type": "text",
+ "bbox": [0.171, 0.811, 0.827, 0.926],
+ "angle": 0,
+ "content": "Settings 3 and 4: Plan + Direct Solver and Plan + CoT Solver: Here we draw inspiration from Xu et al. (2024a) and generate a symbolic plan using the same strategy as Ye et al. (2023). Specifically, we use a few-shot prompt \\(\\mathcal{I}_{\\mathrm{planning}}^m\\) to generate a formal specification \\(S_{\\mathrm{plan}}\\) that should be executable by a symbolic solver. 
In the same prompt, LMs are asked to solve their generated specification \\(S_{\\mathrm{plan}}\\) and derive the final answer \\(\\tilde{\\mathbf{y}} \\sim p(\\mathbf{y} \\mid \\mathcal{I}_{\\mathrm{da}}(S_{\\mathrm{plan}}))\\), either directly giving the answer after generating the specification (\\(Plan + Direct Solver\\)) or providing step-by-step explanations and tracking of intermediate steps for the derivation (\\(Plan + CoT Solver\\)). In particular, \\(S_{\\mathrm{plan}}\\) is a Python program for math datasets, and a set of first-order logic specifications for logical reasoning datasets."
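+ },
+ {
+ "type": "text",
+ "bbox": null,
+ "angle": 0,
+ "content": "For the logical-reasoning case, here is a hand-written sketch (ours, not one of the paper's generated specifications or prompts) of the kind of first-order plan that the tool-solver setting below hands to the Z3 prover."
+ },
+ {
+ "type": "code",
+ "bbox": null,
+ "angle": 0,
+ "content": "# Hand-written first-order 'plan' checked with the Z3 prover; the conclusion is entailed
# iff the premises together with its negation are unsatisfiable.
from z3 import Bools, Implies, Not, Solver, unsat

rain, wet, slippery = Bools('rain wet slippery')
premises = [Implies(rain, wet), Implies(wet, slippery), rain]

solver = Solver()
solver.add(premises + [Not(slippery)])
print('entailed' if solver.check() == unsat else 'not entailed')  # prints: entailed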
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.681, + 0.375, + 0.695 + ], + "angle": 0, + "content": "5.2 EVALUATION RESULTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.707, + 0.825, + 0.737 + ], + "angle": 0, + "content": "Figure 6 shows the results across a representative selection of models. Detailed numerical results, including the unparseable rates of model-generated plans, can be found in Appendix H." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.742, + 0.825, + 0.813 + ], + "angle": 0, + "content": "When comparing direct answer with Plan + Direct solver and Plan + CoT solver, we note that for many datasets and models, only having a plan does not account for most of the performance gain. Compared with direct answer, CoT or Plan + CoT solver is needed for strong performance. Tracking the execution with one of these methods gives the strongest accuracy benefit, especially for math-heavy datasets." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.819, + 0.825, + 0.862 + ], + "angle": 0, + "content": "Despite their strength over direct answer and Plan + Direct solver, CoT and Plan + CoT solver are dominated by Plan + Tool solver in most settings. LLMs are limited in their ability to execute and track steps compared with symbolic solvers." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.868, + 0.826, + 0.926 + ], + "angle": 0, + "content": "We argue that these results provide an explanation of why CoT helps on symbolic tasks. While all tasks could feasibly benefit from a detailed description of how to solve each individual question (e.g., a plan in the context of this section), CoT only outperforms direct answer when these steps require a substantial amount of tracing and computation. In these settings, we can see clear performance" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.048 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.148 + ], + "angle": 0, + "content": "benefit from using symbolic solvers; CoT appears to be a poor (but universal) approximation to such solvers. Where possible, LLMs should be paired with symbolic solvers at inference time on symbolic tasks to achieve consistently better performance than direct answer and CoT." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.166, + 0.498, + 0.182 + ], + "angle": 0, + "content": "6 DISCUSSION AND RELATED WORK" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.197, + 0.827, + 0.421 + ], + "angle": 0, + "content": "Where is CoT helping and why? Our results showing CoT improvement for math and logic align well with early work on CoT for LLMs such as Scratchpads (Nye et al., 2022). As CoT gained popularity, its application broadened to tasks that canonically do not require multiple steps. It can often yield small improvements over direct answering. We believe this led to the current prevailing sentiment that deliberation should improve performance on any task requiring some type of reasoning (our original claim from Section 2). However, our results show a clear separation between performance on non-symbolic and symbolic tasks. If, in theory, any question could benefit from deliberation, why is CoT only benefiting the questions that can be solved through symbolic manipulation?
Our results from Section 5 suggest that the primary benefit of CoT lies in the ability to execute symbolic steps and track their output. Not all tasks have this feature: for example, questions from CommonsenseQA can hardly be translated into formally grounded and executable solution plans. Datasets like StrategyQA may feature multiple steps of reasoning, but executing those steps is not complex, so the benefits of CoT are small. It is unclear whether explicitly instilling models with particular modes of deliberation, like process of elimination for multiple choice questions, might make them more effective for non-symbolic tasks, or whether there is a fundamental limitation imposed by their pre-training data. We leave this distinction for future work." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.433, + 0.827, + 0.56 + ], + "angle": 0, + "content": "Can we improve CoT further? Our work treats chain-of-thought variants that explicitly do not involve multiple inference calls. There is evidence that using additional calls to LLMs can help (Du et al., 2023; Yao et al., 2023; Besta et al., 2023; Chen et al., 2024), but these methods use significantly increased computation, and careful benchmarking sometimes reveals that naive techniques are as good as iterative ones (Olausson et al., 2024). However, past theoretical results show that Transformers are augmented in a fundamental way by CoT (Liu et al., 2023b; Merrill & Sabharwal, 2024); we believe this indicates the potential for improving CoT beyond prompt-based CoT. On the other hand, recent methods showing benefit from "internalizing" CoT (Deng et al., 2024) may indicate that explicit generation of intermediate tokens is not used to its full potential." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.573, + 0.825, + 0.645 + ], + "angle": 0, + "content": "Limitations One set of tasks we do not cover in our experiments (except for BiGGen Bench) is long-horizon planning. However, many works in the literature have already discussed the efficacy of planning with CoT. We also do not directly address possible data contamination of these models on the datasets we use. We try to mitigate this by including multiple models, datasets (new and old), and our meta-analysis. For more discussion of planning and dataset contamination, see Appendix I." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.662, + 0.321, + 0.679 + ], + "angle": 0, + "content": "7 CONCLUSION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.693, + 0.825, + 0.792 + ], + "angle": 0, + "content": "In this work, we characterize the performance of prompt-based CoT through a meta-analysis of the literature and experiments across different models, datasets, and prompts. We find that CoT predominantly helps on math and formal logic, largely due to its ability to trace the intermediate steps of a problem. But CoT rarely outperforms tool-augmented approaches for these same problems. We believe that CoT remains a powerful technique, but to yield improvements across a wider range of NLP tasks, research should move beyond prompt-based CoT to new paradigms like search, interacting agents, or better fine-tuned models." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.81, + 0.334, + 0.826 + ], + "angle": 0, + "content": "REPRODUCIBILITY" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.84, + 0.825, + 0.926 + ], + "angle": 0, + "content": "For our experiments, we provide in-depth details of how we evaluated models on each dataset in Section 4.1 and Appendix C.
Furthermore, we release all prompts for every dataset on Huggingface, including per-model outputs and sampling parameters. For our meta-analysis of the literature, we describe our filtering criteria and process of annotating experiments into high-level categories in Section 3 and Appendix B. We also release the full list of papers in our meta-analysis together with extracted experimental comparisons and task category annotations." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.511, + 0.961 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.104, + 0.357, + 0.119 + ], + "angle": 0, + "content": "ACKNOWLEDGMENTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.134, + 0.827, + 0.219 + ], + "angle": 0, + "content": "We acknowledge George Tsoukalas for providing insightful feedback throughout the project. We also thank Kaj Bostrom and Eunsol Choi for reviewing and providing feedback on drafts of the work. This work was partially supported by NSF CAREER Award IIS-2145280 (to Durrett), NSF CAREER Award 2339729 (to Mahowald), the NSF AI Institute for Foundations of Machine Learning (IFML), the Sloan Foundation via a Sloan Research Fellowship, and a grant from Open Philanthropy." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.238, + 0.288, + 0.253 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.261, + 0.825, + 0.484 + ], + "angle": 0, + "content": "Marah Abdin, Sam Ade Jacobs, Ammar Ahmad Awan, Jyoti Aneja, Ahmed Awadallah, Hany Hassan Awadalla, Nguyen Bach, Amit Bahree, Arash Bakhtiari, Harkirat Singh Behl, Alon Benhaim, Misha Bilenko, Johan Bjorck, Sébastien Bubeck, Martin Cai, Caio César Teodoro Mendes, Weizhu Chen, Vishrav Chaudhary, Parul Chopra, Allison Del Giorno, Gustavo de Rosa, Matthew Dixon, Ronen Eldan, Dan Iter, Abhishek Goswami, Suriya Gunasekar, Emman Haider, Junheng Hao, Russell J. Hewett, Jamie Huynh, Mojan Javaheripi, Xin Jin, Piero Kauffmann, Nikos Karampatziakis, Dongwoo Kim, Mahmoud Khademi, Lev Kurilenko, James R. Lee, Yin Tat Lee, Yuanzhi Li, Chen Liang, Weishung Liu, Eric Lin, Zeqi Lin, Piyush Madan, Arindam Mitra, Hardik Modi, Anh Nguyen, Brandon Norick, Barun Patra, Daniel Perez-Becker, Thomas Portet, Reid Pryzant, Heyang Qin, Marko Radmilac, Corby Rosset, Sambudha Roy, Olli Saarikivi, Amin Saied, Adil Salim, Michael Santacroce, Shital Shah, Ning Shang, Hiteshi Sharma, Xianmin Song, Olatunji Ruwase, Xin Wang, Rachel Ward, Guanhua Wang, Philipp Witte, Michael Wyatt, Can Xu, Jiahang Xu, Sonali Yadav, Fan Yang, Ziyi Yang, Donghan Yu, Cheng-Yuan Zhang, Cyril Zhang, Jianwen Zhang, Li Lyna Zhang, Yi Zhang, Yunan Zhang, and Xiren Zhou. Phi-3 technical report: A highly capable language model locally on your phone. ArXiv, abs/2404.14219, 2024. URL https://api.semanticscholar.org/CorpusID:269293048." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.492, + 0.822, + 0.535 + ], + "angle": 0, + "content": "Anthropic. The Claude 3 Model Family: Opus, Sonnet, Haiku. a. URL https://www-cdn.anthropic.com/de8ba9b01c9ab7cbabf5c33b80b7bbc618857627/Model_Card_Claude_3.pdf." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.544, + 0.821, + 0.573 + ], + "angle": 0, + "content": "Anthropic. Claude 3.5 Sonnet Model Card Addendum. b.
URL https://www-cdn.anthropic.com/fed9cc193a14b84131812372d8d5857f8f304c52/Model_Card_Claude_3_Addendum.pdf." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.581, + 0.825, + 0.65 + ], + "angle": 0, + "content": "Maciej Besta, Nils Blach, Ales Kubicek, Robert Gerstenberger, Michal Podstawski, Lukas Gianinazzi, Joanna Gajda, Tomasz Lehmann, Hubert Niewiadomski, Piotr Nyczyk, and Torsten Hoefler. Graph of thoughts: Solving elaborate problems with large language models. In AAAI Conference on Artificial Intelligence, 2023. URL https://api.semanticscholar.org/CorpusID:261030303." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.66, + 0.825, + 0.702 + ], + "angle": 0, + "content": "Yonatan Bisk, Rowan Zellers, Ronan Le Bras, Jianfeng Gao, and Yejin Choi. PIQA: Reasoning about physical commonsense in natural language. In AAAI Conference on Artificial Intelligence, 2019. URL https://api.semanticscholar.org/CorpusID:208290939." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.71, + 0.825, + 0.795 + ], + "angle": 0, + "content": "Leonard Bongard, Lena Held, and Ivan Habernal. The legal argument reasoning task in civil procedure. In Nikolaos Aletras, Ilias Chalkidis, Leslie Barrett, Cătălina Goanță, and Daniel Preoţiuc-Pietro (eds.), Proceedings of the Natural Legal Language Processing Workshop 2022, pp. 194-207, Abu Dhabi, United Arab Emirates (Hybrid), December 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.nllp-1.17. URL https://aclanthology.org/2022.nllp-1.17." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.803, + 0.825, + 0.874 + ], + "angle": 0, + "content": "Chen Bowen, Rune Sætre, and Yusuke Miyao. A comprehensive evaluation of inductive reasoning capabilities and problem solving in large language models. In Yvette Graham and Matthew Purver (eds.), Findings of the Association for Computational Linguistics: EACL 2024, pp. 323-339, St. Julian's, Malta, March 2024. Association for Computational Linguistics. URL https://aclanthology.org/2024.findings-eacl.22." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.882, + 0.825, + 0.925 + ], + "angle": 0, + "content": "Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh,
+ }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.209, + 0.825, + 0.267 + ], + "angle": 0, + "content": "Hyungjoo Chae, Yeonghyeon Kim, Seungone Kim, Kai Tzu-iunn Ong, Beong-woo Kwak, Moohyeon Kim, Seonghwan Kim, Taeyoon Kwon, Jiwan Chung, Youngjae Yu, et al. Language Models as Compilers: Simulating Pseudocode Execution Improves Algorithmic Reasoning in Language Models. arXiv preprint arXiv:2404.02575, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.273, + 0.825, + 0.316 + ], + "angle": 0, + "content": "Chih Yao Chen, Swarnadeep Saha, and Mohit Bansal. Reconcile: Round-table conference improves reasoning via consensus among diverse LLMs, 2024. URL https://openreview.net/forum?id=Yo16nUVIJD." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.324, + 0.825, + 0.38 + ], + "angle": 0, + "content": "Wenhu Chen, Xueguang Ma, Xinyi Wang, and William W. Cohen. Program of thoughts prompting: Disentangling computation from reasoning for numerical reasoning tasks. Transactions on Machine Learning Research, 2023. ISSN 2835-8856. URL https://openreview.net/forum?id=YfZ4ZPt8zd." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.388, + 0.825, + 0.431 + ], + "angle": 0, + "content": "Peter Clark, Isaac Cowhey, Oren Etzioni, Tushar Khot, Ashish Sabharwal, Carissa Schoenick, and Oyvind Tafjord. Think you have Solved Question Answering? Try ARC, the AI2 Reasoning Challenge. arXiv:1803.05457v1, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.438, + 0.825, + 0.495 + ], + "angle": 0, + "content": "Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, Christopher Hesse, and John Schulman. Training verifiers to solve math word problems. ArXiv, abs/2110.14168, 2021. URL https://api(semanticscholar.org/CorpusID:239998651." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.502, + 0.825, + 0.544 + ], + "angle": 0, + "content": "Marie-Catherine de Marneffe, Mandy Simons, and Judith Tonhauser. The CommitmentBank: Investigating projection in naturally occurring discourse. In Proceedings of Sinn und Bedeutung 23, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.552, + 0.825, + 0.609 + ], + "angle": 0, + "content": "Leonardo De Moura and Nikolaj Björner. Z3: An efficient SMT solver. In Proceedings of the Theory and Practice of Software, 14th International Conference on Tools and Algorithms for the Construction and Analysis of Systems, TACAS'08/ETAPS'08, pp. 337-340, Berlin, Heidelberg, 2008. Springer-Verlag. ISBN 3540787992." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.616, + 0.825, + 0.647 + ], + "angle": 0, + "content": "Yuntian Deng, Yejin Choi, and Stuart Shieber. From Explicit CoT to Implicit CoT: Learning to Internalize CoT Step by Step. arXiv preprint arXiv:2405.14838, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.653, + 0.825, + 0.695 + ], + "angle": 0, + "content": "Yilun Du, Shuang Li, Antonio Torralba, Joshua B Tenenbaum, and Igor Mordatch. Improving factuality and reasoning in language models through multiagent debate. arXiv preprint arXiv:2305.14325, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.703, + 0.825, + 0.746 + ], + "angle": 0, + "content": "Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. The Llama 3 Herd of Models. arXiv preprint arXiv:2407.21783, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.753, + 0.825, + 0.796 + ], + "angle": 0, + "content": "Yann Dubois, Xuechen Li, Rohan Taori, Tianyi Zhang, Ishaan Gulrajani, Jimmy Ba, Carlos Guestrin, Percy Liang, and Tatsunori B. Hashimoto. AlpacaFarm: A Simulation Framework for Methods that Learn from Human Feedback, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.803, + 0.825, + 0.875 + ], + "angle": 0, + "content": "Nouha Dziri, Ximing Lu, Melanie Sclar, Xiang Lorraine Li, Liwei Jiang, Bill Yuchen Lin, Sean Welleck, Peter West, Chandra Bhagavatula, Ronan Le Bras, Jena D. Hwang, Soumya Sanyal, Xiang Ren, Allyson Ettinger, Zaid Harchaoui, and Yejin Choi. Faith and fate: Limits of transformers on compositionality. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. URL https://openreview.net/forum?id=Fkckkr3ya8." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.882, + 0.825, + 0.925 + ], + "angle": 0, + "content": "Yao Fu, Hao Peng, Ashish Sabharwal, Peter Clark, and Tushar Khot. Complexity-based prompting for multi-step reasoning. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=yf1icZHC-19." + }, + { + "type": "list", + "bbox": [ + 0.172, + 0.104, + 0.825, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.148 + ], + "angle": 0, + "content": "Luyu Gao, Aman Madaan, Shuyan Zhou, Uri Alon, Pengfei Liu, Yiming Yang, Jamie Callan, and Graham Neubig. PAL: Program-aided language models. In Proceedings of the 40th International Conference on Machine Learning, ICML'23. JMLR.org, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.158, + 0.826, + 0.215 + ], + "angle": 0, + "content": "Mor Geva, Daniel Khashabi, Elad Segal, Tushar Khot, Dan Roth, and Jonathan Berant. Did Aristotle use a laptop? A question answering benchmark with implicit reasoning strategies. Transactions of the Association for Computational Linguistics, 9:346-361, February 2021. ISSN 2307-387X. doi: 10.1162/tacl_a_00370." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.226, + 0.826, + 0.283 + ], + "angle": 0, + "content": "L. Guan, Yifan Zhou, Denis Liu, Yantian Zha, Heni Ben Amor, and Subbarao Kambhampati. \"Task Success\" is not Enough: Investigating the Use of Video-Language Models as Behavior Critics for Catching Undesirable Agent Behaviors. ArXiv, abs/2402.04210, 2024. URL https://api.semanticscholar.org/CorpusID:267500077." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.295, + 0.826, + 0.351 + ], + "angle": 0, + "content": "Atharva Gundawar, Mudit Verma, L. Guan, Karthik Valmeekam, Siddhant Bhambri, and Subbarao Kambhampati. Robust Planning with LLM-Modulo Framework: Case Study in Travel Planning. ArXiv, abs/2405.20625, 2024. URL https://api.semanticscholar.org/CorpusID:270199944.
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.363, + 0.826, + 0.448 + ], + "angle": 0, + "content": "Simeng Han, Hailey Schoelkopf, Yilun Zhao, Zhenting Qi, Martin Riddell, Luke Benson, Lucy Sun, Ekaterina Zubova, Yujie Qiao, Matthew Burtell, David Peng, Jonathan Fan, Yixin Liu, Brian Wong, Malcolm Sailor, Ansong Ni, Linyong Nan, Jungo Kasai, Tao Yu, Rui Zhang, Shafiq Joty, Alexander R. Fabbri, Wojciech Kryscinski, Xi Victoria Lin, Caiming Xiong, and Dragomir Radev. FOLIO: Natural Language Reasoning with First-Order Logic. arXiv preprint arXiv:2209.00840, 2022. URL https://arxiv.org/abs/2209.00840." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.459, + 0.826, + 0.543 + ], + "angle": 0, + "content": "Shibo Hao, Yi Gu, Haodi Ma, Joshua Hong, Zhen Wang, Daisy Wang, and Zhiting Hu. Reasoning with language model is planning with world model. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pp. 8154-8173, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.507. URL https://aclanthology.org/2023.emnlp-main.507." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.555, + 0.826, + 0.599 + ], + "angle": 0, + "content": "Dan Hendrycks, Collin Burns, Steven Basart, Andy Zou, Mantas Mazeika, Dawn Song, and Jacob Steinhardt. Measuring massive multitask language understanding. Proceedings of the International Conference on Learning Representations (ICLR), 2021a." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.609, + 0.826, + 0.652 + ], + "angle": 0, + "content": "Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring Mathematical Problem Solving With the MATH Dataset. NeurIPS, 2021b." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.663, + 0.826, + 0.693 + ], + "angle": 0, + "content": "Hanxu Hu, Hongyuan Lu, Huajian Zhang, Wai Lam, and Yue Zhang. Chain-of-symbol prompting elicits planning in large language models, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.704, + 0.826, + 0.761 + ], + "angle": 0, + "content": "Wenyue Hua, Kaijie Zhu, Lingyao Li, Lizhou Fan, Shuhang Lin, Mingyu Jin, Haochen Xue, Zelong Li, Jindong Wang, and Yongfeng Zhang. Disentangling Logic: The Role of Context in Large Language Model Reasoning Capabilities. ArXiv, abs/2406.02787, 2024. URL https://api.semanticscholar.org/CorpusID:270258104." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.772, + 0.826, + 0.815 + ], + "angle": 0, + "content": "Wenlong Huang, Pieter Abbeel, Deepak Pathak, and Igor Mordatch. Language models as zero-shot planners: Extracting actionable knowledge for embodied agents. arXiv preprint arXiv:2201.07207, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.826, + 0.826, + 0.925 + ], + "angle": 0, + "content": "Jinghan Jia, Abi Komma, Timothy Leffel, Xujun Peng, Ajay Nagesh, Tamer Soliman, Aram Galstyan, and Anoop Kumar. Leveraging LLMs for dialogue quality measurement. In Yi Yang, Aida Davani, Avi Sil, and Anoop Kumar (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 6: Industry Track), pp. 359-367, Mexico City, Mexico, June 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.naacl-industry.30. URL https://aclanthology.org/2024.naacl-industry.30.
+ }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.826, + 0.175 + ], + "angle": 0, + "content": "Albert Qiaochu Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de Las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lucile Saulnier, Lélio Renard Lavaud, Marie-Anne Lachaux, Pierre Stock, Teven Le Scao, Thibaut Lavril, Thomas Wang, Timothée Lacroix, and William El Sayed. Mistral 7B. ArXiv, abs/2310.06825, 2023. URL https://api.semanticscholar.org/CorpusID:263830494." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.183, + 0.828, + 0.283 + ], + "angle": 0, + "content": "Dongwei Jiang, Marcio Fonseca, and Shay B. Cohen. LeanReasoner: Boosting complex logical reasoning with Lean. In Kevin Duh, Helena Gómez-Adorno, and Steven Bethard (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), NAACL 2024, Mexico City, Mexico, June 16-21, 2024, pp. 7497-7510. Association for Computational Linguistics, 2024. doi: 10.18653/V1/2024.NAACL-LONG.416. URL https://doi.org/10.18653/v1/2024.naacl-long.416." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.29, + 0.825, + 0.348 + ], + "angle": 0, + "content": "Brihi Joshi, Ziyi Liu, Sahana Ramnath, Aaron Chan, Zhewei Tong, Shaoliang Nie, Qifan Wang, Yejin Choi, and Xiang Ren. Are Machine Rationales (Not) Useful to Humans? Measuring and Improving Human Utility of Free-text Rationales. ArXiv, abs/2305.07095, 2023. URL https://api.semanticscholar.org/CorpusID:258676376." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.356, + 0.825, + 0.399 + ], + "angle": 0, + "content": "Subbarao Kambhampati. Can large language models reason and plan? Annals of the New York Academy of Sciences, 1534:15-18, 2024. URL https://api.semanticscholar.org/CorpusID:268249961." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.407, + 0.825, + 0.506 + ], + "angle": 0, + "content": "Subbarao Kambhampati, Karthik Valmeekam, Lin Guan, Mudit Verma, Kaya Stechly, Siddhant Bhambri, Lucas Paul Saldyt, and Anil B Murthy. Position: LLMs can't plan, but can help planning in LLM-modulo frameworks. In Ruslan Salakhutdinov, Zico Kolter, Katherine Heller, Adrian Weller, Nuria Oliver, Jonathan Scarlett, and Felix Berkenkamp (eds.), Proceedings of the 41st International Conference on Machine Learning, volume 235 of Proceedings of Machine Learning Research, pp. 22895-22907. PMLR, 21-27 Jul 2024a. URL https://proceedings.mlr.press/v235/kambhampati24a.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.515, + 0.825, + 0.572 + ], + "angle": 0, + "content": "Subbarao Kambhampati, Karthik Valmeekam, Lin Guan, Mudit Verma, Kaya Stechly, Siddhant Bhambri, Lucas Paul Saldyt, and Anil B Murthy. Position: LLMs can't plan, but can help planning in LLM-modulo frameworks. In Forty-first International Conference on Machine Learning, 2024b. URL https://openreview.net/forum?id=Th8JPEmH4z.
+ }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.581, + 0.825, + 0.61 + ], + "angle": 0, + "content": "Liwei Kang, Zirui Zhao, David Hsu, and Wee Sun Lee. On the empirical complexity of reasoning and planning in LLMs. arXiv preprint arXiv:2404.11041, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.619, + 0.825, + 0.661 + ], + "angle": 0, + "content": "Marzena Karpinska, Katherine Thai, Kyle Lo, Tanya Goyal, and Mohit Iyyer. One thousand and one pairs: A \"novel\" challenge for long-context language models. ArXiv, abs/2406.16264, 2024. URL https://api.semanticscholar.org/CorpusID:270703648." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.671, + 0.825, + 0.728 + ], + "angle": 0, + "content": "Tushar Khot, H. Trivedi, Matthew Finlayson, Yao Fu, Kyle Richardson, Peter Clark, and Ashish Sabharwal. Decomposed prompting: A modular approach for solving complex tasks. In The International Conference on Learning Representations, volume abs/2210.02406, 2023. URL https://api.semanticscholar.org/CorpusID:252715485." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.736, + 0.825, + 0.793 + ], + "angle": 0, + "content": "Seungone Kim, Juyoung Suk, Ji Yong Cho, Shayne Longpre, Chaeun Kim, Dongkeun Yoon, Guijin Son, Yejin Cho, Sheikh Shafayat, Jinheon Baek, et al. The BiGGen Bench: A Principled Benchmark for Fine-grained Evaluation of Language Models with Language Models. arXiv preprint arXiv:2406.05761, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.802, + 0.825, + 0.858 + ], + "angle": 0, + "content": "Takeshi Kojima, Shixiang Shane Gu, Machel Reid, Yutaka Matsuo, and Yusuke Iwasawa. Large language models are zero-shot reasoners. In Proceedings of the 36th International Conference on Neural Information Processing Systems, Red Hook, NY, USA, 2022. Curran Associates Inc. ISBN 9781713871088." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.868, + 0.825, + 0.925 + ], + "angle": 0, + "content": "Woosuk Kwon, Zhuohan Li, Siyuan Zhuang, Ying Sheng, Lianmin Zheng, Cody Hao Yu, Joseph E. Gonzalez, Hao Zhang, and Ion Stoica. Efficient memory management for large language model serving with pagedattention. In Proceedings of the ACM SIGOPS 29th Symposium on Operating Systems Principles, 2023." + }, + { + "type": "list", + "bbox": [ + 0.172, + 0.103, + 0.828, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.104, + 0.826, + 0.188 + ], + "angle": 0, + "content": "Brenden M. Lake and Marco Baroni. Generalization without systematicity: On the compositional skills of sequence-to-sequence recurrent networks. In Jennifer G. Dy and Andreas Krause (eds.), Proceedings of the 35th International Conference on Machine Learning, ICML 2018, Stockholm, Sweden, July 10-15, 2018, volume 80 of Proceedings of Machine Learning Research, pp. 2879-2888. PMLR, 2018. URL http://proceedings.mlr.press/v80/lake18a.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.2, + 0.825, + 0.243 + ], + "angle": 0, + "content": "Tamera Lanham, Anna Chen, Ansh Radhakrishnan, Benoit Steiner, Carson Denison, Danny Hernandez, Dustin Li, Esin Durmus, Evan Hubinger, Jackson Kernion, et al. Measuring faithfulness in chain-of-thought reasoning.
arXiv preprint arXiv:2307.13702, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.254, + 0.825, + 0.352 + ], + "angle": 0, + "content": "Fangyu Lei, Qian Liu, Yiming Huang, Shizhu He, Jun Zhao, and Kang Liu. S3Eval: A synthetic, scalable, systematic evaluation suite for large language models. In Kevin Duh, Helena Gomez, and Steven Bethard (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 1259-1286, Mexico City, Mexico, June 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.naacl-long.69. URL https://aclanthology.org/2024.naacl-long.69." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.363, + 0.825, + 0.461 + ], + "angle": 0, + "content": "Tianwen Li, Zhexiong Liu, Lindsay Matsumura, Elaine Wang, Diane Litman, and Richard Correnti. Using large language models to assess young students' writing revisions. In Ekaterina Kochmar, Marie Bexte, Jill Burstein, Andrea Horbach, Ronja Laarmann-Quante, Anaïs Tack, Victoria Yaneva, and Zheng Yuan (eds.), Proceedings of the 19th Workshop on Innovative Use of NLP for Building Educational Applications (BEA 2024), pp. 365–380, Mexico City, Mexico, June 2024. Association for Computational Linguistics. URL https://aclanthology.org/2024.bea-1.30." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.473, + 0.825, + 0.53 + ], + "angle": 0, + "content": "Hunter Lightman, Vineet Kosaraju, Yuri Burda, Harrison Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=v8L0pN6E0i." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.541, + 0.825, + 0.584 + ], + "angle": 0, + "content": "B. Liu, Yuqian Jiang, Xiaohan Zhang, Qiang Liu, Shiqi Zhang, Joydeep Biswas, and Peter Stone. LLM+P: Empowering large language models with optimal planning proficiency. ArXiv, abs/2304.11477, 2023a. URL https://api.semanticscholar.org/CorpusID:258298051." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.595, + 0.825, + 0.639 + ], + "angle": 0, + "content": "Bingbin Liu, Jordan T. Ash, Surbhi Goel, Akshay Krishnamurthy, and Cyril Zhang. Transformers learn shortcuts to automata. In The Eleventh International Conference on Learning Representations, 2023b. URL https://openreview.net/forum?id=De4FYqjFueZ." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.65, + 0.825, + 0.748 + ], + "angle": 0, + "content": "Pan Lu, Swaroop Mishra, Tanglin Xia, Liang Qiu, Kai-Wei Chang, Song-Chun Zhu, Oyvind Tafjord, Peter Clark, and Ashwin Kalyan. Learn to explain: Multimodal reasoning via thought chains for science question answering. In Sanmi Koyejo, S. Mohamed, A. Agarwal, Danielle Belgrave, K. Cho, and A. Oh (eds.), Advances in Neural Information Processing Systems 35: Annual Conference on Neural Information Processing Systems 2022, NeurIPS 2022, New Orleans, LA, USA, November 28 - December 9, 2022, 2022. URL http://papers.nips.cc/paper_files/paper/2022/hash/11332b6b6cf4485b84afadb1352d3a9a-Abstract-Conference.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.759, + 0.825, + 0.817 + ], + "angle": 0, + "content": "Yingwei Ma, Yue Liu, Yue Yu, Yuanliang Zhang, Yu Jiang, Changjian Wang, and Shanshan Li. At Which Training Stage Does Code Data Help LLMs Reasoning?
In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net, 2024. URL https://openreview.net/forum?id=KIPJKST4gw." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.828, + 0.825, + 0.871 + ], + "angle": 0, + "content": "Aman Madaan and Amir Yazdanbakhsh. Text and patterns: For effective chain of thought, it takes two to tango. ArXiv, abs/2209.07686, 2022. URL https://api.semanticscholar.org/CorpusID:252355328." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.882, + 0.825, + 0.925 + ], + "angle": 0, + "content": "William Merrill and Ashish Sabharwal. The expressive power of transformers with chain of thought. In The International Conference on Learning Representations, volume abs/2310.07923, 2024. URL https://api.semanticscholar.org/CorpusID:263909434." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.104, + 0.826, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.828, + 0.162 + ], + "angle": 0, + "content": "Maxwell Nye, Anders Johan Andreassen, Guy Gur-Ari, Henryk Michalewski, Jacob Austin, David Bieber, David Dohan, Aitor Lewkowycz, Maarten Bosma, David Luan, Charles Sutton, and Augustus Odena. Show your work: Scratchpads for intermediate computation with language models, 2022. URL https://openreview.net/forum?id=iedYJm92o0a." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.169, + 0.826, + 0.215 + ], + "angle": 0, + "content": "Theo X. Olausson, Jeevana Priya Inala, Chenglong Wang, Jianfeng Gao, and Armando Solar-Lezama. Is self-repair a silver bullet for code generation? In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=y0GJXRungR." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.222, + 0.822, + 0.253 + ], + "angle": 0, + "content": "OpenAI. GPT-4 Technical Report. ArXiv, abs/2303.08774, 2023. URL https://api.semanticscholar.org/CorpusID:257532815." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.263, + 0.826, + 0.348 + ], + "angle": 0, + "content": "Liangming Pan, Alon Albalak, Xinyi Wang, and William Wang. Logic-LM: Empowering large language models with symbolic solvers for faithful logical reasoning. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Findings of the Association for Computational Linguistics: EMNLP 2023, pp. 3806-3824, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.findings-emnlp.248. URL https://aclanthology.org/2023.findings-emnlp.248." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.357, + 0.826, + 0.444 + ], + "angle": 0, + "content": "Xiangyu Peng, Siyan Li, Sarah Wiegreffe, and Mark Riedl. Inferring the reader: Guiding automated story generation with commonsense reasoning. In Yoav Goldberg, Zornitsa Kozareva, and Yue Zhang (eds.), Findings of the Association for Computational Linguistics: EMNLP 2022, pp. 7008-7029, Abu Dhabi, United Arab Emirates, December 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.findings-emnlp.520. URL https://aclanthology.org/2022.findings-emnlp.520.
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.452, + 0.825, + 0.484 + ], + "angle": 0, + "content": "Zhenting Qi, Mingyuan Ma, Jiahang Xu, Li Lyna Zhang, Fan Yang, and Mao Yang. Mutual Reasoning Makes Smaller LLMs Stronger Problem-Solvers. arXiv preprint arXiv:2408.06195, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.492, + 0.826, + 0.577 + ], + "angle": 0, + "content": "Xin Quan, Marco Valentino, Louise Dennis, and Andre Freitas. Enhancing ethical explanations of large language models through iterative symbolic refinement. In Yvette Graham and Matthew Purver (eds.), Proceedings of the 18th Conference of the European Chapter of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 1-22, St. Julian's, Malta, March 2024. Association for Computational Linguistics. URL https://aclanthology.org/2024.eacl-long.1." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.586, + 0.825, + 0.631 + ], + "angle": 0, + "content": "Machel Reid et al. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. ArXiv, abs/2403.05530, 2024. URL https://api.semanticscholar.org/CorpusID:268297180." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.64, + 0.826, + 0.684 + ], + "angle": 0, + "content": "David Rein, Betty Li Hou, Asa Cooper Stickland, Jackson Petty, Richard Yuanzhe Pang, Julien Dirani, Julian Michael, and Samuel R. Bowman. GPQA: A Graduate-Level Google-Proof Q&A Benchmark, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.693, + 0.825, + 0.724 + ], + "angle": 0, + "content": "Gemma Team, Morgane Riviere, et al. Gemma 2: Improving open language models at a practical size. 2024. URL https://api.semanticscholar.org/CorpusID:270843326." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.733, + 0.826, + 0.777 + ], + "angle": 0, + "content": "Keisuke Sakaguchi, Ronan Le Bras, Chandra Bhagavatula, and Yejin Choi. WinoGrande: an adversarial winograd schema challenge at scale. Commun. ACM, 64(9):99-106, aug 2021. ISSN 0001-0782. doi: 10.1145/3474381. URL https://doi.org/10.1145/3474381." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.786, + 0.826, + 0.872 + ], + "angle": 0, + "content": "Maarten Sap, Hannah Rashkin, Derek Chen, Ronan Le Bras, and Yejin Choi. Social IQa: Commonsense reasoning about social interactions. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pp. 4463-4473, Hong Kong, China, November 2019. Association for Computational Linguistics. doi: 10.18653/v1/D19-1454. URL https://aclanthology.org/D19-1454." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.882, + 0.826, + 0.925 + ], + "angle": 0, + "content": "Abulhair Saparov and He He. Language models are greedy reasoners: A systematic formal analysis of chain-of-thought. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=qFVVBzXxR2V.
+ }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.828, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.826, + 0.148 + ], + "angle": 0, + "content": "Zayne Rea Sprague, Xi Ye, Kaj Bostrom, Swarat Chaudhuri, and Greg Durrett. MuSR: Testing the limits of chain-of-thought with multistep soft reasoning. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=jenyYQzue1." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.157, + 0.826, + 0.214 + ], + "angle": 0, + "content": "Aarohi Srivastava, Abhinav Rastogi, Abhishek Rao, Abu Awal Md Shoeb, Abubakar Abid, Adam Fisch, Adam R Brown, Adam Santoro, Aditya Gupta, Adrià Garriga-Alonso, et al. Beyond the imitation game: Quantifying and extrapolating the capabilities of language models. arXiv preprint arXiv:2206.04615, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.226, + 0.826, + 0.31 + ], + "angle": 0, + "content": "Maja Stahl, Leon Biermann, Andreas Nehring, and Henning Wachsmuth. Exploring LLM prompting strategies for joint essay scoring and feedback generation. In Ekaterina Kochmar, Marie Bexte, Jill Burstein, Andrea Horbach, Ronja Laarmann-Quante, Anaïs Tack, Victoria Yaneva, and Zheng Yuan (eds.), Proceedings of the 19th Workshop on Innovative Use of NLP for Building Educational Applications (BEA 2024), pp. 283–298, Mexico City, Mexico, June 2024. Association for Computational Linguistics. URL https://aclanthology.org/2024.bea-1.23." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.321, + 0.826, + 0.364 + ], + "angle": 0, + "content": "Kaya Stechly, Karthik Valmeekam, and Subbarao Kambhampati. On the self-verification limitations of large language models on reasoning and planning tasks. ArXiv, abs/2402.08115, 2024a. URL https://api.semanticscholar.org/CorpusID:267637077." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.375, + 0.826, + 0.417 + ], + "angle": 0, + "content": "Kaya Stechly, Karthik Valmeekam, and Subbarao Kambhampati. Chain of thoughtlessness? An analysis of CoT in planning. 2024b. URL https://api.semanticscholar.org/CorpusID:269626390." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.429, + 0.826, + 0.513 + ], + "angle": 0, + "content": "Simeng Sun, Yang Liu, Shuohang Wang, Dan Iter, Chenguang Zhu, and Mohit Iyyer. PEARL: Prompting large language models to plan and execute actions over long documents. In Yvette Graham and Matthew Purver (eds.), Proceedings of the 18th Conference of the European Chapter of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 469-486, St. Julian's, Malta, March 2024. Association for Computational Linguistics. URL https://aclanthology.org/2024.eacl-long.29." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.524, + 0.826, + 0.622 + ], + "angle": 0, + "content": "Mirac Suzgun, Nathan Scales, Nathanael Schärli, Sebastian Gehrmann, Yi Tay, Hyung Won Chung, Aakanksha Chowdhery, Quoc Le, Ed Chi, Denny Zhou, and Jason Wei. Challenging BIG-bench tasks and whether chain-of-thought can solve them. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Findings of the Association for Computational Linguistics: ACL 2023, pp.
13003-13051, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.findings-acl.824. URL https://aclanthology.org/2023.findings-acl.824." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.634, + 0.826, + 0.718 + ], + "angle": 0, + "content": "Alon Talmor, Jonathan Herzig, Nicholas Lourie, and Jonathan Berant. CommonsenseQA: A question answering challenge targeting commonsense knowledge. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pp. 4149-4158, Minneapolis, Minnesota, June 2019. Association for Computational Linguistics. doi: 10.18653/v1/N19-1421. URL https://aclanthology.org/N19-1421." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.73, + 0.826, + 0.924 + ], + "angle": 0, + "content": "Hugo Touvron, Louis Martin, Kevin R. Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, Daniel M. Bikel, Lukas Blecher, Cristian Cantón Ferrer, Moya Chen, Guillem Cucurull, David Esiobu, Jude Fernandes, Jeremy Fu, Wenyin Fu, Brian Fuller, Cynthia Gao, Vedanuj Goswami, Naman Goyal, Anthony S. Hartshorn, Saghar Hosseini, Rui Hou, Hakan Inan, Marcin Kardas, Viktor Kerkez, Madian Khabsa, Isabel M. Kloumann, A. V. Korenev, Punit Singh Koura, Marie-Anne Lachaux, Thibaut Lavril, Jenya Lee, Diana Liskovich, Yinghai Lu, Yuning Mao, Xavier Martinet, Todor Mihaylov, Pushkar Mishra, Igor Molybog, Yixin Nie, Andrew Poulton, Jeremy Reizenstein, Rashi Rungta, Kalyan Saladi, Alan Schelten, Ruan Silva, Eric Michael Smith, R. Subramanian, Xia Tan, Binh Tang, Ross Taylor, Adina Williams, Jian Xiang Kuan, Puxin Xu, Zheng Yan, Iliyan Zarov, Yuchen Zhang, Angela Fan, Melanie Kambadur, Sharan Narang, Aurelien Rodriguez, Robert Stojnic, Sergey Edunov, and Thomas Scialom. Llama 2: Open foundation and fine-tuned chat models. ArXiv, abs/2307.09288, 2023. URL https://api.semanticscholar.org/CorpusID:259950998." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.924 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.826, + 0.147 + ], + "angle": 0, + "content": "Harsh Trivedi, Niranjan Balasubramanian, Tushar Khot, and Ashish Sabharwal. MuSiQue: Multi-hop questions via single-hop question composition. Transactions of the Association for Computational Linguistics, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.153, + 0.826, + 0.21 + ], + "angle": 0, + "content": "Karthik Valmeekam, Matthew Marquez, Sarath Sreedharan, and Subbarao Kambhampati. On the planning abilities of large language models - a critical investigation. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. URL https://openreview.net/forum?id=X6dEqXIsEW." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.218, + 0.826, + 0.275 + ], + "angle": 0, + "content": "Karthik Valmeekam, Matthew Marquez, Alberto Olmo, Sarath Sreedharan, and Subbarao Kambhampati. PlanBench: An extensible benchmark for evaluating large language models on planning and reasoning about change.
In Proceedings of the 37th International Conference on Neural Information Processing Systems, NIPS '23, Red Hook, NY, USA, 2024. Curran Associates Inc." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.281, + 0.826, + 0.338 + ], + "angle": 0, + "content": "Mudit Verma, Siddhant Bhambri, and Subbarao Kambhampati. Theory of mind abilities of large language models in human-robot interaction: An illusion? Companion of the 2024 ACM/IEEE International Conference on Human-Robot Interaction, 2024. URL https://api.semanticscholar.org/CorpusID:266902529." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.345, + 0.826, + 0.43 + ], + "angle": 0, + "content": "Boshi Wang, Sewon Min, Xiang Deng, Jiaming Shen, You Wu, Luke Zettlemoyer, and Huan Sun. Towards understanding chain-of-thought prompting: An empirical study of what matters. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 2717-2739, Toronto, Canada, July 2023a. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-long.153. URL https://aclanthology.org/2023.acl-long.153." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.436, + 0.826, + 0.493 + ], + "angle": 0, + "content": "Lei Wang, Wanyu Xu, Yihuai Lan, Zhiqiang Hu, Yunshi Lan, Roy Ka-Wei Lee, and Ee-Peng Lim. Plan-and-solve prompting: Improving zero-shot chain-of-thought reasoning by large language models. In Annual Meeting of the Association for Computational Linguistics, 2023b. URL https://api.semanticscholar.org/CorpusID:258558102." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.499, + 0.826, + 0.557 + ], + "angle": 0, + "content": "Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc V Le, Ed H. Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. Self-consistency improves chain of thought reasoning in language models. In The Eleventh International Conference on Learning Representations, 2023c. URL https://openreview.net/forum?id=1PL1NIMMrw." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.563, + 0.826, + 0.621 + ], + "angle": 0, + "content": "Yubo Wang, Xueguang Ma, Ge Zhang, Yuansheng Ni, Abhranil Chandra, Shiguang Guo, Weiming Ren, Aaran Arulraj, Xuan He, Ziyan Jiang, Tianle Li, Max Ku, Kai Wang, Alex Zhuang, Rongqi Fan, Xiang Yue, and Wenhu Chen. MMLU-Pro: A More Robust and Challenging Multi-Task Language Understanding Benchmark, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.627, + 0.826, + 0.671 + ], + "angle": 0, + "content": "Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. Chain-of-thought prompting elicits reasoning in large language models. Advances in Neural Information Processing Systems, 35:24824-24837, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.677, + 0.826, + 0.734 + ], + "angle": 0, + "content": "Li Siang Wong, Gabriel Grand, Alexander K. Lew, Noah D. Goodman, Vikash K. Mansinghka, Jacob Andreas, and Joshua B. Tenenbaum. From word models to world models: Translating from natural language to the probabilistic language of thought. ArXiv, abs/2306.12672, 2023. URL https://api.semanticscholar.org/CorpusID:259224900." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.741, + 0.826, + 0.784 + ], + "angle": 0, + "content": "Jian Xie, Kai Zhang, Jiangjie Chen, Tinghui Zhu, Renze Lou, Yuandong Tian, Yanghua Xiao, and Yu Su. TravelPlanner: A Benchmark for Real-World Planning with Language Agents.
ArXiv, abs/2402.01622, 2024. URL https://api.semanticscholar.org/CorpusID:267406800." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.79, + 0.826, + 0.848 + ], + "angle": 0, + "content": "Miao Xiong, Zhiyuan Hu, Xinyang Lu, Yifei Li, Jie Fu, Junxian He, and Bryan Hooi. Can LLMs Express Their Uncertainty? An Empirical Evaluation of Confidence Elicitation in LLMs. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net, 2024. URL https://openreview.net/forum?id=gjeQKFxFpZ." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.854, + 0.826, + 0.926 + ], + "angle": 0, + "content": "Jundong Xu, Hao Fei, Liangming Pan, Qian Liu, Mong-Li Lee, and Wynne Hsu. Faithful logical reasoning via symbolic chain-of-thought. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar (eds.), Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 13326-13365, Bangkok, Thailand, August 2024a. Association for Computational Linguistics. URL https://aclanthology.org/2024.acl-long.720." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.926 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.826, + 0.147 + ], + "angle": 0, + "content": "Xiaohan Xu, Chongyang Tao, Tao Shen, Can Xu, Hongbo Xu, Guodong Long, and Jian-Guang Lou. Re-reading improves reasoning in language models, 2024b. URL https://openreview.net/forum?id=3jXCF5dNpC." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.155, + 0.826, + 0.199 + ], + "angle": 0, + "content": "Chengrun Yang, Xuezhi Wang, Yifeng Lu, Hanxiao Liu, Quoc V Le, Denny Zhou, and Xinyun Chen. Large language models as optimizers. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=Bb4VGOWELI." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.206, + 0.825, + 0.237 + ], + "angle": 0, + "content": "Shunyu Yao, Dian Yu, Jeffrey Zhao, Izhak Shafran, Thomas L. Griffiths, Yuan Cao, and Karthik Narasimhan. Tree of Thoughts: Deliberate problem solving with large language models, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.244, + 0.825, + 0.274 + ], + "angle": 0, + "content": "Xi Ye, Qiaochu Chen, Isil Dillig, and Greg Durrett. Satisfiability-aided language models using declarative prompting. In Advances in Neural Information Processing Systems, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.282, + 0.825, + 0.339 + ], + "angle": 0, + "content": "Hugh Zhang, Jeff Da, Dean Lee, Vaughn Robinson, Catherine Wu, Will Song, Tiffany Zhao, Pranav Raja, Dylan Slack, Qin Lyu, Sean Hendryx, Russell Kaplan, Michele Lunati, and Summer Yue. A careful examination of large language model performance on grade school arithmetic. ArXiv, abs/2405.00332, 2024. URL https://api.semanticscholar.org/CorpusID:269484687." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.347, + 0.825, + 0.405 + ], + "angle": 0, + "content": "Huaixiu Steven Zheng, Swaroop Mishra, Xinyun Chen, Heng-Tze Cheng, Ed H. Chi, Quoc V Le, and Denny Zhou. Take a step back: Evoking reasoning via abstraction in large language models.
In The Twelfth International Conference on Learning Representations, 2024a. URL https://openreview.net/forum?id=3bq3jsvcQ1." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.413, + 0.825, + 0.483 + ], + "angle": 0, + "content": "Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zi Lin, Zhuohan Li, Dacheng Li, Eric P. Xing, Hao Zhang, Joseph E. Gonzalez, and Ion Stoica. Judging LLM-as-a-judge with MT-bench and Chatbot Arena. In Proceedings of the 37th International Conference on Neural Information Processing Systems, NIPS '23, Red Hook, NY, USA, 2024b. Curran Associates Inc." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.492, + 0.825, + 0.535 + ], + "angle": 0, + "content": "Wanjun Zhong, Ruixiang Cui, Yiduo Guo, Yaobo Liang, Shuai Lu, Yanlin Wang, Amin Saied, Weizhu Chen, and Nan Duan. AGIEval: A Human-Centric Benchmark for Evaluating Foundation Models, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.544, + 0.825, + 0.629 + ], + "angle": 0, + "content": "Ben Zhou, Kyle Richardson, Xiaodong Yu, and Dan Roth. Learning to decompose: Hypothetical question decomposition based on comparable texts. In Yoav Goldberg, Zornitsa Kozareva, and Yue Zhang (eds.), Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pp. 2223-2235, Abu Dhabi, United Arab Emirates, December 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.emnlp-main.142. URL https://aclanthology.org/2022.emnlp-main.142." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.637, + 0.825, + 0.681 + ], + "angle": 0, + "content": "Ben Zhou, Hongming Zhang, Sihao Chen, Dian Yu, Hongwei Wang, Baolin Peng, Dan Roth, and Dong Yu. Conceptual and unbiased reasoning in language models. ArXiv, abs/2404.00205, 2024. URL https://api.semanticscholar.org/CorpusID:268820105." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.688, + 0.825, + 0.746 + ], + "angle": 0, + "content": "Denny Zhou, Nathanael Schärli, Le Hou, Jason Wei, Nathan Scales, Xuezhi Wang, Dale Schuurmans, Claire Cui, Olivier Bousquet, Quoc V Le, and Ed H. Chi. Least-to-most prompting enables complex reasoning in large language models. In The Eleventh International Conference on Learning Representations, 2023a. URL https://openreview.net/forum?id=WZH7099tgtM." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.754, + 0.825, + 0.811 + ], + "angle": 0, + "content": "Yongchao Zhou, Andrei Ioan Muresanu, Ziwen Han, Keiran Paster, Silviu Pitis, Harris Chan, and Jimmy Ba. Large Language Models are Human-Level Prompt Engineers. In The Eleventh International Conference on Learning Representations, 2023b. URL https://openreview.net/forum?id=92gvk82DE-.
+ }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.811 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.103, + 0.768, + 0.119 + ], + "angle": 0, + "content": "A META-ANALYSIS EXPANDED DETAILS ON CRITERIA AND PROCESS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.135, + 0.827, + 0.234 + ], + "angle": 0, + "content": "Automatic Selection and Paper Filtering We investigate all papers from ICLR 2024, a representative ML venue, and two representative NLP venues, EACL 2024 and NAACL 2024 (including Findings and Workshop papers). We filtered all 4,642 papers (2,259 from ICLR 2024 and 2,382 from the two ACL-affiliated conferences) for those with at least two occurrences of \"CoT\", \"chain-of-thought\", or \"chain of thought\", resulting in 516 papers. There are conceivably papers using CoT called by another name (e.g., Scratchpads), but we believe these 516 give a representative sample appropriate for systematic analysis." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.25, + 0.828, + 0.348 + ], + "angle": 0, + "content": "Manual Paper Filtering and Results Extraction We then filter down to papers that perform a comparison of CoT prompting vs. direct prompting, whether or not this is core to the paper's research question. We manually filtered the 516 papers in question and extracted the key results from those that remained. We excluded multimodal models, CoT-fine-tuned models, any experiments where the \"CoT\" method involves multiple forward passes (e.g., self-consistency (Wang et al., 2023c) and tree-of-thought (Yao et al., 2023)),5 and systems that augment LLMs with external tools (discussed more in Section 5)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.355, + 0.827, + 0.452 + ], + "angle": 0, + "content": "For each paper passing through these criteria, we manually extracted the results from key tables comparing CoT and direct answer prompts. We only include results where the CoT and direct prompts are run on the same model and same dataset while being on a scale of 0 to 100 (excluding Likert scale evaluations, for example) for a more direct comparison. When papers include various CoT or direct answer prompts (including zero/few-shot variants), we always take the best-performing prompt for both. We focus on key test results where applicable, excluding dev sets if they are reported alongside test and also excluding numbers from ablations or nonstandard subsets of datasets." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.459, + 0.827, + 0.502 + ], + "angle": 0, + "content": "This resulted in a total of 1,218 experimental comparisons across 110 papers (35 from ICLR and 75 from NAACL and EACL) covering 264 datasets. Details and more information will be available in our GitHub Repo." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.518, + 0.827, + 0.603 + ], + "angle": 0, + "content": "Categorization Given the large number of tasks and datasets being compared, we grouped each task into a set of 14 categories. These categories were determined based on the description (and possibly examples) of the task, not taking into account system performance. 
These categories abstract over traditional NLP task classifications (e.g., NER, reading comprehension) and take into account both the task format and the kinds of reasoning involved. Definitions for several categories are shown in Table 1 and the full description is given in Appendix B." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.625, + 0.486, + 0.641 + ], + "angle": 0, + "content": "B QUANTITATIVE META-ANALYSIS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.657, + 0.807, + 0.672 + ], + "angle": 0, + "content": "See the full list of categories and their descriptions that we used for the meta-analysis in Table 2." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.693, + 0.519, + 0.71 + ], + "angle": 0, + "content": "C EXPANDED EXPERIMENTAL DETAILS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.726, + 0.827, + 0.838 + ], + "angle": 0, + "content": "A full list of the datasets can be found in Table 4. Each model can be seen in Table 5. We use one answer parser for all datasets of the same answer response format (one for multiple choice, short answer, etc.); however, some datasets require special handling and have edge cases that we handle separately from the rest of the datasets. Similarly, for each model, we use the exact same prompt across them, except when closed source models require different prompts because they do not allow for partial completions (i.e., when we cannot put \"let's think step by step\" to warm-start the assistant's response). All prompts are given in our Huggingface repo, including the model output and what our answer parser extracted as the answer." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.844, + 0.826, + 0.873 + ], + "angle": 0, + "content": "Experiments were conducted either by invoking APIs or by running open-source models on our own hardware, mostly on a machine with 8 A40s or 4 Quadro RTX 8000s. All locally hosted models were" + }, + { + "type": "page_footnote", + "bbox": [ + 0.171, + 0.885, + 0.825, + 0.926 + ], + "angle": 0, + "content": "5These systems use more compute than direct answer, and there is not a clear comparison to be made here. Moreover, our anecdotal coverage of these methods shows that they are most used for math, coding, and logic settings, for which we already have high representation among reported CoT methods." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.961 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "table_caption", + "bbox": [ + 0.289, + 0.101, + 0.71, + 0.117 + ], + "angle": 0, + "content": "Table 2: Categories and their descriptions for the meta-analysis." + }, + { + "type": "table", + "bbox": [ + 0.175, + 0.127, + 0.825, + 0.743 + ], + "angle": 0, + "content": "
Category | Description
Symbolic and algorithmic | Tasks involving symbol manipulation which can be solved by executing a program. This includes entity tracking datasets (e.g., SCONE, Coin Flip) and algorithmic tasks (e.g., BBH word sorting or finding shortest paths in a graph).
Math | Tasks requiring mathematical reasoning, from grade-school math to advanced mathematics, including physics questions.
Logical reasoning | Tasks designed to test for logical reasoning, whether deductive (Saparov & He, 2023, PrOntoQA), inductive (Bowen et al., 2024) or analogical (Ma et al., 2024) reasoning, including syllogisms and logical puzzles.
Commonsense reasoning | Datasets designed to test for commonsense knowledge and reasoning, i.e., world knowledge that most people would have, rather than specialized expert-level knowledge in a discipline acquired after years of study.
Encyclopedic knowledge | Tasks requiring expert-level in-depth knowledge beyond mere commonsense, usually in an open-book setting.
Spatial and temporal reasoning | Datasets designed to test for an understanding of space and spatial relations (e.g., navigation) or reasoning involving time and sequences over time.
Multi-hop QA | Questions involving the composition of multiple steps of reasoning in order to arrive at an answer, such as “What is the capital of the country whose scientist discovered penicillin?”
Context-aware QA | Tasks such as closed-book QA and reading comprehension involving reasoning about a given text in context. The context is often a short passage, but could also take the form of a knowledge graph (KBQA) or a table. This category also includes information extraction tasks, such as NER or relation extraction.
Entailment | Tasks involving establishing the inferential relation between two texts, prototypically NLI, but also including fact verification.
Text classification | Tasks involving the classification of a text into a small set of categories, such as topic or sentiment classification, but also involving tasks such as hate speech detection and misinformation detection.
Generation | Tasks involving text generation, including machine translation, dialogue, question generation, as well as code generation. Tasks such as SQL execution (Lei et al., 2024) or systematic transformations of data (e.g., SCAN (Lake & Baroni, 2018)) are excluded because they can be solved by executing a program.
Meta-linguistic | Tasks probing for models' knowledge of linguistics, such as identifying the main subject of a sentence or solving linguistic puzzles.
Mixed datasets | Datasets containing a variety of tasks, such as BIG-Bench Hard (BBH) or MMLU.
Other | Tasks which did not fit in any of the other categories, such as evaluating AI safety, eliciting models' verbalized confidence, or melody retrieval.
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.77, + 0.825, + 0.8 + ], + "angle": 0, + "content": "hosted with vLLM. All parameters given to the vLLM API endpoint are given in the Huggingface repo as well." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.822, + 0.478, + 0.837 + ], + "angle": 0, + "content": "OTHER COT PROMPT VARIANTS" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.855, + 0.615, + 0.87 + ], + "angle": 0, + "content": "D.1 TESTING PERFORMANCE VOLATILITY ACROSS PROMPTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.882, + 0.825, + 0.926 + ], + "angle": 0, + "content": "To test the impact of prompt choice on performance, we performed our zero-shot experiment on Llama 3.1 8B with 7 different datasets and 4 different zero-shot CoT prompting strategies common in the literature (Kojima et al., 2022; Wang et al., 2023b; Zhou et al., 2023b; Yang et al., 2024)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.14, + 0.825, + 0.21 + ], + "angle": 0, + "content": "Table 3: Models, datasets, and prompting strategies used in our experiments. Models marked with \\(\\dagger\\) are run with a 4k context size window. Note that Gemma has a larger than 4k context size window, but VLLM only supports up to a 4k context size window for it. Models marked with * indicate closed-source models that cannot handle prefixed assistant messages. Datasets marked with \\(\\triangle\\) do not have a few-shot setting." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.222, + 0.824, + 0.436 + ], + "angle": 0, + "content": "
Models | Llama 2 7B Chat† (Touvron et al., 2023), Mistral 7B Instruct v0.3 (Jiang et al., 2023), Llama 3.1 8B Instruct (Dubey et al., 2024), Llama 3.1 70B Instruct, Gemma 2 9B It† (Riviere et al., 2024), Phi-3 Small 8k Instruct (Abdin et al., 2024), gpt-4o-mini-2024-07-18*, gpt-4o-2024-08-06*, Gemini 1.5 Flash* (Reid et al., 2024), Gemini 1.5 Pro* (Reid et al., 2024), claude-3-haiku-20240307* (Anthropic, a), claude-3-5-sonnet-20240620* (Anthropic, b)
Datasets | CommonsenseQA (Talmor et al., 2019), StrategyQA (Geva et al., 2021), SiQA△ (Sap et al., 2019), PiQA△ (Bisk et al., 2019), Winogrande△ (Sakaguchi et al., 2021), GPQA (Rein et al., 2023), MuSR (Sprague et al., 2024), ContextHub (Levels 1 and 2 only) (Hua et al., 2024), ARC△ (Clark et al., 2018), AGIEval LSAT (Zhong et al., 2023), MMLU (Hendrycks et al., 2021a), MMLU Pro (Wang et al., 2024), MATH (Hendrycks et al., 2021b), GSM8K (Cobbe et al., 2021), GSM8K-hard (Gao et al., 2023), FOLIO (Han et al., 2022), MuSiQue△ (Trivedi et al., 2022), Big-Bench Hard (Suzgun et al., 2023; Srivastava et al., 2022), BiGGen Bench (Kim et al., 2024)
Prompts | zero-shot direct answer, zero-shot CoT (Kojima et al., 2022), few-shot direct answer (Brown et al., 2020), few-shot CoT (Wei et al., 2022)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.521, + 0.825, + 0.591 + ], + "angle": 0, + "content": "Table 4: List of datasets used in our experiments. We categorize each dataset into one of five categories based on the type of reasoning required: Commonsense, Knowledge, Soft Reasoning, Symbolic, or Mathematical. We also report answer formats. When we use few-shot prompts, we mark how many examples those prompts contain. BiGGen Bench has many categories of questions that explicitly ask for CoTs in the response; we ignore those categories for our evaluation." + }, + { + "type": "table", + "bbox": [ + 0.262, + 0.603, + 0.738, + 0.884 + ], + "angle": 0, + "content": "
Dataset | Type | Answer Format | m-Shots
CommonsenseQA | Commonsense | Multiple choice | 7
StrategyQA | Commonsense | True or False | 6
SIQA | Commonsense | Multiple choice | 0
PIQA | Commonsense | Multiple choice | 0
Winogrande | Commonsense | Multiple choice | 0
Arc Easy | Knowledge | Multiple choice | 0
Arc Challenge | Knowledge | Multiple choice | 0
AGIEval LSAT | Soft Reasoning | Multiple choice | 3
BiGGen-Bench | Soft Reasoning | Free response | 0
MMLU | Knowledge | Multiple choice | 5
MMLU Pro | Knowledge | Multiple choice | 5
BigBench-Hard | Symbolic | Multiple choice | 0
MuSR | Soft Reasoning | Multiple choice | 1
GPQA | Mathematical | Multiple choice | 3
MuSiQue | Soft Reasoning | Short Answer | 0
GSM8K | Mathematical | Short Answer | 8
GSM8K-Hard | Mathematical | Short Answer | 8
FOLIO | Symbolic | True, False, or Unknown | 4
ContextHub | Symbolic | True, False, or Neither | 3
MATH | Mathematical | Short Answer | 4
" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.101, + 0.827, + 0.158 + ], + "angle": 0, + "content": "Table 5: List of models for our experiments. We focus on contemporary instruction-tuned models; although pretrained and smaller language models could be used, they are not the focus of our study. Prompts and outputs used for each model are available on Huggingface. * Note that Gemma can accept more than 4k input tokens, but we are restricted to 4k by vLLM." + }, + { + "type": "table", + "bbox": [ + 0.306, + 0.169, + 0.694, + 0.364 + ], + "angle": 0, + "content": "
Model | Context Length | Is Open Source
Llama 2 7B Chat | 4k | True
Mistral 7B Instruct v0.3 | 8k | True
Llama 3.1 8B Instruct | 128k | True
Llama 3.1 70B Instruct | 128k | True
Gemma 2 9B It | 4k* | True
Qwen 2 7B Instruct | 131k | True
Qwen 2 72B Instruct | 131k | True
GPT4o-Mini | 128k | False
GPT4o | 128k | False
Gemini 1.5 Pro | 128k | False
Gemini 1.5 Flash | 1m | False
Claude 3.5 Sonnet | 200k | False
Claude 3 Haiku | 200k | False
" + }, + { + "type": "image", + "bbox": [ + 0.188, + 0.381, + 0.81, + 0.611 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.671, + 0.828, + 0.743 + ], + "angle": 0, + "content": "Figure 7: Performance of multiple prompts commonly used to elicit reasoning through CoT in the zero shot setting. Each prompt starts the assistant completion with a different phrase meant to elicit reasoning. All results are from using Llama 3.1 8B Instruct. For the Kojima variant, we explicitly place \"Let's think step by step.\" in the assistant message. There is very little variation between the CoT prompts on average." + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.77, + 0.825, + 0.814 + ], + "angle": 0, + "content": "Figure 7 shows variation due to prompts is typically small and no prompt gives a consistent gain over the other. For our experiments, this suggests that different prompts have small effects on the overall outcome on average." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.835, + 0.423, + 0.851 + ], + "angle": 0, + "content": "E FEW-SHOT EXPERIMENTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.868, + 0.825, + 0.926 + ], + "angle": 0, + "content": "Compared to a zero-shot prompt, a few-shot prompt additionally contains demonstrations of the relevant reasoning mode on different problem instances \\(\\{(v(\\mathbf{q}_i),\\mathbf{y}_i^*)\\}\\). Few-shot prompts for direct answer simply encode the answer \\(a_{i}\\) as \\(\\mathbf{y}_i^*\\), whereas few-shot prompts for chain-of-thought include a reasoning trace ending in the correct answer. Now we can define the \\(m\\)-shot direct prompt as" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "23" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.105, + 0.818, + 0.373 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.413, + 0.825, + 0.471 + ], + "angle": 0, + "content": "Figure 8: Average performance improvement from using CoT across different models in the zero-shot and few-shot settings. Each bar represents how much CoT improves the accuracy for that specific setting. In general, CoT in the few-shot setting does not change the qualitative performance of CoT versus zero-shot, though it can change the magnitude for symbolic datasets." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.496, + 0.825, + 0.528 + ], + "angle": 0, + "content": "\\(\\mathcal{I}_{\\mathrm{da}}^{m}(\\mathbf{q}) = v_{\\mathrm{da}}(\\mathbf{q}_{1})\\mathbf{a}_{1}v_{\\mathrm{da}}(\\mathbf{q}_{2})\\mathbf{a}_{2}\\dots v_{\\mathrm{da}}(\\mathbf{q}_{m})\\mathbf{a}_{m}v_{\\mathrm{da}}(\\mathbf{q})\\) and the \\(m\\)-shot cot prompt as \\(\\mathcal{I}_{\\mathrm{cot}}^{m}(\\mathbf{q}) = v_{\\mathrm{cot}}(\\mathbf{q}_{1})\\mathbf{y}_{1}^{*}v_{\\mathrm{cot}}(\\mathbf{q}_{2})\\mathbf{y}_{2}^{*}\\dots v_{\\mathrm{cot}}(\\mathbf{q}_{m})\\mathbf{y}_{m}^{*}v_{\\mathrm{cot}}(\\mathbf{q})\\)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.532, + 0.827, + 0.618 + ], + "angle": 0, + "content": "Figure 8 shows the difference between few-shot prompting and the zero-shot setting discussed in the main text of the paper. We see that using CoT in the few-shot setting largely does not change the datasets that benefit from it. 
+ { + "type": "text", + "bbox": [ + 0.17, + 0.532, + 0.827, + 0.618 + ], + "angle": 0, + "content": "Figure 8 shows the difference between few-shot prompting and the zero-shot setting discussed in the main text of the paper. We see that using CoT in the few-shot setting largely does not change the datasets that benefit from it. Only one dataset, MuSR Team Allocations, starts to improve with few-shot; however, we believe this to be an exception: the final step needed to derive the answer is complex when described in the prompt alone and much clearer when demonstrated in the examples. The magnitude of improvement over direct answer prompting when using CoT is also similar to the zero-shot setting." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.636, + 0.66, + 0.652 + ], + "angle": 0, + "content": "F EXPANDED COT VS DIRECT EXPERIMENTAL RESULTS" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.668, + 0.407, + 0.683 + ], + "angle": 0, + "content": "F.1 FULL ZERO-SHOT RESULTS" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "24" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.479, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "table_caption", + "bbox": [ + 0.214, + 0.102, + 0.784, + 0.117 + ], + "angle": 0, + "content": "Table 6: Direct answer and CoT accuracies for each reasoning category across models." + }, + { + "type": "table", + "bbox": [ + 0.178, + 0.128, + 0.821, + 0.349 + ], + "angle": 0, + "content": "
Model | Commonsense | Knowledge | Mathematical | Symbolic | Soft
 | DA % | CoT % | DA % | CoT % | DA % | CoT % | DA % | CoT % | DA % | CoT %
Claude-3 Haiku | 74.3 | 77.2 | 73.0 | 76.1 | 18.1 | 48.2 | 38.6 | 48.7 | 55.9 | 56.6
Claude-3.5 Sonnet | 84.3 | 85.8 | 83.8 | 88.8 | 38.7 | 59.0 | 53.2 | 67.1 | 67.6 | 75.7
GPT-4o Mini | 81.8 | 83.2 | 73.6 | 83.1 | 22.9 | 59.7 | 48.1 | 60.9 | 61.1 | 63.5
Gemini 1.5 Flash | 80.3 | 76.8 | 78.2 | 81.0 | 27.2 | 55.7 | 47.0 | 59.7 | 60.6 | 62.6
Gemini 1.5 Pro | 80.4 | 78.3 | 80.9 | 83.8 | 35.4 | 58.5 | 52.9 | 62.6 | 64.1 | 67.8
Gemma 2 9b | 75.0 | 76.1 | 74.9 | 76.9 | 18.5 | 50.5 | 46.7 | 55.8 | 58.2 | 60.5
GPT-4o | 87.3 | 87.7 | 82.9 | 88.6 | 36.5 | 63.3 | 55.7 | 68.3 | 65.9 | 74.0
Meta-Llama 2 7b | 51.4 | 50.9 | 44.1 | 46.6 | 9.3 | 17.2 | 22.4 | 35.4 | 37.2 | 37.6
Meta-Llama 3.1 70b | 84.2 | 84.7 | 82.4 | 85.6 | 24.9 | 54.9 | 49.0 | 60.0 | 65.7 | 69.5
Meta-Llama 3.1 8b | 72.9 | 73.4 | 70.1 | 74.1 | 16.0 | 47.8 | 34.8 | 51.6 | 55.0 | 56.2
Mistral 7b | 58.3 | 61.8 | 62.0 | 64.5 | 10.9 | 28.9 | 41.8 | 45.0 | 48.6 | 49.7
Phi-3 Small 8k | 70.8 | 72.5 | 76.1 | 79.7 | 17.8 | 47.1 | 51.2 | 58.7 | 57.9 | 56.4
Qwen 2 72b | 82.9 | 84.9 | 78.6 | 84.6 | 23.9 | 58.5 | 48.2 | 58.7 | 64.2 | 65.1
Qwen 2 7b | 64.0 | 66.1 | 65.2 | 71.3 | 15.9 | 53.5 | 43.8 | 52.3 | 54.4 | 49.4
Average | 74.8 | 75.7 | 73.3 | 77.5 | 22.6 | 50.2 | 45.2 | 56.1 | 58.3 | 60.3
" + }, + { + "type": "table_caption", + "bbox": [ + 0.23, + 0.371, + 0.767, + 0.386 + ], + "angle": 0, + "content": "Table 7: Zero-shot accuracy for direct answering and CoT prompts on all datasets" + }, + { + "type": "table", + "bbox": [ + 0.221, + 0.396, + 0.778, + 0.916 + ], + "angle": 0, + "content": "
Dataset | Type | Model | zero-shot CoT accuracy | zero-shot DA accuracy
MuSR Team Allocations | Soft Reasoning | Llama 2 7b | 34.8 | 37.2
MuSR Team Allocations | Soft Reasoning | Mistral 7b | 38.8 | 46.8
MuSR Team Allocations | Soft Reasoning | Llama 3.1 8b | 44.0 | 48.0
MuSR Team Allocations | Soft Reasoning | Llama 3.1 70b | 65.2 | 66.8
MuSR Team Allocations | Soft Reasoning | Gemma 2 9b | 47.2 | 44.8
MuSR Team Allocations | Soft Reasoning | Phi-3 Small 8k | 47.2 | 61.6
MuSR Team Allocations | Soft Reasoning | Qwen 2 7b | 42.0 | 49.6
MuSR Team Allocations | Soft Reasoning | Qwen 2 72b | 58.0 | 66.8
MuSR Team Allocations | Soft Reasoning | GPT-4o Mini | 61.2 | 58.4
MuSR Team Allocations | Soft Reasoning | GPT-4o | 64.0 | 63.6
MuSR Team Allocations | Soft Reasoning | Claude-3 Haiku | 56.8 | 59.2
MuSR Team Allocations | Soft Reasoning | Claude-3.5 Sonnet | 80.4 | 63.2
MuSR Team Allocations | Soft Reasoning | Gemini 1.5 Flash | 48.8 | 55.2
MuSR Team Allocations | Soft Reasoning | Gemini 1.5 Pro | 58.4 | 62.4
SiQA | Commonsense | Llama 2 7b | 53.4 | 55.9
SiQA | Commonsense | Mistral 7b | 35.9 | 33.5
SiQA | Commonsense | Llama 3.1 8b | 73.5 | 73.5
SiQA | Commonsense | Llama 3.1 70b | 78.7 | 80.9
SiQA | Commonsense | Gemma 2 9b | 74.9 | 76.3
SiQA | Commonsense | Phi-3 Small 8k | 38.0 | 40.4
SiQA | Commonsense | Qwen 2 7b | 37.3 | 39.3
SiQA | Commonsense | Qwen 2 72b | 80.5 | 80.4
SiQA | Commonsense | GPT-4o Mini | 79.0 | 80.0
SiQA | Commonsense | GPT-4o | 81.9 | 81.5
SiQA | Commonsense | Claude-3 Haiku | 75.4 | 74.8
SiQA | Commonsense | Claude-3.5 Sonnet | 79.7 | 81.0
SiQA | Commonsense | Gemini 1.5 Flash | 74.5 | 79.1
SiQA | Commonsense | Gemini 1.5 Pro | 73.9 | 78.2
MuSiQue | Soft Reasoning | Llama 2 7b | 40.1 | 36.1
MuSiQue | Soft Reasoning | Mistral 7b | 47.3 | 47.2
MuSiQue | Soft Reasoning | Llama 3.1 8b | 62.6 | 64.7
MuSiQue | Soft Reasoning | Llama 3.1 70b | 74.0 | 72.2
MuSiQue | Soft Reasoning | Gemma 2 9b | 67.7 | 68.7
MuSiQue | Soft Reasoning | Phi-3 Small 8k | 58.3 | 64.3
MuSiQue | Soft Reasoning | Qwen 2 7b | 60.7 | 65.1
MuSiQue | Soft Reasoning | Qwen 2 72b | 56.3 | 69.0
MuSiQue | Soft Reasoning | GPT-4o Mini | 71.3 | 68.2
MuSiQue | Soft Reasoning | GPT-4o | 73.5 | 70.1
MuSiQue | Soft Reasoning | Claude-3 Haiku | 54.8 | 56.0
MuSiQue | Soft Reasoning | Claude-3.5 Sonnet | 66.9 | 70.4
MuSiQue | Soft Reasoning | Gemini 1.5 Flash | 69.8 | 66.2
MuSiQue | Soft Reasoning | Gemini 1.5 Pro | 69.8 | 71.3
AGIEval LSAT RC | Soft Reasoning | Llama 2 7b | 31.2 | 36.4
AGIEval LSAT RC | Soft Reasoning | Mistral 7b | 61.7 | 61.0
AGIEval LSAT RC | Soft Reasoning | Llama 3.1 8b | 71.0 | 68.8
AGIEval LSAT RC | Soft Reasoning | Llama 3.1 70b | 84.4 | 87.0
AGIEval LSAT RC | Soft Reasoning | Gemma 2 9b | 75.1 | 78.1
AGIEval LSAT RC | Soft Reasoning | Phi-3 Small 8k | 68.8 | 69.9
AGIEval LSAT RC | Soft Reasoning | Qwen 2 7b | 61.0 | 66.5
AGIEval LSAT RC | Soft Reasoning | Qwen 2 72b | 83.6 | 84.4
" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "25" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.479, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "table_caption", + "bbox": [ + 0.23, + 0.099, + 0.768, + 0.114 + ], + "angle": 0, + "content": "Table 7: Zero-shot accuracy for direct answering and CoT prompts on all datasets" + }, + { + "type": "table", + "bbox": [ + 0.221, + 0.124, + 0.778, + 0.915 + ], + "angle": 0, + "content": "
Dataset | Type | Model | zero-shot CoT accuracy | zero-shot DA accuracy
AGIEval LSAT RC | Soft Reasoning | GPT-4o Mini | 77.3 | 74.3
AGIEval LSAT RC | Soft Reasoning | GPT-4o | 88.1 | 81.4
AGIEval LSAT RC | Soft Reasoning | Claude-3 Haiku | 71.7 | 65.1
AGIEval LSAT RC | Soft Reasoning | Claude-3.5 Sonnet | 90.0 | 89.6
AGIEval LSAT RC | Soft Reasoning | Gemini 1.5 Flash | 78.1 | 81.0
AGIEval LSAT RC | Soft Reasoning | Gemini 1.5 Pro | 82.2 | 85.9
CommonsenseQA | Commonsense | Llama 2 7b | 49.4 | 54.6
CommonsenseQA | Commonsense | Mistral 7b | 68.0 | 68.0
CommonsenseQA | Commonsense | Llama 3.1 8b | 68.5 | 74.9
CommonsenseQA | Commonsense | Llama 3.1 70b | 83.5 | 84.4
CommonsenseQA | Commonsense | Gemma 2 9b | 79.2 | 80.1
CommonsenseQA | Commonsense | Phi-3 Small 8k | 81.8 | 80.3
CommonsenseQA | Commonsense | Qwen 2 7b | 78.5 | 79.0
CommonsenseQA | Commonsense | Qwen 2 72b | 87.4 | 87.3
CommonsenseQA | Commonsense | GPT-4o Mini | 82.5 | 83.9
CommonsenseQA | Commonsense | GPT-4o | 86.5 | 87.3
CommonsenseQA | Commonsense | Claude-3 Haiku | 80.6 | 79.0
CommonsenseQA | Commonsense | Claude-3.5 Sonnet | 85.1 | 84.3
CommonsenseQA | Commonsense | Gemini 1.5 Flash | 79.7 | 82.6
CommonsenseQA | Commonsense | Gemini 1.5 Pro | 79.9 | 82.9
GPQA | Mathematical | Llama 2 7b | 28.3 | 24.3
GPQA | Mathematical | Mistral 7b | 23.0 | 24.3
GPQA | Mathematical | Llama 3.1 8b | 24.1 | 25.9
GPQA | Mathematical | Llama 3.1 70b | 23.2 | 25.9
GPQA | Mathematical | Gemma 2 9b | 26.3 | 21.2
GPQA | Mathematical | Phi-3 Small 8k | 22.3 | 20.8
GPQA | Mathematical | Qwen 2 7b | 24.1 | 24.6
GPQA | Mathematical | Qwen 2 72b | 21.0 | 18.1
GPQA | Mathematical | GPT-4o Mini | 21.0 | 24.0
GPQA | Mathematical | GPT-4o | 23.7 | 25.9
GPQA | Mathematical | Claude-3 Haiku | 25.4 | 22.3
GPQA | Mathematical | Claude-3.5 Sonnet | 25.4 | 25.9
GPQA | Mathematical | Gemini 1.5 Flash | 22.3 | 22.8
GPQA | Mathematical | Gemini 1.5 Pro | 21.0 | 23.7
AGIEval LSAT LR | Soft Reasoning | Llama 2 7b | 29.4 | 33.5
AGIEval LSAT LR | Soft Reasoning | Mistral 7b | 44.1 | 47.8
AGIEval LSAT LR | Soft Reasoning | Llama 3.1 8b | 59.0 | 53.9
AGIEval LSAT LR | Soft Reasoning | Llama 3.1 70b | 81.4 | 81.0
AGIEval LSAT LR | Soft Reasoning | Gemma 2 9b | 64.9 | 67.6
AGIEval LSAT LR | Soft Reasoning | Phi-3 Small 8k | 64.5 | 64.1
AGIEval LSAT LR | Soft Reasoning | Qwen 2 7b | 50.6 | 58.4
AGIEval LSAT LR | Soft Reasoning | Qwen 2 72b | 77.3 | 75.1
AGIEval LSAT LR | Soft Reasoning | GPT-4o Mini | 65.3 | 68.2
AGIEval LSAT LR | Soft Reasoning | GPT-4o | 87.3 | 83.9
AGIEval LSAT LR | Soft Reasoning | Claude-3 Haiku | 55.7 | 54.7
AGIEval LSAT LR | Soft Reasoning | Claude-3.5 Sonnet | 83.7 | 82.7
AGIEval LSAT LR | Soft Reasoning | Gemini 1.5 Flash | 70.0 | 71.2
AGIEval LSAT LR | Soft Reasoning | Gemini 1.5 Pro | 79.4 | 80.4
PiQA | Commonsense | Llama 2 7b | 62.1 | 64.7
PiQA | Commonsense | Mistral 7b | 78.6 | 77.7
PiQA | Commonsense | Llama 3.1 8b | 85.0 | 84.2
PiQA | Commonsense | Llama 3.1 70b | 91.8 | 90.6
PiQA | Commonsense | Gemma 2 9b | 84.0 | 84.8
PiQA | Commonsense | Phi-3 Small 8k | 89.1 | 85.5
PiQA | Commonsense | Qwen 2 7b | 84.3 | 86.2
PiQA | Commonsense | Qwen 2 72b | 92.9 | 89.1
PiQA | Commonsense | GPT-4o Mini | 93.1 | 88.6
PiQA | Commonsense | GPT-4o | 95.9 | 95.5
PiQA | Commonsense | Claude-3 Haiku | 85.9 | 86.6
PiQA | Commonsense | Claude-3.5 Sonnet | 94.6 | 94.5
PiQA | Commonsense | Gemini 1.5 Flash | 84.6 | 89.8
PiQA | Commonsense | Gemini 1.5 Pro | 88.1 | 91.3
Arc Easy | Knowledge | Llama 2 7b | 71.1 | 69.8
Arc Easy | Knowledge | Mistral 7b | 87.5 | 86.7
Arc Easy | Knowledge | Llama 3.1 8b | 93.0 | 92.5
Arc Easy | Knowledge | Llama 3.1 70b | 97.5 | 97.9
Arc Easy | Knowledge | Gemma 2 9b | 94.9 | 95.8
Arc Easy | Knowledge | Phi-3 Small 8k | 96.0 | 96.3
Arc Easy | Knowledge | Qwen 2 7b | 89.5 | 84.7
Arc Easy | Knowledge | Qwen 2 72b | 97.9 | 97.4
Arc Easy | Knowledge | GPT-4o Mini | 96.8 | 94.6
Arc Easy | Knowledge | GPT-4o | 98.9 | 98.1
Arc Easy | Knowledge | Claude-3 Haiku | 95.1 | 95.4
Arc Easy | Knowledge | Claude-3.5 Sonnet | 98.6 | 98.4
Arc Easy | Knowledge | Gemini 1.5 Flash | 96.8 | 97.2
Arc Easy | Knowledge | Gemini 1.5 Pro | 97.2 | 94.6
Arc Challenge | Knowledge | Llama 2 7b | 49.2 | 45.2
" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "26" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.479, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "table_caption", + "bbox": [ + 0.23, + 0.099, + 0.768, + 0.114 + ], + "angle": 0, + "content": "Table 7: Zero-shot accuracy for direct answering and CoT prompts on all datasets" + }, + { + "type": "table", + "bbox": [ + 0.221, + 0.124, + 0.778, + 0.917 + ], + "angle": 0, + "content": "
Dataset | Type | Model | zero-shot CoT accuracy | zero-shot DA accuracy
Arc Challenge | Knowledge | Mistral 7b | 78.3 | 76.6
Arc Challenge | Knowledge | Llama 3.1 8b | 86.0 | 82.6
Arc Challenge | Knowledge | Llama 3.1 70b | 95.0 | 93.6
Arc Challenge | Knowledge | Gemma 2 9b | 91.0 | 89.6
Arc Challenge | Knowledge | Phi-3 Small 8k | 91.6 | 91.0
Arc Challenge | Knowledge | Qwen 2 7b | 83.9 | 75.3
Arc Challenge | Knowledge | Qwen 2 72b | 96.3 | 94.6
Arc Challenge | Knowledge | GPT-4o Mini | 93.3 | 82.6
Arc Challenge | Knowledge | GPT-4o | 96.0 | 95.3
Arc Challenge | Knowledge | Claude-3 Haiku | 89.3 | 89.3
Arc Challenge | Knowledge | Claude-3.5 Sonnet | 96.0 | 95.3
Arc Challenge | Knowledge | Gemini 1.5 Flash | 92.3 | 93.6
Arc Challenge | Knowledge | Gemini 1.5 Pro | 91.6 | 90.6
AGIEval LSAT AR | Soft Reasoning | Llama 2 7b | 17.0 | 17.4
AGIEval LSAT AR | Soft Reasoning | Mistral 7b | 21.7 | 19.1
AGIEval LSAT AR | Soft Reasoning | Llama 3.1 8b | 20.4 | 26.1
AGIEval LSAT AR | Soft Reasoning | Llama 3.1 70b | 32.6 | 28.7
AGIEval LSAT AR | Soft Reasoning | Gemma 2 9b | 24.8 | 23.0
AGIEval LSAT AR | Soft Reasoning | Phi-3 Small 8k | 28.3 | 26.5
AGIEval LSAT AR | Soft Reasoning | Qwen 2 7b | 27.0 | 23.9
AGIEval LSAT AR | Soft Reasoning | Qwen 2 72b | 29.1 | 28.3
AGIEval LSAT AR | Soft Reasoning | GPT-4o Mini | 32.2 | 23.0
AGIEval LSAT AR | Soft Reasoning | GPT-4o | 37.8 | 30.0
AGIEval LSAT AR | Soft Reasoning | Claude-3 Haiku | 24.8 | 23.5
AGIEval LSAT AR | Soft Reasoning | Claude-3.5 Sonnet | 38.3 | 33.9
AGIEval LSAT AR | Soft Reasoning | Gemini 1.5 Flash | 27.8 | 27.8
AGIEval LSAT AR | Soft Reasoning | Gemini 1.5 Pro | 30.0 | 31.7
BiGGen Bench | Soft Reasoning | Llama 2 7b | 61.6 | 56.8
BiGGen Bench | Soft Reasoning | Mistral 7b | 70.1 | 68.1
BiGGen Bench | Soft Reasoning | Llama 3.1 8b | 66.5 | 67.7
BiGGen Bench | Soft Reasoning | Llama 3.1 70b | 78.9 | 76.9
BiGGen Bench | Soft Reasoning | Gemma 2 9b | 64.7 | 64.5
BiGGen Bench | Soft Reasoning | Phi-3 Small 8k | 69.7 | 63.0
BiGGen Bench | Soft Reasoning | Qwen 2 7b | 46.2 | 69.9
BiGGen Bench | Soft Reasoning | Qwen 2 72b | 74.3 | 79.9
BiGGen Bench | Soft Reasoning | GPT-4o Mini | 70.3 | 77.7
BiGGen Bench | Soft Reasoning | GPT-4o | 86.0 | 82.0
BiGGen Bench | Soft Reasoning | Claude-3 Haiku | 80.0 | 80.0
BiGGen Bench | Soft Reasoning | Claude-3.5 Sonnet | 91.4 | 79.3
BiGGen Bench | Soft Reasoning | Gemini 1.5 Flash | 73.9 | 68.5
BiGGen Bench | Soft Reasoning | Gemini 1.5 Pro | 78.7 | 67.1
Winogrande | Commonsense | Llama 2 7b | 49.9 | 50.4
Winogrande | Commonsense | Mistral 7b | 60.4 | 56.5
Winogrande | Commonsense | Llama 3.1 8b | 66.5 | 63.3
Winogrande | Commonsense | Llama 3.1 70b | 84.2 | 81.2
Winogrande | Commonsense | Gemma 2 9b | 68.7 | 67.7
Winogrande | Commonsense | Phi-3 Small 8k | 81.5 | 81.6
Winogrande | Commonsense | Qwen 2 7b | 67.1 | 60.7
Winogrande | Commonsense | Qwen 2 72b | 81.9 | 80.7
Winogrande | Commonsense | GPT-4o Mini | 79.2 | 71.9
Winogrande | Commonsense | GPT-4o | 89.7 | 86.5
Winogrande | Commonsense | Claude-3 Haiku | 70.7 | 66.2
Winogrande | Commonsense | Claude-3.5 Sonnet | 89.4 | 85.7
Winogrande | Commonsense | Gemini 1.5 Flash | 72.5 | 74.8
Winogrande | Commonsense | Gemini 1.5 Pro | 75.5 | 78.3
MMLU | Knowledge | Llama 2 7b | 46.3 | 41.7
MMLU | Knowledge | Mistral 7b | 60.5 | 56.5
MMLU | Knowledge | Llama 3.1 8b | 72.6 | 67.5
MMLU | Knowledge | Llama 3.1 70b | 85.0 | 83.2
MMLU | Knowledge | Gemma 2 9b | 73.8 | 71.4
MMLU | Knowledge | Phi-3 Small 8k | 76.3 | 73.6
MMLU | Knowledge | Qwen 2 7b | 67.0 | 64.5
MMLU | Knowledge | Qwen 2 72b | 81.3 | 77.8
MMLU | Knowledge | GPT-4o Mini | 79.9 | 74.8
MMLU | Knowledge | GPT-4o | 87.5 | 83.4
MMLU | Knowledge | Claude-3 Haiku | 72.2 | 68.4
MMLU | Knowledge | Claude-3.5 Sonnet | 87.2 | 84.0
MMLU | Knowledge | Gemini 1.5 Flash | 76.3 | 74.7
MMLU | Knowledge | Gemini 1.5 Pro | 81.3 | 81.1
StrategyQA | Commonsense | Llama 2 7b | 39.5 | 31.2
StrategyQA | Commonsense | Mistral 7b | 66.1 | 55.8
StrategyQA | Commonsense | Llama 3.1 8b | 73.7 | 68.6
StrategyQA | Commonsense | Llama 3.1 70b | 85.3 | 83.8
StrategyQA | Commonsense | Gemma 2 9b | 73.7 | 66.4
StrategyQA | Commonsense | Phi-3 Small 8k | 72.3 | 66.0
StrategyQA | Commonsense | Qwen 2 7b | 63.2 | 54.8
StrategyQA | Commonsense | Qwen 2 72b | 81.7 | 76.9
" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "27" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.479, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "table_caption", + "bbox": [ + 0.23, + 0.099, + 0.768, + 0.114 + ], + "angle": 0, + "content": "Table 7: Zero-shot accuracy for direct answering and CoT prompts on all datasets" + }, + { + "type": "table", + "bbox": [ + 0.221, + 0.124, + 0.778, + 0.915 + ], + "angle": 0, + "content": "
Dataset | Type | Model | zero-shot CoT accuracy | zero-shot DA accuracy
StrategyQA | Commonsense | GPT-4o Mini | 82.2 | 84.5
StrategyQA | Commonsense | GPT-4o | 84.5 | 85.5
StrategyQA | Commonsense | Claude-3 Haiku | 73.4 | 65.0
StrategyQA | Commonsense | Claude-3.5 Sonnet | 80.1 | 76.3
StrategyQA | Commonsense | Gemini 1.5 Flash | 72.5 | 75.2
StrategyQA | Commonsense | Gemini 1.5 Pro | 74.0 | 71.4
MuSR Object Placements | Soft Reasoning | Llama 2 7b | 36.3 | 30.5
MuSR Object Placements | Soft Reasoning | Mistral 7b | 50.8 | 43.4
MuSR Object Placements | Soft Reasoning | Llama 3.1 8b | 55.5 | 53.5
MuSR Object Placements | Soft Reasoning | Llama 3.1 70b | 65.6 | 43.8
MuSR Object Placements | Soft Reasoning | Gemma 2 9b | 63.3 | 57.0
MuSR Object Placements | Soft Reasoning | Phi-3 Small 8k | 53.1 | 55.1
MuSR Object Placements | Soft Reasoning | Qwen 2 7b | 48.8 | 48.4
MuSR Object Placements | Soft Reasoning | Qwen 2 72b | 61.7 | 45.7
MuSR Object Placements | Soft Reasoning | GPT-4o Mini | 59.0 | 55.0
MuSR Object Placements | Soft Reasoning | GPT-4o | 67.6 | 45.3
MuSR Object Placements | Soft Reasoning | Claude-3 Haiku | 46.9 | 52.3
MuSR Object Placements | Soft Reasoning | Claude-3.5 Sonnet | 69.5 | 51.2
MuSR Object Placements | Soft Reasoning | Gemini 1.5 Flash | 61.7 | 56.2
MuSR Object Placements | Soft Reasoning | Gemini 1.5 Pro | 66.4 | 50.0
FOLIO | Symbolic | Llama 2 7b | 36.5 | 33.0
FOLIO | Symbolic | Mistral 7b | 50.7 | 41.9
FOLIO | Symbolic | Llama 3.1 8b | 58.6 | 56.7
FOLIO | Symbolic | Llama 3.1 70b | 70.9 | 69.0
FOLIO | Symbolic | Gemma 2 9b | 66.0 | 55.7
FOLIO | Symbolic | Phi-3 Small 8k | 68.0 | 59.6
FOLIO | Symbolic | Qwen 2 7b | 60.6 | 51.2
FOLIO | Symbolic | Qwen 2 72b | 65.0 | 65.0
FOLIO | Symbolic | GPT-4o Mini | 65.0 | 58.1
FOLIO | Symbolic | GPT-4o | 79.8 | 62.6
FOLIO | Symbolic | Claude-3 Haiku | 61.6 | 48.8
FOLIO | Symbolic | Claude-3.5 Sonnet | 73.9 | 68.5
FOLIO | Symbolic | Gemini 1.5 Flash | 74.9 | 69.5
FOLIO | Symbolic | Gemini 1.5 Pro | 73.9 | 74.4
ContextHub Deductive L2 | Symbolic | Llama 2 7b | 34.8 | 12.6
ContextHub Deductive L2 | Symbolic | Mistral 7b | 48.8 | 55.1
ContextHub Deductive L2 | Symbolic | Llama 3.1 8b | 52.8 | 21.5
ContextHub Deductive L2 | Symbolic | Llama 3.1 70b | 50.0 | 41.1
ContextHub Deductive L2 | Symbolic | Gemma 2 9b | 50.0 | 43.0
ContextHub Deductive L2 | Symbolic | Phi-3 Small 8k | 52.4 | 49.1
ContextHub Deductive L2 | Symbolic | Qwen 2 7b | 51.3 | 39.8
ContextHub Deductive L2 | Symbolic | Qwen 2 72b | 52.8 | 44.0
ContextHub Deductive L2 | Symbolic | GPT-4o Mini | 47.0 | 42.0
ContextHub Deductive L2 | Symbolic | GPT-4o | 54.5 | 45.6
ContextHub Deductive L2 | Symbolic | Claude-3 Haiku | 45.2 | 41.8
ContextHub Deductive L2 | Symbolic | Claude-3.5 Sonnet | 53.0 | 46.2
ContextHub Deductive L2 | Symbolic | Gemini 1.5 Flash | 45.0 | 39.5
ContextHub Deductive L2 | Symbolic | Gemini 1.5 Pro | 57.3 | 43.3
ContextHub Abductive L2 | Symbolic | Llama 2 7b | 34.3 | 31.9
ContextHub Abductive L2 | Symbolic | Mistral 7b | 34.0 | 25.7
ContextHub Abductive L2 | Symbolic | Llama 3.1 8b | 41.3 | 37.3
ContextHub Abductive L2 | Symbolic | Llama 3.1 70b | 51.0 | 44.4
ContextHub Abductive L2 | Symbolic | Gemma 2 9b | 41.5 | 32.9
ContextHub Abductive L2 | Symbolic | Phi-3 Small 8k | 44.3 | 32.8
ContextHub Abductive L2 | Symbolic | Qwen 2 7b | 37.8 | 33.4
ContextHub Abductive L2 | Symbolic | Qwen 2 72b | 45.5 | 32.2
ContextHub Abductive L2 | Symbolic | GPT-4o Mini | 65.0 | 55.0
ContextHub Abductive L2 | Symbolic | GPT-4o | 57.5 | 46.8
ContextHub Abductive L2 | Symbolic | Claude-3 Haiku | 37.0 | 31.4
ContextHub Abductive L2 | Symbolic | Claude-3.5 Sonnet | 56.8 | 40.4
ContextHub Abductive L2 | Symbolic | Gemini 1.5 Flash | 53.1 | 32.2
ContextHub Abductive L2 | Symbolic | Gemini 1.5 Pro | 53.5 | 43.7
MMLU Pro | Knowledge | Llama 2 7b | 19.9 | 19.6
MMLU Pro | Knowledge | Mistral 7b | 31.6 | 28.4
MMLU Pro | Knowledge | Llama 3.1 8b | 44.8 | 38.0
MMLU Pro | Knowledge | Llama 3.1 70b | 64.9 | 55.0
MMLU Pro | Knowledge | Gemma 2 9b | 48.1 | 42.7
MMLU Pro | Knowledge | Phi-3 Small 8k | 54.8 | 43.7
MMLU Pro | Knowledge | Qwen 2 7b | 45.0 | 36.2
MMLU Pro | Knowledge | Qwen 2 72b | 62.8 | 44.3
MMLU Pro | Knowledge | GPT-4o Mini | 62.3 | 42.6
MMLU Pro | Knowledge | GPT-4o | 72.1 | 55.0
MMLU Pro | Knowledge | Claude-3 Haiku | 47.6 | 39.0
MMLU Pro | Knowledge | Claude-3.5 Sonnet | 73.4 | 57.2
MMLU Pro | Knowledge | Gemini 1.5 Flash | 58.5 | 47.2
MMLU Pro | Knowledge | Gemini 1.5 Pro | 65.3 | 57.4
MuSR Murder Mysteries | Soft Reasoning | Llama 2 7b | 50.0 | 50.0
" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "28" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.479, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "table_caption", + "bbox": [ + 0.23, + 0.099, + 0.768, + 0.114 + ], + "angle": 0, + "content": "Table 7: Zero-shot accuracy for direct answering and CoT prompts on all datasets" + }, + { + "type": "table", + "bbox": [ + 0.221, + 0.124, + 0.778, + 0.916 + ], + "angle": 0, + "content": "
Dataset | Type | Model | zero-shot CoT accuracy | zero-shot DA accuracy
MuSR Murder Mysteries | Soft Reasoning | Mistral 7b | 62.8 | 55.6
MuSR Murder Mysteries | Soft Reasoning | Llama 3.1 8b | 70.4 | 57.2
MuSR Murder Mysteries | Soft Reasoning | Llama 3.1 70b | 73.6 | 69.6
MuSR Murder Mysteries | Soft Reasoning | Gemma 2 9b | 76.8 | 61.6
MuSR Murder Mysteries | Soft Reasoning | Phi-3 Small 8k | 61.6 | 58.8
MuSR Murder Mysteries | Soft Reasoning | Qwen 2 7b | 59.2 | 53.2
MuSR Murder Mysteries | Soft Reasoning | Qwen 2 72b | 80.8 | 64.4
MuSR Murder Mysteries | Soft Reasoning | GPT-4o Mini | 71.2 | 63.6
MuSR Murder Mysteries | Soft Reasoning | GPT-4o | 87.6 | 70.8
MuSR Murder Mysteries | Soft Reasoning | Claude-3 Haiku | 62.4 | 56.8
MuSR Murder Mysteries | Soft Reasoning | Claude-3.5 Sonnet | 85.2 | 70.4
MuSR Murder Mysteries | Soft Reasoning | Gemini 1.5 Flash | 70.8 | 58.4
MuSR Murder Mysteries | Soft Reasoning | Gemini 1.5 Pro | 77.6 | 64.0
ContextHub Deductive L1 | Symbolic | Llama 2 7b | 47.7 | 8.3
ContextHub Deductive L1 | Symbolic | Mistral 7b | 50.3 | 67.3
ContextHub Deductive L1 | Symbolic | Llama 3.1 8b | 50.7 | 23.3
ContextHub Deductive L1 | Symbolic | Llama 3.1 70b | 53.8 | 40.7
ContextHub Deductive L1 | Symbolic | Gemma 2 9b | 56.3 | 39.2
ContextHub Deductive L1 | Symbolic | Phi-3 Small 8k | 54.8 | 50.2
ContextHub Deductive L1 | Symbolic | Qwen 2 7b | 59.3 | 43.3
ContextHub Deductive L1 | Symbolic | Qwen 2 72b | 51.5 | 44.0
ContextHub Deductive L1 | Symbolic | GPT-4o Mini | 49.3 | 41.5
ContextHub Deductive L1 | Symbolic | GPT-4o | 59.3 | 49.0
ContextHub Deductive L1 | Symbolic | Claude-3 Haiku | 50.5 | 39.7
ContextHub Deductive L1 | Symbolic | Claude-3.5 Sonnet | 54.5 | 47.0
ContextHub Deductive L1 | Symbolic | Gemini 1.5 Flash | 47.3 | 38.5
ContextHub Deductive L1 | Symbolic | Gemini 1.5 Pro | 57.3 | 46.0
ContextHub Abductive L1 | Symbolic | Llama 2 7b | 29.4 | 16.4
ContextHub Abductive L1 | Symbolic | Mistral 7b | 46.9 | 25.8
ContextHub Abductive L1 | Symbolic | Llama 3.1 8b | 43.6 | 24.2
ContextHub Abductive L1 | Symbolic | Llama 3.1 70b | 55.3 | 43.9
ContextHub Abductive L1 | Symbolic | Gemma 2 9b | 61.9 | 58.9
ContextHub Abductive L1 | Symbolic | Phi-3 Small 8k | 62.5 | 60.3
ContextHub Abductive L1 | Symbolic | Qwen 2 7b | 52.2 | 47.5
ContextHub Abductive L1 | Symbolic | Qwen 2 72b | 61.9 | 45.0
ContextHub Abductive L1 | Symbolic | GPT-4o Mini | 61.1 | 42.2
ContextHub Abductive L1 | Symbolic | GPT-4o | 74.2 | 65.6
ContextHub Abductive L1 | Symbolic | Claude-3 Haiku | 35.3 | 22.8
ContextHub Abductive L1 | Symbolic | Claude-3.5 Sonnet | 80.8 | 60.3
ContextHub Abductive L1 | Symbolic | Gemini 1.5 Flash | 66.4 | 47.2
ContextHub Abductive L1 | Symbolic | Gemini 1.5 Pro | 62.2 | 60.0
Big-Bench Hard | Symbolic | Llama 2 7b | 29.8 | 31.9
Big-Bench Hard | Symbolic | Mistral 7b | 39.3 | 35.1
Big-Bench Hard | Symbolic | Llama 3.1 8b | 62.8 | 45.6
Big-Bench Hard | Symbolic | Llama 3.1 70b | 78.9 | 54.8
Big-Bench Hard | Symbolic | Gemma 2 9b | 58.7 | 50.8
Big-Bench Hard | Symbolic | Phi-3 Small 8k | 70.0 | 55.1
Big-Bench Hard | Symbolic | Qwen 2 7b | 52.6 | 47.6
Big-Bench Hard | Symbolic | Qwen 2 72b | 75.1 | 59.0
Big-Bench Hard | Symbolic | GPT-4o Mini | 77.7 | 49.7
Big-Bench Hard | Symbolic | GPT-4o | 84.6 | 64.5
Big-Bench Hard | Symbolic | Claude-3 Haiku | 62.4 | 47.3
Big-Bench Hard | Symbolic | Claude-3.5 Sonnet | 83.6 | 56.9
Big-Bench Hard | Symbolic | Gemini 1.5 Flash | 71.3 | 55.4
Big-Bench Hard | Symbolic | Gemini 1.5 Pro | 71.6 | 50.3
MATH | Mathematical | Llama 2 7b | 4.2 | 4.0
MATH | Mathematical | Mistral 7b | 12.4 | 6.1
MATH | Mathematical | Llama 3.1 8b | 47.2 | 13.8
MATH | Mathematical | Llama 3.1 70b | 64.4 | 22.8
MATH | Mathematical | Gemma 2 9b | 45.6 | 19.1
MATH | Mathematical | Phi-3 Small 8k | 43.2 | 18.5
MATH | Mathematical | Qwen 2 7b | 53.7 | 13.3
MATH | Mathematical | Qwen 2 72b | 63.5 | 23.8
MATH | Mathematical | GPT-4o Mini | 69.6 | 24.3
MATH | Mathematical | GPT-4o | 73.3 | 35.2
MATH | Mathematical | Claude-3 Haiku | 32.7 | 17.4
MATH | Mathematical | Claude-3.5 Sonnet | 63.8 | 34.6
MATH | Mathematical | Gemini 1.5 Flash | 54.5 | 31.3
MATH | Mathematical | Gemini 1.5 Pro | 62.1 | 39.4
GSM8k-Hard | Mathematical | Llama 2 7b | 6.7 | 1.8
GSM8k-Hard | Mathematical | Mistral 7b | 21.0 | 3.0
GSM8k-Hard | Mathematical | Llama 3.1 8b | 34.4 | 6.0
GSM8k-Hard | Mathematical | Llama 3.1 70b | 46.6 | 14.0
GSM8k-Hard | Mathematical | Gemma 2 9b | 40.9 | 8.8
GSM8k-Hard | Mathematical | Phi-3 Small 8k | 33.0 | 6.9
GSM8k-Hard | Mathematical | Qwen 2 7b | 48.4 | 5.0
GSM8k-Hard | Mathematical | Qwen 2 72b | 54.8 | 13.7
" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "29" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.479, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "table_caption", + "bbox": [ + 0.23, + 0.099, + 0.768, + 0.114 + ], + "angle": 0, + "content": "Table 7: Zero-shot accuracy for direct answering and CoT prompts on all datasets" + }, + { + "type": "table", + "bbox": [ + 0.221, + 0.125, + 0.778, + 0.341 + ], + "angle": 0, + "content": "
Dataset | Type | Model | zero-shot CoT accuracy | zero-shot DA accuracy
GSM8k-Hard | Mathematical | GPT-4o Mini | 53.9 | 11.7
GSM8k-Hard | Mathematical | GPT-4o | 60.3 | 26.0
GSM8k-Hard | Mathematical | Claude-3 Haiku | 45.3 | 9.6
GSM8k-Hard | Mathematical | Claude-3.5 Sonnet | 50.8 | 32.3
GSM8k-Hard | Mathematical | Gemini 1.5 Flash | 54.6 | 16.2
GSM8k-Hard | Mathematical | Gemini 1.5 Pro | 58.2 | 26.2
GSM8k | Mathematical | Llama 2 7b | 29.6 | 6.9
GSM8k | Mathematical | Mistral 7b | 59.2 | 10.2
GSM8k | Mathematical | Llama 3.1 8b | 85.4 | 18.5
GSM8k | Mathematical | Llama 3.1 70b | 85.6 | 37.0
GSM8k | Mathematical | Gemma 2 9b | 89.2 | 24.9
GSM8k | Mathematical | Phi-3 Small 8k | 90.0 | 24.9
GSM8k | Mathematical | Qwen 2 7b | 87.9 | 20.7
GSM8k | Mathematical | Qwen 2 72b | 94.6 | 40.1
GSM8k | Mathematical | GPT-4o Mini | 94.1 | 31.8
GSM8k | Mathematical | GPT-4o | 95.8 | 58.8
GSM8k | Mathematical | Claude-3 Haiku | 89.4 | 22.9
GSM8k | Mathematical | Claude-3.5 Sonnet | 96.1 | 62.2
GSM8k | Mathematical | Gemini 1.5 Flash | 91.4 | 38.6
GSM8k | Mathematical | Gemini 1.5 Pro | 92.7 | 52.4
" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.375, + 0.398, + 0.389 + ], + "angle": 0, + "content": "F.2 FULL FEW-SHOT RESULTS" + }, + { + "type": "table_caption", + "bbox": [ + 0.231, + 0.402, + 0.766, + 0.417 + ], + "angle": 0, + "content": "Table 8: Few-shot accuracy for direct answering and CoT prompts on all datasets" + }, + { + "type": "table", + "bbox": [ + 0.226, + 0.428, + 0.772, + 0.919 + ], + "angle": 0, + "content": "
Dataset | Type | Model | few-shot CoT accuracy | few-shot DA accuracy
AGIEval LSAT RC | Soft Reasoning | Llama 2 7b | 33.1 | 38.7
AGIEval LSAT RC | Soft Reasoning | Mistral 7b | 52.4 | 57.2
AGIEval LSAT RC | Soft Reasoning | Llama 3.1 8b | 60.2 | 70.3
AGIEval LSAT RC | Soft Reasoning | Llama 3.1 70b | 84.4 | 88.8
AGIEval LSAT RC | Soft Reasoning | Gemma 2 9b | 74.3 | 79.2
AGIEval LSAT RC | Soft Reasoning | Phi-3 Small 8k | 63.2 | 65.1
AGIEval LSAT RC | Soft Reasoning | Qwen 2 7b | 61.7 | 68.8
AGIEval LSAT RC | Soft Reasoning | Qwen 2 72b | 85.9 | 85.9
AGIEval LSAT RC | Soft Reasoning | GPT-4o Mini | 77.3 | 71.4
AGIEval LSAT RC | Soft Reasoning | Gemini 1.5 Flash | 79.2 | 81.8
AGIEval LSAT LR | Soft Reasoning | Llama 2 7b | 33.7 | 34.7
AGIEval LSAT LR | Soft Reasoning | Mistral 7b | 46.1 | 48.0
AGIEval LSAT LR | Soft Reasoning | Llama 3.1 8b | 55.7 | 58.0
AGIEval LSAT LR | Soft Reasoning | Llama 3.1 70b | 83.3 | 85.1
AGIEval LSAT LR | Soft Reasoning | Gemma 2 9b | 65.7 | 68.2
AGIEval LSAT LR | Soft Reasoning | Phi-3 Small 8k | 64.7 | 59.2
AGIEval LSAT LR | Soft Reasoning | Qwen 2 7b | 54.1 | 61.2
AGIEval LSAT LR | Soft Reasoning | Qwen 2 72b | 77.5 | 79.6
AGIEval LSAT LR | Soft Reasoning | GPT-4o Mini | 68.4 | 64.5
AGIEval LSAT LR | Soft Reasoning | Gemini 1.5 Flash | 68.6 | 72.9
GPQA | Mathematical | Mistral 7b | 23.0 | 25.9
GPQA | Mathematical | Llama 3.1 8b | 22.1 | 27.2
GPQA | Mathematical | Llama 3.1 70b | 24.8 | 24.3
GPQA | Mathematical | Gemma 2 9b | 19.9 | 22.3
GPQA | Mathematical | Phi-3 Small 8k | 23.9 | 22.5
GPQA | Mathematical | Qwen 2 7b | 23.4 | 21.2
GPQA | Mathematical | Qwen 2 72b | 22.8 | 19.9
GPQA | Mathematical | GPT-4o Mini | 20.0 | 20.0
GPQA | Mathematical | Gemini 1.5 Flash | 21.9 | 24.6
CommonsenseQA | Commonsense | Llama 2 7b | 18.2 | 19.2
CommonsenseQA | Commonsense | Mistral 7b | 73.6 | 70.4
CommonsenseQA | Commonsense | Llama 3.1 8b | 74.0 | 76.5
CommonsenseQA | Commonsense | Llama 3.1 70b | 84.7 | 84.6
CommonsenseQA | Commonsense | Gemma 2 9b | 81.8 | 80.8
CommonsenseQA | Commonsense | Phi-3 Small 8k | 80.8 | 80.4
CommonsenseQA | Commonsense | Qwen 2 7b | 80.3 | 72.9
CommonsenseQA | Commonsense | Qwen 2 72b | 88.4 | 87.8
CommonsenseQA | Commonsense | GPT-4o Mini | 84.7 | 84.7
CommonsenseQA | Commonsense | Gemini 1.5 Flash | 81.7 | 83.3
AGIEval LSAT AR | Soft Reasoning | Llama 2 7b | 19.6 | 18.7
AGIEval LSAT AR | Soft Reasoning | Mistral 7b | 20.9 | 22.6
AGIEval LSAT AR | Soft Reasoning | Llama 3.1 8b | 24.8 | 26.1
AGIEval LSAT AR | Soft Reasoning | Llama 3.1 70b | 36.1 | 30.9
AGIEval LSAT AR | Soft Reasoning | Gemma 2 9b | 22.2 | 28.7
AGIEval LSAT AR | Soft Reasoning | Phi-3 Small 8k | 27.8 | 20.0
AGIEval LSAT AR | Soft Reasoning | Qwen 2 7b | 24.3 | 23.0
AGIEval LSAT AR | Soft Reasoning | Qwen 2 72b | 27.0 | 30.0
" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "30" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.479, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "table_caption", + "bbox": [ + 0.232, + 0.099, + 0.766, + 0.114 + ], + "angle": 0, + "content": "Table 8: Few-shot accuracy for direct answering and CoT prompts on all datasets" + }, + { + "type": "table", + "bbox": [ + 0.226, + 0.124, + 0.772, + 0.919 + ], + "angle": 0, + "content": "
Dataset | Type | Model | few-shot CoT accuracy | few-shot DA accuracy
AGIEval LSAT AR | Soft Reasoning | GPT-4o Mini | 28.7 | 26.1
AGIEval LSAT AR | Soft Reasoning | Gemini 1.5 Flash | 28.3 | 20.4
MMLU | Knowledge | Llama 2 7b | 49.0 | 42.8
MMLU | Knowledge | Mistral 7b | 63.0 | 57.0
MMLU | Knowledge | Llama 3.1 8b | 71.7 | 69.3
MMLU | Knowledge | Llama 3.1 70b | 84.3 | 83.7
MMLU | Knowledge | Gemma 2 9b | 74.7 | 72.4
MMLU | Knowledge | Phi-3 Small 8k | 77.3 | 75.2
MMLU | Knowledge | Qwen 2 7b | 69.9 | 68.6
MMLU | Knowledge | Qwen 2 72b | 82.7 | 81.8
MMLU | Knowledge | GPT-4o Mini | 82.3 | 77.8
MMLU | Knowledge | Gemini 1.5 Flash | 78.1 | 79.0
StrategyQA | Commonsense | Llama 2 7b | 57.9 | 30.9
StrategyQA | Commonsense | Mistral 7b | 70.7 | 72.0
StrategyQA | Commonsense | Llama 3.1 8b | 74.4 | 65.8
StrategyQA | Commonsense | Llama 3.1 70b | 87.1 | 84.2
StrategyQA | Commonsense | Gemma 2 9b | 77.1 | 73.3
StrategyQA | Commonsense | Phi-3 Small 8k | 75.0 | 71.1
StrategyQA | Commonsense | Qwen 2 7b | 71.9 | 58.9
StrategyQA | Commonsense | Qwen 2 72b | 83.2 | 80.1
StrategyQA | Commonsense | GPT-4o Mini | 83.0 | 86.2
StrategyQA | Commonsense | Gemini 1.5 Flash | 77.0 | 80.3
ContextHub Abductive L2 | Symbolic | Llama 2 7b | 36.2 | 35.0
ContextHub Abductive L2 | Symbolic | Mistral 7b | 33.8 | 30.0
ContextHub Abductive L2 | Symbolic | Llama 3.1 8b | 32.7 | 36.1
ContextHub Abductive L2 | Symbolic | Llama 3.1 70b | 54.6 | 51.2
ContextHub Abductive L2 | Symbolic | Gemma 2 9b | 44.8 | 33.2
ContextHub Abductive L2 | Symbolic | Phi-3 Small 8k | 49.8 | 34.2
ContextHub Abductive L2 | Symbolic | Qwen 2 7b | 39.6 | 35.0
ContextHub Abductive L2 | Symbolic | Qwen 2 72b | 54.7 | 34.9
ContextHub Abductive L2 | Symbolic | GPT-4o Mini | 62.0 | 60.0
ContextHub Abductive L2 | Symbolic | Gemini 1.5 Flash | 48.6 | 47.8
ContextHub Abductive L1 | Symbolic | Llama 2 7b | 21.4 | 16.7
ContextHub Abductive L1 | Symbolic | Mistral 7b | 23.6 | 21.7
ContextHub Abductive L1 | Symbolic | Llama 3.1 8b | 40.0 | 36.1
ContextHub Abductive L1 | Symbolic | Llama 3.1 70b | 62.2 | 58.9
ContextHub Abductive L1 | Symbolic | Gemma 2 9b | 48.9 | 59.4
ContextHub Abductive L1 | Symbolic | Phi-3 Small 8k | 59.2 | 56.4
ContextHub Abductive L1 | Symbolic | Qwen 2 7b | 48.6 | 38.9
ContextHub Abductive L1 | Symbolic | Qwen 2 72b | 53.3 | 56.1
ContextHub Abductive L1 | Symbolic | GPT-4o Mini | 77.2 | 59.2
ContextHub Abductive L1 | Symbolic | Gemini 1.5 Flash | 79.7 | 68.6
MuSR Murder Mysteries | Soft Reasoning | Mistral 7b | 62.0 | 56.4
MuSR Murder Mysteries | Soft Reasoning | Llama 3.1 8b | 61.6 | 61.2
MuSR Murder Mysteries | Soft Reasoning | Llama 3.1 70b | 73.2 | 68.0
MuSR Murder Mysteries | Soft Reasoning | Gemma 2 9b | 81.6 | 62.0
MuSR Murder Mysteries | Soft Reasoning | Phi-3 Small 8k | 62.0 | 53.6
MuSR Murder Mysteries | Soft Reasoning | Qwen 2 7b | 56.0 | 55.6
MuSR Murder Mysteries | Soft Reasoning | Qwen 2 72b | 80.4 | 66.0
MuSR Murder Mysteries | Soft Reasoning | GPT-4o Mini | 76.0 | 69.6
MuSR Murder Mysteries | Soft Reasoning | Gemini 1.5 Flash | 70.0 | 66.4
MuSR Team Allocations | Soft Reasoning | Mistral 7b | 42.8 | 43.2
MuSR Team Allocations | Soft Reasoning | Llama 3.1 8b | 59.6 | 51.6
MuSR Team Allocations | Soft Reasoning | Llama 3.1 70b | 89.2 | 63.6
MuSR Team Allocations | Soft Reasoning | Gemma 2 9b | 48.4 | 45.6
MuSR Team Allocations | Soft Reasoning | Phi-3 Small 8k | 66.0 | 46.4
MuSR Team Allocations | Soft Reasoning | Qwen 2 7b | 34.0 | 40.8
MuSR Team Allocations | Soft Reasoning | Qwen 2 72b | 56.0 | 66.4
MuSR Team Allocations | Soft Reasoning | GPT-4o Mini | 75.6 | 60.0
MuSR Team Allocations | Soft Reasoning | Gemini 1.5 Flash | 90.0 | 54.4
MMLU Pro | Knowledge | Llama 2 7b | 21.5 | 20.4
MMLU Pro | Knowledge | Mistral 7b | 34.8 | 26.7
MMLU Pro | Knowledge | Llama 3.1 8b | 44.7 | 38.0
MMLU Pro | Knowledge | Llama 3.1 70b | 64.4 | 55.1
MMLU Pro | Knowledge | Gemma 2 9b | 48.5 | 42.4
MMLU Pro | Knowledge | Phi-3 Small 8k | 54.8 | 43.2
MMLU Pro | Knowledge | Qwen 2 7b | 46.6 | 39.0
MMLU Pro | Knowledge | Qwen 2 72b | 62.5 | 51.6
MMLU Pro | Knowledge | GPT-4o Mini | 63.0 | 45.0
MMLU Pro | Knowledge | Gemini 1.5 Flash | 59.4 | 50.6
MuSR Object Placements | Soft Reasoning | Mistral 7b | 55.5 | 41.0
MuSR Object Placements | Soft Reasoning | Llama 3.1 8b | 66.8 | 50.4
MuSR Object Placements | Soft Reasoning | Llama 3.1 70b | 67.2 | 57.4
MuSR Object Placements | Soft Reasoning | Gemma 2 9b | 68.0 | 58.2
MuSR Object Placements | Soft Reasoning | Phi-3 Small 8k | 62.1 | 51.6
MuSR Object Placements | Soft Reasoning | Qwen 2 7b | 46.9 | 43.8
MuSR Object Placements | Soft Reasoning | Qwen 2 72b | 66.4 | 43.0
" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.507, + 0.96 + ], + "angle": 0, + "content": "31" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.034, + 0.478, + 0.047 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "table_caption", + "bbox": [ + 0.233, + 0.099, + 0.764, + 0.113 + ], + "angle": 0, + "content": "Table 8: Few-shot accuracy for direct answering and CoT prompts on all datasets" + }, + { + "type": "table", + "bbox": [ + 0.227, + 0.124, + 0.772, + 0.564 + ], + "angle": 0, + "content": "
Dataset | Type | Model | few-shot CoT accuracy | few-shot DA accuracy
MuSR Object Placements | Soft Reasoning | GPT-4o Mini | 67.0 | 47.0
MuSR Object Placements | Soft Reasoning | Gemini 1.5 Flash | 73.0 | 54.7
ContextHub Deductive L2 | Symbolic | Llama 2 7b | 34.7 | 15.0
ContextHub Deductive L2 | Symbolic | Mistral 7b | 63.8 | 51.4
ContextHub Deductive L2 | Symbolic | Llama 3.1 8b | 76.1 | 27.3
ContextHub Deductive L2 | Symbolic | Llama 3.1 70b | 82.6 | 53.6
ContextHub Deductive L2 | Symbolic | Gemma 2 9b | 61.9 | 47.6
ContextHub Deductive L2 | Symbolic | Phi-3 Small 8k | 61.5 | 54.0
ContextHub Deductive L2 | Symbolic | Qwen 2 7b | 55.3 | 36.4
ContextHub Deductive L2 | Symbolic | Qwen 2 72b | 80.2 | 54.0
ContextHub Deductive L2 | Symbolic | GPT-4o Mini | 59.0 | 41.0
ContextHub Deductive L2 | Symbolic | Gemini 1.5 Flash | 90.2 | 42.5
ContextHub Deductive L1 | Symbolic | Llama 2 7b | 34.7 | 16.0
ContextHub Deductive L1 | Symbolic | Mistral 7b | 46.2 | 59.2
ContextHub Deductive L1 | Symbolic | Llama 3.1 8b | 73.0 | 23.0
ContextHub Deductive L1 | Symbolic | Llama 3.1 70b | 67.5 | 50.0
ContextHub Deductive L1 | Symbolic | Gemma 2 9b | 66.0 | 45.7
ContextHub Deductive L1 | Symbolic | Phi-3 Small 8k | 74.8 | 51.8
ContextHub Deductive L1 | Symbolic | Qwen 2 7b | 58.8 | 37.5
ContextHub Deductive L1 | Symbolic | Qwen 2 72b | 70.7 | 42.8
ContextHub Deductive L1 | Symbolic | GPT-4o Mini | 59.2 | 44.3
ContextHub Deductive L1 | Symbolic | Gemini 1.5 Flash | 89.3 | 49.8
MATH | Mathematical | Llama 2 7b | 4.7 | 3.9
MATH | Mathematical | Mistral 7b | 13.7 | 7.1
MATH | Mathematical | Llama 3.1 8b | 41.2 | 14.2
MATH | Mathematical | Llama 3.1 70b | 61.9 | 24.2
MATH | Mathematical | Gemma 2 9b | 47.5 | 19.8
MATH | Mathematical | Phi-3 Small 8k | 42.4 | 18.9
MATH | Mathematical | Qwen 2 7b | 55.0 | 15.0
MATH | Mathematical | Qwen 2 72b | 65.3 | 26.2
MATH | Mathematical | GPT-4o Mini | 71.7 | 24.6
MATH | Mathematical | Gemini 1.5 Flash | 54.7 | 32.3
GSM8K | Mathematical | Llama 2 7b | 29.0 | 7.7
GSM8K | Mathematical | Mistral 7b | 56.2 | 12.5
GSM8K | Mathematical | Llama 3.1 8b | 86.4 | 20.1
GSM8K | Mathematical | Llama 3.1 70b | 96.1 | 39.1
GSM8K | Mathematical | Gemma 2 9b | 89.2 | 24.9
GSM8K | Mathematical | Phi-3 Small 8k | 90.4 | 24.5
GSM8K | Mathematical | Qwen 2 7b | 87.6 | 21.4
GSM8K | Mathematical | Qwen 2 72b | 93.2 | 40.6
GSM8K | Mathematical | GPT-4o Mini | 94.2 | 32.8
GSM8K | Mathematical | Gemini 1.5 Flash | 90.6 | 40.4
" + }, + { + "type": "title", + "bbox": [ + 0.176, + 0.601, + 0.649, + 0.614 + ], + "angle": 0, + "content": "F.3 ANSWER EXTRACTOR AND AVERAGE ANSWER SPAN RESULTS" + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.627, + 0.824, + 0.696 + ], + "angle": 0, + "content": "In this section, we report the number of generations from each model on each dataset that our answer parser could not extract. \"1\" denotes that a model was not run on a certain dataset due to context length limitations in the few-shot setting. We see that these unparseable rates are generally low across the board. The weakest models struggle on some of the most challenging datasets, but unparseable rates are all at or below \\(15\\%\\)." + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.704, + 0.824, + 0.759 + ], + "angle": 0, + "content": "We also report the average character index of the beginning of the answer span that the answer parser extracted. Of particular note is that the direct answer prompts all return an answer within the first 60 characters, indicating that the answers are returned almost immediately, as desired. CoT completions are much longer." + }, + { + "type": "title", + "bbox": [ + 0.176, + 0.781, + 0.516, + 0.795 + ], + "angle": 0, + "content": "G ZOOM-IN: MMLU AND MMLU PRO" + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.813, + 0.824, + 0.924 + ], + "angle": 0, + "content": "MMLU and MMLU Pro show gains from adding CoT, but because these datasets are so broad, they defy simple characterization. We explore the performance of CoT on each category of MMLU to understand divergences in CoT performance between these domains. We list the top three categories where CoT gives the largest error reduction for Llama 3.1 8B and 70B on MMLU and MMLU Pro in Table 17. Some of these categories are explicitly mathematical in nature, as we might expect from Figure 3. We can also see that CoT is helping on categories like \"business\"; upon closer inspection, we found that these categories frequently involve math as well (e.g., business questions may involve computations surrounding wealth). 
+ { + "type": "title", + "bbox": [ + 0.176, + 0.781, + 0.516, + 0.795 + ], + "angle": 0, + "content": "G ZOOM-IN: MMLU AND MMLU PRO" + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.813, + 0.824, + 0.924 + ], + "angle": 0, + "content": "MMLU and MMLU Pro show gains from adding CoT, but because these datasets are so broad, they defy simple characterization. We explore the performance of CoT on each category of MMLU to understand divergences in CoT performance between these domains. We list the top three categories where CoT gives the largest error reduction for Llama 3.1 8B and 70B on MMLU and MMLU Pro in Table 17. Some of these categories are explicitly mathematical in nature, as we might expect from Figure 3. We can also see that CoT is helping on categories like \"business\"; upon closer inspection, we found that these categories frequently involve math as well (e.g., business questions may involve computations surrounding wealth). We need to more carefully characterize MMLU at the instance level." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.95, + 0.508, + 0.96 + ], + "angle": 0, + "content": "32" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "image_caption", + "bbox": [ + 0.283, + 0.099, + 0.717, + 0.111 + ], + "angle": 0, + "content": "CoT vs direct answer prompting in zero-shot setting (sorted by CoT delta)" + }, + { + "type": "image", + "bbox": [ + 0.191, + 0.113, + 0.825, + 0.203 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.191, + 0.207, + 0.825, + 0.287 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.191, + 0.293, + 0.825, + 0.372 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.191, + 0.377, + 0.825, + 0.456 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.191, + 0.462, + 0.825, + 0.541 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.191, + 0.546, + 0.825, + 0.626 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.191, + 0.631, + 0.825, + 0.711 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.191, + 0.716, + 0.825, + 0.795 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.191, + 0.8, + 0.825, + 0.877 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.877, + 0.825, + 0.92 + ], + "angle": 0, + "content": "Figure 9: Performance of zero-shot direct (blue) and zero-shot CoT (orange) across datasets and models. Graphs are sorted in ascending order by median delta (CoT, direct). The datasets benefiting substantially are all symbolic or semi-symbolic in nature." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "33" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.479, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "table_caption", + "bbox": [ + 0.172, + 0.167, + 0.825, + 0.208 + ], + "angle": 0, + "content": "Table 9: Percentage of responses per dataset per model that our answer parser could not extract an answer for in the zero-shot direct answer setting. Prompt modifications were made to decrease these numbers. No model is above \\(15\\%\\)." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.22, + 0.825, + 0.442 + ], + "angle": 0, + "content": "
Zero-shot Direct Answer Unparseable Answer Rate by Percentage
dataset | Meta-Llama 2 7b | Mistral 7b | Meta-Llama 3.1 8b | Meta-Llama 3.1 70b | Gemma 2 9b | Phi-3 Small 8k | Qwen 2 7b | Qwen 2 72b | GPT-4o Mini | GPT-4o | Claude-3 Haiku | Claude-3.5 Sonnet | Gemini 1.5 Flash
CommonsenseQA | 1.9 | 2.5 | 1.1 | 0.0 | 0.8 | 0.1 | 1.6 | 0.7 | 0.0 | 0.0 | 0.1 | 0.0 | 0.2
StrategyQA | 0.0 | 1.9 | 0.1 | 0.0 | 11.7 | 0.5 | 4.9 | 2.7 | 0.0 | 0.0 | 0.0 | 0.0 | 0.2
SiQA | 0.2 | 6.6 | 0.0 | 0.1 | 3.9 | 0.3 | 0.1 | 3.0 | 0.1 | 0.1 | 0.0 | 0.0 | 0.4
PiQA | 0.4 | 6.0 | 0.0 | 0.1 | 3.3 | 2.1 | 0.0 | 5.5 | 0.2 | 0.0 | 0.1 | 0.0 | 0.9
Winogrande | 0.0 | 3.0 | 0.1 | 0.0 | 2.1 | 0.2 | 5.1 | 0.4 | 0.0 | 0.0 | 0.0 | 0.0 | 3.6
Arc Easy | 0.0 | 1.8 | 0.5 | 0.0 | 0.0 | 0.2 | 9.1 | 0.7 | 3.5 | 0.4 | 0.2 | 0.0 | 3.2
Arc Challenge | 0.0 | 2.3 | 1.0 | 0.0 | 0.3 | 0.7 | 10.7 | 0.7 | 10.0 | 0.7 | 0.0 | 0.0 | 5.0
AGIEval LSAT LR | 0.4 | 0.0 | 0.0 | 0.0 | 0.0 | 0.2 | 0.0 | 0.0 | 0.0 | 2.5 | 0.0 | 0.0 | 0.2
AGIEval LSAT AR | 0.4 | 0.0 | 0.0 | 0.0 | 4.3 | 3.9 | 0.0 | 0.0 | 0.0 | 8.7 | 0.0 | 0.0 | 0.0
AGIEval LSAT RC | 0.4 | 0.4 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 9.7 | 0.0 | 0.0 | 0.4
ContextHub Deductive L1 | 0.0 | 0.0 | 0.0 | 0.0 | 1.2 | 0.0 | 2.3 | 0.0 | 0.0 | 0.0 | 0.2 | 0.0 | 0.2
ContextHub Deductive L2 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 2.2 | 1.0 | 0.0 | 0.0 | 2.8 | 0.0 | 0.0
ContextHub Abductive L1 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.3 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0
ContextHub Abductive L2 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.1 | 1.5 | 0.2 | 0.0 | 0.0 | 0.8 | 0.0 | 0.0
MuSR Murder Mysteries | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0
MuSR Team Allocations | 0.0 | 0.0 | 0.0 | 0.0 | 3.6 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 8.4 | 0.4
MuSR Object Placements | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0
MMLU | 0.1 | 0.0 | 0.0 | 0.0 | 0.1 | 0.2 | 3.6 | 1.2 | 0.6 | 0.0 | 1.3 | 0.3 | 0.2
MMLU Pro | 0.7 | 1.3 | 1.0 | 0.3 | 1.0 | 3.7 | 6.8 | 12.2 | 0.4 | 0.3 | 0.3 | 0.4 | 0.6
GPQA | 1.3 | 7.1 | 0.0 | 0.0 | 8.7 | 12.7 | 5.4 | 15.2 | 0.0 | 0.0 | 1.6 | 0.0 | 0.7
MATH | 0.6 | 6.9 | 0.3 | 0.2 | 0.1 | 0.1 | 3.5 | 3.0 | 0.8 | 0.0 | 0.3 | 0.0 | 0.4
GSM8k | 0.2 | 4.1 | 2.5 | 0.0 | 2.7 | 0.0 | 1.7 | 0.2 | 0.0 | 0.0 | 12.7 | 5.5 | 0.0
BiGGen Bench | 4.6 | 0.3 | 0.9 | 0.1 | 0.5 | 1.0 | 1.3 | 1.0 | 1.3 | 0.0 | 0.0 | 0.1 | 0.4
GSM8k-Hard | 4.8 | 7.6 | 2.0 | 0.4 | 0.4 | 0.2 | 3.2 | 1.1 | 0.1 | 0.5 | 5.2 | 0.5 | 0.2
MuSiQue | 0.1 | 0.0 | 0.0 | 0.0 | 0.0 | 0.1 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.2 | 0.1
FOLIO | 4.4 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 3.9 | 0.0 | 0.0 | 12.3 | 0.0 | 0.0 | 0.5
BigBench-Hard | 0.0 | 0.0 | 0.0 | 7.4 | 0.0 | 0.2 | 0.0 | 0.0 | 0.0 | 0.3 | 0.0 | 4.5 | 12.8
" + }, + { + "type": "table_caption", + "bbox": [ + 0.172, + 0.581, + 0.825, + 0.622 + ], + "angle": 0, + "content": "Table 10: Percentage of responses per dataset per model that our answer parser could not extract an answer for in the zero-shot CoT setting. Prompt modifications were made to decrease these numbers. No model is above \\(15\\%\\)." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.636, + 0.825, + 0.855 + ], + "angle": 0, + "content": "
Zero-shot CoT Unparseable Answer Rate by Percentage
| dataset | Meta-Llama 2-7b | Mistral 7b | Meta-Llama 3.1-8b | Meta-Llama 3.1-70b | Gemma 2-9b | Phi-3 Small 8k | Qwen 2-7b | Qwen 2-72b | GPT-4o Mini | GPT-4o | Claude-3 Haiku | Claude-3.5 Sonnet | Gemini 1.5 Flash |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| CommonsenseQA | 2.9 | 1.3 | 8.6 | 0.0 | 0.6 | 0.1 | 0.0 | 0.0 | 1.6 | 0.0 | 0.2 | 0.3 | 2.4 |
| StrategyQA | 1.0 | 0.1 | 1.1 | 0.8 | 0.3 | 0.4 | 0.3 | 0.0 | 0.0 | 0.0 | 0.0 | 2.1 | 4.4 |
| SiQA | 0.8 | 1.8 | 0.3 | 0.1 | 1.6 | 0.0 | 0.1 | 0.1 | 0.0 | 0.0 | 0.3 | 0.1 | 3.5 |
| PiQA | 1.6 | 1.6 | 0.2 | 0.1 | 2.8 | 0.3 | 0.5 | 0.3 | 0.0 | 0.0 | 1.4 | 0.3 | 4.6 |
| Winogrande | 0.9 | 1.4 | 0.2 | 0.2 | 0.9 | 0.4 | 0.3 | 0.0 | 0.0 | 0.0 | 0.0 | 2.0 | 3.4 |
| Arc Easy | 0.2 | 0.4 | 0.2 | 0.0 | 0.5 | 1.6 | 1.6 | 0.0 | 0.5 | 0.0 | 0.0 | 0.0 | 0.4 |
| Arc Challenge | 0.0 | 0.7 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7 |
| AGIEval LSAT LR | 3.3 | 2.2 | 0.0 | 0.0 | 1.2 | 0.0 | 2.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.2 |
| AGIEval LSAT AR | 4.8 | 7.0 | 6.1 | 2.2 | 5.7 | 5.2 | 4.3 | 0.4 | 1.3 | 1.3 | 0.0 | 0.4 | 1.7 |
| AGIEval LSAT RC | 7.1 | 1.1 | 0.0 | 0.0 | 0.7 | 3.0 | 6.7 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4 |
| ContextHub Deductive L1 | 0.7 | 1.3 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.3 |
| ContextHub Deductive L2 | 0.2 | 0.4 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4 |
| ContextHub Abductive L1 | 0.6 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 |
| ContextHub Abductive L2 | 0.0 | 0.2 | 0.1 | 0.0 | 0.0 | 0.3 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4 |
| MuSR Murder Mysteries | 0.0 | 0.4 | 0.0 | 0.0 | 0.0 | 11.6 | 0.4 | 0.0 | 0.0 | 0.0 | 0.0 | 6.8 | 3.6 |
| MuSR Team Allocations | 5.2 | 3.2 | 0.8 | 0.0 | 0.8 | 0.4 | 0.4 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 |
| MuSR Object Placements | 0.0 | 1.6 | 0.0 | 0.0 | 0.4 | 0.8 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 2.0 | 0.4 |
| MMLU | 1.9 | 0.6 | 1.0 | 0.2 | 1.5 | 1.0 | 0.4 | 0.2 | 0.0 | 0.1 | 0.0 | 3.1 | 3.2 |
| MMLU Pro | 4.4 | 5.4 | 13.1 | 3.3 | 12.5 | 3.6 | 5.4 | 2.0 | 2.4 | 1.9 | 0.4 | 5.0 | 4.4 |
| GPQA | 4.5 | 10.3 | 9.4 | 1.6 | 8.5 | 1.8 | 3.8 | 0.7 | 0.0 | 0.0 | 0.0 | 11.8 | 15.0 |
| MATH | 1.6 | 5.5 | 8.2 | 2.5 | 2.3 | 1.6 | 3.0 | 0.4 | 0.4 | 0.5 | 0.9 | 1.7 | 1.0 |
| GSM8k | 1.7 | 1.4 | 0.7 | 10.5 | 0.4 | 0.6 | 0.4 | 0.0 | 0.0 | 0.0 | 0.3 | 0.0 | 0.1 |
| BigGen Bench | 5.0 | 0.4 | 0.5 | 0.1 | 0.5 | 0.4 | 0.3 | 9.5 | 0.0 | 0.0 | 0.0 | 0.1 | 0.4 |
| GSM8k-Hard | 2.1 | 8.7 | 10.2 | 4.5 | 10.7 | 3.2 | 3.5 | 1.0 | 0.8 | 0.5 | 3.0 | 1.8 | 2.7 |
| MuSiQue | 1.4 | 0.0 | 8.3 | 0.1 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7 | 3.1 |
| Folio | 0.0 | 0.0 | 1.5 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 2.0 | 1.5 |
| BigBench-Hard | 3.8 | 5.4 | 1.8 | 0.4 | 1.3 | 0.1 | 0.4 | 0.3 | 0.0 | 0.0 | 0.0 | 1.2 | 0.9 |
" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "34" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.479, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "table_caption", + "bbox": [ + 0.172, + 0.175, + 0.825, + 0.218 + ], + "angle": 0, + "content": "Table 11: Percentage of responses per dataset per model that our answer parser could not extract an answer for in the few-shot direct answer setting. Prompt modifications were made to decrease these numbers. No model is above \\(15\\%\\)." + }, + { + "type": "table", + "bbox": [ + 0.175, + 0.23, + 0.825, + 0.433 + ], + "angle": 0, + "content": "
Few-shot Direct Answer Unparseable Answer Rate by Percentage
| dataset | Meta-Llama 2-7b | Mistral 7b | Meta-Llama 3.1-8b | Meta-Llama 3.1-70b | Gemma 2-9b | Phi-3 Small 8k | Qwen 2-7b | Qwen 2-72b | GPT-4o Mini | Gemini 1.5 Flash |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| CommonsenseQA | 0.0 | 0.1 | 0.2 | 0.0 | 1.3 | 0.9 | 9.9 | 1.3 | 0.0 | 0.6 |
| AGIEval LSAT LR | 6.7 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.2 |
| AGIEval LSAT AR | 2.6 | 0.0 | 0.0 | 0.0 | 3.5 | 5.2 | 0.0 | 0.0 | 0.0 | 0.0 |
| AGIEval LSAT RC | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 |
| ContextHub Deductive L1 | 0.0 | 2.8 | 0.0 | 0.0 | 0.0 | 10.7 | 0.3 | 0.0 | 0.0 | 0.0 |
| ContextHub Deductive L2 | 0.0 | 0.1 | 0.0 | 0.0 | 0.0 | 0.3 | 0.2 | 0.0 | 0.0 | 0.0 |
| ContextHub Abductive L1 | 0.0 | 2.8 | 0.0 | 0.0 | 0.0 | 0.6 | 0.0 | 0.0 | 0.0 | 0.0 |
| ContextHub Abductive L2 | 0.0 | 2.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 |
| MuSR Murder Mysteries | -1.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4 |
| MuSR Team Allocations | -1.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 |
| MuSR Object Placements | -1.0 | 0.0 | 0.0 | 0.0 | 0.4 | 1.2 | 0.0 | 0.0 | 0.0 | 0.0 |
| MMLU | 4.2 | 0.2 | 0.0 | 0.0 | 0.1 | 0.0 | 0.4 | 0.1 | 0.0 | 0.2 |
| MMLU Pro | 5.1 | 1.2 | 2.4 | 0.3 | 1.0 | 9.1 | 0.5 | 2.6 | 0.4 | 0.5 |
| GPQA | -1.0 | 1.3 | 0.0 | 0.0 | 3.6 | 7.4 | 13.4 | 1.1 | 0.0 | 0.0 |
| MATH | 0.3 | 5.9 | 0.3 | 0.2 | 0.1 | 0.1 | 1.6 | 2.2 | 0.0 | 0.3 |
| GSM8k | 0.1 | 0.1 | 0.5 | 0.0 | 0.1 | 2.2 | 0.0 | 0.2 | 0.0 | 0.0 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.59, + 0.825, + 0.632 + ], + "angle": 0, + "content": "Table 12: Percentage of responses per dataset per model that our answer parser could not extract an answer for in the few-shot CoT setting. Prompt modifications were made to decrease these numbers. No model is above \\(15\\%\\)." + }, + { + "type": "table", + "bbox": [ + 0.175, + 0.643, + 0.825, + 0.847 + ], + "angle": 0, + "content": "
Few-shot CoT Unparseable Answer Rate by Percentage
| dataset | Meta-Llama 2-7b | Mistral 7b | Meta-Llama 3.1-8b | Meta-Llama 3.1-70b | Gemma 2-9b | Phi-3 Small 8k | Qwen 2-7b | Qwen 2-72b | GPT-4o Mini | Gemini 1.5 Flash |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| CommonsenseQA | 0.7 | 0.9 | 1.8 | 0.1 | 0.2 | 0.1 | 0.0 | 0.0 | 0.0 | 3.4 |
| AGIEval LSAT LR | 0.6 | 0.8 | 0.4 | 0.0 | 1.4 | 3.1 | 0.8 | 0.0 | 0.0 | 0.6 |
| AGIEval LSAT AR | 2.2 | 9.1 | 3.9 | 0.9 | 11.7 | 3.0 | 3.5 | 1.7 | 0.0 | 1.3 |
| AGIEval LSAT RC | 7.8 | 5.9 | 0.0 | 0.0 | 1.9 | 9.3 | 2.6 | 0.0 | 0.0 | 2.2 |
| ContextHub Deductive L1 | 0.2 | 0.0 | 0.2 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.3 |
| ContextHub Deductive L2 | 0.9 | 0.0 | 0.2 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.3 |
| ContextHub Abductive L1 | 0.8 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 |
| ContextHub Abductive L2 | 3.1 | 0.0 | 5.3 | 0.1 | 0.0 | 0.2 | 0.0 | 0.0 | 0.0 | 0.7 |
| MuSR Murder Mysteries | -1.0 | 1.2 | 0.0 | 0.0 | 0.4 | 0.8 | 0.0 | 0.0 | 0.0 | 14.0 |
| MuSR Team Allocations | -1.0 | 2.4 | 0.0 | 0.0 | 0.0 | 0.0 | 0.8 | 0.0 | 0.0 | 0.4 |
| MuSR Object Placements | -1.0 | 0.4 | 0.0 | 0.0 | 1.2 | 0.4 | 0.0 | 0.0 | 0.0 | 0.0 |
| MMLU | 0.6 | 0.8 | 1.1 | 0.2 | 1.5 | 0.7 | 0.3 | 0.2 | 0.2 | 2.5 |
| MMLU Pro | 0.6 | 1.9 | 8.5 | 2.1 | 14.1 | 1.8 | 1.9 | 0.8 | 1.1 | 3.9 |
| GPQA | -1.0 | 12.1 | 10.3 | 0.9 | 12.9 | 6.0 | 5.6 | 3.3 | 0.0 | 13.6 |
| MATH | 1.5 | 6.8 | 8.2 | 2.4 | 11.1 | 2.6 | 2.9 | 1.1 | 0.5 | 1.8 |
| GSM8k | 0.8 | 1.3 | 1.0 | 0.1 | 0.5 | 0.5 | 0.1 | 0.0 | 0.1 | 0.1 |
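For reference, the unparseable rates in Tables 9-12 are produced by the extract step described in Section 2. Below is a minimal sketch of how such a parser and the per-dataset rate could be computed; the `ANSWER_PATTERNS` regexes and function names are illustrative assumptions, not the paper's actual parsers, which are tailored to each model and dataset combination.

```python
import re
from typing import Optional

# Illustrative patterns only; the paper tailors extraction to each
# model and dataset combination to keep unparseable rates low.
ANSWER_PATTERNS = [
    re.compile(r"The answer is therefore\s*\(?([A-E])\)?"),  # CoT-style suffix
    re.compile(r"Answer:\s*\(?([A-E])\)?"),                  # direct-answer style
]

def extract(response: str) -> Optional[str]:
    """Return the parsed answer choice, or None if the response is unparseable."""
    for pattern in ANSWER_PATTERNS:
        match = pattern.search(response)
        if match:
            return match.group(1)
    return None

def unparseable_rate(responses: list[str]) -> float:
    """Percentage of responses from which no answer could be extracted."""
    misses = sum(extract(r) is None for r in responses)
    return 100.0 * misses / len(responses)

# One parseable CoT-style response and one unparseable response -> 50.0
print(unparseable_rate(["... The answer is therefore (C).", "no idea"]))
```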
" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "35" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.479, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.159, + 0.825, + 0.229 + ], + "angle": 0, + "content": "Table 13: Average character index of where the answer span begins in a generated response for each dataset and model pair for the zero-shot direct answer setting. We use these numbers as a proxy for the model following instructions (i.e. generating reasoning before an answer). Prompt modifications were made to ensure CoT prompts resulted in longer generations and direct answer prompts led to short generations." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.24, + 0.825, + 0.451 + ], + "angle": 0, + "content": "
Zero-shot Direct Answer Span Location By Character Index
| dataset | Meta-Llama 2-7b | Mistral 7b | Meta-Llama 3.1-8b | Meta-Llama 3.1-70b | Gemma 2-9b | Phi-3 Small 8k | Qwen 2-7b | Qwen 2-72b | GPT-4o Mini | GPT-4o | Claude-3 Haiku | Claude-3.5 Sonnet | Gemini 1.5 Flash |
CommonsenseQA98278881088107788
StrategyQA444527444444464488424187
SiQA888888298886688
PiQA788888258884588
Winogrande89888898895488
Arc Easy98888898887788
Arc Challenge88888898887788
AGIEval LSAT LR2524242424242524432125252626
AGIEval LSAT AR2524242424242624482325252627
AGIEval LSAT RC2524242424242524311825252625
ContextHub Deductive L11919191920191919191920201920
ContextHub Deductive L21919191919191919191920201919
ContextHub Abductive L11919191920191919191920201919
ContextHub Abductive L21919191920191919191920201919
MuSR Minder Mysteries882788888886488
MuSR Team Allocations272219192723262288302088
MuSR Object Placements882788888887688
MMLU1918191920181818191919191920
MMLU Pro2019381921191920191920201919
GPQA1919191921191919191920201920
MATH3031282830303333282831292828
GSM8k2229302828372428282829282828
GSM8k-Hard95711219134020788888
Folio3988888311381656870
BigBench-Hard3922252126322926281928281016
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.573, + 0.825, + 0.645 + ], + "angle": 0, + "content": "Table 14: Average character index of where the answer span begins in a generated response for each dataset and model pair for the zero-shot CoT setting. We use these numbers as a proxy for the model following instructions (i.e. generating reasoning before an answer). Prompt modifications were made to ensure CoT prompts resulted in longer generations and direct answer prompts led to short generations." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.655, + 0.825, + 0.864 + ], + "angle": 0, + "content": "
Zero-shot CoT Answer Span Location By Character Index
| dataset | Meta-Llama 2-7b | Mistral 7b | Meta-Llama 3.1-8b | Meta-Llama 3.1-70b | Gemma 2-9b | Phi-3 Small 8k | Qwen 2-7b | Qwen 2-72b | GPT-4o Mini | GPT-4o | Claude-3 Haiku | Claude-3.5 Sonnet | Gemini 1.5 Flash |
CommonsenseQA441564845123723646657734189910866261103214165
StrategyQA726434996113126746036335869210337541158256195
SiQA56942384196523552847242084710946021016196169
PiQA6994558699142075324473646839355781092200150
Winogrande377324645694187326391298634750408889200173
Arc Easy6845811154131936761053435599012397891222340231
Arc Challenge76364411781316422596571387102012698281240372267
AGIEval LSAT LR205313241163167552468915607689499981561728906886
AGIEval LSAT AR1377179114222182712102718191264123011511202849817871
AGIEval LSAT RC1977103211031575739590117066097310791628786703709
ContextHub Deductive L1694368759711383327539402540580542556320254
ContextHub Deductive L28424721095990614442789585840758777655515503
ContextHub Abductive L1577461747879464440754638788879683594368325
ContextHub Abductive L28616001270122968657197685611151113894894601551
MuSR Murder Mysteries4951592195818471210124612411718196119651671175913491213
MuSR Team Allocations12121845229423101513143320212213256226981479185615961607
MuSR Object Placements917625135412666956419048191593153612101455616429
MMLU834512663622503277497407400461447409630413
MMLU Pro1371513788716640518954699926940590653660774
GPQA10347789179018065001018628541666486472981735
MATH7421118122211797486701189114511251153677675679698
GSM8k57263783471945352170964510481035708680541437
GSM8k-Hard916939102710695557661083105313501266594815605512
Folio72476514791379733668919488128515839071194934492
BigBench-Hard596230876861429349315443877973545863455346
" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "36" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.479, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.161, + 0.825, + 0.232 + ], + "angle": 0, + "content": "Table 15: Average character index of where the answer span begins in a generated response for each dataset and model pair for the few-shot direct answer setting. We use these numbers as a proxy for the model following instructions (i.e. generating reasoning before an answer). Prompt modifications were made to ensure CoT prompts resulted in longer generations and direct answer prompts led to short generations." + }, + { + "type": "table", + "bbox": [ + 0.175, + 0.244, + 0.825, + 0.447 + ], + "angle": 0, + "content": "
Few-shot Direct Answer Span Location By Character Index
| dataset | Meta-Llama 2-7b | Mistral 7b | Meta-Llama 3.1-8b | Meta-Llama 3.1-70b | Gemma 2-9b | Phi-3 Small 8k | Qwen 2-7b | Qwen 2-72b | GPT-4o Mini | Gemini 1.5 Flash |
CommonsenseQA8782788810888
AGIEval LSAT LR25242424242424243124
AGIEval LSAT AR25242424242424242724
AGIEval LSAT RC25242424242424242524
ContextHub Deductive L119191919191919191919
ContextHub Deductive L219191919191919191919
ContextHub Abductive L119191919191919191919
ContextHub Abductive L219191919191919191919
MuSR Murder Mysteries-18278888888
MuSR Team Allocations-12119192721272388
MuSR Object Placements-18278888888
MMLU19181919191818181919
MMLU Pro19193819202019191919
GPQA-1191919191919191919
MATH29362929283030412828
GSM8k22232322222322242728
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.576, + 0.825, + 0.646 + ], + "angle": 0, + "content": "Table 16: Average character index of where the answer span begins in a generated response for each dataset and model pair for the few-shot CoT setting. We use these numbers as a proxy for the model following instructions (i.e. generating reasoning before an answer). Prompt modifications were made to ensure CoT prompts resulted in longer generations and direct answer prompts led to short generations." + }, + { + "type": "table", + "bbox": [ + 0.175, + 0.658, + 0.825, + 0.861 + ], + "angle": 0, + "content": "
Few-shot CoT Answer Span Location By Character Index
| dataset | Meta-Llama 2-7b | Mistral 7b | Meta-Llama 3.1-8b | Meta-Llama 3.1-70b | Gemma 2-9b | Phi-3 Small 8k | Qwen 2-7b | Qwen 2-72b | GPT-4o Mini | Gemini 1.5 Flash |
CommonsenseQA301195470921145192280174219158
AGIEval LSAT LR1037510464539437359530599894523
AGIEval LSAT AR1024124788676857310257508351033670
AGIEval LSAT RC7993781312061641112412051086266
ContextHub Deductive L1383386406376359376388364416366
ContextHub Deductive L2736767829822823855612807884809
ContextHub Abductive L1301386428450431413541447575379
ContextHub Abductive L2709586967754804784829821905815
MuSR Murder Mysteries-1128016931702122513381246171919741419
MuSR Team Allocations-1219520872160162817552181215626321841
MuSR Object Placements-1907110412137069196769631351853
MMLU282266333245265260267243392218
MMLU Pro429397424411516425541325681396
GPQA-1848782774615711662703670594
MATH63070558464074752910748481261553
GSM8k374332352352398372415341651314
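Tables 13-16 use the average character index of the answer span as a proxy for instruction following: reasoning should precede the answer under CoT prompts, while direct prompts should answer immediately. A rough sketch of that proxy is below, assuming a single illustrative regex `ANSWER_RE` rather than the per-dataset extractors described in Appendix C.

```python
import re

# Illustrative stand-in; real answer spans come from the per-dataset,
# per-model extractors described in Appendix C.
ANSWER_RE = re.compile(r"The answer is therefore\s*\(?[A-E]\)?")

def mean_answer_span_index(responses: list[str]) -> float:
    """Average character offset at which the answer span begins.

    Small values mean the model answered immediately (direct style);
    large values mean reasoning preceded the answer (CoT style).
    """
    starts = [m.start() for m in (ANSWER_RE.search(r) for r in responses) if m]
    return sum(starts) / len(starts) if starts else float("nan")

# A direct-style response answers at offset 0; a CoT-style one much later.
print(mean_answer_span_index([
    "The answer is therefore (B).",
    "First, eliminate (A) and (C); checking (B) against the rules... "
    "The answer is therefore (B).",
]))
```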
" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "37" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.101, + 0.827, + 0.158 + ], + "angle": 0, + "content": "Table 17: The top 3 slices benefiting the most from CoT across MMLU and MMLU Pro for Llama 3.1 8b and 70b. 6 out of 12 of these top slices directly contain \"math\" or \"mathematics.\" We dive deeper into each category subsequently and observe that the questions leading to improvements in the other categories are mathematical in nature as well." + }, + { + "type": "table", + "bbox": [ + 0.175, + 0.169, + 0.825, + 0.27 + ], + "angle": 0, + "content": "
| Model | Subject (MMLU) | Direct (%) | CoT (%) | Err. Red. (%) | N | Subject (MMLU Pro) | Direct (%) | CoT (%) | Err. Red. (%) | N |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| Llama 3.1 8b | elementary mathematics | 46.8 | 88.4 | 78.1 | 378 | math | 23.6 | 44.8 | 27.8 | 1350 |
| Llama 3.1 8b | high_school mathematics | 39.6 | 71.5 | 52.8 | 270 | business | 29.4 | 45.6 | 23.0 | 789 |
| Llama 3.1 8b | miscellaneous | 83.9 | 89.9 | 37.3 | 783 | physics | 27.9 | 41.4 | 18.8 | 1299 |
| Llama 3.1 70b | elementary mathematics | 82.3 | 94.7 | 70.1 | 378 | math | 44.5 | 68.3 | 42.9 | 1351 |
| Llama 3.1 70b | medical_genetics | 93.0 | 97.0 | 57.1 | 100 | business | 44.0 | 67.8 | 42.5 | 789 |
| Llama 3.1 70b | high_school mathematics | 61.5 | 82.2 | 53.8 | 270 | chemistry | 40.5 | 64.0 | 39.6 | 1132 |
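The "Err. Red. (%)" column above is consistent with standard relative error reduction, (CoT - Direct) / (100 - Direct): for Llama 3.1 8b on elementary mathematics, (88.4 - 46.8) / (100 - 46.8) gives roughly 78.2, matching the reported 78.1 up to rounding of the underlying accuracies. A one-function sketch (the name is ours, not the paper's):

```python
def error_reduction(direct_acc: float, cot_acc: float) -> float:
    """Relative error reduction (%) of CoT over direct answering."""
    return 100.0 * (cot_acc - direct_acc) / (100.0 - direct_acc)

# Llama 3.1 8b on MMLU elementary mathematics: ~78.2 vs. the reported 78.1.
print(round(error_reduction(46.8, 88.4), 1))
```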
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.296, + 0.825, + 0.327 + ], + "angle": 0, + "content": "level. In doing so, we can test our hypotheses with much finer granularity than possible by relying on subjective groupings into tasks and categories." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.34, + 0.825, + 0.438 + ], + "angle": 0, + "content": "Breakdown by the presence of equations We aim to design an instance-level classifier to determine if CoT is expected to help on a question or not. That is, we want a function \\( g: \\mathbf{q} \\to \\{0,1\\} \\) where \\( g(\\mathbf{q}) \\) returns 1 if \\( \\text{extract}(\\tilde{\\mathbf{y}}_{\\text{cot}}) = \\mathbf{y}^* \\) and \\( \\text{extract}(\\tilde{\\mathbf{y}}_{da}) \\neq \\mathbf{y}^* \\) where \\( \\mathbf{y}^* \\) is the gold answer to \\( \\mathbf{q} \\). We explored different forms of \\( g \\); however, we ultimately found it most effective to use a classifier \\( g: (\\mathbf{q}, \\tilde{\\mathbf{y}}_{\\text{cot}}) \\to \\{0,1\\} \\) which also consults the chain-of-thought produced by the model. This allows us to featurize how the LM solves the problem, particularly whether it uses symbolic reasoning or not." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.444, + 0.825, + 0.488 + ], + "angle": 0, + "content": "We find that \\( g \\) can be implemented with a single feature: does \\( \\mathbf{q} \\) or \\( \\tilde{\\mathbf{y}}_{\\mathrm{cot}} \\) contain a “=”? The “=” token very strongly indicates the presence of equations in the problem or its solution, which turn out to be a strong hallmark of symbolic reasoning." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.493, + 0.826, + 0.606 + ], + "angle": 0, + "content": "We plot the overall CoT delta (performance of CoT minus the performance of direct answer) for both MMLU and MMLU Pro across multiple models between two bins according to this classifier \\( g \\), labeled as \"With =\" and \"Without =\", in Figure 4. We also report the amount of performance gain explained by questions having an \" ==\" vs. not in Appendix G.1. We find that the majority of the performance gain from CoT on MMLU and MMLU Pro comes from questions that have an \" ==\" in the question or generated responses. Because \" ==\" are usually found in math problems, we equate this to CoT primarily benefiting MMLU and MMLU Pro on the math-related questions with very little to no gain (depending on the model) for non-math questions." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.622, + 0.654, + 0.637 + ], + "angle": 0, + "content": "G.1 PERFORMANCE IMPACTS OF “=” ON MMLU AND MMLU PRO" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.648, + 0.825, + 0.678 + ], + "angle": 0, + "content": "Tables 18 and 19 show the amount of total improvement from using CoT over direct prompting that can be explained by the presence of “=” on MMLU and MMLU Pro over multiple models." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.697, + 0.781, + 0.713 + ], + "angle": 0, + "content": "H FULL RESULTS OF EVALUATIONS ON FORMAL REASONING DATASETS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.728, + 0.825, + 0.827 + ], + "angle": 0, + "content": "As discussed in Section 5, we include detailed evaluation results of few-shot direct answer, few-shot CoT, direct answer solver, CoT solver, and tool-augmented prompting in Table 20. 
The unparseable rate stands for the rate of unparseable model responses that either fail to pass our answer extraction parser (for all methods except tool-augmented prompting) or fail to be executed by symbolic solvers. For FOLIO and ContextHub, we compute the accuracy by making a random guess for the unparseable responses; for GSM8K and GSM8K-Hard, we consider the unparseable responses as incorrect." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.833, + 0.825, + 0.878 + ], + "angle": 0, + "content": "We note that all models have a low unparseable rate \\((< 10\\%)\\) for all methods except tool-augmented prompting. By manually inspecting the outputs, we observe that the high unparseable rate for some models with tool-augmented prompting is caused by these models generating Python programs or" + }, + { + "type": "page_footnote", + "bbox": [ + 0.17, + 0.885, + 0.825, + 0.926 + ], + "angle": 0, + "content": "6We explored implementing \\( g \\) with a logistic regression classifier with tfidf features over the \\( (\\mathbf{q},\\tilde{\\mathbf{y}}_{\\mathrm{cot}}) \\) pairs, trained over a subset of the data from MMLU and MMLU Pro. This classifier actually allowed us to discover the “=” feature, but its accuracy did not exceed the accuracy of that single feature." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "38" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.479, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "table_caption", + "bbox": [ + 0.172, + 0.184, + 0.825, + 0.211 + ], + "angle": 0, + "content": "Table 18: Total CoT deltas on MMLU broken down by the total gain from questions and responses with an “=” vs. without an “=”" + }, + { + "type": "table", + "bbox": [ + 0.175, + 0.224, + 0.825, + 0.424 + ], + "angle": 0, + "content": "
| Model | Total CoT Delta | CoT delta w/ = | CoT delta w/o = | Perf. Gain w/ = | Fraction of N w/ = |
| --- | --- | --- | --- | --- | --- |
| Llama 2 7b | 6.0 | 0.6 | 5.4 | 9.8% | 10.9% |
| Mistral 7b | 4.1 | 1.2 | 2.9 | 28.6% | 9.8% |
| Llama 3.1 8b | 5.5 | 2.9 | 2.6 | 52.9% | 9.6% |
| Llama 3.1 70b | 1.9 | 1.8 | 0.1 | 94.0% | 10.6% |
| Gemma 2 9b | 2.6 | 2.0 | 0.6 | 78.5% | 10.0% |
| Phi-3 Small 8k | 3.1 | 1.5 | 1.7 | 47.4% | 8.3% |
| Qwen 2 7b | 2.5 | 3.0 | -0.5 | 100.0% | 9.8% |
| Qwen 2 72b | 3.5 | 2.4 | 1.1 | 67.8% | 9.6% |
| GPT-4o Mini | 5.2 | 3.5 | 1.7 | 66.9% | 10.5% |
| GPT-4o | 4.2 | 2.4 | 1.8 | 57.6% | 10.3% |
| Claude-3 Haiku | 3.7 | 2.4 | 1.3 | 64.4% | 9.3% |
| Claude-3.5 Sonnet | 3.2 | 2.3 | 0.9 | 72.1% | 10.7% |
| Gemini 1.5 Flash | 3.0 | 1.7 | 1.2 | 59.0% | 10.1% |
| Gemini 1.5 Pro | 1.9 | 1.0 | 0.9 | 51.9% | 9.6% |
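As described in Appendix G (and footnote 6), the instance-level classifier g reduces to a single surface feature: whether the question or the generated chain of thought contains an "=". A minimal sketch, with variable names that are ours rather than the paper's:

```python
def g(question: str, cot_response: str) -> int:
    """Single-feature classifier: predict that CoT helps on an instance
    iff '=' appears in the question or in the generated chain of thought,
    a strong surface marker of equations and symbolic manipulation."""
    return int("=" in question or "=" in cot_response)

# A math-flavored item fires the feature; a commonsense item does not.
print(g("Solve 3x + 5 = 20 for x.", "3x = 15, so x = 5."))             # 1
print(g("Where would you find magazines?", "A bookstore sells them."))  # 0
```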
" + }, + { + "type": "table_caption", + "bbox": [ + 0.172, + 0.598, + 0.825, + 0.626 + ], + "angle": 0, + "content": "Table 19: Total CoT deltas on MMLU Pro broken down by the total gain from questions and responses with an “=” vs. without an “=”" + }, + { + "type": "table", + "bbox": [ + 0.175, + 0.638, + 0.825, + 0.838 + ], + "angle": 0, + "content": "
| Model | Total CoT Delta | CoT delta w/ = | CoT delta w/o = | Perf. Gain w/ = | Fraction of N w/ = |
| --- | --- | --- | --- | --- | --- |
| Llama 2 7b | 1.6 | 1.3 | 0.3 | 79.6% | 43.6% |
| Mistral 7b | 3.8 | 1.9 | 1.9 | 50.7% | 41.8% |
| Llama 3.1 8b | 12.4 | 10.0 | 2.4 | 80.8% | 35.2% |
| Llama 3.1 70b | 11.4 | 11.1 | 0.3 | 97.6% | 39.6% |
| Gemma 2 9b | 7.6 | 7.4 | 0.2 | 97.9% | 40.2% |
| Phi-3 Small 8k | 11.6 | 9.9 | 1.7 | 85.7% | 42.7% |
| Qwen 2 7b | 10.0 | 8.9 | 1.1 | 88.6% | 41.6% |
| Qwen 2 72b | 19.0 | 16.1 | 2.9 | 84.7% | 41.4% |
| GPT-4o Mini | 20.6 | 18.4 | 2.3 | 89.0% | 44.0% |
| GPT-4o | 17.7 | 17.1 | 0.6 | 96.7% | 44.1% |
| Claude-3 Haiku | 8.7 | 7.8 | 0.9 | 90.1% | 42.0% |
| Claude-3.5 Sonnet | 16.2 | 14.8 | 1.3 | 91.9% | 43.4% |
| Gemini 1.5 Flash | 12.9 | 11.8 | 1.1 | 91.3% | 42.3% |
| Gemini 1.5 Pro | 10.0 | 8.6 | 1.4 | 85.7% | 41.8% |
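The middle columns of Tables 18 and 19 are consistent with an additive decomposition of the total CoT delta into contributions from the "=" and non-"=" bins, with "Perf. Gain w/ =" being the share of the total delta coming from the "=" bin (apparently capped at 100%, e.g. Qwen 2 7b on MMLU). The sketch below is our reading of that bookkeeping, not code from the paper:

```python
def decompose_cot_delta(rows: list[tuple[bool, bool, bool]]) -> dict:
    """Split the total CoT delta into contributions from instances with
    and without an '='.

    Each row is (has_equals, cot_correct, direct_correct). A bin's
    contribution is its accuracy delta weighted by its share of instances,
    so the two contributions sum to the total delta (up to rounding)."""
    n = len(rows)
    contrib = {True: 0.0, False: 0.0}
    for has_eq, cot_ok, da_ok in rows:
        contrib[has_eq] += (cot_ok - da_ok) * 100.0 / n
    total = contrib[True] + contrib[False]
    share = 100.0 * contrib[True] / total if total else float("nan")
    return {
        "total_delta": total,
        "delta_with_eq": contrib[True],
        "delta_without_eq": contrib[False],
        "gain_explained_by_eq": min(share, 100.0),  # reported values look capped
    }

# Toy run: 2 of 10 items contain '=' and both flip to correct under CoT.
rows = [(True, True, False)] * 2 + [(False, True, True)] * 8
print(decompose_cot_delta(rows))  # total_delta 20.0, all from the '=' bin
```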
" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "39" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.479, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.101, + 0.825, + 0.199 + ], + "angle": 0, + "content": "Table 20: Performance and unparseable rates for few-shot direct answer, few-shot CoT, Plan + Direct Solver, Plan + CoT Solver, and Plan + Tool Solver Solver. \"Acc.\" stands for accuracy and \"% Unp.\" stands for the rate of unparseable model responses that either fail to pass our answer extraction parser (for all methods except Plan + Tool Solver prompting) or fail to be executed by symbolic solvers. For FOLIO and ContextHub, we compute the accuracy by making a random guess for the unparseable responses; for GSM8K and GSM8K-Hard, we consider the unparseable responses as incorrect." + }, + { + "type": "table", + "bbox": [ + 0.18, + 0.21, + 0.819, + 0.499 + ], + "angle": 0, + "content": "
| Dataset | Method | Mistral 7b Acc. | Mistral 7b % Unp. | Llama 3.1 8b Acc. | Llama 3.1 8b % Unp. | Llama 3.1 70b Acc. | Llama 3.1 70b % Unp. | GPT-4o Mini Acc. | GPT-4o Mini % Unp. |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| GSM8K | Direct Answer | 12.5 | 0.1 | 20.1 | 0.5 | 39.1 | 0.0 | 32.8 | 0.0 |
| GSM8K | CoT | 56.2 | 1.4 | 86.4 | 1.0 | 96.1 | 0.1 | 94.2 | 0.1 |
| GSM8K | Plan + CoT Solver | 45.0 | 1.0 | 78.7 | 0.4 | 94.7 | 0.0 | 92.0 | 0.1 |
| GSM8K | Plan + Direct Solver | 10.6 | 0.1 | 19.6 | 0.1 | 42.2 | 0.0 | 39.3 | 0.0 |
| GSM8K | Plan + Tool Solver | 59.8 | 8.6 | 80.3 | 1.3 | 94.4 | 0.4 | 90.5 | 1.5 |
| GSM8K-Hard | Direct Answer | 2.9 | 0.7 | 4.4 | 0.6 | 12.8 | 0.7 | 12.3 | 7.6 |
| GSM8K-Hard | CoT | 20.3 | 5.0 | 32.4 | 9.6 | 47.8 | 4.4 | 52.2 | 0.5 |
| GSM8K-Hard | Plan + CoT Solver | 18.7 | 2.6 | 32.4 | 1.3 | 49.7 | 0.6 | 51.5 | 0.3 |
| GSM8K-Hard | Plan + Direct Solver | 3.0 | 0.5 | 5.5 | 0.8 | 15.8 | 0.1 | 17.4 | 0.3 |
| GSM8K-Hard | Plan + Tool Solver | 44.2 | 8.9 | 57.9 | 1.2 | 68.0 | 0.5 | 70.4 | 1.4 |
| ContextHub Deductive L1 | Direct Answer | 59.2 | 2.8 | 23.0 | 0.0 | 50.0 | 0.0 | 44.3 | 0.0 |
| ContextHub Deductive L1 | CoT | 46.2 | 0.2 | 73.0 | 0.2 | 67.5 | 0.0 | 59.2 | 0.0 |
| ContextHub Deductive L1 | Plan + CoT Solver | 49.5 | 0.0 | 64.8 | 0.0 | 65.5 | 0.0 | 63.2 | 0.0 |
| ContextHub Deductive L1 | Plan + Direct Solver | 45.8 | 3.0 | 55.8 | 0.0 | 53.5 | 0.0 | 56.2 | 0.0 |
| ContextHub Deductive L1 | Plan + Tool Solver | 68.8 | 27.8 | 84.2 | 11.8 | 91.7 | 9.8 | 90.7 | 7.8 |
| ContextHub Abductive L1 | Direct Answer | 21.7 | 2.8 | 36.1 | 0.0 | 58.9 | 0.0 | 59.2 | 0.0 |
| ContextHub Abductive L1 | CoT | 23.9 | 0.0 | 40.0 | 0.0 | 62.2 | 0.0 | 76.9 | 0.0 |
| ContextHub Abductive L1 | Plan + CoT Solver | 38.3 | 0.0 | 42.5 | 0.0 | 65.6 | 0.0 | 74.2 | 0.0 |
| ContextHub Abductive L1 | Plan + Direct Solver | 46.9 | 3.9 | 33.3 | 0.3 | 63.1 | 0.0 | 61.7 | 0.0 |
| ContextHub Abductive L1 | Plan + Tool Solver | 59.2 | 35.8 | 70.8 | 9.7 | 73.9 | 4.2 | 74.7 | 10.3 |
| FOLIO | Direct Answer | 56.2 | 12.3 | 59.6 | 0.0 | 69.5 | 0.0 | 64.0 | 0.0 |
| FOLIO | CoT | 53.7 | 1.5 | 56.7 | 2.5 | 72.4 | 2.0 | 70.4 | 0.0 |
| FOLIO | Plan + CoT Solver | 53.7 | 0.0 | 55.7 | 0.0 | 73.9 | 0.5 | 70.4 | 0.0 |
| FOLIO | Plan + Direct Solver | 52.7 | 0.0 | 54.2 | 0.0 | 72.9 | 0.0 | 63.5 | 0.0 |
| FOLIO | Plan + Tool Solver | 48.8 | 46.8 | 54.2 | 28.6 | 70.0 | 16.7 | 62.6 | 25.1 |
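The Plan + Tool Solver rows above execute an LM-generated formal plan with an external solver (Python or z3 programs, as noted in the surrounding discussion); plans that fail to execute count toward the unparseable rate. Below is a minimal sketch of the execution half, under the assumption that the plan arrives as a small Python program assigning its result to a variable named `answer`; this is an illustration, not the paper's pipeline.

```python
def run_tool_solver(plan_program: str) -> tuple[object, bool]:
    """Execute an LM-generated Python plan; return (answer, parseable).

    A failed execution (syntax error, missing `answer`, etc.) counts toward
    the unparseable rate; depending on the dataset it is then scored as a
    random guess (FOLIO, ContextHub) or as incorrect (GSM8K, GSM8K-Hard)."""
    namespace: dict = {}
    try:
        exec(plan_program, namespace)  # assumes a sandboxed environment
        return namespace["answer"], True
    except Exception:
        return None, False

# GSM8K-style plan: a reported count of 48 that overstates the truth by 20%.
plan = "reported = 48\nanswer = reported / 1.2\n"
print(run_tool_solver(plan))  # (40.0, True)
```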
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.525, + 0.825, + 0.583 + ], + "angle": 0, + "content": "formal specifications that fail to follow the format of the formal language (Python or z3) and that lead to execution errors. Such an issue is particularly severe for the smaller models. However, we note that despite the high unparseable rate, the overall accuracy of these models with tool augmentation is still on par with or outperforms other methods." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.601, + 0.451, + 0.617 + ], + "angle": 0, + "content": "I DISCUSSION OF LIMITATIONS" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.632, + 0.406, + 0.646 + ], + "angle": 0, + "content": "I.1 LONG HORIZON PLANNING" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.659, + 0.825, + 0.827 + ], + "angle": 0, + "content": "One set of tasks where symbolic reasoning helps substantially that our experiments haven't covered as thoroughly (with the exception of BiGGen-Bench) is long-horizon planning (Valmeekam et al., 2023; Xie et al., 2024; Gundawar et al., 2024; Valmeekam et al., 2024). There are two reasons we don't treat it here. First, we are primarily interested in tasks that are conveyed in language, and we see less complex planning in language-only tasks. Second, there has already been a large debate on the effectiveness of CoT, both pro (Huang et al., 2022; Hu et al., 2023) and against (Valmeekam et al., 2023; Kambhampati, 2024; Kambhampati et al., 2024b; Stechly et al., 2024a; Guan et al., 2024; Verma et al., 2024; Gundawar et al., 2024; Stechly et al., 2024b) using CoT and its derivatives like tree-of-thought (Yao et al., 2023; Kang et al., 2024), that has resulted in complex systems to help solve planning problems better. While story generation and interpretation involve elements of planning with natural language (Peng et al., 2022; Karpinska et al., 2024), such tasks are not conventionally formalized and benchmarked as planning and reasoning." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.843, + 0.4, + 0.857 + ], + "angle": 0, + "content": "I.2 DATASET CONTAMINATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.868, + 0.825, + 0.926 + ], + "angle": 0, + "content": "One limitation of our study is the presence of possible data contamination: it is unknown which benchmarks may have been explicitly pre-trained on by language models. If a model had memorized answers to benchmark questions, we would expect direct answering to close some of the gap with CoT, as the model can just reproduce a known answer rather than deriving it from scratch. We argue" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "40" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.034, + 0.478, + 0.047 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.104, + 0.825, + 0.201 + ], + "angle": 0, + "content": "there are four reasons that our general conclusions are still trustworthy. First, we use a range of language model scales, including small models that have less capacity to memorize. Second, datasets with poor direct answering performance like GSM8K-Hard are unlikely to have been substantially memorized. Third, the inclusion of recent datasets such as MuSR (Sprague et al., 2024) and BiGGen Bench (Kim et al., 2024) helps to defray this risk. 
Fourth, our survey of the literature includes papers that were submitted to conferences in 2023, representing a range of older LLMs trained at various times." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.507, + 0.96 + ], + "angle": 0, + "content": "41" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.103, + 0.373, + 0.119 + ], + "angle": 0, + "content": "J EXAMPLE PROMPTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.251, + 0.827, + 0.281 + ], + "angle": 0, + "content": "We will release all prompts and model responses on our Huggingface repo. We list a few prompt response pairs here in this section." + }, + { + "type": "title", + "bbox": [ + 0.199, + 0.463, + 0.604, + 0.479 + ], + "angle": 0, + "content": "AGIEval LSAT AR zero-shot CoT prompt for Llama 3.1 70B" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.492, + 0.53, + 0.508 + ], + "angle": 0, + "content": "<|start_header_id|>user<|end_header_id|>" + }, + { + "type": "text", + "bbox": [ + 0.196, + 0.52, + 0.789, + 0.576 + ], + "angle": 0, + "content": "Explain your reasoning step-by-step for each question before answering. Give your final answer in the format \\The answer is therefore \\(\\)\". Failure to comply with the answer formatting will result in no credit." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.576, + 0.791, + 0.882 + ], + "angle": 0, + "content": "Of the eight students\\u2014George, Helen, Irving, Kyle, Lenore, Nina, Olivia, and Robert\\u2014in a seminar, exactly six will give individual oral reports during three consecutive days\\u2014Monday, Tuesday, and Wednesday. Exactly two reports will be given each day\\u2014one in the morning and one in the afternoon\\u2014according to the following conditions: Tuesday is the only day on which George can give a report. Neither Olivia nor Robert can give an afternoon report. If Nina gives a report, then on the next day Helen and Irving must both give reports, unless Nina's report is given on Wednesday.Q: Which one of the following could be the schedule of the students' reports? Answer Choices: (A)Mon. morning: Helen; Mon. afternoon: Robert Tues. morning: Olivia; Tues. afternoon: Irving Wed. morning: Lenore; Wed. afternoon: Kyle (B)Mon. morning: Irving; Mon. afternoon: Olivia Tues. morning: Helen; Tues. afternoon: Kyle Wed. morning: Nina; Wed. afternoon: Lenore (C)Mon. morning: Lenore; Mon. afternoon: Helen Tues. morning: George; Tues. afternoon: Kyle Wed. morning: Robert; Wed. afternoon: Irving (D)Mon. morning: Nina; Mon. afternoon: Helen Tues. morning: Robert; Tues. afternoon: Irving Wed. morning: Olivia; Wed. afternoon: Lenore (E)Mon. morning: Olivia; Mon. afternoon: Nina Tues. morning: Irving; Tues. afternoon: Helen Wed. morning: Kyle; Wed. 
afternoon: George Let's think step by step.<|eot_id|><|start_header_id|>assistant<|end_header_id|>" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.51, + 0.961 + ], + "angle": 0, + "content": "42" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "title", + "bbox": [ + 0.199, + 0.106, + 0.614, + 0.121 + ], + "angle": 0, + "content": "AGIEval LSAT AR zero-shot CoT response for Llama 3.1 70B" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.135, + 0.8, + 0.702 + ], + "angle": 0, + "content": "To determine which one of the following could be the schedule of the students' reports, let's analyze each answer choice in light of the given conditions. First, let's recall the conditions: 1. Exactly six students will give reports over three days. 2. Tuesday is the only day George can give a report. 3. Neither Olivia nor Robert can give an afternoon report. 4. If Nina gives a report, then on the next day Helen and Irving must both give reports, unless Nina's report is given on Wednesday. Let's evaluate each answer choice: (A) Mon. morning: Helen; Mon. afternoon: Robert Tues. morning: Olivia; Tues. afternoon: Irving Wed. morning: Lenore; Wed. afternoon: Kyle - This choice does not violate any conditions directly. However, we need to check if it aligns with all the rules, especially considering the Nina rule, which isn't directly applicable here since Nina isn't scheduled. George's requirement is also not met since George isn't scheduled on Tuesday. (B) Mon. morning: Irving; Mon. afternoon: Olivia Tues. morning: Helen; Tues. afternoon: Kyle Wed. morning: Nina; Wed. afternoon: Lenore - This choice violates the condition that if Nina gives a report, then on the next day Helen and Irving must both give reports, unless Nina's report is given on Wednesday. Since Nina's report is on Wednesday, this condition is technically not violated, but we need to ensure all other conditions are met. George is not scheduled on Tuesday, which is a requirement. (C) Mon. morning: Lenore; Mon. afternoon: Helen Tues. morning: George; Tues. afternoon: Kyle Wed. morning: Robert; Wed. afternoon: Irving - This choice meets the condition that George gives a report on Tuesday. It also does not violate any other conditions directly. However, we need to ensure that the Nina rule isn't applicable or is met, which in this case, it's not applicable since Nina isn't scheduled. (D) Mon. morning: Nina; Mon. afternoon: Helen Tues. morning: Robert; Tues. afternoon: Irving Wed. morning: Olivia; Wed. afternoon: Lenore - This choice violates the condition that if Nina gives a report, then on the next day Helen and Irving must both give reports. Here, Helen is already scheduled on the same day as Nina, and while Irving is scheduled the next day, the condition specifies both Helen and Irving must give reports the next day, which is not the case since Helen's report is on the same day as Nina's. (E) Mon. morning: Olivia; Mon. afternoon: Nina Tues. morning: Irving; Tues. afternoon: Helen Wed. morning: Kyle; Wed. afternoon: George - This choice violates the condition that Tuesday is the only day George can give a report, as George is scheduled on Wednesday. 
Given the analysis, the only choice that does not directly violate any of the given conditions and meets the specific requirements (especially considering the placement of George and the handling of the Nina rule where applicable) is: The answer is therefore (C)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "43" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "code_caption", + "bbox": [ + 0.199, + 0.105, + 0.524, + 0.121 + ], + "angle": 0, + "content": "MATH zero-shot CoT prompt for Llama 3.1 70B" + }, + { + "type": "code", + "bbox": [ + 0.197, + 0.133, + 0.8, + 0.525 + ], + "angle": 0, + "content": "<|begin_of_text>|<|start_header_id>|>user<|end_header_id>|> \nSolve the following math problem efficiently and clearly: \n- For simple problems (2 steps or fewer):\\nProvide a concise solution with minimal explanation. \n- For complex problems (3 steps or more): \nUse this step-by-step format:\\n## Step 1: [Concise description]\\n[Brief explanation and calculations] \n# Step 2: [Concise description] \n[Brief explanation and calculations] \n... \nRegardless of the approach, always conclude with: \nTherefore, the final answer is: \\(\\mathbb{S}\\backslash\\) boxed{answer}$. I hope it is correct. \nWhere [answer] is just the final number or expression that solves the problem. \nProblem: Chris labels every lattice point in the coordinate plane with the square of the distance from the point to the origin (a lattice point is a point such that both of its coordinates are integers). How many times does he label a point with the number $25$?<|eot_id>|><|start_header_id|>assistant" + }, + { + "type": "code_caption", + "bbox": [ + 0.199, + 0.657, + 0.533, + 0.672 + ], + "angle": 0, + "content": "MATH zero-shot CoT response for Llama 3.1 70B" + }, + { + "type": "code", + "bbox": [ + 0.197, + 0.686, + 0.8, + 0.91 + ], + "angle": 0, + "content": "## Step 1: Understand the problem Chris labels each lattice point in the coordinate plane with the square of the distance from the point to the origin. We need to find how many times he labels a point with the number 25. ## Step 2: Determine the equation for the distance The distance from a point (x, y) to the origin (0, 0) is given by the equation \\(\\mathbb{S}\\backslash \\mathrm{sqrt}\\{\\mathbf{x}^{\\wedge}2 +\\) \\(y^{\\wedge}2\\}\\). Since Chris labels the points with the square of the distance, we are looking for points where \\(\\$ x^{\\wedge}2 + y^{\\wedge}2 = 25\\). ## Step 3: Find the integer solutions to the equation We need to find all integer values of x and y that satisfy the equation \\(\\$ x^{\\wedge}2 + y^{\\wedge}2 = 25\\). The possible values of \\(\\$ x^{\\wedge}2\\) and \\(\\$ y^{\\wedge}2\\) that add up to 25 are: (0, 25), (25, 0), (1, 24), (24, 1), (4, 21), (21, 4), (9, 16), (16, 9). ## Step 4: Count the number of solutions However, we need to consider that some of these pairs have negative counterparts, and we should also count those. For example, (-0, 5) and (0, -5) are also solutions, as well as (-5, 0) and (5, 0). Similarly, (-3, 4), (3, -4), (-4, 3), and (4, -3) are also solutions. So, we have a total of 12 solutions. The final answer is: \\)\\backslash$ boxed{12}." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "44" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "title", + "bbox": [ + 0.198, + 0.105, + 0.654, + 0.121 + ], + "angle": 0, + "content": "CommonsenseQA zero-shot direct answer prompt for Llama 3.1 70B" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.134, + 0.546, + 0.15 + ], + "angle": 0, + "content": "<|start_header_id|>system<|end_header_id|>" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.162, + 0.79, + 0.232 + ], + "angle": 0, + "content": "You are a helpful AI assistant that will answer reasoning questions. You will always say at the end \\\"Answer: \\\". You must only pick one answer and you must end your response with \\\"Answer: \\\" every time!<|eot_id>|<|start_header_id|>user<|end_header_id|>" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.244, + 0.773, + 0.273 + ], + "angle": 0, + "content": "Question: Where would you find magazines along side many other printed works?" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.287, + 0.357, + 0.371 + ], + "angle": 0, + "content": "Answer Choices: \n(A) doctor \n(B) bookstore \n(C) market \n(D) train station \n(E) mortuary" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.383, + 0.797, + 0.44 + ], + "angle": 0, + "content": "Only write the answer. Write the answer in the following format: \\(\"Answer:\"Your answer>\\)\" . You must always give an answer. You may only pick one answer choice, if you think multiple are correct only pick the one you think is best.<|eot_id|><|start_header_id|>assistant<|end_header_id|>" + }, + { + "type": "title", + "bbox": [ + 0.199, + 0.489, + 0.663, + 0.506 + ], + "angle": 0, + "content": "CommonsenseQA zero-shot direct answer response for Llama 3.1 70B" + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.52, + 0.211, + 0.53 + ], + "angle": 0, + "content": "B" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "45" + } + ] +] \ No newline at end of file diff --git a/2025/To CoT or not to CoT_ Chain-of-thought helps mainly on math and symbolic reasoning/78080855-33d6-4037-9b8c-edc307a2e575_origin.pdf b/2025/To CoT or not to CoT_ Chain-of-thought helps mainly on math and symbolic reasoning/78080855-33d6-4037-9b8c-edc307a2e575_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..549787582372372d58dcb767ce3ed7a31cf506ee --- /dev/null +++ b/2025/To CoT or not to CoT_ Chain-of-thought helps mainly on math and symbolic reasoning/78080855-33d6-4037-9b8c-edc307a2e575_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0ab9c6f9845d42f37cc96961bce07a0ce78e3c381f00c9324516b0107c9c9793 +size 6536138 diff --git a/2025/To CoT or not to CoT_ Chain-of-thought helps mainly on math and symbolic reasoning/full.md b/2025/To CoT or not to CoT_ Chain-of-thought helps mainly on math and symbolic reasoning/full.md new file mode 100644 index 0000000000000000000000000000000000000000..2b5f57b18ec9600083d8ff68e35dfaae4fbe7384 --- /dev/null +++ b/2025/To CoT or not to CoT_ Chain-of-thought helps mainly on math and symbolic reasoning/full.md @@ -0,0 +1,549 @@ +# TO COT OR NOT TO COT? 
CHAIN-OF-THOUGHT HELPS MAINLY ON MATH AND SYMBOLIC REASONING + +Zayne Sprague\*, Fangcong Yin\*, Juan Diego Rodriguez\*, Dongwei Jiang\*, Manya Wadhwa\*, Prasann Singhal\*, Xinyu Zhao\*, Xi Ye $^{\text{心}}$ , Kyle Mahowald\*, Greg Durrett\* + +$\spadesuit$ The University of Texas at Austin, $\diamond$ Johns Hopkins University, $\diamond$ Princeton University zaynesprague@utexas.edu + +# ABSTRACT + +Chain-of-thought (CoT) via prompting is the de facto method for eliciting reasoning capabilities from large language models (LLMs). But for what kinds of tasks is this extra "thinking" really helpful? To analyze this, we conducted a quantitative meta-analysis covering over 100 papers using CoT and ran our own evaluations of 20 datasets across 14 models. Our results show that CoT gives strong performance benefits primarily on tasks involving math or logic, with much smaller gains on other types of tasks. On MMLU, directly generating the answer without CoT leads to almost identical accuracy as CoT unless the question or model's response contains an equals sign, indicating symbolic operations and reasoning. Following this finding, we analyze the behavior of CoT on these problems by separating planning and execution and comparing against tool-augmented LLMs. Much of CoT's gain comes from improving symbolic execution, but it underperforms relative to using a symbolic solver. Our results indicate that CoT can be applied selectively, maintaining performance while saving inference costs. Furthermore, they suggest a need to move beyond prompt-based CoT to new paradigms that better leverage intermediate computation across the whole range of LLM applications1. + +![](images/38c07fa5ea4155601f6a3a8985cb6d7c7786e3a335ede3c9f4558f388673790c.jpg) +Figure 1: Left: meta-analysis of CoT literature; each point is a reported delta of CoT over direct answering for some (LLM, task) pair. Right: average performance of using zero-shot CoT v.s. direct answer prompts across five general reasoning categories, covering 20 datasets with 14 LLMs evaluated on each. In both sets of results, math and other kinds of symbolic reasoning are the domains that consistently see substantial improvements from CoT (red dotted line indicates the mean improvement from CoT across experiments). + +# 1 INTRODUCTION + +Chain-of-thought (CoT) (Nye et al., 2022; Wei et al., 2022) has become a widely used prompting technique for eliciting reasoning from language models. CoT can provide human-readable explanations of how problems are solved (Joshi et al., 2023; Lanham et al., 2023), but most frequently it is invoked to improve an LLM's ability to answer complex questions via intermediate computation (Madaan & Yazdanbakhsh, 2022; Wang et al., 2023a; Dziri et al., 2023). Current post-training schemes for LLMs heavily infuse CoT capabilities into models: systems like ChatGPT or Llama 3.1 default to CoT when given reasoning problems (OpenAI, 2023; Dubey et al., 2024). + +CoT has seen widespread usage, but it is most heavily explored in the domain of mathematical reasoning (Zhou et al., 2023a; Fu et al., 2023; Chae et al., 2024; Xu et al., 2024b; Qi et al., 2024). In fact, many "reasoning" methods for LLMs are evaluated only in the math domain; for instance, Lightman et al. (2024) frame their paper as "complex multi-step reasoning" and Mixtral-Large2's release cited effort "enhancing the model's reasoning capabilities", but performance is only reported on GSM8K and MATH. 
CoT is reported to be effective across a wide range of studies, but many of these studies focus on a narrow slice of the task space. In areas beyond math, results show that CoT is not as useful (Kambhampati et al., 2024a) or can even hurt performance (Wang et al., 2024). + +In this work, we aim to evaluate where prompt-based CoT helps and why. We begin with a systematic meta-analysis of recent literature that reports performance of CoT versus direct answering (DA). We then augment this picture by conducting experiments on 20 datasets and 14 contemporary LLMs across zero-shot and few-shot prompt settings. Finding 1: CoT only helps substantially on problems requiring mathematical, logical, or algorithmic reasoning. Figure 1 shows this holds both across the literature and our own experiments. We find only a few cases of large gain in other kinds of tasks, and many of these outliers feature some component of symbolic reasoning. For instance, on MMLU (Hendrycks et al., 2021a) and MMLU Pro (Wang et al., 2024), we analyze the improvements from CoT and find that CoT only gives benefit on math slices of the dataset. As much as $95\%$ of the total performance gain from CoT on MMLU is attributed to questions containing “ $=$ ” in the question or generated output. For non-math questions, we find no features to indicate when CoT will help. + +How can we better understand why CoT improves on these questions and only these questions? The math and formal logical reasoning datasets we consider can be broken down into two stages of processing: a planning step (e.g., parsing a problem into equations) and an execution step (building intermediate outputs and working towards a solution) (Ye et al., 2023; Wang et al., 2023b; Sun et al., 2024). Finding 2: CoT primarily helps with the execution step that performs computation and symbolic manipulation, but falls short of what LLMs with tool augmentation can do. We find that LMs prompted with CoT can generate executable formal solution plans and execute those plans better than direct answering. But using LMs to generate a solution plan and then using an external symbolic solver to solve the plan outperforms using CoT for both steps for these tasks. + +These results paint a picture that CoT's utility is often circumscribed by tool augmentation: on problems where CoT helps, we already have more powerful tools than CoT that we can employ, and on "soft reasoning" problems like commonsense where no tools exist, we see limited benefit from CoT. This characterization has two major implications. First, CoT is unnecessary for many problems where it is widely employed: there exist more efficient prompting strategies that yield similar performance for much lower inference cost. Second, we see a critical need to move beyond prompt-based CoT to more sophisticated approaches based on search, interacting agents, or models more heavily fine-tuned for CoT. Future work can explore how intermediate computation can be better used to solve challenging problems outside of the math and symbolic reasoning domains. + +# 2 BACKGROUND: CHAIN-OF-THOUGHT + +The tasks we consider in this work consist of a question $\mathbf{q} \in \Sigma^{*}$ for a vocabulary $\Sigma$ and an answer $a \in \mathcal{L}(\mathbf{q})$ for a label set $\mathcal{L}(\mathbf{q})$ . $\mathcal{L}(\mathbf{q})$ can consist of a data type like boolean or integer, classification labels, or problem-dependent labels like names of entities from $\mathbf{q}$ . 
One exception that we still + +explore is BiGGen Bench (Kim et al., 2024), which instead relies on an LLM-as-a-judge (Dubois et al., 2023; Zheng et al., 2024b) to provide a label for generated long-form responses. + +Prompting and chain-of-thought for reasoning A large language model places distributions over strings $p(\mathbf{y}) = \prod_{i=1}^{n} p_{\mathrm{LM}}(y_i)$ where $\mathbf{y} \in \Sigma^*$ . In practice, we can interpret these as conditional distributions $p(\mathbf{y} \mid \mathbf{x})$ where $\mathbf{x}$ is a user's prompt. Typical invocation of an LLM involves forming a prompt $\mathcal{I}(\mathbf{q})$ that wraps the question with additional instruction, then drawing a sample response $\tilde{\mathbf{y}} \sim p(\mathbf{y} \mid \mathcal{I}(\mathbf{q}))$ , and finally returning $a = \text{extract}(\tilde{\mathbf{y}})$ using some kind of answer extractor. + +For the tasks we consider in this work, the output $\tilde{\mathbf{y}}$ can take one of two forms. A direct answer only contains a string realization of $a$ ; e.g., $\mathbf{y} = (-185,4)$ which is tokenized as the answer $a = 1854$ . A chain of thought is a longer sequence $\mathbf{y}$ including other tokens beyond the answer, e.g., $\mathbf{y} = (-185,6,-\mathrm{minus},-2,-\mathrm{equals},-185,4)$ . In both cases, the extract function must parse and detokenize the output; in CoT, there is some extra work to spot where the answer is placed. + +Our prompts can explicitly encourage use of direct answer or chain of thought as strategies, which we denote as $\mathcal{I}_{\mathrm{da}}$ and $\mathcal{I}_{\mathrm{cot}}$ . For eliciting CoT, this includes strategies like telling a model to "think step by step" (Kojima et al., 2022). For directly answering a question, a prompt may say "immediately generate the answer". We track the average location of the answer in the generated output for both CoT and direct prompts in Appendix F.3 to ensure that direct answer prompts give the answer early in the output. We also ensure that extract can parse answers from the generated output for each model, prompt, and dataset combination used in our experiments, tailoring the extract function as needed to ensure low unparseable rates for each model and task. All prompts and outputs per dataset per model have been uploaded to Huggingface and we include examples of some of our prompts in the Appendix J. We also experiment with few-shot CoT prompts, which we find perform similarly to zero-shot prompts; details about these are given in Appendix E. + +Symbolic reasoning Of key importance to this work is whether problems feature symbolic reasoning or not. We consider a problem to be symbolic if it can be grounded in a natural, well agreed-upon formal system. “ $12 \times 4$ ” is an example of a symbolic problem, which can be grounded in mathematics. Other systems include first-order logic (Saparov & He, 2023; Hua et al., 2024) or planning languages (Liu et al., 2023a; Valmeekam et al., 2023). Formally, for symbolic problems, we define a function $f$ that acts as a map that produces some symbolic expression $S = f(\mathbf{q})$ from the question. $S$ can be used as input for a solver to derive an answer, $\hat{a} = \operatorname{solve}(S)$ . + +Conversely, a problem like where on a river can you hold a cup upright to catch water on a sunny day? from CommonsenseQA (Talmor et al., 2019) is non-symbolic by our definition. 
While this problem could be formalized with some kind of predicate logic (Zhou et al., 2022; Quan et al., 2024; Zhou et al., 2024) or grounded in some kind of physical simulation (Hao et al., 2023; Wong et al., 2023), there is not a natural nor well agreed-upon framework for solving it. + +We view non-symbolic to symbolic reasoning as a spectrum. MuSR (Sprague et al., 2024) is a "semisymbolic" dataset in that it does contain an underlying formal system (e.g., for its murder mysteries portion, the notion that $\mathrm{motive}(X)\wedge \mathrm{means}(X)\wedge \mathrm{opportunity}(X)\Rightarrow \mathrm{murderer}(X))$ , but also involves substantial commonsense reasoning that does not map onto a formal system. In these cases, we can still form $S = f(\mathbf{q})$ , but $f$ must rely heavily on a language model and instantiate new information for $S$ that is not directly represented in $\mathbf{q}$ . + +Central claim Figure 1 shows that there are a large number of positive results on CoT reported in the literature. Informally, we believe many readers of the literature to hold the following view: $\mathcal{I}_{\mathrm{cot}}$ will outperform $\mathcal{I}_{\mathrm{da}}$ on nearly all reasoning problems, whether those problems involve symbolic or non-symbolic reasoning. Our evidence does not support this conjecture. We will show that this performance boost is strongest for symbolic and semi-symbolic tasks, while giving little to no improvement (or even hurting performance) on non-symbolic tasks. + +Table 1: A few categories for experimental comparisons. Full list in Appendix B. + +
| Category | Description |
| --- | --- |
| Symbolic and algorithmic | Tasks involving symbol manipulation which can be solved by executing a program. This includes entity tracking datasets (e.g., SCONE, Coin Flip) and algorithmic tasks (e.g., BBH word sorting or finding shortest paths in a graph). |
| Math | Tasks requiring mathematical reasoning, from grade-school math to advanced mathematics, including physics questions. |
| Logical reasoning | Tasks designed to test for logical reasoning, whether deductive (Saparov & He, 2023, PrOntoQA), inductive (Bowen et al., 2024) or analogical (Ma et al., 2024) reasoning, including syllogisms and logical puzzles. |
| Encyclopedic knowledge | Tasks requiring expert-level in-depth knowledge beyond mere commonsense, usually in an open-book setting. |
| Mixed datasets | Datasets containing a variety of tasks, such as BIG-Bench Hard (BBH) or MMLU. |
| ... | ... |
+ +# 3 RESULTS FROM THE LITERATURE + +Criteria and Process We investigate all papers from ICLR 2024, a representative ML venue, and two representative NLP venues, EACL 2024 and NAACL 2024 (including Findings and Workshop papers). This resulted in 4,642 papers total that filtered using automatic and manual methods to papers including experiments comparing chain-of-thought, $\mathcal{I}_{\mathrm{cot}}$ , against direct answering prompts, $\mathcal{I}_{\mathrm{direct}}$ . A total of 110 papers were found that matched our criteria with 1,218 experimental comparisons. We then grouped the comparisons by the types of tasks and datasets being evaluated. More details on our automatic and manual filtering, as well as our categorization, can be found in Appendix A and B. + +Results Figure 2 shows the distribution of CoT deltas (CoT prompt minus the direct answer prompt performance) across our categorization of different task types found in the literature. Compared to Figure 1, we take the mean results per paper per category, indicated by blue dots, showing the trend across papers in the literature. The categories are ranked in order of ascending median CoT delta. The three categories which benefited the most from CoT are symbolic reasoning, math, and logical reasoning, with average improvements of 14.2, 12.3, 6.9, respectively. Average performance on these top three tasks with CoT was 56.9, whereas performance without CoT was 45.5. For other categories, the average performance with CoT was 56.8, compared to 56.1 without CoT. We do not consider this small improvement a victory for CoT. CoT involves more computation than direct answering, and a truly fair comparison between the methods should match the compute of the two methods, e.g., assembling across multiple prompts. + +Do any non-math datasets benefit from CoT? On the right side of Figure 2, we show the top 10 outliers from our observed trend, namely papers with high CoT deltas averaged across experiments in tasks other than math, symbolic, or logical reasoning. Although not categorized as math or logic, several of these are related to logical, mathematical or symbolic reasoning in some way. From this list, the dataset which benefits the most most from CoT is BIG-bench Hard (BBH) (Suzgun et al., 2023), a benchmark consisting largely of problems requiring algorithmic, arithmetic or logical reasoning. For instance, BIG-bench Navigate is a spatial reasoning task, but relies heavily on a mathematical primitive of counting steps taken to derive a final conclusion. Similarly, while BIG-bench Temporal is a temporal reasoning task (answering questions about when certain events could have occurred), it requires deductive reasoning to solve. In addition, Legal Argument Reasoning (SemEval-2024 Task 5) (Bongard et al., 2022) was categorized as context-aware QA, but also requires substantial reasoning ability. Finally, MMLU-Moral Scenarios (Hendrycks et al., 2021a) requires answering two independent questions at once, which essentially involves a symbolic combination of two simpler questions. + +![](images/190393a2874564aba0ecc379cb8f7318fa75276598feeed1d23850a9185d0ff8.jpg) +CoT Performance Improvement Across Tasks Aggregated by Paper and Category +Figure 2: Results from our meta-analysis (grey dots) aggregated by paper and category (blue dots). + +There are a few outliers that less clearly follow the trend. 
ScienceQA (Lu et al., 2022) consists of multiple choice questions across a range of natural and social science disciplines, though it is hard to interpret gains without knowing breaking down performance by subject or question type. The dialogue evaluation dataset from Jia et al. (2024) sees large improvements with CoT, but this is a proprietary dataset, and we note that other essay scoring results in our meta-analysis (Li et al., 2024; Stahl et al., 2024) did not show improvements with CoT. Other non-math, symbolic or logical datasets that benefit from CoT are Commitment Bank (de Marneffe et al., 2019) and the task of eliciting verbalized confidence (Xiong et al., 2024). Nevertheless, these are exceptions to the rule. The majority of the reported benefits from using CoT in the NLP and ML literature comes from math or math-related tasks. + +# 4 RESULTS FROM EXPERIMENTS + +# 4.1 EXPERIMENTAL SETUP + +Dataset, Models, Prompts All datasets, models, and prompts we evaluate over can be found in detail in the tables 3, 4, and 5 of Appendix C. We restricted our experiments to English models commonly used and benchmarked on general reasoning datasets. Our datasets include those which are widely used in CoT and reasoning literature, including a mix of non-symbolic, semisymbolic, and symbolic reasoning. They span different formats, including multiple-choice, short-answer, and free-response; however, most of these datasets are multiple choice or short answer, as CoT is not typically used in long-form response settings. We also categorize each dataset into a larger category of reasoning required to solve it: Commonsense, Knowledge, Symbolic, Mathematical, and Soft Reasoning. We define Soft Reasoning as questions relying on commonsense and natural language but going beyond simple inferences about these statements. Finally, we explore several prompting strategies for eliciting reasoning from language models, as past work has emphasized the importance of the prompt (Yang et al., 2024). However, we generally found slight performance differences; see Appendix D for details. We therefore focus on prompts similar to Kojima et al. (2022) and Wei et al. + +![](images/e1e540c0c8c5b46aa7692952347b23ada9c5a504bb629430beda49a0a2daf1ba.jpg) +Figure 3: Left: Performance gain from using CoT for each reasoning category. Right: Performance gain from using CoT for each dataset, averaged across models and broken out across 5 representative models. Red lines indicate median improvement. In both plots we see a consistent trend: most improvements from using CoT are from math and symbolic reasoning. + +(2022) for zero-shot and few-shot settings, respectively, with alterations to improve the model's ability to produce desired behavior (i.e., formats that allow for easily parsed answers). We upload all our prompts and outputs for each model for each prompting strategy on Huggingface.4. + +Implementation Details We use a high-throughput inference package, vLLM (Kwon et al., 2023), for the model inference process. We use greedy decoding on all models. Our prompts are taken from the Llama 3.1 evaluations when available (Dubey et al., 2024), and minor adjustments are made to unify prompting strategies. For other datasets, we either use the standard prompt for the dataset from the corresponding original paper or implement our own prompt. Our answer parser (extract) is tailored to each dataset and model. Specific details about each dataset, its prompts, and answer extractor can be found in Appendix C. 
# 4.2 RESULTS

Where does zero-shot CoT improve over direct prompts? On datasets that require math (MATH, GSM8K) or formal logic (ContextHub, and MuSR to a lesser degree) to answer the problem.

Figure 3 on the left shows the average CoT performance improvement for each reasoning category from Figure 1 (right); raw numbers can be found in Table 6 of the Appendix. On the right, Figure 3 shows the performance gain from using CoT for each dataset, averaged across all models and for a selection of individual models. On non-symbolic reasoning categories and datasets, specifically those that contain questions primarily involving commonsense (CSQA, PIQA, SiQA), language understanding (WinoGrande), and reading comprehension (AGI LSAT, ARC-Easy, ARC-Challenge), there is little to no separation between the performance of zero-shot CoT and zero-shot direct answer. Despite these datasets involving reasoning, CoT does not yield improvement.

By contrast, the mathematical and symbolic categories see much larger improvements, as do the symbolic and many semi-symbolic datasets. MATH and GSM8K show gains as large as $41.6\%$ and $66.9\%$ , respectively. Semi-symbolic datasets like ContextHub and MuSR Murder Mysteries show moderate gains. These datasets require the application of logical rules to reach the answer, e.g., first-order logic parsed from simple natural language (ContextHub) or more complex commonsense statements (MuSR Murder Mysteries). All results are shown in Appendix F.1, along with a full list of numeric results for both CoT and direct answer prompting in Table 7. We also explored the few-shot setting and found it had little impact on when CoT will help; see Appendix E.

Does the answer format impact where CoT will help? Not much. The free-response capabilities required for BiGGen Bench may not benefit from pre-planning.

Many of the commonly-used datasets for problems other than math are multiple choice. We highlight here that CoT has similar performance to direct answer across models for two datasets that are not multiple-choice and contain varying levels of non-symbolic reasoning. First, MuSiQue (Trivedi et al., 2022) is a short-form QA task requiring multi-hop reasoning. We consider this a semi-symbolic dataset as the questions have an explicit multi-hop structure. Because answer spans in MuSiQue can be paraphrased in many different ways, we use GPT-4o to judge if two answer spans are equivalent. Despite being semi-symbolic, we see no overall improvement from CoT.

Second, BiGGen Bench (Kim et al., 2024) uses free-form responses as the answer to a question, and an LLM-as-a-judge is used to evaluate these responses on a scale of 1 to 5. Because free-form responses blur the lines between CoT and direct answering, we create a new prompt that asks the language model to plan the free response before giving it. We then only pass the free response to the judge (GPT-4o-mini in our case) with the prompt from Kim et al. (2024). We also filter out any questions that explicitly state "Think step-by-step". We report the performance on BiGGen Bench as the number of times a response receives a score of 4 or better. Despite the benchmark including many reasoning questions (including several categories of math) and other categories, such as planning, we only see a mild improvement here. Because previous experiments show CoT helping on similar types of questions in the QA format, the lack of similar improvements here could imply that pre-planning is insufficient for unlocking reasoning capabilities in the LLM. Future work is needed to confirm this.
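Both of these evaluations lean on an LLM judge. As one example, the MuSiQue answer-equivalence check can be sketched as follows, assuming the OpenAI Python client; the judge prompt wording here is our own illustration, not the exact prompt we used.

```python
# Sketch of an answer-span equivalence judge in the style described above.
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

def spans_equivalent(question: str, gold: str, predicted: str) -> bool:
    """Ask GPT-4o whether two short answer spans refer to the same thing."""
    resp = client.chat.completions.create(
        model="gpt-4o",
        temperature=0.0,
        messages=[{
            "role": "user",
            "content": (
                f"Question: {question}\n"
                f"Gold answer: {gold}\n"
                f"Predicted answer: {predicted}\n"
                "Do these two answers refer to the same thing? Reply Yes or No."
            ),
        }],
    )
    return resp.choices[0].message.content.strip().lower().startswith("yes")
```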
Are the gains in Knowledge, Soft Reasoning, and Commonsense significant? Mostly no, except for MMLU, StrategyQA, and MuSR.

We tested the significance of the improvements from CoT on the 13 datasets in the Knowledge, Soft Reasoning, and Commonsense reasoning categories using paired bootstrapping to assess whether CoT gives a significant improvement. To account for multiple comparisons, we applied a Bonferroni correction, setting the p-value to 0.00027 to account for the 14 models and 13 datasets. About $32\%$ (59) of the model-dataset comparisons that show a benefit in these three reasoning categories were significant. Nearly half of these comparisons (26) are on MMLU and MMLU Pro. On these datasets, we find that CoT is mainly helping on math-related questions. StrategyQA and MuSR also received a consistent performance boost, across 10 and 6 models respectively. StrategyQA is often used to benchmark reasoning methods and is built specifically to benefit from methods that decompose the question into steps, so a gain in performance is not unexpected. MuSR, similarly, was built to require multiple steps of complex natural language reasoning, which may benefit from CoT. The remaining significant comparisons are spread across the other datasets and models.

Why do MMLU and MMLU Pro get a boost? MMLU and MMLU Pro contain many different questions requiring different types of reasoning. We separated MMLU and MMLU Pro questions into two bins, those related to math and those not related to math, by checking whether the question's text or the generated response from the LLM includes an "=". Figure 4 shows that a majority of the performance gain seen on MMLU and MMLU Pro comes from the math slices of each dataset. See more details in Appendix G.

![](images/3a5204f7a54b1eee1699edc2a7d6768cf0cb8700b42d38a92db5a2dc23c86aea.jpg)
Figure 4: CoT deltas between MMLU and MMLU Pro performance when a question or generated response contains an "=" (With =) or not (Without =). We filter out any questions that do not result in a final answer (degeneration, etc.). CoT primarily helps on the pairs of questions and generations that contain an "=", which indicates math-related questions.
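The binning heuristic itself is deliberately simple. Here is a minimal sketch; the function name and examples are ours, for illustration.

```python
# The "=" heuristic behind Figure 4: a (question, generation) pair is placed
# in the math-related bin iff either side contains an "=".
def is_math_related(question: str, generation: str) -> bool:
    return "=" in question or "=" in generation

# An algebra item lands in the "With =" bin; a knowledge item does not.
assert is_math_related("Solve x + 3 = 7.", "x = 4, so the answer is (B).")
assert not is_math_related("Who wrote Hamlet?", "The answer is Shakespeare.")
```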
# 5 STRENGTHS AND WEAKNESSES OF COT AT FORMAL REASONING

Previous sections establish that CoT primarily helps with symbolic reasoning tasks, but not why. Many symbolic and semi-symbolic tasks can be broken down into two stages (Ye et al., 2023; Pan et al., 2023; Jiang et al., 2024): planning, producing either a formal or an informal specification via prompting (Sun et al., 2024; Wang et al., 2023b), and execution, using the same LM or external solvers. In this section, we attribute the performance gains from CoT on symbolic tasks to these two stages.

Given a question that requires symbolic reasoning, we define the planning stage as extracting all variables from the context into a formal specification and defining their relations. The execution stage uses a solver that takes as input a plan and can be run in an orderly fashion to derive the final answer. Using our notation from Section 2, let $f(\mathbf{q}) = \mathcal{I}_{\mathrm{planning}}^{m}(\mathbf{q})$ be a mapping of the question $\mathbf{q}$ to a symbolic plan $S_{\mathrm{plan}}$ that can be executed by the language model or by an external symbolic solver, $\hat{a} = \mathrm{solve}(S_{\mathrm{plan}})$ , where $\hat{a}$ is the final answer for $\mathbf{q}$ .

![](images/a174b5c5f50875f9f7bc69fcc6118cec554fc9fed20fb542bddadf8601a330f4.jpg)
Figure 5: Prompt variants that separate planning and execution for GSM8K (example question: "Courtney said that there were 48 people, but Kelly said that Courtney had overstated the number by $20\%$ . If Kelly was right, how many people were there?"). For all prompt variants besides direct answer and CoT (not shown), we few-shot prompt an LLM to first generate a Python program as a solution plan. For Plan + Direct Solver, the LLM is prompted to directly give an answer from the plan; for Plan + CoT Solver, the LLM is prompted to solve the plan step-by-step with CoT and give an answer; for Plan + Tool Solver, we feed the plan into a Python interpreter.

By separating planning and execution in this way, we can test how much a language model gains from only having a plan, from having a plan and solving it with CoT, or from having a plan and then solving it with an external symbolic solver. Given a plan $S_{\mathrm{plan}} \sim \mathcal{I}_{\mathrm{planning}}^{m}(\mathbf{q})$ , we compare the performance of the settings below to evaluate at which stage the LM is most effective and where it falls short.

# 5.1 SETTINGS EVALUATED

Settings 1 and 2: Few-shot direct answer and CoT: We use the few-shot direct answer and CoT prompts from Section 4.1 as baselines. Figure 5 includes an example of each setting on GSM8K.

Settings 3 and 4: Plan + Direct Solver and Plan + CoT Solver: Here we draw inspiration from Xu et al. (2024a) and generate a symbolic plan using the same strategy as Ye et al. (2023). Specifically, we use a few-shot prompt $\mathcal{I}_{\mathrm{planning}}^m$ to generate a formal specification $S_{\mathrm{plan}}$ that should be executable by a symbolic solver. In the same prompt, LMs are asked to solve their generated specification $S_{\mathrm{plan}}$ and derive the final answer $\tilde{\mathbf{y}} \sim p(\mathbf{y} \mid \mathcal{I}_{\mathrm{da}}(S_{\mathrm{plan}}))$ , either directly giving the answer after generating the specification (*Plan + Direct Solver*) or providing step-by-step explanations and tracking of intermediate steps for the derivation (*Plan + CoT Solver*). In particular, $S_{\mathrm{plan}}$ is a Python program for the math datasets and a set of first-order logic specifications for the logical reasoning datasets; an illustrative plan of this form is sketched after Figure 6.

![](images/e6aa2aaa042764a5eb4e3bba9942d2c9afc160099caf5058485deba84fcaa205.jpg)
Figure 6: Performance of prompt variants that separate planning and execution for math and logical reasoning datasets. Despite outperforming direct answer for solving a formal plan and deriving the final answer, CoT is still limited in performing symbolic computations: there is a large performance boost from Plan + Tool Solver over CoT and Plan + CoT Solver on average across all models.
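To make the plan formalism of Settings 3 and 4 concrete, here is an illustrative PAL-style $S_{\mathrm{plan}}$ for the GSM8K question in Figure 5. This is our own rendering, not an actual model generation.

```python
# An illustrative S_plan for the Figure 5 question, written as a PAL-style
# Python program: Courtney claims 48 people, Kelly says that is a 20%
# overstatement, so the true count satisfies actual * 1.2 = 48.
def solution():
    reported_people = 48        # Courtney's claim
    overstatement = 0.20        # Kelly: Courtney overstated by 20%
    actual_people = reported_people / (1 + overstatement)
    return int(actual_people)   # 48 / 1.2 = 40

print(solution())  # a tool solver just executes this; a CoT solver must trace it
```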
Setting 5: Plan + Tool Solver: We then evaluate how effective CoT can be at performing symbolic computations compared with external symbolic solvers. Following prior work on augmenting LMs with tools for math and logic questions (Ye et al., 2023; Pan et al., 2023; Gao et al., 2023; Chen et al., 2023), we generate $S_{\mathrm{plan}}$ the same way as in the CoT Solver setting, but now feed the plan into a symbolic solver (a Python interpreter or an SMT solver), such that $\hat{a} = \operatorname{solve}(S_{\mathrm{plan}})$ .

Evaluation Setup: We compare the performance of each setting on math (GSM8K) and logical reasoning (ContextHub and FOLIO) datasets. We follow Gao et al. (2023) to include GSM8K-Hard, a minimally modified version of GSM8K that replaces its numbers with larger ones, to account for the possibility of recent LLMs overfitting to GSM8K through data contamination (Zhang et al., 2024).

For Plan + Direct Solver and Plan + CoT Solver, we use the few-shot prompts from Ye et al. (2023). For Plan + Tool Solver, we use state-of-the-art tool-augmented prompting methods. Specifically, for GSM8K, we use Program-aided Language Models (Gao et al., 2023, PAL), which execute the LM-generated plan with a Python interpreter. For the logical reasoning datasets, we use Satisfiability-Aided Language Models (Ye et al., 2023, SatLM), which use the automated theorem prover Z3 (De Moura & Bjørner, 2008) to solve the generated specifications. If the generated plan cannot be parsed by the tool, we use random guessing when the question is multiple choice, and mark it incorrect otherwise. A sketch of this execution logic appears at the end of this section.

# 5.2 EVALUATION RESULTS

Figure 6 shows the results across a representative selection of models. Detailed numerical results, including the unparseable rates of model-generated plans, can be found in Appendix H.

When comparing direct answer with Plan + Direct Solver and Plan + CoT Solver, we note that for many datasets and models, only having a plan does not account for most of the performance gain. Compared with direct answer, CoT or Plan + CoT Solver is needed for strong performance. Tracking the execution with one of these methods gives the strongest accuracy benefit, especially for math-heavy datasets.

Despite their strength over direct answer and Plan + Direct Solver, CoT and Plan + CoT Solver are dominated by Plan + Tool Solver in most settings. LLMs are limited in their ability to execute and track steps compared with symbolic solvers.

We argue that these results provide an explanation of why CoT helps on symbolic tasks. While all tasks could feasibly benefit from a detailed description of how to solve each individual question (e.g., a plan in the context of this section), CoT only outperforms direct answer when these steps require a substantial amount of tracing and computation. In these settings, we can see a clear performance benefit from using symbolic solvers; CoT appears to be a poor (but universal) approximation to such solvers. When possible, LLMs should be paired with symbolic solvers at inference time when solving symbolic tasks to achieve consistently better performance than direct answer and CoT.
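As promised above, here is a minimal sketch of the Plan + Tool Solver execution logic for the Python-program case, including the random-guess fallback; in practice generated code should be sandboxed, and for the logical reasoning datasets the first-order logic specifications go to Z3 instead.

```python
# Sketch of executing an LM-generated Python plan that defines solution().
import random

def run_plan(plan_code: str, choices: list[str] | None = None) -> str | None:
    namespace: dict = {}
    try:
        exec(plan_code, namespace)           # NB: sandbox untrusted code in practice
        return str(namespace["solution"]())
    except Exception:
        if choices is not None:              # unparseable plan on a MC question:
            return random.choice(choices)    # random guess, per our setup
        return None                          # otherwise, marked incorrect

print(run_plan("def solution():\n    return 48 / 1.2"))  # '40.0'
```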
# 6 DISCUSSION AND RELATED WORK

Where is CoT helping and why? Our results showing CoT improvements for math and logic align well with early work on CoT for LLMs such as Scratchpads (Nye et al., 2022). As CoT gained popularity, its application broadened to tasks that canonically do not require multiple steps, where it can often yield small improvements over direct answering. We believe this led to the current prevailing sentiment that deliberation should improve performance on any task requiring some type of reasoning (our original claim from Section 2). However, our results show a clear separation between performance on non-symbolic and symbolic tasks. If, in theory, any question could benefit from deliberation, why is CoT only benefiting the questions that can be solved through symbolic manipulation? Our results from Section 5 suggest that the primary benefit of CoT comes from its ability to execute symbolic steps and track their output. Not all tasks have this feature: for example, questions from CommonsenseQA can hardly be translated into formally grounded and executable solution plans. Datasets like StrategyQA may feature multiple steps of reasoning, but executing those steps is not complex, so the benefits of CoT are small. It is unclear whether explicitly instilling models with particular modes of deliberation, like process of elimination for multiple choice questions, might make them more effective for non-symbolic tasks, or whether there is a fundamental limitation imposed by their pre-training data. We leave this distinction for future work.

Can we improve CoT further? Our work treats chain-of-thought variants that explicitly do not involve multiple inference calls. There is evidence that using additional calls to LLMs can help (Du et al., 2023; Yao et al., 2023; Besta et al., 2023; Chen et al., 2024), but these methods use significantly more computation, and careful benchmarking sometimes reveals that naive techniques are as good as iterative ones (Olausson et al., 2024). However, past theoretical results show that Transformers are augmented in a fundamental way by CoT (Liu et al., 2023b; Merrill & Sabharwal, 2024); we believe this indicates the potential for improving CoT beyond prompt-based approaches. On the other hand, recent methods showing benefit from "internalizing" CoT (Deng et al., 2024) may indicate that explicit generation of intermediate tokens is not used to its full potential.

Limitations One set of tasks we do not cover in our experiments (except for BiGGen Bench) is long-horizon planning. However, many works in the literature have already discussed the efficacy of planning with CoT. We also do not directly address possible data contamination of these models on our datasets; we try to mitigate this by including multiple models, datasets (new and old), and our meta-analysis. For more discussion of planning and dataset contamination, see Appendix I.

# 7 CONCLUSION

In this work, we characterize the performance of prompt-based CoT through a meta-analysis of the literature and experiments across different models, datasets, and prompts. We find that CoT predominantly helps on math and formal logic, largely due to its ability to trace the intermediate steps of a problem. But CoT rarely outperforms tool-augmented approaches on these same problems. We believe that CoT remains a powerful technique, but to give improvement across a wider range of NLP tasks, research should move beyond prompt-based CoT to new paradigms like search, interacting agents, or better fine-tuned models.

# REPRODUCIBILITY

For our experiments, we provide in-depth details of how we evaluated models on each dataset in Section 4.1 and Appendix C. Furthermore, we release all prompts for every dataset on Huggingface, including per-model outputs and sampling parameters. For our meta-analysis of the literature, we describe our filtering criteria and process of annotating experiments into high-level categories in Section 3 and Appendix B. We also release the full list of papers in our meta-analysis together with extracted experimental comparisons and task category annotations.
# ACKNOWLEDGMENTS

We acknowledge George Tsoukalas for providing insightful feedback throughout the project. We also thank Kaj Bostrom and Eunsol Choi for reviewing and providing feedback on drafts of the work. This work was partially supported by NSF CAREER Award IIS-2145280 (to Durrett), NSF CAREER Award 2339729 (to Mahowald), the NSF AI Institute for Foundations of Machine Learning (IFML), the Sloan Foundation via a Sloan Research Fellowship, and a grant from Open Philanthropy.

# REFERENCES

Marah Abdin, Sam Ade Jacobs, Ammar Ahmad Awan, Jyoti Aneja, Ahmed Awadallah, Hany Hassan Awadalla, Nguyen Bach, Amit Bahree, Arash Bakhtiari, Harkirat Singh Behl, Alon Benhaim, Misha Bilenko, Johan Bjorck, Sébastien Bubeck, Martin Cai, Caio César Teodoro Mendes, Weizhu Chen, Vishrav Chaudhary, Parul Chopra, Allison Del Giorno, Gustavo de Rosa, Matthew Dixon, Ronen Eldan, Dan Iter, Abhishek Goswami, Suriya Gunasekar, Emman Haider, Junheng Hao, Russell J. Hewett, Jamie Huynh, Mojan Javaheripi, Xin Jin, Piero Kauffmann, Nikos Karampatziakis, Dongwoo Kim, Mahmoud Khademi, Lev Kurilenko, James R. Lee, Yin Tat Lee, Yuanzhi Li, Chen Liang, Weishung Liu, Eric Lin, Zeqi Lin, Piyush Madan, Arindam Mitra, Hardik Modi, Anh Nguyen, Brandon Norick, Barun Patra, Daniel Perez-Becker, Thomas Portet, Reid Pryzant, Heyang Qin, Marko Radmilac, Corby Rosset, Sambudha Roy, Olli Saarikivi, Amin Saied, Adil Salim, Michael Santacroce, Shital Shah, Ning Shang, Hiteshi Sharma, Xianmin Song, Olatunji Ruwase, Xin Wang, Rachel Ward, Guanhua Wang, Philipp Witte, Michael Wyatt, Can Xu, Jiahang Xu, Sonali Yadav, Fan Yang, Ziyi Yang, Donghan Yu, Cheng-Yuan Zhang, Cyril Zhang, Jianwen Zhang, Li Lyna Zhang, Yi Zhang, Yunan Zhang, and Xiren Zhou. Phi-3 technical report: A highly capable language model locally on your phone. ArXiv, abs/2404.14219, 2024. URL https://api.semanticscholar.org/CorpusID:269293048.
Anthropic. The Claude 3 Model Family: Opus, Sonnet, Haiku. a. URL https://www-cdn.anthropic.com/de8ba9b01c9ab7cbabf5c33b80b7bbc618857627/Model_Card_Claude_3.pdf.
Anthropic. Claude 3.5 Sonnet Model Card Addendum. b. URL https://www-cdn.anthropic.com/fed9cc193a14b84131812372d8d5857f8f304c52/Model_Card_Claude_3_Addendum.pdf.
Maciej Besta, Nils Blach, Ales Kubicek, Robert Gerstenberger, Michal Podstawski, Lukas Gianinazzi, Joanna Gajda, Tomasz Lehmann, Hubert Niewiadomski, Piotr Nyczyk, and Torsten Hoefler. Graph of thoughts: Solving elaborate problems with large language models. In AAAI Conference on Artificial Intelligence, 2023. URL https://api.semanticscholar.org/CorpusID:261030303.
Yonatan Bisk, Rowan Zellers, Ronan Le Bras, Jianfeng Gao, and Yejin Choi. PIQA: Reasoning about physical commonsense in natural language. In AAAI Conference on Artificial Intelligence, 2019. URL https://api.semanticscholar.org/CorpusID:208290939.
Leonard Bongard, Lena Held, and Ivan Habernal. The legal argument reasoning task in civil procedure. In Nikolaos Aletras, Ilias Chalkidis, Leslie Barrett, Cătălina Goanță, and Daniel Preoțiuc-Pietro (eds.), Proceedings of the Natural Legal Language Processing Workshop 2022, pp. 194-207, Abu Dhabi, United Arab Emirates (Hybrid), December 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.nllp-1.17. URL https://aclanthology.org/2022.nllp-1.17.
Chen Bowen, Rune Sætre, and Yusuke Miyao.
A comprehensive evaluation of inductive reasoning capabilities and problem solving in large language models. In Yvette Graham and Matthew Purver (eds.), Findings of the Association for Computational Linguistics: EACL 2024, pp. 323-339, St. Julian's, Malta, March 2024. Association for Computational Linguistics. URL https://aclanthology.org/2024.findings-eacl.22.
Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel Ziegler, Jeffrey Wu, Clemens Winter, Chris Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. Language models are few-shot learners. In H. Larochelle, M. Ranzato, R. Hadsell, M.F. Balcan, and H. Lin (eds.), Advances in Neural Information Processing Systems, volume 33, pp. 1877-1901. Curran Associates, Inc., 2020. URL https://proceedings.neurips.cc/paper_files/paper/2020/file/1457c0d6bcbd4967418bfb8ac142f64a-Paper.pdf.
Hyungjoo Chae, Yeonghyeon Kim, Seungone Kim, Kai Tzu-iunn Ong, Beong-woo Kwak, Moohyeon Kim, Seonghwan Kim, Taeyoon Kwon, Jiwan Chung, Youngjae Yu, et al. Language Models as Compilers: Simulating Pseudocode Execution Improves Algorithmic Reasoning in Language Models. arXiv preprint arXiv:2404.02575, 2024.
Justin Chih-Yao Chen, Swarnadeep Saha, and Mohit Bansal. ReConcile: Round-table conference improves reasoning via consensus among diverse LLMs, 2024. URL https://openreview.net/forum?id=Yo16nUVIJD.
Wenhu Chen, Xueguang Ma, Xinyi Wang, and William W. Cohen. Program of thoughts prompting: Disentangling computation from reasoning for numerical reasoning tasks. Transactions on Machine Learning Research, 2023. ISSN 2835-8856. URL https://openreview.net/forum?id=YfZ4ZPt8zd.
Peter Clark, Isaac Cowhey, Oren Etzioni, Tushar Khot, Ashish Sabharwal, Carissa Schoenick, and Oyvind Tafjord. Think you have Solved Question Answering? Try ARC, the AI2 Reasoning Challenge. arXiv:1803.05457v1, 2018.
Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, Christopher Hesse, and John Schulman. Training verifiers to solve math word problems. ArXiv, abs/2110.14168, 2021. URL https://api.semanticscholar.org/CorpusID:239998651.
Marie-Catherine de Marneffe, Mandy Simons, and Judith Tonhauser. The CommitmentBank: Investigating projection in naturally occurring discourse. In Proceedings of Sinn und Bedeutung 23, 2019.
Leonardo De Moura and Nikolaj Bjørner. Z3: An efficient SMT solver. In Proceedings of the Theory and Practice of Software, 14th International Conference on Tools and Algorithms for the Construction and Analysis of Systems, TACAS'08/ETAPS'08, pp. 337-340, Berlin, Heidelberg, 2008. Springer-Verlag. ISBN 3540787992.
Yuntian Deng, Yejin Choi, and Stuart Shieber. From Explicit CoT to Implicit CoT: Learning to Internalize CoT Step by Step. arXiv preprint arXiv:2405.14838, 2024.
Yilun Du, Shuang Li, Antonio Torralba, Joshua B Tenenbaum, and Igor Mordatch. Improving factuality and reasoning in language models through multiagent debate. arXiv preprint arXiv:2305.14325, 2023.
Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. The Llama 3 Herd of Models.
arXiv preprint arXiv:2407.21783, 2024.
Yann Dubois, Xuechen Li, Rohan Taori, Tianyi Zhang, Ishaan Gulrajani, Jimmy Ba, Carlos Guestrin, Percy Liang, and Tatsunori B. Hashimoto. AlpacaFarm: A Simulation Framework for Methods that Learn from Human Feedback, 2023.
Nouha Dziri, Ximing Lu, Melanie Sclar, Xiang Lorraine Li, Liwei Jiang, Bill Yuchen Lin, Sean Welleck, Peter West, Chandra Bhagavatula, Ronan Le Bras, Jena D. Hwang, Soumya Sanyal, Xiang Ren, Allyson Ettinger, Zaid Harchaoui, and Yejin Choi. Faith and fate: Limits of transformers on compositionality. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. URL https://openreview.net/forum?id=Fkckkr3ya8.
Yao Fu, Hao Peng, Ashish Sabharwal, Peter Clark, and Tushar Khot. Complexity-based prompting for multi-step reasoning. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=yf1icZHC-19.
Luyu Gao, Aman Madaan, Shuyan Zhou, Uri Alon, Pengfei Liu, Yiming Yang, Jamie Callan, and Graham Neubig. PAL: Program-aided language models. In Proceedings of the 40th International Conference on Machine Learning, ICML'23. JMLR.org, 2023.
Mor Geva, Daniel Khashabi, Elad Segal, Tushar Khot, Dan Roth, and Jonathan Berant. Did Aristotle use a laptop? A question answering benchmark with implicit reasoning strategies. Transactions of the Association for Computational Linguistics, 9:346-361, February 2021. ISSN 2307-387X. doi: 10.1162/tacl_a_00370.
Lin Guan, Yifan Zhou, Denis Liu, Yantian Zha, Heni Ben Amor, and Subbarao Kambhampati. "Task Success" is not Enough: Investigating the Use of Video-Language Models as Behavior Critics for Catching Undesirable Agent Behaviors. ArXiv, abs/2402.04210, 2024. URL https://api.semanticscholar.org/CorpusID:267500077.
Atharva Gundawar, Mudit Verma, Lin Guan, Karthik Valmeekam, Siddhant Bhambri, and Subbarao Kambhampati. Robust Planning with LLM-Modulo Framework: Case Study in Travel Planning. ArXiv, abs/2405.20625, 2024. URL https://api.semanticscholar.org/CorpusID:270199944.
Simeng Han, Hailey Schoelkopf, Yilun Zhao, Zhenting Qi, Martin Riddell, Luke Benson, Lucy Sun, Ekaterina Zubova, Yujie Qiao, Matthew Burtell, David Peng, Jonathan Fan, Yixin Liu, Brian Wong, Malcolm Sailor, Ansong Ni, Linyong Nan, Jungo Kasai, Tao Yu, Rui Zhang, Shafiq Joty, Alexander R. Fabbri, Wojciech Kryscinski, Xi Victoria Lin, Caiming Xiong, and Dragomir Radev. FOLIO: Natural Language Reasoning with First-Order Logic. arXiv preprint arXiv:2209.00840, 2022. URL https://arxiv.org/abs/2209.00840.
Shibo Hao, Yi Gu, Haodi Ma, Joshua Hong, Zhen Wang, Daisy Wang, and Zhiting Hu. Reasoning with language model is planning with world model. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pp. 8154-8173, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.507. URL https://aclanthology.org/2023.emnlp-main.507.
Dan Hendrycks, Collin Burns, Steven Basart, Andy Zou, Mantas Mazeika, Dawn Song, and Jacob Steinhardt. Measuring massive multitask language understanding. Proceedings of the International Conference on Learning Representations (ICLR), 2021a.
Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring Mathematical Problem Solving With the MATH Dataset. NeurIPS, 2021b.
Hanxu Hu, Hongyuan Lu, Huajian Zhang, Wai Lam, and Yue Zhang.
Chain-of-symbol prompting elicits planning in large language models, 2023.
Wenyue Hua, Kaijie Zhu, Lingyao Li, Lizhou Fan, Shuhang Lin, Mingyu Jin, Haochen Xue, Zelong Li, Jindong Wang, and Yongfeng Zhang. Disentangling Logic: The Role of Context in Large Language Model Reasoning Capabilities. ArXiv, abs/2406.02787, 2024. URL https://api.semanticscholar.org/CorpusID:270258104.
Wenlong Huang, Pieter Abbeel, Deepak Pathak, and Igor Mordatch. Language models as zero-shot planners: Extracting actionable knowledge for embodied agents. arXiv preprint arXiv:2201.07207, 2022.
Jinghan Jia, Abi Komma, Timothy Leffel, Xujun Peng, Ajay Nagesh, Tamer Soliman, Aram Galstyan, and Anoop Kumar. Leveraging LLMs for dialogue quality measurement. In Yi Yang, Aida Davani, Avi Sil, and Anoop Kumar (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 6: Industry Track), pp. 359-367, Mexico City, Mexico, June 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.naacl-industry.30. URL https://aclanthology.org/2024.naacl-industry.30.
Albert Qiaochu Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de Las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lucile Saulnier, Lélio Renard Lavaud, Marie-Anne Lachaux, Pierre Stock, Teven Le Scao, Thibaut Lavril, Thomas Wang, Timothée Lacroix, and William El Sayed. Mistral 7B. ArXiv, abs/2310.06825, 2023. URL https://api.semanticscholar.org/CorpusID:263830494.
Dongwei Jiang, Marcio Fonseca, and Shay B. Cohen. LeanReasoner: Boosting complex logical reasoning with Lean. In Kevin Duh, Helena Gómez-Adorno, and Steven Bethard (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), NAACL 2024, Mexico City, Mexico, June 16-21, 2024, pp. 7497-7510. Association for Computational Linguistics, 2024. doi: 10.18653/v1/2024.naacl-long.416. URL https://doi.org/10.18653/v1/2024.naacl-long.416.
Brihi Joshi, Ziyi Liu, Sahana Ramnath, Aaron Chan, Zhewei Tong, Shaoliang Nie, Qifan Wang, Yejin Choi, and Xiang Ren. Are Machine Rationales (Not) Useful to Humans? Measuring and Improving Human Utility of Free-text Rationales. ArXiv, abs/2305.07095, 2023. URL https://api.semanticscholar.org/CorpusID:258676376.
Subbarao Kambhampati. Can large language models reason and plan? Annals of the New York Academy of Sciences, 1534:15-18, 2024. URL https://api.semanticscholar.org/CorpusID:268249961.
Subbarao Kambhampati, Karthik Valmeekam, Lin Guan, Mudit Verma, Kaya Stechly, Siddhant Bhambri, Lucas Paul Saldyt, and Anil B Murthy. Position: LLMs can't plan, but can help planning in LLM-modulo frameworks. In Ruslan Salakhutdinov, Zico Kolter, Katherine Heller, Adrian Weller, Nuria Oliver, Jonathan Scarlett, and Felix Berkenkamp (eds.), Proceedings of the 41st International Conference on Machine Learning, volume 235 of Proceedings of Machine Learning Research, pp. 22895-22907. PMLR, 21-27 Jul 2024a. URL https://proceedings.mlr.press/v235/kambhampati24a.html.
Subbarao Kambhampati, Karthik Valmeekam, Lin Guan, Mudit Verma, Kaya Stechly, Siddhant Bhambri, Lucas Paul Saldyt, and Anil B Murthy. Position: LLMs can't plan, but can help planning in LLM-modulo frameworks. In Forty-first International Conference on Machine Learning, 2024b. URL https://openreview.net/forum?id=Th8JPEmH4z.
Liwei Kang, Zirui Zhao, David Hsu, and Wee Sun Lee. On the empirical complexity of reasoning and planning in LLMs. arXiv preprint arXiv:2404.11041, 2024.
Marzena Karpinska, Katherine Thai, Kyle Lo, Tanya Goyal, and Mohit Iyyer. One thousand and one pairs: A "novel" challenge for long-context language models. ArXiv, abs/2406.16264, 2024. URL https://api.semanticscholar.org/CorpusID:270703648.
Tushar Khot, Harsh Trivedi, Matthew Finlayson, Yao Fu, Kyle Richardson, Peter Clark, and Ashish Sabharwal. Decomposed prompting: A modular approach for solving complex tasks. In The Eleventh International Conference on Learning Representations, volume abs/2210.02406, 2023. URL https://api.semanticscholar.org/CorpusID:252715485.
Seungone Kim, Juyoung Suk, Ji Yong Cho, Shayne Longpre, Chaeun Kim, Dongkeun Yoon, Guijin Son, Yejin Cho, Sheikh Shafayat, Jinheon Baek, et al. The BiGGen Bench: A Principled Benchmark for Fine-grained Evaluation of Language Models with Language Models. arXiv preprint arXiv:2406.05761, 2024.
Takeshi Kojima, Shixiang Shane Gu, Machel Reid, Yutaka Matsuo, and Yusuke Iwasawa. Large language models are zero-shot reasoners. In Proceedings of the 36th International Conference on Neural Information Processing Systems, Red Hook, NY, USA, 2022. Curran Associates Inc. ISBN 9781713871088.
Woosuk Kwon, Zhuohan Li, Siyuan Zhuang, Ying Sheng, Lianmin Zheng, Cody Hao Yu, Joseph E. Gonzalez, Hao Zhang, and Ion Stoica. Efficient memory management for large language model serving with PagedAttention. In Proceedings of the ACM SIGOPS 29th Symposium on Operating Systems Principles, 2023.
Brenden M. Lake and Marco Baroni. Generalization without systematicity: On the compositional skills of sequence-to-sequence recurrent networks. In Jennifer G. Dy and Andreas Krause (eds.), Proceedings of the 35th International Conference on Machine Learning, ICML 2018, Stockholm, Sweden, July 10-15, 2018, volume 80 of Proceedings of Machine Learning Research, pp. 2879-2888. PMLR, 2018. URL http://proceedings.mlr.press/v80/lake18a.html.
Tamera Lanham, Anna Chen, Ansh Radhakrishnan, Benoit Steiner, Carson Denison, Danny Hernandez, Dustin Li, Esin Durmus, Evan Hubinger, Jackson Kernion, et al. Measuring faithfulness in chain-of-thought reasoning. arXiv preprint arXiv:2307.13702, 2023.
Fangyu Lei, Qian Liu, Yiming Huang, Shizhu He, Jun Zhao, and Kang Liu. S3Eval: A synthetic, scalable, systematic evaluation suite for large language models. In Kevin Duh, Helena Gomez, and Steven Bethard (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 1259-1286, Mexico City, Mexico, June 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.naacl-long.69. URL https://aclanthology.org/2024.naacl-long.69.
Tianwen Li, Zhexiong Liu, Lindsay Matsumura, Elaine Wang, Diane Litman, and Richard Correnti. Using large language models to assess young students' writing revisions. In Ekaterina Kochmar, Marie Bexte, Jill Burstein, Andrea Horbach, Ronja Laarmann-Quante, Anaïs Tack, Victoria Yaneva, and Zheng Yuan (eds.), Proceedings of the 19th Workshop on Innovative Use of NLP for Building Educational Applications (BEA 2024), pp. 365-380, Mexico City, Mexico, June 2024. Association for Computational Linguistics. URL https://aclanthology.org/2024.bea-1.30.
Hunter Lightman, Vineet Kosaraju, Yuri Burda, Harrison Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe.
Let's verify step by step. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=v8L0pN6E0i.
B. Liu, Yuqian Jiang, Xiaohan Zhang, Qian Liu, Shiqi Zhang, Joydeep Biswas, and Peter Stone. LLM+P: Empowering large language models with optimal planning proficiency. ArXiv, abs/2304.11477, 2023a. URL https://api.semanticscholar.org/CorpusID:258298051.
Bingbin Liu, Jordan T. Ash, Surbhi Goel, Akshay Krishnamurthy, and Cyril Zhang. Transformers learn shortcuts to automata. In The Eleventh International Conference on Learning Representations, 2023b. URL https://openreview.net/forum?id=De4FYqjFueZ.
Pan Lu, Swaroop Mishra, Tanglin Xia, Liang Qiu, Kai-Wei Chang, Song-Chun Zhu, Oyvind Tafjord, Peter Clark, and Ashwin Kalyan. Learn to explain: Multimodal reasoning via thought chains for science question answering. In Sanmi Koyejo, S. Mohamed, A. Agarwal, Danielle Belgrave, K. Cho, and A. Oh (eds.), Advances in Neural Information Processing Systems 35: Annual Conference on Neural Information Processing Systems 2022, NeurIPS 2022, New Orleans, LA, USA, November 28 - December 9, 2022, 2022. URL http://papers.nips.cc/paper_files/paper/2022/hash/11332b6b6cf4485b84afadb1352d3a9a-Abstract-Conference.html.
Yingwei Ma, Yue Liu, Yue Yu, Yuanliang Zhang, Yu Jiang, Changjian Wang, and Shanshan Li. At Which Training Stage Does Code Data Help LLMs Reasoning? In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net, 2024. URL https://openreview.net/forum?id=KIPJKST4gw.
Aman Madaan and Amir Yazdanbakhsh. Text and patterns: For effective chain of thought, it takes two to tango. ArXiv, abs/2209.07686, 2022. URL https://api.semanticscholar.org/CorpusID:252355328.
William Merrill and Ashish Sabharwal. The expressive power of transformers with chain of thought. In The Twelfth International Conference on Learning Representations, volume abs/2310.07923, 2024. URL https://api.semanticscholar.org/CorpusID:263909434.
Maxwell Nye, Anders Johan Andreassen, Guy Gur-Ari, Henryk Michalewski, Jacob Austin, David Bieber, David Dohan, Aitor Lewkowycz, Maarten Bosma, David Luan, Charles Sutton, and Augustus Odena. Show your work: Scratchpads for intermediate computation with language models, 2022. URL https://openreview.net/forum?id=iedYJm92o0a.
Theo X. Olausson, Jeevana Priya Inala, Chenglong Wang, Jianfeng Gao, and Armando Solar-Lezama. Is self-repair a silver bullet for code generation? In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=y0GJXRungR.
OpenAI. GPT-4 Technical Report. ArXiv, abs/2303.08774, 2023. URL https://api.semanticscholar.org/CorpusID:257532815.
Liangming Pan, Alon Albalak, Xinyi Wang, and William Wang. Logic-LM: Empowering large language models with symbolic solvers for faithful logical reasoning. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Findings of the Association for Computational Linguistics: EMNLP 2023, pp. 3806-3824, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.findings-emnlp.248. URL https://aclanthology.org/2023.findings-emnlp.248.
Xiangyu Peng, Siyan Li, Sarah Wiegreffe, and Mark Riedl. Inferring the reader: Guiding automated story generation with commonsense reasoning. In Yoav Goldberg, Zornitsa Kozareva, and Yue Zhang (eds.), Findings of the Association for Computational Linguistics: EMNLP 2022, pp.
7008-7029, Abu Dhabi, United Arab Emirates, December 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.findings-emnlp.520. URL https://aclanthology.org/2022.findings-emnlp.520.
Zhenting Qi, Mingyuan Ma, Jiahang Xu, Li Lyna Zhang, Fan Yang, and Mao Yang. Mutual Reasoning Makes Smaller LLMs Stronger Problem-Solvers. arXiv preprint arXiv:2408.06195, 2024.
Xin Quan, Marco Valentino, Louise Dennis, and Andre Freitas. Enhancing ethical explanations of large language models through iterative symbolic refinement. In Yvette Graham and Matthew Purver (eds.), Proceedings of the 18th Conference of the European Chapter of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 1-22, St. Julian's, Malta, March 2024. Association for Computational Linguistics. URL https://aclanthology.org/2024.eacl-long.1.
Machel Reid et al. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. ArXiv, abs/2403.05530, 2024. URL https://api.semanticscholar.org/CorpusID:268297180.
David Rein, Betty Li Hou, Asa Cooper Stickland, Jackson Petty, Richard Yuanzhe Pang, Julien Dirani, Julian Michael, and Samuel R. Bowman. GPQA: A Graduate-Level Google-Proof Q&A Benchmark, 2023.
Gemma Team, Morgane Riviere, et al. Gemma 2: Improving open language models at a practical size. 2024. URL https://api.semanticscholar.org/CorpusID:270843326.
Keisuke Sakaguchi, Ronan Le Bras, Chandra Bhagavatula, and Yejin Choi. WinoGrande: an adversarial winograd schema challenge at scale. Commun. ACM, 64(9):99-106, aug 2021. ISSN 0001-0782. doi: 10.1145/3474381. URL https://doi.org/10.1145/3474381.
Maarten Sap, Hannah Rashkin, Derek Chen, Ronan Le Bras, and Yejin Choi. Social IQa: Commonsense reasoning about social interactions. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pp. 4463-4473, Hong Kong, China, November 2019. Association for Computational Linguistics. doi: 10.18653/v1/D19-1454. URL https://aclanthology.org/D19-1454.
Abulhair Saparov and He He. Language models are greedy reasoners: A systematic formal analysis of chain-of-thought. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=qFVVBzXxR2V.
Zayne Rea Sprague, Xi Ye, Kaj Bostrom, Swarat Chaudhuri, and Greg Durrett. MuSR: Testing the limits of chain-of-thought with multistep soft reasoning. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=jenyYQzue1.
Aarohi Srivastava, Abhinav Rastogi, Abhishek Rao, Abu Awal Md Shoeb, Abubakar Abid, Adam Fisch, Adam R Brown, Adam Santoro, Aditya Gupta, Adrià Garriga-Alonso, et al. Beyond the imitation game: Quantifying and extrapolating the capabilities of language models. arXiv preprint arXiv:2206.04615, 2022.
Maja Stahl, Leon Biermann, Andreas Nehring, and Henning Wachsmuth. Exploring LLM prompting strategies for joint essay scoring and feedback generation. In Ekaterina Kochmar, Marie Bexte, Jill Burstein, Andrea Horbach, Ronja Laarmann-Quante, Anaïs Tack, Victoria Yaneva, and Zheng Yuan (eds.), Proceedings of the 19th Workshop on Innovative Use of NLP for Building Educational Applications (BEA 2024), pp. 283-298, Mexico City, Mexico, June 2024. Association for Computational Linguistics. URL https://aclanthology.org/2024.bea-1.23.
Kaya Stechly, Karthik Valmeekam, and Subbarao Kambhampati.
On the self-verification limitations of large language models on reasoning and planning tasks. ArXiv, abs/2402.08115, 2024a. URL https://api.semanticscholar.org/CorpusID:267637077.
Kaya Stechly, Karthik Valmeekam, and Subbarao Kambhampati. Chain of thoughtlessness? An analysis of CoT in planning. 2024b. URL https://api.semanticscholar.org/CorpusID:269626390.
Simeng Sun, Yang Liu, Shuohang Wang, Dan Iter, Chenguang Zhu, and Mohit Iyyer. PEARL: Prompting large language models to plan and execute actions over long documents. In Yvette Graham and Matthew Purver (eds.), Proceedings of the 18th Conference of the European Chapter of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 469-486, St. Julian's, Malta, March 2024. Association for Computational Linguistics. URL https://aclanthology.org/2024.eacl-long.29.
Mirac Suzgun, Nathan Scales, Nathanael Schärli, Sebastian Gehrmann, Yi Tay, Hyung Won Chung, Aakanksha Chowdhery, Quoc Le, Ed Chi, Denny Zhou, and Jason Wei. Challenging BIG-bench tasks and whether chain-of-thought can solve them. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Findings of the Association for Computational Linguistics: ACL 2023, pp. 13003-13051, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.findings-acl.824. URL https://aclanthology.org/2023.findings-acl.824.
Alon Talmor, Jonathan Herzig, Nicholas Lourie, and Jonathan Berant. CommonsenseQA: A question answering challenge targeting commonsense knowledge. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pp. 4149-4158, Minneapolis, Minnesota, June 2019. Association for Computational Linguistics. doi: 10.18653/v1/N19-1421. URL https://aclanthology.org/N19-1421.
Hugo Touvron, Louis Martin, Kevin R. Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, Daniel M. Bikel, Lukas Blecher, Cristian Cantón Ferrer, Moya Chen, Guillem Cucurull, David Esiobu, Jude Fernandes, Jeremy Fu, Wenyin Fu, Brian Fuller, Cynthia Gao, Vedanuj Goswami, Naman Goyal, Anthony S. Hartshorn, Saghar Hosseini, Rui Hou, Hakan Inan, Marcin Kardas, Viktor Kerkez, Madian Khabsa, Isabel M. Kloumann, A. V. Korenev, Punit Singh Koura, Marie-Anne Lachaux, Thibaut Lavril, Jenya Lee, Diana Liskovich, Yinghai Lu, Yuning Mao, Xavier Martinet, Todor Mihaylov, Pushkar Mishra, Igor Molybog, Yixin Nie, Andrew Poulton, Jeremy Reizenstein, Rashi Rungta, Kalyan Saladi, Alan Schelten, Ruan Silva, Eric Michael Smith, R. Subramanian, Xia Tan, Binh Tang, Ross Taylor, Adina Williams, Jian Xiang Kuan, Puxin Xu, Zhengxu Yan, Iliyan Zarov, Yuchen Zhang, Angela Fan, Melanie Kambadur, Sharan Narang, Aurelien Rodriguez, Robert Stojnic, Sergey Edunov, and Thomas Scialom. Llama 2: Open foundation and fine-tuned chat models. ArXiv, abs/2307.09288, 2023. URL https://api.semanticscholar.org/CorpusID:259950998.
Harsh Trivedi, Niranjan Balasubramanian, Tushar Khot, and Ashish Sabharwal. MuSiQue: Multi-hop questions via single-hop question composition. Transactions of the Association for Computational Linguistics, 2022.
Karthik Valmeekam, Matthew Marquez, Sarath Sreedharan, and Subbarao Kambhampati. On the planning abilities of large language models - a critical investigation. In Thirty-seventh Conference on Neural Information Processing Systems, 2023.
URL https://openreview.net/forum?id=X6dEqXIsEW.
Karthik Valmeekam, Matthew Marquez, Alberto Olmo, Sarath Sreedharan, and Subbarao Kambhampati. PlanBench: An extensible benchmark for evaluating large language models on planning and reasoning about change. In Proceedings of the 37th International Conference on Neural Information Processing Systems, NIPS '23, Red Hook, NY, USA, 2024. Curran Associates Inc.
Mudit Verma, Siddhant Bhambri, and Subbarao Kambhampati. Theory of mind abilities of large language models in human-robot interaction: An illusion? Companion of the 2024 ACM/IEEE International Conference on Human-Robot Interaction, 2024. URL https://api.semanticscholar.org/CorpusID:266902529.
Boshi Wang, Sewon Min, Xiang Deng, Jiaming Shen, You Wu, Luke Zettlemoyer, and Huan Sun. Towards understanding chain-of-thought prompting: An empirical study of what matters. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 2717-2739, Toronto, Canada, July 2023a. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-long.153. URL https://aclanthology.org/2023.acl-long.153.
Lei Wang, Wanyu Xu, Yihuai Lan, Zhiqiang Hu, Yunshi Lan, Roy Ka-Wei Lee, and Ee-Peng Lim. Plan-and-solve prompting: Improving zero-shot chain-of-thought reasoning by large language models. In Annual Meeting of the Association for Computational Linguistics, 2023b. URL https://api.semanticscholar.org/CorpusID:258558102.
Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc V Le, Ed H. Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. Self-consistency improves chain of thought reasoning in language models. In The Eleventh International Conference on Learning Representations, 2023c. URL https://openreview.net/forum?id=1PL1NIMMrw.
Yubo Wang, Xueguang Ma, Ge Zhang, Yuansheng Ni, Abhranil Chandra, Shiguang Guo, Weiming Ren, Aaran Arulraj, Xuan He, Ziyan Jiang, Tianle Li, Max Ku, Kai Wang, Alex Zhuang, Rongqi Fan, Xiang Yue, and Wenhu Chen. MMLU-Pro: A More Robust and Challenging Multi-Task Language Understanding Benchmark, 2024.
Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. Chain-of-thought prompting elicits reasoning in large language models. Advances in Neural Information Processing Systems, 35:24824-24837, 2022.
Lionel Wong, Gabriel Grand, Alexander K. Lew, Noah D. Goodman, Vikash K. Mansinghka, Jacob Andreas, and Joshua B. Tenenbaum. From word models to world models: Translating from natural language to the probabilistic language of thought. ArXiv, abs/2306.12672, 2023. URL https://api.semanticscholar.org/CorpusID:259224900.
Jian Xie, Kai Zhang, Jiangjie Chen, Tinghui Zhu, Renze Lou, Yuandong Tian, Yanghua Xiao, and Yu Su. TravelPlanner: A Benchmark for Real-World Planning with Language Agents. ArXiv, abs/2402.01622, 2024. URL https://api.semanticscholar.org/CorpusID:267406800.
Miao Xiong, Zhiyuan Hu, Xinyang Lu, Yifei Li, Jie Fu, Junxian He, and Bryan Hooi. Can LLMs Express Their Uncertainty? An Empirical Evaluation of Confidence Elicitation in LLMs. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net, 2024. URL https://openreview.net/forum?id=gjeQKFxFpZ.
Jundong Xu, Hao Fei, Liangming Pan, Qian Liu, Mong-Li Lee, and Wynne Hsu. Faithful logical reasoning via symbolic chain-of-thought.
In Lun-Wei Ku, Andre Martins, and Vivek Srikumar (eds.), Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 13326-13365, Bangkok, Thailand, August 2024a. Association for Computational Linguistics. URL https://aclanthology.org/2024.acl-long.720.
Xiaohan Xu, Chongyang Tao, Tao Shen, Can Xu, Hongbo Xu, Guodong Long, and Jian-Guang Lou. Re-reading improves reasoning in language models, 2024b. URL https://openreview.net/forum?id=3jXCF5dNpC.
Chengrun Yang, Xuezhi Wang, Yifeng Lu, Hanxiao Liu, Quoc V Le, Denny Zhou, and Xinyun Chen. Large language models as optimizers. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=Bb4VGOWELI.
Shunyu Yao, Dian Yu, Jeffrey Zhao, Izhak Shafran, Thomas L. Griffiths, Yuan Cao, and Karthik Narasimhan. Tree of Thoughts: Deliberate problem solving with large language models, 2023.
Xi Ye, Qiaochu Chen, Isil Dillig, and Greg Durrett. Satisfiability-aided language models using declarative prompting. In Advances in Neural Information Processing Systems, 2023.
Hugh Zhang, Jeff Da, Dean Lee, Vaughn Robinson, Catherine Wu, Will Song, Tiffany Zhao, Pranav Raja, Dylan Slack, Qin Lyu, Sean Hendryx, Russell Kaplan, Michele Lunati, and Summer Yue. A careful examination of large language model performance on grade school arithmetic. ArXiv, abs/2405.00332, 2024. URL https://api.semanticscholar.org/CorpusID:269484687.
Huaixiu Steven Zheng, Swaroop Mishra, Xinyun Chen, Heng-Tze Cheng, Ed H. Chi, Quoc V Le, and Denny Zhou. Take a step back: Evoking reasoning via abstraction in large language models. In The Twelfth International Conference on Learning Representations, 2024a. URL https://openreview.net/forum?id=3bq3jsvcQ1.
Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zi Lin, Zhuohan Li, Dacheng Li, Eric P. Xing, Hao Zhang, Joseph E. Gonzalez, and Ion Stoica. Judging LLM-as-a-judge with MT-bench and Chatbot Arena. In Proceedings of the 37th International Conference on Neural Information Processing Systems, NIPS '23, Red Hook, NY, USA, 2024b. Curran Associates Inc.
Wanjun Zhong, Ruixiang Cui, Yiduo Guo, Yaobo Liang, Shuai Lu, Yanlin Wang, Amin Saied, Weizhu Chen, and Nan Duan. AGIEval: A Human-Centric Benchmark for Evaluating Foundation Models, 2023.
Ben Zhou, Kyle Richardson, Xiaodong Yu, and Dan Roth. Learning to decompose: Hypothetical question decomposition based on comparable texts. In Yoav Goldberg, Zornitsa Kozareva, and Yue Zhang (eds.), Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pp. 2223-2235, Abu Dhabi, United Arab Emirates, December 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.emnlp-main.142. URL https://aclanthology.org/2022.emnlp-main.142.
Ben Zhou, Hongming Zhang, Sihao Chen, Dian Yu, Hongwei Wang, Baolin Peng, Dan Roth, and Dong Yu. Conceptual and unbiased reasoning in language models. ArXiv, abs/2404.00205, 2024. URL https://api.semanticscholar.org/CorpusID:268820105.
Denny Zhou, Nathanael Schärli, Le Hou, Jason Wei, Nathan Scales, Xuezhi Wang, Dale Schuurmans, Claire Cui, Olivier Bousquet, Quoc V Le, and Ed H. Chi. Least-to-most prompting enables complex reasoning in large language models. In The Eleventh International Conference on Learning Representations, 2023a. URL https://openreview.net/forum?id=WZH7099tgtM.
Yongchao Zhou, Andrei Ioan Muresanu, Ziwen Han, Keiran Paster, Silviu Pitis, Harris Chan, and Jimmy Ba.
Large Language Models are Human-Level Prompt Engineers. In The Eleventh International Conference on Learning Representations, 2023b. URL https://openreview.net/forum?id=92gvk82DE-.

# A META-ANALYSIS: EXPANDED DETAILS ON CRITERIA AND PROCESS

Automatic Selection and Paper Filtering We investigate all papers from ICLR 2024, a representative ML venue, and two representative NLP venues, EACL 2024 and NAACL 2024 (including Findings and Workshop papers). We filtered all 4,642 papers (2,259 from ICLR 2024 and 2,382 from the two ACL-affiliated conferences) for those with at least two occurrences of "CoT", "chain-of-thought", or "chain of thought", resulting in 516 papers. There are conceivably papers using CoT called by another name (e.g., Scratchpads), but we believe these 516 give a representative sample appropriate for systematic analysis.

Manual Paper Filtering and Results Extraction We then filter down to papers that perform a comparison of CoT prompting vs. direct prompting, whether or not this is core to the paper's research question. We manually filtered the 516 papers in question and extracted the key results from those that remained. We excluded multimodal models, CoT-fine-tuned models, any experiments where the "CoT" method involves multiple forward passes (e.g., self-consistency (Wang et al., 2023c) and tree-of-thought (Yao et al., 2023)), and systems that augment LLMs with external tools (discussed more in Section 5).

For each paper passing through these criteria, we manually extracted the results from key tables comparing CoT and direct answer prompts. We only include results where the CoT and direct prompts are run on the same model and same dataset and are reported on a scale of 0 to 100 (excluding Likert-scale evaluations, for example) for a more direct comparison. When papers include various CoT or direct answer prompts (including zero/few-shot variants), we always take the best-performing prompt for both. We focus on key test results where applicable, excluding dev sets if they are reported alongside test, and also excluding numbers from ablations or nonstandard subsets of datasets.

This resulted in a total of 1,218 experimental comparisons across 110 papers (35 from ICLR and 75 from NAACL and EACL) covering 264 datasets. Details and more information will be available in our GitHub repo.

Categorization Given the large number of tasks and datasets being compared, we grouped each task into a set of 14 categories. These categories were determined based on the description (and possibly examples) of the task, not taking into account system performance. These categories abstract over traditional NLP task classifications (e.g., NER, reading comprehension) and take into account both the task format and the kinds of reasoning involved. Definitions for several categories are shown in Table 1 and the full description is given in Appendix B.

# B QUANTITATIVE META-ANALYSIS

See the full list of categories and their descriptions that we used for the meta-analysis in Table 2.

# C EXPANDED EXPERIMENTAL DETAILS

A full list of the datasets can be found in Table 4. Each model can be seen in Table 5. We use one answer parser for all datasets of the same answer response format (one for multiple choice, short answer, etc.); however, some datasets require special handling and have edge cases that we handle separately from the rest of the datasets. Similarly, for each model, we use the exact same prompt, except when closed-source models require different prompts because they do not allow for partial completions (i.e., when we cannot put "let's think step by step" to warm-start the assistant's response). All prompts are given in our Huggingface repo, including the model output and what our answer parser extracted as the answer.
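As an illustration, a minimal sketch of the shared multiple-choice extractor might look like the following; our released parsers add the per-dataset and per-model edge cases on top of this, and the regexes here are simplified stand-ins.

```python
# Simplified sketch of a multiple-choice answer extractor.
import re

def extract_choice(generation: str) -> str | None:
    """Pull an option letter out of outputs like '... The answer is (B).'"""
    match = re.search(r"answer is\s*\(?([A-E])\)?", generation, re.IGNORECASE)
    if match:
        return match.group(1).upper()
    # Fallback: take the last standalone option letter in the generation.
    letters = re.findall(r"\b([A-E])\b", generation)
    return letters[-1] if letters else None

assert extract_choice("Let's think step by step... The answer is (C).") == "C"
```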
Experiments were conducted either by invoking APIs or by running open-source models on our own hardware, mostly on a machine with 8 A40s or 4 Quadro RTX 8000s. All locally hosted models were served with vLLM, and all parameters given to the vLLM API endpoint are provided in the Huggingface repo as well.

Table 2: Categories and their descriptions for the meta-analysis.
| Category | Description |
| --- | --- |
| Symbolic and algorithmic | Tasks involving symbol manipulation which can be solved by executing a program. This includes entity tracking datasets (e.g., SCONE, Coin Flip) and algorithmic tasks (e.g., BBH word sorting or finding shortest paths in a graph). |
| Math | Tasks requiring mathematical reasoning, from grade-school math to advanced mathematics, including physics questions. |
| Logical reasoning | Tasks designed to test for logical reasoning, whether deductive (Saparov & He, 2023, PrOntoQA), inductive (Bowen et al., 2024), or analogical (Ma et al., 2024) reasoning, including syllogisms and logical puzzles. |
| Commonsense reasoning | Datasets designed to test for commonsense knowledge and reasoning, i.e., world knowledge that most people would have, rather than specialized expert-level knowledge in a discipline acquired after years of study. |
| Encyclopedic knowledge | Tasks requiring expert-level in-depth knowledge beyond mere commonsense, usually in an open-book setting. |
| Spatial and temporal reasoning | Datasets designed to test for an understanding of space and spatial relations (e.g., navigation) or reasoning involving time and sequences over time. |
| Multi-hop QA | Questions involving the composition of multiple steps of reasoning in order to arrive at an answer, such as "What is the capital of the country whose scientist discovered penicillin?" |
| Context-aware QA | Tasks such as closed-book QA and reading comprehension involving reasoning about a given text in context. The context is often a short passage, but could also take the form of a knowledge graph (KBQA) or a table. This category also includes information extraction tasks, such as NER or relation extraction. |
| Entailment | Tasks involving establishing the inferential relation between two texts, prototypically NLI, but also including fact verification. |
| Text classification | Tasks involving the classification of a text into a small set of categories, such as topic or sentiment classification, but also including tasks such as hate speech detection and misinformation detection. |
| Generation | Tasks involving text generation, including machine translation, dialogue, question generation, as well as code generation. Tasks such as SQL execution (Lei et al., 2024) or systematic transformations of data (e.g., SCAN (Lake & Baroni, 2018)) are excluded because they can be solved by executing a program. |
| Meta-linguistic | Tasks probing for models' knowledge of linguistics, such as identifying the main subject of a sentence or solving linguistic puzzles. |
| Mixed datasets | Datasets containing a variety of tasks, such as BIG-Bench Hard (BBH) or MMLU. |
| Other | Tasks which did not fit in any of the other categories, such as evaluating AI safety, eliciting models' verbalized confidence, or melody retrieval. |
All parameters given to the vLLM API endpoint are listed in the Huggingface repo as well.

# D OTHER COT PROMPT VARIANTS

# D.1 TESTING PERFORMANCE VOLATILITY ACROSS PROMPTS

To test the impact of prompt choice on performance, we performed our zero-shot experiment on Llama 3.1 8B with 7 different datasets and 4 different zero-shot CoT prompting strategies common in the literature (Kojima et al., 2022; Wang et al., 2023b; Zhou et al., 2023b; Yang et al., 2024).

Table 3: Models, datasets, and prompting strategies used in our experiments. Models marked with $\dagger$ are run with a 4k context window. Note that Gemma has a context window larger than 4k, but vLLM only supports up to a 4k window for it. Models marked with * are closed-source models that cannot handle prefixed assistant messages. Datasets marked with $\triangle$ do not have a few-shot setting.
| | |
| --- | --- |
| Models | Llama 2 7B Chat† (Touvron et al., 2023), Mistral 7B Instruct v0.3 (Jiang et al., 2023), Llama 3.1 8B Instruct (Dubey et al., 2024), Llama 3.1 70B Instruct, Gemma 2 9B It† (Riviere et al., 2024), Phi-3 Small 8k Instruct (Abdin et al., 2024), gpt-4o-mini-2024-07-18*, gpt-4o-2024-08-06*, Gemini 1.5 Flash* (Reid et al., 2024), Gemini 1.5 Pro* (Reid et al., 2024), claude-3-haiku-20240307* (Anthropic, a), claude-3-5-sonnet-20240620* (Anthropic, b) |
| Datasets | CommonsenseQA (Talmor et al., 2019), StrategyQA (Geva et al., 2021), SiQA△ (Sap et al., 2019), PiQA△ (Bisk et al., 2019), Winogrande△ (Sakaguchi et al., 2021), GPQA (Rein et al., 2023), MuSR (Sprague et al., 2024), ContextHub (Levels 1 and 2 only) (Hua et al., 2024), ARC△ (Clark et al., 2018), AGIEval LSAT (Zhong et al., 2023), MMLU (Hendrycks et al., 2021a), MMLU Pro (Wang et al., 2024), MATH (Hendrycks et al., 2021b), GSM8K (Cobbe et al., 2021), GSM8K-Hard (Gao et al., 2023), FOLIO (Han et al., 2022), MuSiQue△ (Trivedi et al., 2022), Big-Bench Hard (Suzgun et al., 2023; Srivastava et al., 2022), BiGGen Bench (Kim et al., 2024) |
| Prompts | zero-shot direct answer, zero-shot CoT (Kojima et al., 2022), few-shot direct answer (Brown et al., 2020), few-shot CoT (Wei et al., 2022) |
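As a rough illustration of the local-model setup, the open models in Table 3 can be served with vLLM's offline inference API along the following lines. The model identifier, context cap, and sampling parameters here are illustrative assumptions; the actual parameters passed to the vLLM endpoint are listed in our Huggingface repo.

```python
from vllm import LLM, SamplingParams

# Illustrative vLLM setup for one open model from Table 3. The 4k cap
# mirrors the restriction noted for Gemma; parameters are assumptions.
llm = LLM(model="meta-llama/Meta-Llama-3.1-8B-Instruct", max_model_len=4096)
params = SamplingParams(temperature=0.0, max_tokens=1024)

prompt = (
    "Explain your reasoning step-by-step for each question before answering. "
    'Give your final answer in the format "The answer is therefore <answer>".\n\n'
    "Question: ..."
)
outputs = llm.generate([prompt], params)
print(outputs[0].outputs[0].text)
```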
Table 4: List of datasets used in our experiments. We categorize each dataset into one of five categories based on the type of reasoning required: Commonsense, Knowledge, Soft Reasoning, Symbolic, or Mathematical. We also report answer formats. When we use few-shot prompts, we mark how many examples those prompts contain. BiGGen Bench has many categories of questions that explicitly ask for CoTs in the response; we ignore those categories for our evaluation.
| Dataset | Type | Answer Format | m-Shots |
| --- | --- | --- | --- |
| CommonsenseQA | Commonsense | Multiple choice | 7 |
| StrategyQA | Commonsense | True or False | 6 |
| SIQA | Commonsense | Multiple choice | 0 |
| PIQA | Commonsense | Multiple choice | 0 |
| Winogrande | Commonsense | Multiple choice | 0 |
| Arc Easy | Knowledge | Multiple choice | 0 |
| Arc Challenge | Knowledge | Multiple choice | 0 |
| AGIEval LSAT | Soft Reasoning | Multiple choice | 3 |
| BiGGen-Bench | Soft Reasoning | Free response | 0 |
| MMLU | Knowledge | Multiple choice | 5 |
| MMLU Pro | Knowledge | Multiple choice | 5 |
| BigBench-Hard | Symbolic | Multiple choice | 0 |
| MuSR | Soft Reasoning | Multiple choice | 1 |
| GPQA | Mathematical | Multiple choice | 3 |
| MuSiQue | Soft Reasoning | Short answer | 0 |
| GSM8K | Mathematical | Short answer | 8 |
| GSM8K-Hard | Mathematical | Short answer | 8 |
| FOLIO | Symbolic | True, False, or Unknown | 4 |
| ContextHub | Symbolic | True, False, or Neither | 3 |
| MATH | Mathematical | Short answer | 4 |
Table 5: List of models for our experiments. We focus on contemporary instruction-tuned models; although pretrained and smaller language models could be used, they are not the focus of our study. Prompts and outputs used for each model are available on Huggingface. * Note that Gemma can accept more than 4k input tokens, but we are restricted to 4k by vLLM.
| Model | Context Length | Is Open Source |
| --- | --- | --- |
| Llama 2 7B Chat | 4k | True |
| Mistral 7B Instruct v0.3 | 8k | True |
| Llama 3.1 8B Instruct | 128k | True |
| Llama 3.1 70B Instruct | 128k | True |
| Gemma 2 9B It | 4k* | True |
| Qwen 2 7B Instruct | 131k | True |
| Qwen 2 72B Instruct | 131k | True |
| GPT4o-Mini | 128k | False |
| GPT4o | 128k | False |
| Gemini 1.5 Pro | 128k | False |
| Gemini 1.5 Flash | 1m | False |
| Claude 3.5 Sonnet | 200k | False |
| Claude 3 Haiku | 200k | False |
![](images/9f95f43776fbd56cd222beea57a2a5f12075b2356a2362dff3912e55246a903a.jpg)

Figure 7: Performance of multiple prompts commonly used to elicit reasoning through CoT in the zero-shot setting. Each prompt starts the assistant completion with a different phrase meant to elicit reasoning. All results are from Llama 3.1 8B Instruct. For the Kojima variant, we explicitly place "Let's think step by step." in the assistant message. There is very little variation between the CoT prompts on average.

Figure 7 shows that the variation due to prompts is typically small and that no single prompt gives a consistent gain over the others. For our experiments, this suggests that the choice of prompt has only a small effect on the overall outcome on average.

# E FEW-SHOT EXPERIMENTS

Compared to a zero-shot prompt, a few-shot prompt additionally contains demonstrations of the relevant reasoning mode on different problem instances $\{(v(\mathbf{q}_i),\mathbf{y}_i^*)\}$. Few-shot prompts for direct answer simply encode the answer $\mathbf{a}_i$ as $\mathbf{y}_i^*$, whereas few-shot prompts for chain-of-thought include a reasoning trace ending in the correct answer. We can then define the $m$-shot direct prompt as
$$\mathcal{I}_{\mathrm{da}}^{m}(\mathbf{q}) = v_{\mathrm{da}}(\mathbf{q}_{1})\,\mathbf{a}_{1}\,v_{\mathrm{da}}(\mathbf{q}_{2})\,\mathbf{a}_{2}\dots v_{\mathrm{da}}(\mathbf{q}_{m})\,\mathbf{a}_{m}\,v_{\mathrm{da}}(\mathbf{q})$$
and the $m$-shot CoT prompt as
$$\mathcal{I}_{\mathrm{cot}}^{m}(\mathbf{q}) = v_{\mathrm{cot}}(\mathbf{q}_{1})\,\mathbf{y}_{1}^{*}\,v_{\mathrm{cot}}(\mathbf{q}_{2})\,\mathbf{y}_{2}^{*}\dots v_{\mathrm{cot}}(\mathbf{q}_{m})\,\mathbf{y}_{m}^{*}\,v_{\mathrm{cot}}(\mathbf{q}).$$

![](images/9436671d072bfe9e1df42bec2c0d8f5bee79dec70bd5f3261c1d86c2c4d1641e.jpg)

Figure 8: Average performance improvement from using CoT across different models in the zero-shot and few-shot settings. Each bar represents how much CoT improves the accuracy for that specific setting. In general, few-shot CoT does not change the qualitative picture relative to zero-shot CoT, though it can change the magnitude of the improvement on symbolic datasets.

Figure 8 shows the difference between few-shot prompting and the zero-shot setting discussed in the main text of the paper. We see that using CoT in the few-shot setting largely does not change which datasets benefit from it. Only one dataset, MuSR Team Allocations, starts to improve with few-shot prompting; we believe this is an exception because the final step needed to derive the answer is complex, and the few-shot examples make it clearer. The magnitude of improvement over direct answer prompting when using CoT is also similar to the zero-shot setting. A sketch of how these $m$-shot prompts are assembled is given below.
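The following minimal sketch implements the $m$-shot prompt constructions defined above. Here `verbalize_da` and `verbalize_cot` stand in for the templates $v_{\mathrm{da}}$ and $v_{\mathrm{cot}}$, and `demos` is a list of (question, direct answer, reasoning trace) triples; the names are illustrative, not our exact implementation.

```python
# Sketch of the m-shot prompt constructions I^m_da and I^m_cot defined above.
def m_shot_direct_prompt(question, demos, verbalize_da):
    parts = []
    for q_i, a_i, _ in demos:
        parts.append(verbalize_da(q_i) + a_i)   # v_da(q_i) a_i
    parts.append(verbalize_da(question))        # trailing v_da(q)
    return "".join(parts)

def m_shot_cot_prompt(question, demos, verbalize_cot):
    parts = []
    for q_i, _, y_star_i in demos:
        parts.append(verbalize_cot(q_i) + y_star_i)  # v_cot(q_i) y*_i
    parts.append(verbalize_cot(question))            # trailing v_cot(q)
    return "".join(parts)
```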
# F EXPANDED COT VS DIRECT EXPERIMENTAL RESULTS

# F.1 FULL ZERO-SHOT RESULTS

Table 6: Direct answer and CoT accuracies for each reasoning category across models.

| Model | Commonsense DA % | Commonsense CoT % | Knowledge DA % | Knowledge CoT % | Mathematical DA % | Mathematical CoT % | Symbolic DA % | Symbolic CoT % | Soft DA % | Soft CoT % |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| Claude-3 Haiku | 74.3 | 77.2 | 73.0 | 76.1 | 18.1 | 48.2 | 38.6 | 48.7 | 55.9 | 56.6 |
| Claude-3.5 Sonnet | 84.3 | 85.8 | 83.8 | 88.8 | 38.7 | 59.0 | 53.2 | 67.1 | 67.6 | 75.7 |
| GPT-4o Mini | 81.8 | 83.2 | 73.6 | 83.1 | 22.9 | 59.7 | 48.1 | 60.9 | 61.1 | 63.5 |
| Gemini 1.5 Flash | 80.3 | 76.8 | 78.2 | 81.0 | 27.2 | 55.7 | 47.0 | 59.7 | 60.6 | 62.6 |
| Gemini 1.5 Pro | 80.4 | 78.3 | 80.9 | 83.8 | 35.4 | 58.5 | 52.9 | 62.6 | 64.1 | 67.8 |
| Gemma 2 9b | 75.0 | 76.1 | 74.9 | 76.9 | 18.5 | 50.5 | 46.7 | 55.8 | 58.2 | 60.5 |
| GPT-4o | 87.3 | 87.7 | 82.9 | 88.6 | 36.5 | 63.3 | 55.7 | 68.3 | 65.9 | 74.0 |
| Meta-Llama 2 7b | 51.4 | 50.9 | 44.1 | 46.6 | 9.3 | 17.2 | 22.4 | 35.4 | 37.2 | 37.6 |
| Meta-Llama 3.1 70b | 84.2 | 84.7 | 82.4 | 85.6 | 24.9 | 54.9 | 49.0 | 60.0 | 65.7 | 69.5 |
| Meta-Llama 3.1 8b | 72.9 | 73.4 | 70.1 | 74.1 | 16.0 | 47.8 | 34.8 | 51.6 | 55.0 | 56.2 |
| Mistral 7b | 58.3 | 61.8 | 62.0 | 64.5 | 10.9 | 28.9 | 41.8 | 45.0 | 48.6 | 49.7 |
| Phi-3 Small 8k | 70.8 | 72.5 | 76.1 | 79.7 | 17.8 | 47.1 | 51.2 | 58.7 | 57.9 | 56.4 |
| Qwen 2 72b | 82.9 | 84.9 | 78.6 | 84.6 | 23.9 | 58.5 | 48.2 | 58.7 | 64.2 | 65.1 |
| Qwen 2 7b | 64.0 | 66.1 | 65.2 | 71.3 | 15.9 | 53.5 | 43.8 | 52.3 | 54.4 | 49.4 |
| Average | 74.8 | 75.7 | 73.3 | 77.5 | 22.6 | 50.2 | 45.2 | 56.1 | 58.3 | 60.3 |
Table 7: Zero-shot accuracy for direct answering and CoT prompts on all datasets.
| Dataset | Type | Model | Zero-shot CoT accuracy | Zero-shot DA accuracy |
| --- | --- | --- | --- | --- |
| MuSR Team Allocations | Soft Reasoning | Llama 2 7b | 34.8 | 37.2 |
| MuSR Team Allocations | Soft Reasoning | Mistral 7b | 38.8 | 46.8 |
| MuSR Team Allocations | Soft Reasoning | Llama 3.1 8b | 44.0 | 48.0 |
| MuSR Team Allocations | Soft Reasoning | Llama 3.1 70b | 65.2 | 66.8 |
| MuSR Team Allocations | Soft Reasoning | Gemma 2 9b | 47.2 | 44.8 |
| MuSR Team Allocations | Soft Reasoning | Phi-3 Small 8k | 47.2 | 61.6 |
| MuSR Team Allocations | Soft Reasoning | Qwen 2 7b | 42.0 | 49.6 |
| MuSR Team Allocations | Soft Reasoning | Qwen 2 72b | 58.0 | 66.8 |
| MuSR Team Allocations | Soft Reasoning | GPT-4o Mini | 61.2 | 58.4 |
| MuSR Team Allocations | Soft Reasoning | GPT-4o | 64.0 | 63.6 |
| MuSR Team Allocations | Soft Reasoning | Claude-3 Haiku | 56.8 | 59.2 |
| MuSR Team Allocations | Soft Reasoning | Claude-3.5 Sonnet | 80.4 | 63.2 |
| MuSR Team Allocations | Soft Reasoning | Gemini 1.5 Flash | 48.8 | 55.2 |
| MuSR Team Allocations | Soft Reasoning | Gemini 1.5 Pro | 58.4 | 62.4 |
| SiQA | Commonsense | Llama 2 7b | 53.4 | 55.9 |
| SiQA | Commonsense | Mistral 7b | 35.9 | 33.5 |
| SiQA | Commonsense | Llama 3.1 8b | 73.5 | 73.5 |
| SiQA | Commonsense | Llama 3.1 70b | 78.7 | 80.9 |
| SiQA | Commonsense | Gemma 2 9b | 74.9 | 76.3 |
| SiQA | Commonsense | Phi-3 Small 8k | 38.0 | 40.4 |
| SiQA | Commonsense | Qwen 2 7b | 37.3 | 39.3 |
| SiQA | Commonsense | Qwen 2 72b | 80.5 | 80.4 |
| SiQA | Commonsense | GPT-4o Mini | 79.0 | 80.0 |
| SiQA | Commonsense | GPT-4o | 81.9 | 81.5 |
| SiQA | Commonsense | Claude-3 Haiku | 75.4 | 74.8 |
| SiQA | Commonsense | Claude-3.5 Sonnet | 79.7 | 81.0 |
| SiQA | Commonsense | Gemini 1.5 Flash | 74.5 | 79.1 |
| SiQA | Commonsense | Gemini 1.5 Pro | 73.9 | 78.2 |
| MuSiQue | Soft Reasoning | Llama 2 7b | 40.1 | 36.1 |
| MuSiQue | Soft Reasoning | Mistral 7b | 47.3 | 47.2 |
| MuSiQue | Soft Reasoning | Llama 3.1 8b | 62.6 | 64.7 |
| MuSiQue | Soft Reasoning | Llama 3.1 70b | 74.0 | 72.2 |
| MuSiQue | Soft Reasoning | Gemma 2 9b | 67.7 | 68.7 |
| MuSiQue | Soft Reasoning | Phi-3 Small 8k | 58.3 | 64.3 |
| MuSiQue | Soft Reasoning | Qwen 2 7b | 60.7 | 65.1 |
| MuSiQue | Soft Reasoning | Qwen 2 72b | 56.3 | 69.0 |
| MuSiQue | Soft Reasoning | GPT-4o Mini | 71.3 | 68.2 |
| MuSiQue | Soft Reasoning | GPT-4o | 73.5 | 70.1 |
| MuSiQue | Soft Reasoning | Claude-3 Haiku | 54.8 | 56.0 |
| MuSiQue | Soft Reasoning | Claude-3.5 Sonnet | 66.9 | 70.4 |
| MuSiQue | Soft Reasoning | Gemini 1.5 Flash | 69.8 | 66.2 |
| MuSiQue | Soft Reasoning | Gemini 1.5 Pro | 69.8 | 71.3 |
| AGIEval LSAT RC | Soft Reasoning | Llama 2 7b | 31.2 | 36.4 |
| AGIEval LSAT RC | Soft Reasoning | Mistral 7b | 61.7 | 61.0 |
| AGIEval LSAT RC | Soft Reasoning | Llama 3.1 8b | 71.0 | 68.8 |
| AGIEval LSAT RC | Soft Reasoning | Llama 3.1 70b | 84.4 | 87.0 |
| AGIEval LSAT RC | Soft Reasoning | Gemma 2 9b | 75.1 | 78.1 |
| AGIEval LSAT RC | Soft Reasoning | Phi-3 Small 8k | 68.8 | 69.9 |
| AGIEval LSAT RC | Soft Reasoning | Qwen 2 7b | 61.0 | 66.5 |
| AGIEval LSAT RC | Soft Reasoning | Qwen 2 72b | 83.6 | 84.4 |
| AGIEval LSAT RC | Soft Reasoning | GPT-4o Mini | 77.3 | 74.3 |
| AGIEval LSAT RC | Soft Reasoning | GPT-4o | 88.1 | 81.4 |
| AGIEval LSAT RC | Soft Reasoning | Claude-3 Haiku | 71.7 | 65.1 |
| AGIEval LSAT RC | Soft Reasoning | Claude-3.5 Sonnet | 90.0 | 89.6 |
| AGIEval LSAT RC | Soft Reasoning | Gemini 1.5 Flash | 78.1 | 81.0 |
| AGIEval LSAT RC | Soft Reasoning | Gemini 1.5 Pro | 82.2 | 85.9 |
| CommonsenseQA | Commonsense | Llama 2 7b | 49.4 | 54.6 |
| CommonsenseQA | Commonsense | Mistral 7b | 68.0 | 68.0 |
| CommonsenseQA | Commonsense | Llama 3.1 8b | 68.5 | 74.9 |
| CommonsenseQA | Commonsense | Llama 3.1 70b | 83.5 | 84.4 |
| CommonsenseQA | Commonsense | Gemma 2 9b | 79.2 | 80.1 |
| CommonsenseQA | Commonsense | Phi-3 Small 8k | 81.8 | 80.3 |
| CommonsenseQA | Commonsense | Qwen 2 7b | 78.5 | 79.0 |
| CommonsenseQA | Commonsense | Qwen 2 72b | 87.4 | 87.3 |
| CommonsenseQA | Commonsense | GPT-4o Mini | 82.5 | 83.9 |
| CommonsenseQA | Commonsense | GPT-4o | 86.5 | 87.3 |
| CommonsenseQA | Commonsense | Claude-3 Haiku | 80.6 | 79.0 |
| CommonsenseQA | Commonsense | Claude-3.5 Sonnet | 85.1 | 84.3 |
| CommonsenseQA | Commonsense | Gemini 1.5 Flash | 79.7 | 82.6 |
| CommonsenseQA | Commonsense | Gemini 1.5 Pro | 79.9 | 82.9 |
| GPQA | Mathematical | Llama 2 7b | 28.3 | 24.3 |
| GPQA | Mathematical | Mistral 7b | 23.0 | 24.3 |
| GPQA | Mathematical | Llama 3.1 8b | 24.1 | 25.9 |
| GPQA | Mathematical | Llama 3.1 70b | 23.2 | 25.9 |
| GPQA | Mathematical | Gemma 2 9b | 26.3 | 21.2 |
| GPQA | Mathematical | Phi-3 Small 8k | 22.3 | 20.8 |
| GPQA | Mathematical | Qwen 2 7b | 24.1 | 24.6 |
| GPQA | Mathematical | Qwen 2 72b | 21.0 | 18.1 |
| GPQA | Mathematical | GPT-4o Mini | 21.0 | 24.0 |
| GPQA | Mathematical | GPT-4o | 23.7 | 25.9 |
| GPQA | Mathematical | Claude-3 Haiku | 25.4 | 22.3 |
| GPQA | Mathematical | Claude-3.5 Sonnet | 25.4 | 25.9 |
| GPQA | Mathematical | Gemini 1.5 Flash | 22.3 | 22.8 |
| GPQA | Mathematical | Gemini 1.5 Pro | 21.0 | 23.7 |
| AGIEval LSAT LR | Soft Reasoning | Llama 2 7b | 29.4 | 33.5 |
| AGIEval LSAT LR | Soft Reasoning | Mistral 7b | 44.1 | 47.8 |
| AGIEval LSAT LR | Soft Reasoning | Llama 3.1 8b | 59.0 | 53.9 |
| AGIEval LSAT LR | Soft Reasoning | Llama 3.1 70b | 81.4 | 81.0 |
| AGIEval LSAT LR | Soft Reasoning | Gemma 2 9b | 64.9 | 67.6 |
| AGIEval LSAT LR | Soft Reasoning | Phi-3 Small 8k | 64.5 | 64.1 |
| AGIEval LSAT LR | Soft Reasoning | Qwen 2 7b | 50.6 | 58.4 |
| AGIEval LSAT LR | Soft Reasoning | Qwen 2 72b | 77.3 | 75.1 |
| AGIEval LSAT LR | Soft Reasoning | GPT-4o Mini | 65.3 | 68.2 |
| AGIEval LSAT LR | Soft Reasoning | GPT-4o | 87.3 | 83.9 |
| AGIEval LSAT LR | Soft Reasoning | Claude-3 Haiku | 55.7 | 54.7 |
| AGIEval LSAT LR | Soft Reasoning | Claude-3.5 Sonnet | 83.7 | 82.7 |
| AGIEval LSAT LR | Soft Reasoning | Gemini 1.5 Flash | 70.0 | 71.2 |
| AGIEval LSAT LR | Soft Reasoning | Gemini 1.5 Pro | 79.4 | 80.4 |
| PiQA | Commonsense | Llama 2 7b | 62.1 | 64.7 |
| PiQA | Commonsense | Mistral 7b | 78.6 | 77.7 |
| PiQA | Commonsense | Llama 3.1 8b | 85.0 | 84.2 |
| PiQA | Commonsense | Llama 3.1 70b | 91.8 | 90.6 |
| PiQA | Commonsense | Gemma 2 9b | 84.0 | 84.8 |
| PiQA | Commonsense | Phi-3 Small 8k | 89.1 | 85.5 |
| PiQA | Commonsense | Qwen 2 7b | 84.3 | 86.2 |
| PiQA | Commonsense | Qwen 2 72b | 92.9 | 89.1 |
| PiQA | Commonsense | GPT-4o Mini | 93.1 | 88.6 |
| PiQA | Commonsense | GPT-4o | 95.9 | 95.5 |
| PiQA | Commonsense | Claude-3 Haiku | 85.9 | 86.6 |
| PiQA | Commonsense | Claude-3.5 Sonnet | 94.6 | 94.5 |
| PiQA | Commonsense | Gemini 1.5 Flash | 84.6 | 89.8 |
| PiQA | Commonsense | Gemini 1.5 Pro | 88.1 | 91.3 |
| Arc Easy | Knowledge | Llama 2 7b | 71.1 | 69.8 |
| Arc Easy | Knowledge | Mistral 7b | 87.5 | 86.7 |
| Arc Easy | Knowledge | Llama 3.1 8b | 93.0 | 92.5 |
| Arc Easy | Knowledge | Llama 3.1 70b | 97.5 | 97.9 |
| Arc Easy | Knowledge | Gemma 2 9b | 94.9 | 95.8 |
| Arc Easy | Knowledge | Phi-3 Small 8k | 96.0 | 96.3 |
| Arc Easy | Knowledge | Qwen 2 7b | 89.5 | 84.7 |
| Arc Easy | Knowledge | Qwen 2 72b | 97.9 | 97.4 |
| Arc Easy | Knowledge | GPT-4o Mini | 96.8 | 94.6 |
| Arc Easy | Knowledge | GPT-4o | 98.9 | 98.1 |
| Arc Easy | Knowledge | Claude-3 Haiku | 95.1 | 95.4 |
| Arc Easy | Knowledge | Claude-3.5 Sonnet | 98.6 | 98.4 |
| Arc Easy | Knowledge | Gemini 1.5 Flash | 96.8 | 97.2 |
| Arc Easy | Knowledge | Gemini 1.5 Pro | 97.2 | 94.6 |
| Arc Challenge | Knowledge | Llama 2 7b | 49.2 | 45.2 |
| Arc Challenge | Knowledge | Mistral 7b | 78.3 | 76.6 |
| Arc Challenge | Knowledge | Llama 3.1 8b | 86.0 | 82.6 |
| Arc Challenge | Knowledge | Llama 3.1 70b | 95.0 | 93.6 |
| Arc Challenge | Knowledge | Gemma 2 9b | 91.0 | 89.6 |
| Arc Challenge | Knowledge | Phi-3 Small 8k | 91.6 | 91.0 |
| Arc Challenge | Knowledge | Qwen 2 7b | 83.9 | 75.3 |
| Arc Challenge | Knowledge | Qwen 2 72b | 96.3 | 94.6 |
| Arc Challenge | Knowledge | GPT-4o Mini | 93.3 | 82.6 |
| Arc Challenge | Knowledge | GPT-4o | 96.0 | 95.3 |
| Arc Challenge | Knowledge | Claude-3 Haiku | 89.3 | 89.3 |
| Arc Challenge | Knowledge | Claude-3.5 Sonnet | 96.0 | 95.3 |
| Arc Challenge | Knowledge | Gemini 1.5 Flash | 92.3 | 93.6 |
| Arc Challenge | Knowledge | Gemini 1.5 Pro | 91.6 | 90.6 |
| AGIEval LSAT AR | Soft Reasoning | Llama 2 7b | 17.0 | 17.4 |
| AGIEval LSAT AR | Soft Reasoning | Mistral 7b | 21.7 | 19.1 |
| AGIEval LSAT AR | Soft Reasoning | Llama 3.1 8b | 20.4 | 26.1 |
| AGIEval LSAT AR | Soft Reasoning | Llama 3.1 70b | 32.6 | 28.7 |
| AGIEval LSAT AR | Soft Reasoning | Gemma 2 9b | 24.8 | 23.0 |
| AGIEval LSAT AR | Soft Reasoning | Phi-3 Small 8k | 28.3 | 26.5 |
| AGIEval LSAT AR | Soft Reasoning | Qwen 2 7b | 27.0 | 23.9 |
| AGIEval LSAT AR | Soft Reasoning | Qwen 2 72b | 29.1 | 28.3 |
| AGIEval LSAT AR | Soft Reasoning | GPT-4o Mini | 32.2 | 23.0 |
| AGIEval LSAT AR | Soft Reasoning | GPT-4o | 37.8 | 30.0 |
| AGIEval LSAT AR | Soft Reasoning | Claude-3 Haiku | 24.8 | 23.5 |
| AGIEval LSAT AR | Soft Reasoning | Claude-3.5 Sonnet | 38.3 | 33.9 |
| AGIEval LSAT AR | Soft Reasoning | Gemini 1.5 Flash | 27.8 | 27.8 |
| AGIEval LSAT AR | Soft Reasoning | Gemini 1.5 Pro | 30.0 | 31.7 |
| BiGGen Bench | Soft Reasoning | Llama 2 7b | 61.6 | 56.8 |
| BiGGen Bench | Soft Reasoning | Mistral 7b | 70.1 | 68.1 |
| BiGGen Bench | Soft Reasoning | Llama 3.1 8b | 66.5 | 67.7 |
| BiGGen Bench | Soft Reasoning | Llama 3.1 70b | 78.9 | 76.9 |
| BiGGen Bench | Soft Reasoning | Gemma 2 9b | 64.7 | 64.5 |
| BiGGen Bench | Soft Reasoning | Phi-3 Small 8k | 69.7 | 63.0 |
| BiGGen Bench | Soft Reasoning | Qwen 2 7b | 46.2 | 69.9 |
| BiGGen Bench | Soft Reasoning | Qwen 2 72b | 74.3 | 79.9 |
| BiGGen Bench | Soft Reasoning | GPT-4o Mini | 70.3 | 77.7 |
| BiGGen Bench | Soft Reasoning | GPT-4o | 86.0 | 82.0 |
| BiGGen Bench | Soft Reasoning | Claude-3 Haiku | 80.0 | 80.0 |
| BiGGen Bench | Soft Reasoning | Claude-3.5 Sonnet | 91.4 | 79.3 |
| BiGGen Bench | Soft Reasoning | Gemini 1.5 Flash | 73.9 | 68.5 |
| BiGGen Bench | Soft Reasoning | Gemini 1.5 Pro | 78.7 | 67.1 |
| Winogrande | Commonsense | Llama 2 7b | 49.9 | 50.4 |
| Winogrande | Commonsense | Mistral 7b | 60.4 | 56.5 |
| Winogrande | Commonsense | Llama 3.1 8b | 66.5 | 63.3 |
| Winogrande | Commonsense | Llama 3.1 70b | 84.2 | 81.2 |
| Winogrande | Commonsense | Gemma 2 9b | 68.7 | 67.7 |
| Winogrande | Commonsense | Phi-3 Small 8k | 81.5 | 81.6 |
| Winogrande | Commonsense | Qwen 2 7b | 67.1 | 60.7 |
| Winogrande | Commonsense | Qwen 2 72b | 81.9 | 80.7 |
| Winogrande | Commonsense | GPT-4o Mini | 79.2 | 71.9 |
| Winogrande | Commonsense | GPT-4o | 89.7 | 86.5 |
| Winogrande | Commonsense | Claude-3 Haiku | 70.7 | 66.2 |
| Winogrande | Commonsense | Claude-3.5 Sonnet | 89.4 | 85.7 |
| Winogrande | Commonsense | Gemini 1.5 Flash | 72.5 | 74.8 |
| Winogrande | Commonsense | Gemini 1.5 Pro | 75.5 | 78.3 |
| MMLU | Knowledge | Llama 2 7b | 46.3 | 41.7 |
| MMLU | Knowledge | Mistral 7b | 60.5 | 56.5 |
| MMLU | Knowledge | Llama 3.1 8b | 72.6 | 67.5 |
| MMLU | Knowledge | Llama 3.1 70b | 85.0 | 83.2 |
| MMLU | Knowledge | Gemma 2 9b | 73.8 | 71.4 |
| MMLU | Knowledge | Phi-3 Small 8k | 76.3 | 73.6 |
| MMLU | Knowledge | Qwen 2 7b | 67.0 | 64.5 |
| MMLU | Knowledge | Qwen 2 72b | 81.3 | 77.8 |
| MMLU | Knowledge | GPT-4o Mini | 79.9 | 74.8 |
| MMLU | Knowledge | GPT-4o | 87.5 | 83.4 |
| MMLU | Knowledge | Claude-3 Haiku | 72.2 | 68.4 |
| MMLU | Knowledge | Claude-3.5 Sonnet | 87.2 | 84.0 |
| MMLU | Knowledge | Gemini 1.5 Flash | 76.3 | 74.7 |
| MMLU | Knowledge | Gemini 1.5 Pro | 81.3 | 81.1 |
| StrategyQA | Commonsense | Llama 2 7b | 39.5 | 31.2 |
| StrategyQA | Commonsense | Mistral 7b | 66.1 | 55.8 |
| StrategyQA | Commonsense | Llama 3.1 8b | 73.7 | 68.6 |
| StrategyQA | Commonsense | Llama 3.1 70b | 85.3 | 83.8 |
| StrategyQA | Commonsense | Gemma 2 9b | 73.7 | 66.4 |
| StrategyQA | Commonsense | Phi-3 Small 8k | 72.3 | 66.0 |
| StrategyQA | Commonsense | Qwen 2 7b | 63.2 | 54.8 |
| StrategyQA | Commonsense | Qwen 2 72b | 81.7 | 76.9 |
| StrategyQA | Commonsense | GPT-4o Mini | 82.2 | 84.5 |
| StrategyQA | Commonsense | GPT-4o | 84.5 | 85.5 |
| StrategyQA | Commonsense | Claude-3 Haiku | 73.4 | 65.0 |
| StrategyQA | Commonsense | Claude-3.5 Sonnet | 80.1 | 76.3 |
| StrategyQA | Commonsense | Gemini 1.5 Flash | 72.5 | 75.2 |
| StrategyQA | Commonsense | Gemini 1.5 Pro | 74.0 | 71.4 |
| MuSR Object Placements | Soft Reasoning | Llama 2 7b | 36.3 | 30.5 |
| MuSR Object Placements | Soft Reasoning | Mistral 7b | 50.8 | 43.4 |
| MuSR Object Placements | Soft Reasoning | Llama 3.1 8b | 55.5 | 53.5 |
| MuSR Object Placements | Soft Reasoning | Llama 3.1 70b | 65.6 | 43.8 |
| MuSR Object Placements | Soft Reasoning | Gemma 2 9b | 63.3 | 57.0 |
| MuSR Object Placements | Soft Reasoning | Phi-3 Small 8k | 53.1 | 55.1 |
| MuSR Object Placements | Soft Reasoning | Qwen 2 7b | 48.8 | 48.4 |
| MuSR Object Placements | Soft Reasoning | Qwen 2 72b | 61.7 | 45.7 |
| MuSR Object Placements | Soft Reasoning | GPT-4o Mini | 59.0 | 55.0 |
| MuSR Object Placements | Soft Reasoning | GPT-4o | 67.6 | 45.3 |
| MuSR Object Placements | Soft Reasoning | Claude-3 Haiku | 46.9 | 52.3 |
| MuSR Object Placements | Soft Reasoning | Claude-3.5 Sonnet | 69.5 | 51.2 |
| MuSR Object Placements | Soft Reasoning | Gemini 1.5 Flash | 61.7 | 56.2 |
| MuSR Object Placements | Soft Reasoning | Gemini 1.5 Pro | 66.4 | 50.0 |
| FOLIO | Symbolic | Llama 2 7b | 36.5 | 33.0 |
| FOLIO | Symbolic | Mistral 7b | 50.7 | 41.9 |
| FOLIO | Symbolic | Llama 3.1 8b | 58.6 | 56.7 |
| FOLIO | Symbolic | Llama 3.1 70b | 70.9 | 69.0 |
| FOLIO | Symbolic | Gemma 2 9b | 66.0 | 55.7 |
| FOLIO | Symbolic | Phi-3 Small 8k | 68.0 | 59.6 |
| FOLIO | Symbolic | Qwen 2 7b | 60.6 | 51.2 |
| FOLIO | Symbolic | Qwen 2 72b | 65.0 | 65.0 |
| FOLIO | Symbolic | GPT-4o Mini | 65.0 | 58.1 |
| FOLIO | Symbolic | GPT-4o | 79.8 | 62.6 |
| FOLIO | Symbolic | Claude-3 Haiku | 61.6 | 48.8 |
| FOLIO | Symbolic | Claude-3.5 Sonnet | 73.9 | 68.5 |
| FOLIO | Symbolic | Gemini 1.5 Flash | 74.9 | 69.5 |
| FOLIO | Symbolic | Gemini 1.5 Pro | 73.9 | 74.4 |
| ContextHub Deductive L2 | Symbolic | Llama 2 7b | 34.8 | 12.6 |
| ContextHub Deductive L2 | Symbolic | Mistral 7b | 48.8 | 55.1 |
| ContextHub Deductive L2 | Symbolic | Llama 3.1 8b | 52.8 | 21.5 |
| ContextHub Deductive L2 | Symbolic | Llama 3.1 70b | 50.0 | 41.1 |
| ContextHub Deductive L2 | Symbolic | Gemma 2 9b | 50.0 | 43.0 |
| ContextHub Deductive L2 | Symbolic | Phi-3 Small 8k | 52.4 | 49.1 |
| ContextHub Deductive L2 | Symbolic | Qwen 2 7b | 51.3 | 39.8 |
| ContextHub Deductive L2 | Symbolic | Qwen 2 72b | 52.8 | 44.0 |
| ContextHub Deductive L2 | Symbolic | GPT-4o Mini | 47.0 | 42.0 |
| ContextHub Deductive L2 | Symbolic | GPT-4o | 54.5 | 45.6 |
| ContextHub Deductive L2 | Symbolic | Claude-3 Haiku | 45.2 | 41.8 |
| ContextHub Deductive L2 | Symbolic | Claude-3.5 Sonnet | 53.0 | 46.2 |
| ContextHub Deductive L2 | Symbolic | Gemini 1.5 Flash | 45.0 | 39.5 |
| ContextHub Deductive L2 | Symbolic | Gemini 1.5 Pro | 57.3 | 43.3 |
| ContextHub Abductive L2 | Symbolic | Llama 2 7b | 34.3 | 31.9 |
| ContextHub Abductive L2 | Symbolic | Mistral 7b | 34.0 | 25.7 |
| ContextHub Abductive L2 | Symbolic | Llama 3.1 8b | 41.3 | 37.3 |
| ContextHub Abductive L2 | Symbolic | Llama 3.1 70b | 51.0 | 44.4 |
| ContextHub Abductive L2 | Symbolic | Gemma 2 9b | 41.5 | 32.9 |
| ContextHub Abductive L2 | Symbolic | Phi-3 Small 8k | 44.3 | 32.8 |
| ContextHub Abductive L2 | Symbolic | Qwen 2 7b | 37.8 | 33.4 |
| ContextHub Abductive L2 | Symbolic | Qwen 2 72b | 45.5 | 32.2 |
| ContextHub Abductive L2 | Symbolic | GPT-4o Mini | 65.0 | 55.0 |
| ContextHub Abductive L2 | Symbolic | GPT-4o | 57.5 | 46.8 |
| ContextHub Abductive L2 | Symbolic | Claude-3 Haiku | 37.0 | 31.4 |
| ContextHub Abductive L2 | Symbolic | Claude-3.5 Sonnet | 56.8 | 40.4 |
| ContextHub Abductive L2 | Symbolic | Gemini 1.5 Flash | 53.1 | 32.2 |
| ContextHub Abductive L2 | Symbolic | Gemini 1.5 Pro | 53.5 | 43.7 |
| MMLU Pro | Knowledge | Llama 2 7b | 19.9 | 19.6 |
| MMLU Pro | Knowledge | Mistral 7b | 31.6 | 28.4 |
| MMLU Pro | Knowledge | Llama 3.1 8b | 44.8 | 38.0 |
| MMLU Pro | Knowledge | Llama 3.1 70b | 64.9 | 55.0 |
| MMLU Pro | Knowledge | Gemma 2 9b | 48.1 | 42.7 |
| MMLU Pro | Knowledge | Phi-3 Small 8k | 54.8 | 43.7 |
| MMLU Pro | Knowledge | Qwen 2 7b | 45.0 | 36.2 |
| MMLU Pro | Knowledge | Qwen 2 72b | 62.8 | 44.3 |
| MMLU Pro | Knowledge | GPT-4o Mini | 62.3 | 42.6 |
| MMLU Pro | Knowledge | GPT-4o | 72.1 | 55.0 |
| MMLU Pro | Knowledge | Claude-3 Haiku | 47.6 | 39.0 |
| MMLU Pro | Knowledge | Claude-3.5 Sonnet | 73.4 | 57.2 |
| MMLU Pro | Knowledge | Gemini 1.5 Flash | 58.5 | 47.2 |
| MMLU Pro | Knowledge | Gemini 1.5 Pro | 65.3 | 57.4 |
| MuSR Murder Mysteries | Soft Reasoning | Llama 2 7b | 50.0 | 50.0 |
| MuSR Murder Mysteries | Soft Reasoning | Mistral 7b | 62.8 | 55.6 |
| MuSR Murder Mysteries | Soft Reasoning | Llama 3.1 8b | 70.4 | 57.2 |
| MuSR Murder Mysteries | Soft Reasoning | Llama 3.1 70b | 73.6 | 69.6 |
| MuSR Murder Mysteries | Soft Reasoning | Gemma 2 9b | 76.8 | 61.6 |
| MuSR Murder Mysteries | Soft Reasoning | Phi-3 Small 8k | 61.6 | 58.8 |
| MuSR Murder Mysteries | Soft Reasoning | Qwen 2 7b | 59.2 | 53.2 |
| MuSR Murder Mysteries | Soft Reasoning | Qwen 2 72b | 80.8 | 64.4 |
| MuSR Murder Mysteries | Soft Reasoning | GPT-4o Mini | 71.2 | 63.6 |
| MuSR Murder Mysteries | Soft Reasoning | GPT-4o | 87.6 | 70.8 |
| MuSR Murder Mysteries | Soft Reasoning | Claude-3 Haiku | 62.4 | 56.8 |
| MuSR Murder Mysteries | Soft Reasoning | Claude-3.5 Sonnet | 85.2 | 70.4 |
| MuSR Murder Mysteries | Soft Reasoning | Gemini 1.5 Flash | 70.8 | 58.4 |
| MuSR Murder Mysteries | Soft Reasoning | Gemini 1.5 Pro | 77.6 | 64.0 |
| ContextHub Deductive L1 | Symbolic | Llama 2 7b | 47.7 | 8.3 |
| ContextHub Deductive L1 | Symbolic | Mistral 7b | 50.3 | 67.3 |
| ContextHub Deductive L1 | Symbolic | Llama 3.1 8b | 50.7 | 23.3 |
| ContextHub Deductive L1 | Symbolic | Llama 3.1 70b | 53.8 | 40.7 |
| ContextHub Deductive L1 | Symbolic | Gemma 2 9b | 56.3 | 39.2 |
| ContextHub Deductive L1 | Symbolic | Phi-3 Small 8k | 54.8 | 50.2 |
| ContextHub Deductive L1 | Symbolic | Qwen 2 7b | 59.3 | 43.3 |
| ContextHub Deductive L1 | Symbolic | Qwen 2 72b | 51.5 | 44.0 |
| ContextHub Deductive L1 | Symbolic | GPT-4o Mini | 49.3 | 41.5 |
| ContextHub Deductive L1 | Symbolic | GPT-4o | 59.3 | 49.0 |
| ContextHub Deductive L1 | Symbolic | Claude-3 Haiku | 50.5 | 39.7 |
| ContextHub Deductive L1 | Symbolic | Claude-3.5 Sonnet | 54.5 | 47.0 |
| ContextHub Deductive L1 | Symbolic | Gemini 1.5 Flash | 47.3 | 38.5 |
| ContextHub Deductive L1 | Symbolic | Gemini 1.5 Pro | 57.3 | 46.0 |
| ContextHub Abductive L1 | Symbolic | Llama 2 7b | 29.4 | 16.4 |
| ContextHub Abductive L1 | Symbolic | Mistral 7b | 46.9 | 25.8 |
| ContextHub Abductive L1 | Symbolic | Llama 3.1 8b | 43.6 | 24.2 |
| ContextHub Abductive L1 | Symbolic | Llama 3.1 70b | 55.3 | 43.9 |
| ContextHub Abductive L1 | Symbolic | Gemma 2 9b | 61.9 | 58.9 |
| ContextHub Abductive L1 | Symbolic | Phi-3 Small 8k | 62.5 | 60.3 |
| ContextHub Abductive L1 | Symbolic | Qwen 2 7b | 52.2 | 47.5 |
| ContextHub Abductive L1 | Symbolic | Qwen 2 72b | 61.9 | 45.0 |
| ContextHub Abductive L1 | Symbolic | GPT-4o Mini | 61.1 | 42.2 |
| ContextHub Abductive L1 | Symbolic | GPT-4o | 74.2 | 65.6 |
| ContextHub Abductive L1 | Symbolic | Claude-3 Haiku | 35.3 | 22.8 |
| ContextHub Abductive L1 | Symbolic | Claude-3.5 Sonnet | 80.8 | 60.3 |
| ContextHub Abductive L1 | Symbolic | Gemini 1.5 Flash | 66.4 | 47.2 |
| ContextHub Abductive L1 | Symbolic | Gemini 1.5 Pro | 62.2 | 60.0 |
| Big-Bench Hard | Symbolic | Llama 2 7b | 29.8 | 31.9 |
| Big-Bench Hard | Symbolic | Mistral 7b | 39.3 | 35.1 |
| Big-Bench Hard | Symbolic | Llama 3.1 8b | 62.8 | 45.6 |
| Big-Bench Hard | Symbolic | Llama 3.1 70b | 78.9 | 54.8 |
| Big-Bench Hard | Symbolic | Gemma 2 9b | 58.7 | 50.8 |
| Big-Bench Hard | Symbolic | Phi-3 Small 8k | 70.0 | 55.1 |
| Big-Bench Hard | Symbolic | Qwen 2 7b | 52.6 | 47.6 |
| Big-Bench Hard | Symbolic | Qwen 2 72b | 75.1 | 59.0 |
| Big-Bench Hard | Symbolic | GPT-4o Mini | 77.7 | 49.7 |
| Big-Bench Hard | Symbolic | GPT-4o | 84.6 | 64.5 |
| Big-Bench Hard | Symbolic | Claude-3 Haiku | 62.4 | 47.3 |
| Big-Bench Hard | Symbolic | Claude-3.5 Sonnet | 83.6 | 56.9 |
| Big-Bench Hard | Symbolic | Gemini 1.5 Flash | 71.3 | 55.4 |
| Big-Bench Hard | Symbolic | Gemini 1.5 Pro | 71.6 | 50.3 |
| MATH | Mathematical | Llama 2 7b | 4.2 | 4.0 |
| MATH | Mathematical | Mistral 7b | 12.4 | 6.1 |
| MATH | Mathematical | Llama 3.1 8b | 47.2 | 13.8 |
| MATH | Mathematical | Llama 3.1 70b | 64.4 | 22.8 |
| MATH | Mathematical | Gemma 2 9b | 45.6 | 19.1 |
| MATH | Mathematical | Phi-3 Small 8k | 43.2 | 18.5 |
| MATH | Mathematical | Qwen 2 7b | 53.7 | 13.3 |
| MATH | Mathematical | Qwen 2 72b | 63.5 | 23.8 |
| MATH | Mathematical | GPT-4o Mini | 69.6 | 24.3 |
| MATH | Mathematical | GPT-4o | 73.3 | 35.2 |
| MATH | Mathematical | Claude-3 Haiku | 32.7 | 17.4 |
| MATH | Mathematical | Claude-3.5 Sonnet | 63.8 | 34.6 |
| MATH | Mathematical | Gemini 1.5 Flash | 54.5 | 31.3 |
| MATH | Mathematical | Gemini 1.5 Pro | 62.1 | 39.4 |
| GSM8k-Hard | Mathematical | Llama 2 7b | 6.7 | 1.8 |
| GSM8k-Hard | Mathematical | Mistral 7b | 21.0 | 3.0 |
| GSM8k-Hard | Mathematical | Llama 3.1 8b | 34.4 | 6.0 |
| GSM8k-Hard | Mathematical | Llama 3.1 70b | 46.6 | 14.0 |
| GSM8k-Hard | Mathematical | Gemma 2 9b | 40.9 | 8.8 |
| GSM8k-Hard | Mathematical | Phi-3 Small 8k | 33.0 | 6.9 |
| GSM8k-Hard | Mathematical | Qwen 2 7b | 48.4 | 5.0 |
| GSM8k-Hard | Mathematical | Qwen 2 72b | 54.8 | 13.7 |
| GSM8k-Hard | Mathematical | GPT-4o Mini | 53.9 | 11.7 |
| GSM8k-Hard | Mathematical | GPT-4o | 60.3 | 26.0 |
| GSM8k-Hard | Mathematical | Claude-3 Haiku | 45.3 | 9.6 |
| GSM8k-Hard | Mathematical | Claude-3.5 Sonnet | 50.8 | 32.3 |
| GSM8k-Hard | Mathematical | Gemini 1.5 Flash | 54.6 | 16.2 |
| GSM8k-Hard | Mathematical | Gemini 1.5 Pro | 58.2 | 26.2 |
| GSM8k | Mathematical | Llama 2 7b | 29.6 | 6.9 |
| GSM8k | Mathematical | Mistral 7b | 59.2 | 10.2 |
| GSM8k | Mathematical | Llama 3.1 8b | 85.4 | 18.5 |
| GSM8k | Mathematical | Llama 3.1 70b | 85.6 | 37.0 |
| GSM8k | Mathematical | Gemma 2 9b | 89.2 | 24.9 |
| GSM8k | Mathematical | Phi-3 Small 8k | 90.0 | 24.9 |
| GSM8k | Mathematical | Qwen 2 7b | 87.9 | 20.7 |
| GSM8k | Mathematical | Qwen 2 72b | 94.6 | 40.1 |
| GSM8k | Mathematical | GPT-4o Mini | 94.1 | 31.8 |
| GSM8k | Mathematical | GPT-4o | 95.8 | 58.8 |
| GSM8k | Mathematical | Claude-3 Haiku | 89.4 | 22.9 |
| GSM8k | Mathematical | Claude-3.5 Sonnet | 96.1 | 62.2 |
| GSM8k | Mathematical | Gemini 1.5 Flash | 91.4 | 38.6 |
| GSM8k | Mathematical | Gemini 1.5 Pro | 92.7 | 52.4 |
# F.2 FULL FEW-SHOT RESULTS

Table 8: Few-shot accuracy for direct answering and CoT prompts on all datasets.
| Dataset | Type | Model | Few-shot CoT accuracy | Few-shot DA accuracy |
| --- | --- | --- | --- | --- |
| AGIEval LSAT RC | Soft Reasoning | Llama 2 7b | 33.1 | 38.7 |
| AGIEval LSAT RC | Soft Reasoning | Mistral 7b | 52.4 | 57.2 |
| AGIEval LSAT RC | Soft Reasoning | Llama 3.1 8b | 60.2 | 70.3 |
| AGIEval LSAT RC | Soft Reasoning | Llama 3.1 70b | 84.4 | 88.8 |
| AGIEval LSAT RC | Soft Reasoning | Gemma 2 9b | 74.3 | 79.2 |
| AGIEval LSAT RC | Soft Reasoning | Phi-3 Small 8k | 63.2 | 65.1 |
| AGIEval LSAT RC | Soft Reasoning | Qwen 2 7b | 61.7 | 68.8 |
| AGIEval LSAT RC | Soft Reasoning | Qwen 2 72b | 85.9 | 85.9 |
| AGIEval LSAT RC | Soft Reasoning | GPT-4o Mini | 77.3 | 71.4 |
| AGIEval LSAT RC | Soft Reasoning | Gemini 1.5 Flash | 79.2 | 81.8 |
| AGIEval LSAT LR | Soft Reasoning | Llama 2 7b | 33.7 | 34.7 |
| AGIEval LSAT LR | Soft Reasoning | Mistral 7b | 46.1 | 48.0 |
| AGIEval LSAT LR | Soft Reasoning | Llama 3.1 8b | 55.7 | 58.0 |
| AGIEval LSAT LR | Soft Reasoning | Llama 3.1 70b | 83.3 | 85.1 |
| AGIEval LSAT LR | Soft Reasoning | Gemma 2 9b | 65.7 | 68.2 |
| AGIEval LSAT LR | Soft Reasoning | Phi-3 Small 8k | 64.7 | 59.2 |
| AGIEval LSAT LR | Soft Reasoning | Qwen 2 7b | 54.1 | 61.2 |
| AGIEval LSAT LR | Soft Reasoning | Qwen 2 72b | 77.5 | 79.6 |
| AGIEval LSAT LR | Soft Reasoning | GPT-4o Mini | 68.4 | 64.5 |
| AGIEval LSAT LR | Soft Reasoning | Gemini 1.5 Flash | 68.6 | 72.9 |
| GPQA | Mathematical | Mistral 7b | 23.0 | 25.9 |
| GPQA | Mathematical | Llama 3.1 8b | 22.1 | 27.2 |
| GPQA | Mathematical | Llama 3.1 70b | 24.8 | 24.3 |
| GPQA | Mathematical | Gemma 2 9b | 19.9 | 22.3 |
| GPQA | Mathematical | Phi-3 Small 8k | 23.9 | 22.5 |
| GPQA | Mathematical | Qwen 2 7b | 23.4 | 21.2 |
| GPQA | Mathematical | Qwen 2 72b | 22.8 | 19.9 |
| GPQA | Mathematical | GPT-4o Mini | 20.0 | 20.0 |
| GPQA | Mathematical | Gemini 1.5 Flash | 21.9 | 24.6 |
| CommonsenseQA | Commonsense | Llama 2 7b | 18.2 | 19.2 |
| CommonsenseQA | Commonsense | Mistral 7b | 73.6 | 70.4 |
| CommonsenseQA | Commonsense | Llama 3.1 8b | 74.0 | 76.5 |
| CommonsenseQA | Commonsense | Llama 3.1 70b | 84.7 | 84.6 |
| CommonsenseQA | Commonsense | Gemma 2 9b | 81.8 | 80.8 |
| CommonsenseQA | Commonsense | Phi-3 Small 8k | 80.8 | 80.4 |
| CommonsenseQA | Commonsense | Qwen 2 7b | 80.3 | 72.9 |
| CommonsenseQA | Commonsense | Qwen 2 72b | 88.4 | 87.8 |
| CommonsenseQA | Commonsense | GPT-4o Mini | 84.7 | 84.7 |
| CommonsenseQA | Commonsense | Gemini 1.5 Flash | 81.7 | 83.3 |
| AGIEval LSAT AR | Soft Reasoning | Llama 2 7b | 19.6 | 18.7 |
| AGIEval LSAT AR | Soft Reasoning | Mistral 7b | 20.9 | 22.6 |
| AGIEval LSAT AR | Soft Reasoning | Llama 3.1 8b | 24.8 | 26.1 |
| AGIEval LSAT AR | Soft Reasoning | Llama 3.1 70b | 36.1 | 30.9 |
| AGIEval LSAT AR | Soft Reasoning | Gemma 2 9b | 22.2 | 28.7 |
| AGIEval LSAT AR | Soft Reasoning | Phi-3 Small 8k | 27.8 | 20.0 |
| AGIEval LSAT AR | Soft Reasoning | Qwen 2 7b | 24.3 | 23.0 |
| AGIEval LSAT AR | Soft Reasoning | Qwen 2 72b | 27.0 | 30.0 |
| AGIEval LSAT AR | Soft Reasoning | GPT-4o Mini | 28.7 | 26.1 |
| AGIEval LSAT AR | Soft Reasoning | Gemini 1.5 Flash | 28.3 | 20.4 |
| MMLU | Knowledge | Llama 2 7b | 49.0 | 42.8 |
| MMLU | Knowledge | Mistral 7b | 63.0 | 57.0 |
| MMLU | Knowledge | Llama 3.1 8b | 71.7 | 69.3 |
| MMLU | Knowledge | Llama 3.1 70b | 84.3 | 83.7 |
| MMLU | Knowledge | Gemma 2 9b | 74.7 | 72.4 |
| MMLU | Knowledge | Phi-3 Small 8k | 77.3 | 75.2 |
| MMLU | Knowledge | Qwen 2 7b | 69.9 | 68.6 |
| MMLU | Knowledge | Qwen 2 72b | 82.7 | 81.8 |
| MMLU | Knowledge | GPT-4o Mini | 82.3 | 77.8 |
| MMLU | Knowledge | Gemini 1.5 Flash | 78.1 | 79.0 |
| StrategyQA | Commonsense | Llama 2 7b | 57.9 | 30.9 |
| StrategyQA | Commonsense | Mistral 7b | 70.7 | 72.0 |
| StrategyQA | Commonsense | Llama 3.1 8b | 74.4 | 65.8 |
| StrategyQA | Commonsense | Llama 3.1 70b | 87.1 | 84.2 |
| StrategyQA | Commonsense | Gemma 2 9b | 77.1 | 73.3 |
| StrategyQA | Commonsense | Phi-3 Small 8k | 75.0 | 71.1 |
| StrategyQA | Commonsense | Qwen 2 7b | 71.9 | 58.9 |
| StrategyQA | Commonsense | Qwen 2 72b | 83.2 | 80.1 |
| StrategyQA | Commonsense | GPT-4o Mini | 83.0 | 86.2 |
| StrategyQA | Commonsense | Gemini 1.5 Flash | 77.0 | 80.3 |
| ContextHub Abductive L2 | Symbolic | Llama 2 7b | 36.2 | 35.0 |
| ContextHub Abductive L2 | Symbolic | Mistral 7b | 33.8 | 30.0 |
| ContextHub Abductive L2 | Symbolic | Llama 3.1 8b | 32.7 | 36.1 |
| ContextHub Abductive L2 | Symbolic | Llama 3.1 70b | 54.6 | 51.2 |
| ContextHub Abductive L2 | Symbolic | Gemma 2 9b | 44.8 | 33.2 |
| ContextHub Abductive L2 | Symbolic | Phi-3 Small 8k | 49.8 | 34.2 |
| ContextHub Abductive L2 | Symbolic | Qwen 2 7b | 39.6 | 35.0 |
| ContextHub Abductive L2 | Symbolic | Qwen 2 72b | 54.7 | 34.9 |
| ContextHub Abductive L2 | Symbolic | GPT-4o Mini | 62.0 | 60.0 |
| ContextHub Abductive L2 | Symbolic | Gemini 1.5 Flash | 48.6 | 47.8 |
| ContextHub Abductive L1 | Symbolic | Llama 2 7b | 21.4 | 16.7 |
| ContextHub Abductive L1 | Symbolic | Mistral 7b | 23.6 | 21.7 |
| ContextHub Abductive L1 | Symbolic | Llama 3.1 8b | 40.0 | 36.1 |
| ContextHub Abductive L1 | Symbolic | Llama 3.1 70b | 62.2 | 58.9 |
| ContextHub Abductive L1 | Symbolic | Gemma 2 9b | 48.9 | 59.4 |
| ContextHub Abductive L1 | Symbolic | Phi-3 Small 8k | 59.2 | 56.4 |
| ContextHub Abductive L1 | Symbolic | Qwen 2 7b | 48.6 | 38.9 |
| ContextHub Abductive L1 | Symbolic | Qwen 2 72b | 53.3 | 56.1 |
| ContextHub Abductive L1 | Symbolic | GPT-4o Mini | 77.2 | 59.2 |
| ContextHub Abductive L1 | Symbolic | Gemini 1.5 Flash | 79.7 | 68.6 |
| MuSR Murder Mysteries | Soft Reasoning | Mistral 7b | 62.0 | 56.4 |
| MuSR Murder Mysteries | Soft Reasoning | Llama 3.1 8b | 61.6 | 61.2 |
| MuSR Murder Mysteries | Soft Reasoning | Llama 3.1 70b | 73.2 | 68.0 |
| MuSR Murder Mysteries | Soft Reasoning | Gemma 2 9b | 81.6 | 62.0 |
| MuSR Murder Mysteries | Soft Reasoning | Phi-3 Small 8k | 62.0 | 53.6 |
| MuSR Murder Mysteries | Soft Reasoning | Qwen 2 7b | 56.0 | 55.6 |
| MuSR Murder Mysteries | Soft Reasoning | Qwen 2 72b | 80.4 | 66.0 |
| MuSR Murder Mysteries | Soft Reasoning | GPT-4o Mini | 76.0 | 69.6 |
| MuSR Murder Mysteries | Soft Reasoning | Gemini 1.5 Flash | 70.0 | 66.4 |
| MuSR Team Allocations | Soft Reasoning | Mistral 7b | 42.8 | 43.2 |
| MuSR Team Allocations | Soft Reasoning | Llama 3.1 8b | 59.6 | 51.6 |
| MuSR Team Allocations | Soft Reasoning | Llama 3.1 70b | 89.2 | 63.6 |
| MuSR Team Allocations | Soft Reasoning | Gemma 2 9b | 48.4 | 45.6 |
| MuSR Team Allocations | Soft Reasoning | Phi-3 Small 8k | 66.0 | 46.4 |
| MuSR Team Allocations | Soft Reasoning | Qwen 2 7b | 34.0 | 40.8 |
| MuSR Team Allocations | Soft Reasoning | Qwen 2 72b | 56.0 | 66.4 |
| MuSR Team Allocations | Soft Reasoning | GPT-4o Mini | 75.6 | 60.0 |
| MuSR Team Allocations | Soft Reasoning | Gemini 1.5 Flash | 90.0 | 54.4 |
| MMLU Pro | Knowledge | Llama 2 7b | 21.5 | 20.4 |
| MMLU Pro | Knowledge | Mistral 7b | 34.8 | 26.7 |
| MMLU Pro | Knowledge | Llama 3.1 8b | 44.7 | 38.0 |
| MMLU Pro | Knowledge | Llama 3.1 70b | 64.4 | 55.1 |
| MMLU Pro | Knowledge | Gemma 2 9b | 48.5 | 42.4 |
| MMLU Pro | Knowledge | Phi-3 Small 8k | 54.8 | 43.2 |
| MMLU Pro | Knowledge | Qwen 2 7b | 46.6 | 39.0 |
| MMLU Pro | Knowledge | Qwen 2 72b | 62.5 | 51.6 |
| MMLU Pro | Knowledge | GPT-4o Mini | 63.0 | 45.0 |
| MMLU Pro | Knowledge | Gemini 1.5 Flash | 59.4 | 50.6 |
| MuSR Object Placements | Soft Reasoning | Mistral 7b | 55.5 | 41.0 |
| MuSR Object Placements | Soft Reasoning | Llama 3.1 8b | 66.8 | 50.4 |
| MuSR Object Placements | Soft Reasoning | Llama 3.1 70b | 67.2 | 57.4 |
| MuSR Object Placements | Soft Reasoning | Gemma 2 9b | 68.0 | 58.2 |
| MuSR Object Placements | Soft Reasoning | Phi-3 Small 8k | 62.1 | 51.6 |
| MuSR Object Placements | Soft Reasoning | Qwen 2 7b | 46.9 | 43.8 |
| MuSR Object Placements | Soft Reasoning | Qwen 2 72b | 66.4 | 43.0 |
| MuSR Object Placements | Soft Reasoning | GPT-4o Mini | 67.0 | 47.0 |
| MuSR Object Placements | Soft Reasoning | Gemini 1.5 Flash | 73.0 | 54.7 |
| ContextHub Deductive L2 | Symbolic | Llama 2 7b | 34.7 | 15.0 |
| ContextHub Deductive L2 | Symbolic | Mistral 7b | 63.8 | 51.4 |
| ContextHub Deductive L2 | Symbolic | Llama 3.1 8b | 76.1 | 27.3 |
| ContextHub Deductive L2 | Symbolic | Llama 3.1 70b | 82.6 | 53.6 |
| ContextHub Deductive L2 | Symbolic | Gemma 2 9b | 61.9 | 47.6 |
| ContextHub Deductive L2 | Symbolic | Phi-3 Small 8k | 61.5 | 54.0 |
| ContextHub Deductive L2 | Symbolic | Qwen 2 7b | 55.3 | 36.4 |
| ContextHub Deductive L2 | Symbolic | Qwen 2 72b | 80.2 | 54.0 |
| ContextHub Deductive L2 | Symbolic | GPT-4o Mini | 59.0 | 41.0 |
| ContextHub Deductive L2 | Symbolic | Gemini 1.5 Flash | 90.2 | 42.5 |
| ContextHub Deductive L1 | Symbolic | Llama 2 7b | 34.7 | 16.0 |
| ContextHub Deductive L1 | Symbolic | Mistral 7b | 46.2 | 59.2 |
| ContextHub Deductive L1 | Symbolic | Llama 3.1 8b | 73.0 | 23.0 |
| ContextHub Deductive L1 | Symbolic | Llama 3.1 70b | 67.5 | 50.0 |
| ContextHub Deductive L1 | Symbolic | Gemma 2 9b | 66.0 | 45.7 |
| ContextHub Deductive L1 | Symbolic | Phi-3 Small 8k | 74.8 | 51.8 |
| ContextHub Deductive L1 | Symbolic | Qwen 2 7b | 58.8 | 37.5 |
| ContextHub Deductive L1 | Symbolic | Qwen 2 72b | 70.7 | 42.8 |
| ContextHub Deductive L1 | Symbolic | GPT-4o Mini | 59.2 | 44.3 |
| ContextHub Deductive L1 | Symbolic | Gemini 1.5 Flash | 89.3 | 49.8 |
| MATH | Mathematical | Llama 2 7b | 4.7 | 3.9 |
| MATH | Mathematical | Mistral 7b | 13.7 | 7.1 |
| MATH | Mathematical | Llama 3.1 8b | 41.2 | 14.2 |
| MATH | Mathematical | Llama 3.1 70b | 61.9 | 24.2 |
| MATH | Mathematical | Gemma 2 9b | 47.5 | 19.8 |
| MATH | Mathematical | Phi-3 Small 8k | 42.4 | 18.9 |
| MATH | Mathematical | Qwen 2 7b | 55.0 | 15.0 |
| MATH | Mathematical | Qwen 2 72b | 65.3 | 26.2 |
| MATH | Mathematical | GPT-4o Mini | 71.7 | 24.6 |
| MATH | Mathematical | Gemini 1.5 Flash | 54.7 | 32.3 |
| GSM8K | Mathematical | Llama 2 7b | 29.0 | 7.7 |
| GSM8K | Mathematical | Mistral 7b | 56.2 | 12.5 |
| GSM8K | Mathematical | Llama 3.1 8b | 86.4 | 20.1 |
| GSM8K | Mathematical | Llama 3.1 70b | 96.1 | 39.1 |
| GSM8K | Mathematical | Gemma 2 9b | 89.2 | 24.9 |
| GSM8K | Mathematical | Phi-3 Small 8k | 90.4 | 24.5 |
| GSM8K | Mathematical | Qwen 2 7b | 87.6 | 21.4 |
| GSM8K | Mathematical | Qwen 2 72b | 93.2 | 40.6 |
| GSM8K | Mathematical | GPT-4o Mini | 94.2 | 32.8 |
| GSM8K | Mathematical | Gemini 1.5 Flash | 90.6 | 40.4 |
# F.3 ANSWER EXTRACTOR AND AVERAGE ANSWER SPAN RESULTS

In this section, we report the number of generations from each model on each dataset for which our answer parser could not extract an answer. "-1" denotes that a model was not run on a certain dataset due to context length limitations in the few-shot setting. We see that these unparseable rates are generally low across the board. The weakest models struggle on some of the most challenging datasets, but unparseable rates are all at or below $15\%$.

We also report the average character index of the beginning of the answer span that the answer parser extracted. Of particular note, the direct answer prompts all return an answer within the first 60 characters, indicating that answers are returned almost immediately, as desired. CoT completions are much longer.

# G ZOOM-IN: MMLU AND MMLU PRO

MMLU and MMLU Pro show gains from adding CoT, but because these datasets are so broad, they defy simple characterization. We explore the performance of CoT on each category of MMLU to understand divergences in CoT performance between these domains. We list the top three categories where CoT gives the largest error reduction for Llama 3.1 8B and 70B on MMLU and MMLU Pro in Table 17. Some of these categories are explicitly mathematical in nature, as we might expect from Figure 3. We can also see that CoT helps on categories like "business"; upon closer inspection, we found that these categories frequently involve math as well (e.g., business questions may involve computations surrounding wealth). We need to characterize MMLU more carefully at the instance level.

![](images/4555fd6feb79a2952a72299156e54e6ce602be5defc12ec9511a3ee2a0640053.jpg)

![](images/23092bff0590411ea6a3a6a3b3cc309010f03fcc52eb0dd86e63bde56fae586a.jpg)

![](images/7ee7c9def64df0116c3d3d3fd0d7f821a92277a84d73722bb4a98810297129e1.jpg)

![](images/caedee3172006eff41acc4139009516b0772877a4d107fc766aa03a806257c3b.jpg)

![](images/2dd6a070b54dea85cca67c15249312175b3c234a4d8465f0ff509f7b9bc66f03.jpg)

![](images/b1ceea31a35477784ca2848751bf8f055fef4bf34de33f6883042939667571be.jpg)

![](images/52f19834dc472e800244344b94f45e6ba78b8e1ce6e5d6f1e74a5077acb42011.jpg)

![](images/1efd9008ea291cbd03a58815d2537ab80fd48a1a8a742e1048e4fc078b52607b.jpg)

![](images/b3a9ab83cd4738f25c01c3b5a4c3ac93767c31780316f7283e4097b8757c2499.jpg)

Figure 9: Performance of zero-shot direct (blue) and zero-shot CoT (orange) prompting across datasets and models (CoT vs. direct answer prompting in the zero-shot setting, sorted by CoT delta). Graphs are sorted in ascending order by the median delta between CoT and direct answers. The datasets benefiting substantially are all symbolic or semi-symbolic in nature.

Table 9: Percentage of responses per dataset per model for which our answer parser could not extract an answer in the zero-shot direct answer setting. Prompt modifications were made to decrease these numbers. No model is above $15\%$.
Zero-shot direct answer unparseable answer rate (%):

| Dataset | Llama 2 7b | Mistral 7b | Llama 3.1 8b | Llama 3.1 70b | Gemma 2 9b | Phi-3 Small 8k | Qwen 2 7b | Qwen 2 72b | GPT-4o Mini | GPT-4o | Claude-3 Haiku | Claude-3.5 Sonnet | Gemini 1.5 Flash |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| CommonsenseQA | 1.9 | 2.5 | 1.1 | 0.0 | 0.8 | 0.1 | 1.6 | 0.7 | 0.0 | 0.0 | 0.1 | 0.0 | 0.2 |
| StrategyQA | 0.0 | 1.9 | 0.1 | 0.0 | 11.7 | 0.5 | 4.9 | 2.7 | 0.0 | 0.0 | 0.0 | 0.0 | 0.2 |
| SiQA | 0.2 | 6.6 | 0.0 | 0.1 | 3.9 | 0.3 | 0.1 | 3.0 | 0.1 | 0.1 | 0.0 | 0.0 | 0.4 |
| PiQA | 0.4 | 6.0 | 0.0 | 0.1 | 3.3 | 2.1 | 0.0 | 5.5 | 0.2 | 0.0 | 0.1 | 0.0 | 0.9 |
| Winogrande | 0.0 | 3.0 | 0.1 | 0.0 | 2.1 | 0.2 | 5.1 | 0.4 | 0.0 | 0.0 | 0.0 | 0.0 | 3.6 |
| Arc Easy | 0.0 | 1.8 | 0.5 | 0.0 | 0.0 | 0.2 | 9.1 | 0.7 | 3.5 | 0.4 | 0.2 | 0.0 | 3.2 |
| Arc Challenge | 0.0 | 2.3 | 1.0 | 0.0 | 0.3 | 0.7 | 10.7 | 0.7 | 10.0 | 0.7 | 0.0 | 0.0 | 5.0 |
| AGIEval LSAT LR | 0.4 | 0.0 | 0.0 | 0.0 | 0.0 | 0.2 | 0.0 | 0.0 | 0.0 | 2.5 | 0.0 | 0.0 | 0.2 |
| AGIEval LSAT AR | 0.4 | 0.0 | 0.0 | 0.0 | 4.3 | 3.9 | 0.0 | 0.0 | 0.0 | 8.7 | 0.0 | 0.0 | 0.0 |
| AGIEval LSAT RC | 0.4 | 0.4 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 9.7 | 0.0 | 0.0 | 0.4 |
| ContextHub Deductive L1 | 0.0 | 0.0 | 0.0 | 0.0 | 1.2 | 0.0 | 2.3 | 0.0 | 0.0 | 0.0 | 0.2 | 0.0 | 0.2 |
| ContextHub Deductive L2 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 2.2 | 1.0 | 0.0 | 0.0 | 2.8 | 0.0 | 0.0 |
| ContextHub Abductive L1 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.3 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 |
| ContextHub Abductive L2 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.1 | 1.5 | 0.2 | 0.0 | 0.0 | 0.8 | 0.0 | 0.0 |
| MuSR Murder Mysteries | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 |
| MuSR Team Allocations | 0.0 | 0.0 | 0.0 | 0.0 | 3.6 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 8.4 | 0.4 |
| MuSR Object Placements | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 |
| MMLU | 0.1 | 0.0 | 0.0 | 0.0 | 0.1 | 0.2 | 3.6 | 1.2 | 0.6 | 0.0 | 1.3 | 0.3 | 0.2 |
| MMLU Pro | 0.7 | 1.3 | 1.0 | 0.3 | 1.0 | 3.7 | 6.8 | 12.2 | 0.4 | 0.3 | 0.3 | 0.4 | 0.6 |
| GPQA | 1.3 | 7.1 | 0.0 | 0.0 | 8.7 | 12.7 | 5.4 | 15.2 | 0.0 | 0.0 | 1.6 | 0.0 | 0.7 |
| MATH | 0.6 | 6.9 | 0.3 | 0.2 | 0.1 | 0.1 | 3.5 | 3.0 | 0.8 | 0.0 | 0.3 | 0.0 | 0.4 |
| GSM8k | 0.2 | 4.1 | 2.5 | 0.0 | 2.7 | 0.0 | 1.7 | 0.2 | 0.0 | 0.0 | 12.7 | 5.5 | 0.0 |
| BigGen Bench | 4.6 | 0.3 | 0.9 | 0.1 | 0.5 | 1.0 | 1.3 | 1.0 | 1.3 | 0.0 | 0.0 | 0.1 | 0.4 |
| GSM8k-Hard | 4.8 | 7.6 | 2.0 | 0.4 | 0.4 | 0.2 | 3.2 | 1.1 | 0.1 | 0.5 | 5.2 | 0.5 | 0.2 |
| MuSiQue | 0.1 | 0.0 | 0.0 | 0.0 | 0.0 | 0.1 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.2 | 0.1 |
| Folio | 4.4 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 3.9 | 0.0 | 0.0 | 12.3 | 0.0 | 0.0 | 0.5 |
| BigBench-Hard | 0.0 | 0.0 | 0.0 | 7.4 | 0.0 | 0.2 | 0.0 | 0.0 | 0.0 | 0.3 | 0.0 | 4.5 | 12.8 |
Table 10: Percentage of responses per dataset per model for which our answer parser could not extract an answer in the zero-shot CoT setting. Prompt modifications were made to decrease these numbers. No model is above $15\%$.
Zero-shot CoT unparseable answer rate (%):

| Dataset | Llama 2 7b | Mistral 7b | Llama 3.1 8b | Llama 3.1 70b | Gemma 2 9b | Phi-3 Small 8k | Qwen 2 7b | Qwen 2 72b | GPT-4o Mini | GPT-4o | Claude-3 Haiku | Claude-3.5 Sonnet | Gemini 1.5 Flash |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| CommonsenseQA | 2.9 | 1.3 | 8.6 | 0.0 | 0.6 | 0.1 | 0.0 | 0.0 | 1.6 | 0.0 | 0.2 | 0.3 | 2.4 |
| StrategyQA | 1.0 | 0.1 | 1.1 | 0.8 | 0.3 | 0.4 | 0.3 | 0.0 | 0.0 | 0.0 | 0.0 | 2.1 | 4.4 |
| SiQA | 0.8 | 1.8 | 0.3 | 0.1 | 1.6 | 0.0 | 0.1 | 0.1 | 0.0 | 0.0 | 0.3 | 0.1 | 3.5 |
| PiQA | 1.6 | 1.6 | 0.2 | 0.1 | 2.8 | 0.3 | 0.5 | 0.3 | 0.0 | 0.0 | 1.4 | 0.3 | 4.6 |
| Winogrande | 0.9 | 1.4 | 0.2 | 0.2 | 0.9 | 0.4 | 0.3 | 0.0 | 0.0 | 0.0 | 0.0 | 2.0 | 3.4 |
| Arc Easy | 0.2 | 0.4 | 0.2 | 0.0 | 0.5 | 1.6 | 1.6 | 0.0 | 0.5 | 0.0 | 0.0 | 0.0 | 0.4 |
| Arc Challenge | 0.0 | 0.7 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7 |
| AGIEval LSAT LR | 3.3 | 2.2 | 0.0 | 0.0 | 1.2 | 0.0 | 2.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.2 |
| AGIEval LSAT AR | 4.8 | 7.0 | 6.1 | 2.2 | 5.7 | 5.2 | 4.3 | 0.4 | 1.3 | 1.3 | 0.0 | 0.4 | 1.7 |
| AGIEval LSAT RC | 7.1 | 1.1 | 0.0 | 0.0 | 0.7 | 3.0 | 6.7 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4 |
| ContextHub Deductive L1 | 0.7 | 1.3 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.3 |
| ContextHub Deductive L2 | 0.2 | 0.4 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4 |
| ContextHub Abductive L1 | 0.6 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 |
| ContextHub Abductive L2 | 0.0 | 0.2 | 0.1 | 0.0 | 0.0 | 0.3 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4 |
| MuSR Murder Mysteries | 0.0 | 0.4 | 0.0 | 0.0 | 0.0 | 11.6 | 0.4 | 0.0 | 0.0 | 0.0 | 0.0 | 6.8 | 3.6 |
| MuSR Team Allocations | 5.2 | 3.2 | 0.8 | 0.0 | 0.8 | 0.4 | 0.4 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 |
| MuSR Object Placements | 0.0 | 1.6 | 0.0 | 0.0 | 0.4 | 0.8 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 2.0 | 0.4 |
| MMLU | 1.9 | 0.6 | 1.0 | 0.2 | 1.5 | 1.0 | 0.4 | 0.2 | 0.0 | 0.1 | 0.0 | 3.1 | 3.2 |
| MMLU Pro | 4.4 | 5.4 | 13.1 | 3.3 | 12.5 | 3.6 | 5.4 | 2.0 | 2.4 | 1.9 | 0.4 | 5.0 | 4.4 |
| GPQA | 4.5 | 10.3 | 9.4 | 1.6 | 8.5 | 1.8 | 3.8 | 0.7 | 0.0 | 0.0 | 0.0 | 11.8 | 15.0 |
| MATH | 1.6 | 5.5 | 8.2 | 2.5 | 2.3 | 1.6 | 3.0 | 0.4 | 0.4 | 0.5 | 0.9 | 1.7 | 1.0 |
| GSM8k | 1.7 | 1.4 | 0.7 | 10.5 | 0.4 | 0.6 | 0.4 | 0.0 | 0.0 | 0.0 | 0.3 | 0.0 | 0.1 |
| BigGen Bench | 5.0 | 0.4 | 0.5 | 0.1 | 0.5 | 0.4 | 0.3 | 9.5 | 0.0 | 0.0 | 0.0 | 0.1 | 0.4 |
| GSM8k-Hard | 2.1 | 8.7 | 10.2 | 4.5 | 10.7 | 3.2 | 3.5 | 1.0 | 0.8 | 0.5 | 3.0 | 1.8 | 2.7 |
| MuSiQue | 1.4 | 0.0 | 8.3 | 0.1 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7 | 3.1 |
| Folio | 0.0 | 0.0 | 1.5 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 2.0 | 1.5 |
| BigBench-Hard | 3.8 | 5.4 | 1.8 | 0.4 | 1.3 | 0.1 | 0.4 | 0.3 | 0.0 | 0.0 | 0.0 | 1.2 | 0.9 |
Table 11: Percentage of responses per dataset per model for which our answer parser could not extract an answer in the few-shot direct answer setting. Prompt modifications were made to decrease these numbers. No model is above $15\%$.
Few-shot direct answer unparseable answer rate (%):

| Dataset | Llama 2 7b | Mistral 7b | Llama 3.1 8b | Llama 3.1 70b | Gemma 2 9b | Phi-3 Small 8k | Qwen 2 7b | Qwen 2 72b | GPT-4o Mini | Gemini 1.5 Flash |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| CommonsenseQA | 0.0 | 0.1 | 0.2 | 0.0 | 1.3 | 0.9 | 9.9 | 1.3 | 0.0 | 0.6 |
| AGIEval LSAT LR | 6.7 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.2 |
| AGIEval LSAT AR | 2.6 | 0.0 | 0.0 | 0.0 | 3.5 | 5.2 | 0.0 | 0.0 | 0.0 | 0.0 |
| AGIEval LSAT RC | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 |
| ContextHub Deductive L1 | 0.0 | 2.8 | 0.0 | 0.0 | 0.0 | 10.7 | 0.3 | 0.0 | 0.0 | 0.0 |
| ContextHub Deductive L2 | 0.0 | 0.1 | 0.0 | 0.0 | 0.0 | 0.3 | 0.2 | 0.0 | 0.0 | 0.0 |
| ContextHub Abductive L1 | 0.0 | 2.8 | 0.0 | 0.0 | 0.0 | 0.6 | 0.0 | 0.0 | 0.0 | 0.0 |
| ContextHub Abductive L2 | 0.0 | 2.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 |
| MuSR Murder Mysteries | -1.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4 |
| MuSR Team Allocations | -1.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 |
| MuSR Object Placements | -1.0 | 0.0 | 0.0 | 0.0 | 0.4 | 1.2 | 0.0 | 0.0 | 0.0 | 0.0 |
| MMLU | 4.2 | 0.2 | 0.0 | 0.0 | 0.1 | 0.0 | 0.4 | 0.1 | 0.0 | 0.2 |
| MMLU Pro | 5.1 | 1.2 | 2.4 | 0.3 | 1.0 | 9.1 | 0.5 | 2.6 | 0.4 | 0.5 |
| GPQA | -1.0 | 1.3 | 0.0 | 0.0 | 3.6 | 7.4 | 13.4 | 1.1 | 0.0 | 0.0 |
| MATH | 0.3 | 5.9 | 0.3 | 0.2 | 0.1 | 0.1 | 1.6 | 2.2 | 0.0 | 0.3 |
| GSM8k | 0.1 | 0.1 | 0.5 | 0.0 | 0.1 | 2.2 | 0.0 | 0.2 | 0.0 | 0.0 |
Table 12: Percentage of responses per dataset per model for which our answer parser could not extract an answer in the few-shot CoT setting. Prompt modifications were made to decrease these numbers. No model is above $15\%$.
Few-shot CoT unparseable answer rate (%):

| Dataset | Llama 2 7b | Mistral 7b | Llama 3.1 8b | Llama 3.1 70b | Gemma 2 9b | Phi-3 Small 8k | Qwen 2 7b | Qwen 2 72b | GPT-4o Mini | Gemini 1.5 Flash |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| CommonsenseQA | 0.7 | 0.9 | 1.8 | 0.1 | 0.2 | 0.1 | 0.0 | 0.0 | 0.0 | 3.4 |
| AGIEval LSAT LR | 0.6 | 0.8 | 0.4 | 0.0 | 1.4 | 3.1 | 0.8 | 0.0 | 0.0 | 0.6 |
| AGIEval LSAT AR | 2.2 | 9.1 | 3.9 | 0.9 | 11.7 | 3.0 | 3.5 | 1.7 | 0.0 | 1.3 |
| AGIEval LSAT RC | 7.8 | 5.9 | 0.0 | 0.0 | 1.9 | 9.3 | 2.6 | 0.0 | 0.0 | 2.2 |
| ContextHub Deductive L1 | 0.2 | 0.0 | 0.2 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.3 |
| ContextHub Deductive L2 | 0.9 | 0.0 | 0.2 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.3 |
| ContextHub Abductive L1 | 0.8 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 |
| ContextHub Abductive L2 | 3.1 | 0.0 | 5.3 | 0.1 | 0.0 | 0.2 | 0.0 | 0.0 | 0.0 | 0.7 |
| MuSR Murder Mysteries | -1.0 | 1.2 | 0.0 | 0.0 | 0.4 | 0.8 | 0.0 | 0.0 | 0.0 | 14.0 |
| MuSR Team Allocations | -1.0 | 2.4 | 0.0 | 0.0 | 0.0 | 0.0 | 0.8 | 0.0 | 0.0 | 0.4 |
| MuSR Object Placements | -1.0 | 0.4 | 0.0 | 0.0 | 1.2 | 0.4 | 0.0 | 0.0 | 0.0 | 0.0 |
| MMLU | 0.6 | 0.8 | 1.1 | 0.2 | 1.5 | 0.7 | 0.3 | 0.2 | 0.2 | 2.5 |
| MMLU Pro | 0.6 | 1.9 | 8.5 | 2.1 | 14.1 | 1.8 | 1.9 | 0.8 | 1.1 | 3.9 |
| GPQA | -1.0 | 12.1 | 10.3 | 0.9 | 12.9 | 6.0 | 5.6 | 3.3 | 0.0 | 13.6 |
| MATH | 1.5 | 6.8 | 8.2 | 2.4 | 11.1 | 2.6 | 2.9 | 1.1 | 0.5 | 1.8 |
| GSM8k | 0.8 | 1.3 | 1.0 | 0.1 | 0.5 | 0.5 | 0.1 | 0.0 | 0.1 | 0.1 |
+ +Table 13: Average character index of where the answer span begins in a generated response for each dataset and model pair for the zero-shot direct answer setting. We use these numbers as a proxy for the model following instructions (i.e. generating reasoning before an answer). Prompt modifications were made to ensure CoT prompts resulted in longer generations and direct answer prompts led to short generations. + +
Zero-shot Direct Answer Span Location By Character Index
dataset | Llama 2 7b | Mistral 7b | Llama 3.1 8b | Llama 3.1 70b | Gemma 2 9b | Phi-3 Small 8k | Qwen 2 7b | Qwen 2 72b | GPT-4o Mini | GPT-4o | Claude-3 Haiku | Claude-3.5 Sonnet | Gemini 1.5 Flash
CommonsenseQA98278881088107788
StrategyQA444527444444464488424187
SiQA888888298886688
PiQA788888258884588
Winogrande89888898895488
Arc Easy98888898887788
Arc Challenge88888898887788
AGIEval LSAT LR2524242424242524432125252626
AGIEval LSAT AR2524242424242624482325252627
AGIEval LSAT RC2524242424242524311825252625
ContextHub Deductive L11919191920191919191920201920
ContextHub Deductive L21919191919191919191920201919
ContextHub Abductive L11919191920191919191920201919
ContextHub Abductive L21919191920191919191920201919
MuSR Minder Mysteries882788888886488
MuSR Team Allocations272219192723262288302088
MuSR Object Placements882788888887688
MMLU1918191920181818191919191920
MMLU Pro2019381921191920191920201919
GPQA1919191921191919191920201920
MATH3031282830303333282831292828
GSM8k2229302828372428282829282828
GSM8k-Hard95711219134020788888
Folio3988888311381656870
BigBench-Hard3922252126322926281928281016
+ +Table 14: Average character index of where the answer span begins in a generated response for each dataset and model pair for the zero-shot CoT setting. We use these numbers as a proxy for the model following instructions (i.e. generating reasoning before an answer). Prompt modifications were made to ensure CoT prompts resulted in longer generations and direct answer prompts led to short generations. + +
Zero-shot CoT Answer Span Location By Character Index
dataset | Llama 2 7b | Mistral 7b | Llama 3.1 8b | Llama 3.1 70b | Gemma 2 9b | Phi-3 Small 8k | Qwen 2 7b | Qwen 2 72b | GPT-4o Mini | GPT-4o | Claude-3 Haiku | Claude-3.5 Sonnet | Gemini 1.5 Flash
CommonsenseQA441564845123723646657734189910866261103214165
StrategyQA726434996113126746036335869210337541158256195
SiQA56942384196523552847242084710946021016196169
PiQA6994558699142075324473646839355781092200150
Winogrande377324645694187326391298634750408889200173
Arc Easy6845811154131936761053435599012397891222340231
Arc Challenge76364411781316422596571387102012698281240372267
AGIEval LSAT LR205313241163167552468915607689499981561728906886
AGIEval LSAT AR1377179114222182712102718191264123011511202849817871
AGIEval LSAT RC1977103211031575739590117066097310791628786703709
ContextHub Deductive L1694368759711383327539402540580542556320254
ContextHub Deductive L28424721095990614442789585840758777655515503
ContextHub Abductive L1577461747879464440754638788879683594368325
ContextHub Abductive L28616001270122968657197685611151113894894601551
MuSR Murder Mysteries4951592195818471210124612411718196119651671175913491213
MuSR Team Allocations12121845229423101513143320212213256226981479185615961607
MuSR Object Placements917625135412666956419048191593153612101455616429
MMLU834512663622503277497407400461447409630413
MMLU Pro1371513788716640518954699926940590653660774
GPQA10347789179018065001018628541666486472981735
MATH7421118122211797486701189114511251153677675679698
GSM8k57263783471945352170964510481035708680541437
GSM8k-Hard916939102710695557661083105313501266594815605512
Folio72476514791379733668919488128515839071194934492
BigBench-Hard596230876861429349315443877973545863455346
+ +Table 15: Average character index of where the answer span begins in a generated response for each dataset and model pair for the few-shot direct answer setting. We use these numbers as a proxy for the model following instructions (i.e. generating reasoning before an answer). Prompt modifications were made to ensure CoT prompts resulted in longer generations and direct answer prompts led to short generations. + +
Few-shot Direct Answer Span Location By Character Index
dataset | Llama 2 7b | Mistral 7b | Llama 3.1 8b | Llama 3.1 70b | Gemma 2 9b | Phi-3 Small 8k | Qwen 2 7b | Qwen 2 72b | GPT-4o Mini | Gemini 1.5 Flash
CommonsenseQA8782788810888
AGIEval LSAT LR25242424242424243124
AGIEval LSAT AR25242424242424242724
AGIEval LSAT RC25242424242424242524
ContextHub Deductive L119191919191919191919
ContextHub Deductive L219191919191919191919
ContextHub Abductive L119191919191919191919
ContextHub Abductive L219191919191919191919
MuSR Murder Mysteries-18278888888
MuSR Team Allocations-12119192721272388
MuSR Object Placements-18278888888
MMLU19181919191818181919
MMLU Pro19193819202019191919
GPQA-1191919191919191919
MATH29362929283030412828
GSM8k22232322222322242728
+ +Table 16: Average character index of where the answer span begins in a generated response for each dataset and model pair for the few-shot CoT setting. We use these numbers as a proxy for the model following instructions (i.e. generating reasoning before an answer). Prompt modifications were made to ensure CoT prompts resulted in longer generations and direct answer prompts led to short generations. + +
Few-shot CoT Answer Span Location By Character Index
dataset | Llama 2 7b | Mistral 7b | Llama 3.1 8b | Llama 3.1 70b | Gemma 2 9b | Phi-3 Small 8k | Qwen 2 7b | Qwen 2 72b | GPT-4o Mini | Gemini 1.5 Flash
CommonsenseQA301195470921145192280174219158
AGIEval LSAT LR1037510464539437359530599894523
AGIEval LSAT AR1024124788676857310257508351033670
AGIEval LSAT RC7993781312061641112412051086266
ContextHub Deductive L1383386406376359376388364416366
ContextHub Deductive L2736767829822823855612807884809
ContextHub Abductive L1301386428450431413541447575379
ContextHub Abductive L2709586967754804784829821905815
MuSR Murder Mysteries-1128016931702122513381246171919741419
MuSR Team Allocations-1219520872160162817552181215626321841
MuSR Object Placements-1907110412137069196769631351853
MMLU282266333245265260267243392218
MMLU Pro429397424411516425541325681396
GPQA-1848782774615711662703670594
MATH63070558464074752910748481261553
GSM8k374332352352398372415341651314
+ +Table 17: The top 3 slices benefiting the most from CoT across MMLU and MMLU Pro for Llama 3.1 8b and 70b. 6 out of 12 of these top slices directly contain "math" or "mathematics." We dive deeper into each category subsequently and observe that the questions leading to improvements in the other categories are mathematical in nature as well. + +
| Model | MMLU Subject | Direct (%) | CoT (%) | Err. Red. (%) | N | MMLU Pro Subject | Direct (%) | CoT (%) | Err. Red. (%) | N |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| Llama 3.1 8b | elementary mathematics | 46.8 | 88.4 | 78.1 | 378 | math | 23.6 | 44.8 | 27.8 | 1350 |
| Llama 3.1 8b | high_school mathematics | 39.6 | 71.5 | 52.8 | 270 | business | 29.4 | 45.6 | 23.0 | 789 |
| Llama 3.1 8b | miscellaneous | 83.9 | 89.9 | 37.3 | 783 | physics | 27.9 | 41.4 | 18.8 | 1299 |
| Llama 3.1 70b | elementary mathematics | 82.3 | 94.7 | 70.1 | 378 | math | 44.5 | 68.3 | 42.9 | 1351 |
| Llama 3.1 70b | medical_genetics | 93.0 | 97.0 | 57.1 | 100 | business | 44.0 | 67.8 | 42.5 | 789 |
| Llama 3.1 70b | high_school mathematics | 61.5 | 82.2 | 53.8 | 270 | chemistry | 40.5 | 64.0 | 39.6 | 1132 |
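The error-reduction column in Table 17 is consistent with relative error reduction, i.e., the fraction of direct-answer errors eliminated by CoT. For example, for Llama 3.1 8B on elementary mathematics:

$$\text{Err. Red.} = \frac{\text{CoT} - \text{Direct}}{100 - \text{Direct}} \times 100 = \frac{88.4 - 46.8}{100 - 46.8} \times 100 \approx 78.1\%.$$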
In doing so, we can test our hypotheses with much finer granularity than is possible by relying on subjective groupings into tasks and categories.

Breakdown by the presence of equations. We aim to design an instance-level classifier that determines whether CoT is expected to help on a question or not. That is, we want a function $g: \mathbf{q} \to \{0,1\}$ where $g(\mathbf{q})$ returns 1 if $\text{extract}(\tilde{\mathbf{y}}_{\text{cot}}) = \mathbf{y}^*$ and $\text{extract}(\tilde{\mathbf{y}}_{\text{da}}) \neq \mathbf{y}^*$, where $\mathbf{y}^*$ is the gold answer to $\mathbf{q}$. We explored different forms of $g$; however, we ultimately found it most effective to use a classifier $g: (\mathbf{q}, \tilde{\mathbf{y}}_{\text{cot}}) \to \{0,1\}$ which also consults the chain of thought produced by the model. This allows us to featurize how the LM solves the problem, particularly whether it uses symbolic reasoning or not.

We find that $g$ can be implemented with a single feature: does $\mathbf{q}$ or $\tilde{\mathbf{y}}_{\mathrm{cot}}$ contain an "="? The "=" token very strongly indicates the presence of equations in the problem or its solution, which turn out to be a strong hallmark of symbolic reasoning.

We plot the overall CoT delta (the performance of CoT minus the performance of direct answer) for both MMLU and MMLU Pro across multiple models, split into two bins according to this classifier $g$, labeled as "With =" and "Without =", in Figure 4. We also report the amount of performance gain explained by questions having an "=" vs. not in Appendix G.1. We find that the majority of the performance gain from CoT on MMLU and MMLU Pro comes from questions that have an "=" in the question or generated response. Because "=" is usually found in math problems, we conclude that CoT primarily benefits MMLU and MMLU Pro on the math-related questions, with very little to no gain (depending on the model) on non-math questions.

# G.1 PERFORMANCE IMPACTS OF "=" ON MMLU AND MMLU PRO

Tables 18 and 19 show the amount of total improvement from using CoT over direct prompting that can be explained by the presence of "=" on MMLU and MMLU Pro over multiple models. A minimal sketch of this single-feature classifier follows Table 19.

Table 18: Total CoT deltas on MMLU, broken down into the total gain from questions and responses with an "=" vs. without an "=".
| Model | Total CoT Delta | CoT delta w/ = | CoT delta w/o = | Perf. Gain w/ = | Fraction of N w/ = |
| --- | --- | --- | --- | --- | --- |
| Llama 2 7b | 6.0 | 0.6 | 5.4 | 9.8% | 10.9% |
| Mistral 7b | 4.1 | 1.2 | 2.9 | 28.6% | 9.8% |
| Llama 3.1 8b | 5.5 | 2.9 | 2.6 | 52.9% | 9.6% |
| Llama 3.1 70b | 1.9 | 1.8 | 0.1 | 94.0% | 10.6% |
| Gemma 2 9b | 2.6 | 2.0 | 0.6 | 78.5% | 10.0% |
| Phi-3 Small 8k | 3.1 | 1.5 | 1.7 | 47.4% | 8.3% |
| Qwen 2 7b | 2.5 | 3.0 | -0.5 | 100.0% | 9.8% |
| Qwen 2 72b | 3.5 | 2.4 | 1.1 | 67.8% | 9.6% |
| GPT-4o Mini | 5.2 | 3.5 | 1.7 | 66.9% | 10.5% |
| GPT-4o | 4.2 | 2.4 | 1.8 | 57.6% | 10.3% |
| Claude-3 Haiku | 3.7 | 2.4 | 1.3 | 64.4% | 9.3% |
| Claude-3.5 Sonnet | 3.2 | 2.3 | 0.9 | 72.1% | 10.7% |
| Gemini 1.5 Flash | 3.0 | 1.7 | 1.2 | 59.0% | 10.1% |
| Gemini 1.5 Pro | 1.9 | 1.0 | 0.9 | 51.9% | 9.6% |
Table 19: Total CoT deltas on MMLU Pro, broken down into the total gain from questions and responses with an "=" vs. without an "=".
| Model | Total CoT Delta | CoT delta w/ = | CoT delta w/o = | Perf. Gain w/ = | Fraction of N w/ = |
| --- | --- | --- | --- | --- | --- |
| Llama 2 7b | 1.6 | 1.3 | 0.3 | 79.6% | 43.6% |
| Mistral 7b | 3.8 | 1.9 | 1.9 | 50.7% | 41.8% |
| Llama 3.1 8b | 12.4 | 10.0 | 2.4 | 80.8% | 35.2% |
| Llama 3.1 70b | 11.4 | 11.1 | 0.3 | 97.6% | 39.6% |
| Gemma 2 9b | 7.6 | 7.4 | 0.2 | 97.9% | 40.2% |
| Phi-3 Small 8k | 11.6 | 9.9 | 1.7 | 85.7% | 42.7% |
| Qwen 2 7b | 10.0 | 8.9 | 1.1 | 88.6% | 41.6% |
| Qwen 2 72b | 19.0 | 16.1 | 2.9 | 84.7% | 41.4% |
| GPT-4o Mini | 20.6 | 18.4 | 2.3 | 89.0% | 44.0% |
| GPT-4o | 17.7 | 17.1 | 0.6 | 96.7% | 44.1% |
| Claude-3 Haiku | 8.7 | 7.8 | 0.9 | 90.1% | 42.0% |
| Claude-3.5 Sonnet | 16.2 | 14.8 | 1.3 | 91.9% | 43.4% |
| Gemini 1.5 Flash | 12.9 | 11.8 | 1.1 | 91.3% | 42.3% |
| Gemini 1.5 Pro | 10.0 | 8.6 | 1.4 | 85.7% | 41.8% |
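Concretely, the single-feature classifier $g$ described above reduces to a one-line membership check; the following is a minimal sketch (the function name is ours, not from our released code):

```python
def predicts_cot_gain(question: str, cot_response: str) -> bool:
    """Single-feature classifier g from Appendix G (sketch): predict that
    CoT helps whenever an '=' appears in the question or in the model's
    chain of thought, a strong proxy for equations and symbolic reasoning."""
    return "=" in question or "=" in cot_response
```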
# H FULL RESULTS OF EVALUATIONS ON FORMAL REASONING DATASETS

As discussed in Section 5, we include detailed evaluation results of few-shot direct answer, few-shot CoT, direct answer solver, CoT solver, and tool-augmented prompting in Table 20. The unparseable rate is the rate of model responses that either fail to pass our answer extraction parser (for all methods except tool-augmented prompting) or fail to be executed by symbolic solvers. For FOLIO and ContextHub, we compute the accuracy by making a random guess for unparseable responses; for GSM8K and GSM8K-Hard, we count unparseable responses as incorrect.

We note that all models have a low unparseable rate ($<10\%$) for all methods except tool-augmented prompting. By manually inspecting the outputs, we observe that the high unparseable rate for some models with tool-augmented prompting is caused by these models generating Python programs or formal specifications that fail to follow the format of the formal language (Python or z3), which leads to execution errors. This issue is particularly severe for the smaller models. However, we note that despite the high unparseable rate, the overall accuracy of these models with tool augmentation is still on par with, or outperforms, the other methods.

Table 20: Performance and unparseable rates for few-shot direct answer, few-shot CoT, Plan + Direct Solver, Plan + CoT Solver, and Plan + Tool Solver. "Acc." stands for accuracy and "% Unp." stands for the rate of unparseable model responses that either fail to pass our answer extraction parser (for all methods except Plan + Tool Solver prompting) or fail to be executed by symbolic solvers. For FOLIO and ContextHub, we compute the accuracy by making a random guess for the unparseable responses; for GSM8K and GSM8K-Hard, we consider the unparseable responses incorrect.
| Dataset | Method | Mistral 7b Acc. | Mistral 7b % Unp. | Llama 3.1 8b Acc. | Llama 3.1 8b % Unp. | Llama 3.1 70b Acc. | Llama 3.1 70b % Unp. | GPT-4o Mini Acc. | GPT-4o Mini % Unp. |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| GSM8K | Direct Answer | 12.5 | 0.1 | 20.1 | 0.5 | 39.1 | 0.0 | 32.8 | 0.0 |
| GSM8K | CoT | 56.2 | 1.4 | 86.4 | 1.0 | 96.1 | 0.1 | 94.2 | 0.1 |
| GSM8K | Plan + CoT Solver | 45.0 | 1.0 | 78.7 | 0.4 | 94.7 | 0.0 | 92.0 | 0.1 |
| GSM8K | Plan + Direct Solver | 10.6 | 0.1 | 19.6 | 0.1 | 42.2 | 0.0 | 39.3 | 0.0 |
| GSM8K | Plan + Tool Solver | 59.8 | 8.6 | 80.3 | 1.3 | 94.4 | 0.4 | 90.5 | 1.5 |
| GSM8K-Hard | Direct Answer | 2.9 | 0.7 | 4.4 | 0.6 | 12.8 | 0.7 | 12.3 | 7.6 |
| GSM8K-Hard | CoT | 20.3 | 5.0 | 32.4 | 9.6 | 47.8 | 4.4 | 52.2 | 0.5 |
| GSM8K-Hard | Plan + CoT Solver | 18.7 | 2.6 | 32.4 | 1.3 | 49.7 | 0.6 | 51.5 | 0.3 |
| GSM8K-Hard | Plan + Direct Solver | 3.0 | 0.5 | 5.5 | 0.8 | 15.8 | 0.1 | 17.4 | 0.3 |
| GSM8K-Hard | Plan + Tool Solver | 44.2 | 8.9 | 57.9 | 1.2 | 68.0 | 0.5 | 70.4 | 1.4 |
| ContextHub Deductive L1 | Direct Answer | 59.2 | 2.8 | 23.0 | 0.0 | 50.0 | 0.0 | 44.3 | 0.0 |
| ContextHub Deductive L1 | CoT | 46.2 | 0.2 | 73.0 | 0.2 | 67.5 | 0.0 | 59.2 | 0.0 |
| ContextHub Deductive L1 | Plan + CoT Solver | 49.5 | 0.0 | 64.8 | 0.0 | 65.5 | 0.0 | 63.2 | 0.0 |
| ContextHub Deductive L1 | Plan + Direct Solver | 45.8 | 3.0 | 55.8 | 0.0 | 53.5 | 0.0 | 56.2 | 0.0 |
| ContextHub Deductive L1 | Plan + Tool Solver | 68.8 | 27.8 | 84.2 | 11.8 | 91.7 | 9.8 | 90.7 | 7.8 |
| ContextHub Abductive L1 | Direct Answer | 21.7 | 2.8 | 36.1 | 0.0 | 58.9 | 0.0 | 59.2 | 0.0 |
| ContextHub Abductive L1 | CoT | 23.9 | 0.0 | 40.0 | 0.0 | 62.2 | 0.0 | 76.9 | 0.0 |
| ContextHub Abductive L1 | Plan + CoT Solver | 38.3 | 0.0 | 42.5 | 0.0 | 65.6 | 0.0 | 74.2 | 0.0 |
| ContextHub Abductive L1 | Plan + Direct Solver | 46.9 | 3.9 | 33.3 | 0.3 | 63.1 | 0.0 | 61.7 | 0.0 |
| ContextHub Abductive L1 | Plan + Tool Solver | 59.2 | 35.8 | 70.8 | 9.7 | 73.9 | 4.2 | 74.7 | 10.3 |
| FOLIO | Direct Answer | 56.2 | 12.3 | 59.6 | 0.0 | 69.5 | 0.0 | 64.0 | 0.0 |
| FOLIO | CoT | 53.7 | 1.5 | 56.7 | 2.5 | 72.4 | 2.0 | 70.4 | 0.0 |
| FOLIO | Plan + CoT Solver | 53.7 | 0.0 | 55.7 | 0.0 | 73.9 | 0.5 | 70.4 | 0.0 |
| FOLIO | Plan + Direct Solver | 52.7 | 0.0 | 54.2 | 0.0 | 72.9 | 0.0 | 63.5 | 0.0 |
| FOLIO | Plan + Tool Solver | 48.8 | 46.8 | 54.2 | 28.6 | 70.0 | 16.7 | 62.6 | 25.1 |
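The caption of Table 20 describes two conventions for scoring unparseable responses. Below is a minimal sketch of both; modeling the random guess by its expected value, 1/|label set|, is our own simplification (it makes the score deterministic), and the function name and signature are hypothetical.

```python
def score(responses, gold, labels=None, guess_on_unparseable=False):
    """responses: list of parsed answers, with None for unparseable.
    For FOLIO/ContextHub (guess_on_unparseable=True), an unparseable
    response is scored as a uniform random guess over the label set,
    modeled here by its expected value 1/len(labels); for GSM8K and
    GSM8K-Hard it simply counts as incorrect."""
    total = 0.0
    for r, g in zip(responses, gold):
        if r is None:
            total += 1 / len(labels) if guess_on_unparseable else 0.0
        else:
            total += float(r == g)
    return total / len(responses)

# Toy usage: 1 unparseable response out of 4 on a 3-label task.
acc = score(["True", None, "False", "True"],
            ["True", "False", "False", "Uncertain"],
            labels=["True", "False", "Uncertain"],
            guess_on_unparseable=True)
print(round(acc, 3))  # 0.583
```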
formal specifications that do not follow the format of the formal language (Python or z3) and therefore lead to execution errors. This issue is particularly severe for the smaller models. However, we note that despite the high unparseable rate, the overall accuracy of these models with tool augmentation is still on par with, or better than, other methods.

# I DISCUSSION OF LIMITATIONS

# I.1 LONG HORIZON PLANNING

One set of tasks where symbolic reasoning helps substantially, but which our experiments have not covered as thoroughly (with the exception of BiGGen-Bench), is long-horizon planning (Valmeekam et al., 2023; Xie et al., 2024; Gundawar et al., 2024; Valmeekam et al., 2024). There are two reasons we don't treat it here. First, we are primarily interested in tasks that are conveyed in language, and we see less complex planning in language-only tasks. Second, there has already been a large debate on the effectiveness of CoT and its derivatives like tree-of-thought (Yao et al., 2023; Kang et al., 2024) for planning, with arguments both for (Huang et al., 2022; Hu et al., 2023) and against (Valmeekam et al., 2023; Kambhampati, 2024; Kambhampati et al., 2024b; Stechly et al., 2024a; Guan et al., 2024; Verma et al., 2024; Gundawar et al., 2024; Stechly et al., 2024b), which has resulted in complex systems designed to solve planning problems better. While story generation and interpretation involve elements of planning with natural language (Peng et al., 2022; Karpinska et al., 2024), such tasks are not conventionally formalized and benchmarked as planning and reasoning.

# I.2 DATASET CONTAMINATION

One limitation of our study is possible data contamination: it is unknown which benchmarks language models may have been explicitly pre-trained on. If a model had memorized answers to benchmark questions, we would expect direct answering to close some of the gap with CoT, as the model can just reproduce a known answer rather than deriving it from scratch. We argue there are four reasons that our general conclusions are still trustworthy. First, we use a range of language model scales, including small models that have less capacity to memorize. Second, datasets with poor direct answering performance like GSM8K-Hard are unlikely to have been substantially memorized. Third, the inclusion of recent datasets such as MuSR (Sprague et al., 2024) and BiGGen Bench (Kim et al., 2024) helps to mitigate this risk. Fourth, our survey of the literature includes papers that were submitted to conferences in 2023, representing a range of older LLMs trained at various times.

# J EXAMPLE PROMPTS

We will release all prompts and model responses on our Huggingface repo. We list a few prompt-response pairs here in this section.

# AGIEval LSAT AR zero-shot CoT prompt for Llama 3.1 70B

<|start_header_id|>user<|end_header_id|>

Explain your reasoning step-by-step for each question before answering. Give your final answer in the format "The answer is therefore <answer>". Failure to comply with the answer formatting will result in no credit.

Of the eight students—George, Helen, Irving, Kyle, Lenore, Nina, Olivia, and Robert—in a seminar, exactly six will give individual oral reports during three consecutive days—Monday, Tuesday, and Wednesday. Exactly two reports will be given each day—one in the morning and one in the afternoon—according to the following conditions: Tuesday is the only day on which George can give a report. Neither Olivia nor Robert can give an afternoon report.
If Nina gives a report, then on the next day Helen and Irving must both give reports, unless Nina's report is given on Wednesday.

Q: Which one of the following could be the schedule of the students' reports? Answer Choices: (A) Mon. morning: Helen; Mon. afternoon: Robert Tues. morning: Olivia; Tues. afternoon: Irving Wed. morning: Lenore; Wed. afternoon: Kyle (B) Mon. morning: Irving; Mon. afternoon: Olivia Tues. morning: Helen; Tues. afternoon: Kyle Wed. morning: Nina; Wed. afternoon: Lenore (C) Mon. morning: Lenore; Mon. afternoon: Helen Tues. morning: George; Tues. afternoon: Kyle Wed. morning: Robert; Wed. afternoon: Irving (D) Mon. morning: Nina; Mon. afternoon: Helen Tues. morning: Robert; Tues. afternoon: Irving Wed. morning: Olivia; Wed. afternoon: Lenore (E) Mon. morning: Olivia; Mon. afternoon: Nina Tues. morning: Irving; Tues. afternoon: Helen Wed. morning: Kyle; Wed. afternoon: George Let's think step by step.<|eot_id|><|start_header_id|>assistant<|end_header_id|>

# AGIEval LSAT AR zero-shot CoT response for Llama 3.1 70B

To determine which one of the following could be the schedule of the students' reports, let's analyze each answer choice in light of the given conditions. First, let's recall the conditions: 1. Exactly six students will give reports over three days. 2. Tuesday is the only day George can give a report. 3. Neither Olivia nor Robert can give an afternoon report. 4. If Nina gives a report, then on the next day Helen and Irving must both give reports, unless Nina's report is given on Wednesday. Let's evaluate each answer choice: (A) Mon. morning: Helen; Mon. afternoon: Robert Tues. morning: Olivia; Tues. afternoon: Irving Wed. morning: Lenore; Wed. afternoon: Kyle - This choice does not violate any conditions directly. However, we need to check if it aligns with all the rules, especially considering the Nina rule, which isn't directly applicable here since Nina isn't scheduled. George's requirement is also not met since George isn't scheduled on Tuesday. (B) Mon. morning: Irving; Mon. afternoon: Olivia Tues. morning: Helen; Tues. afternoon: Kyle Wed. morning: Nina; Wed. afternoon: Lenore - This choice violates the condition that if Nina gives a report, then on the next day Helen and Irving must both give reports, unless Nina's report is given on Wednesday. Since Nina's report is on Wednesday, this condition is technically not violated, but we need to ensure all other conditions are met. George is not scheduled on Tuesday, which is a requirement. (C) Mon. morning: Lenore; Mon. afternoon: Helen Tues. morning: George; Tues. afternoon: Kyle Wed. morning: Robert; Wed. afternoon: Irving - This choice meets the condition that George gives a report on Tuesday. It also does not violate any other conditions directly. However, we need to ensure that the Nina rule isn't applicable or is met, which in this case, it's not applicable since Nina isn't scheduled. (D) Mon. morning: Nina; Mon. afternoon: Helen Tues. morning: Robert; Tues. afternoon: Irving Wed. morning: Olivia; Wed. afternoon: Lenore - This choice violates the condition that if Nina gives a report, then on the next day Helen and Irving must both give reports. Here, Helen is already scheduled on the same day as Nina, and while Irving is scheduled the next day, the condition specifies both Helen and Irving must give reports the next day, which is not the case since Helen's report is on the same day as Nina's. (E) Mon. morning: Olivia; Mon. afternoon: Nina Tues. morning: Irving; Tues. afternoon: Helen Wed.
morning: Kyle; Wed. afternoon: George - This choice violates the condition that Tuesday is the only day George can give a report, as George is scheduled on Wednesday. Given the analysis, the only choice that does not directly violate any of the given conditions and meets the specific requirements (especially considering the placement of George and the handling of the Nina rule where applicable) is: The answer is therefore (C).

# MATH zero-shot CoT prompt for Llama 3.1 70B
```txt
<|begin_of_text|><|start_header_id|>user<|end_header_id|>
Solve the following math problem efficiently and clearly:
- For simple problems (2 steps or fewer):
Provide a concise solution with minimal explanation.
- For complex problems (3 steps or more):
Use this step-by-step format:
## Step 1: [Concise description]
[Brief explanation and calculations]
## Step 2: [Concise description]
[Brief explanation and calculations]
...
Regardless of the approach, always conclude with:
Therefore, the final answer is: $\boxed{answer}$. I hope it is correct.
Where [answer] is just the final number or expression that solves the problem.
Problem: Chris labels every lattice point in the coordinate plane with the square of the distance from the point to the origin (a lattice point is a point such that both of its coordinates are integers). How many times does he label a point with the number $25$?<|eot_id|><|start_header_id|>assistant<|end_header_id|>
```

# MATH zero-shot CoT response for Llama 3.1 70B
```txt
## Step 1: Understand the problem
Chris labels each lattice point in the coordinate plane with the square of the distance from the point to the origin. We need to find how many times he labels a point with the number 25.

## Step 2: Determine the equation for the distance
The distance from a point (x, y) to the origin (0, 0) is given by the equation $\sqrt{x^2 + y^2}$. Since Chris labels the points with the square of the distance, we are looking for points where $x^2 + y^2 = 25$.

## Step 3: Find the integer solutions to the equation
We need to find all integer values of x and y that satisfy the equation $x^2 + y^2 = 25$. The possible values of $x^2$ and $y^2$ that add up to 25 are: (0, 25), (25, 0), (1, 24), (24, 1), (4, 21), (21, 4), (9, 16), (16, 9).

## Step 4: Count the number of solutions
However, we need to consider that some of these pairs have negative counterparts, and we should also count those. For example, (-0, 5) and (0, -5) are also solutions, as well as (-5, 0) and (5, 0). Similarly, (-3, 4), (3, -4), (-4, 3), and (4, -3) are also solutions. So, we have a total of 12 solutions.

The final answer is: $\boxed{12}$.
```

# CommonsenseQA zero-shot direct answer prompt for Llama 3.1 70B

<|start_header_id|>system<|end_header_id|>

You are a helpful AI assistant that will answer reasoning questions. You will always say at the end "Answer: <Your answer>". You must only pick one answer and you must end your response with "Answer: <Your answer>" every time!<|eot_id|><|start_header_id|>user<|end_header_id|>

Question: Where would you find magazines along side many other printed works?

Answer Choices:
(A) doctor
(B) bookstore
(C) market
(D) train station
(E) mortuary

Only write the answer. Write the answer in the following format: "Answer: <Your answer>". You must always give an answer.
You may only pick one answer choice, if you think multiple are correct only pick the one you think is best.<|eot_id|><|start_header_id|>assistant<|end_header_id|> + +# CommonsenseQA zero-shot direct answer response for Llama 3.1 70B + +B \ No newline at end of file diff --git a/2025/To CoT or not to CoT_ Chain-of-thought helps mainly on math and symbolic reasoning/images.zip b/2025/To CoT or not to CoT_ Chain-of-thought helps mainly on math and symbolic reasoning/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..ce1d4b622a2f12219b10d92df305757859b005cc --- /dev/null +++ b/2025/To CoT or not to CoT_ Chain-of-thought helps mainly on math and symbolic reasoning/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1d4f298dbfd9b299124d53b13a228437fcb920af5f7a23859b24e75b54199435 +size 4483096 diff --git a/2025/To CoT or not to CoT_ Chain-of-thought helps mainly on math and symbolic reasoning/layout.json b/2025/To CoT or not to CoT_ Chain-of-thought helps mainly on math and symbolic reasoning/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..cbf0e5f4b33a547d3ce3c23bf5df288b1fae5968 --- /dev/null +++ b/2025/To CoT or not to CoT_ Chain-of-thought helps mainly on math and symbolic reasoning/layout.json @@ -0,0 +1,16778 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 105, + 79, + 504, + 116 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 79, + 504, + 116 + ], + "spans": [ + { + "bbox": [ + 105, + 79, + 504, + 116 + ], + "type": "text", + "content": "TO COT OR NOT TO COT? CHAIN-OF-THOUGHT HELPS MAINLY ON MATH AND SYMBOLIC REASONING" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 110, + 134, + 441, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 134, + 441, + 171 + ], + "spans": [ + { + "bbox": [ + 110, + 134, + 441, + 171 + ], + "type": "text", + "content": "Zayne Sprague\\*, Fangcong Yin\\*, Juan Diego Rodriguez\\*, Dongwei Jiang\\*, Manya Wadhwa\\*, Prasann Singhal\\*, Xinyu Zhao\\*, Xi Ye" + }, + { + "bbox": [ + 110, + 134, + 441, + 171 + ], + "type": "inline_equation", + "content": "^{\\text{心}}" + }, + { + "bbox": [ + 110, + 134, + 441, + 171 + ], + "type": "text", + "content": ", Kyle Mahowald\\*, Greg Durrett\\*" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 110, + 180, + 463, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 180, + 463, + 205 + ], + "spans": [ + { + "bbox": [ + 110, + 180, + 463, + 205 + ], + "type": "inline_equation", + "content": "\\spadesuit" + }, + { + "bbox": [ + 110, + 180, + 463, + 205 + ], + "type": "text", + "content": "The University of Texas at Austin, " + }, + { + "bbox": [ + 110, + 180, + 463, + 205 + ], + "type": "inline_equation", + "content": "\\diamond" + }, + { + "bbox": [ + 110, + 180, + 463, + 205 + ], + "type": "text", + "content": "Johns Hopkins University, " + }, + { + "bbox": [ + 110, + 180, + 463, + 205 + ], + "type": "inline_equation", + "content": "\\diamond" + }, + { + "bbox": [ + 110, + 180, + 463, + 205 + ], + "type": "text", + "content": "Princeton University zaynesprague@utexas.edu" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 276, + 232, + 335, + 244 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 232, + 335, + 244 + ], + "spans": [ + { + "bbox": [ + 276, + 232, + 335, + 244 + ], + "type": "text", + "content": "ABSTRACT" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 140, + 258, + 470, + 434 + 
], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 258, + 470, + 434 + ], + "spans": [ + { + "bbox": [ + 140, + 258, + 470, + 434 + ], + "type": "text", + "content": "Chain-of-thought (CoT) via prompting is the de facto method for eliciting reasoning capabilities from large language models (LLMs). But for what kinds of tasks is this extra \"thinking\" really helpful? To analyze this, we conducted a quantitative meta-analysis covering over 100 papers using CoT and ran our own evaluations of 20 datasets across 14 models. Our results show that CoT gives strong performance benefits primarily on tasks involving math or logic, with much smaller gains on other types of tasks. On MMLU, directly generating the answer without CoT leads to almost identical accuracy as CoT unless the question or model's response contains an equals sign, indicating symbolic operations and reasoning. Following this finding, we analyze the behavior of CoT on these problems by separating planning and execution and comparing against tool-augmented LLMs. Much of CoT's gain comes from improving symbolic execution, but it underperforms relative to using a symbolic solver. Our results indicate that CoT can be applied selectively, maintaining performance while saving inference costs. Furthermore, they suggest a need to move beyond prompt-based CoT to new paradigms that better leverage intermediate computation across the whole range of LLM applications1." + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 108, + 441, + 504, + 631 + ], + "blocks": [ + { + "bbox": [ + 108, + 441, + 504, + 631 + ], + "lines": [ + { + "bbox": [ + 108, + 441, + 504, + 631 + ], + "spans": [ + { + "bbox": [ + 108, + 441, + 504, + 631 + ], + "type": "image", + "image_path": "38c07fa5ea4155601f6a3a8985cb6d7c7786e3a335ede3c9f4558f388673790c.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 639, + 506, + 706 + ], + "lines": [ + { + "bbox": [ + 104, + 639, + 506, + 706 + ], + "spans": [ + { + "bbox": [ + 104, + 639, + 506, + 706 + ], + "type": "text", + "content": "Figure 1: Left: meta-analysis of CoT literature; each point is a reported delta of CoT over direct answering for some (LLM, task) pair. Right: average performance of using zero-shot CoT v.s. direct answer prompts across five general reasoning categories, covering 20 datasets with 14 LLMs evaluated on each. In both sets of results, math and other kinds of symbolic reasoning are the domains that consistently see substantial improvements from CoT (red dotted line indicates the mean improvement from CoT across experiments)." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 117, + 720, + 459, + 731 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 720, + 459, + 731 + ], + "spans": [ + { + "bbox": [ + 117, + 720, + 459, + 731 + ], + "type": "text", + "content": "1Our code can be found at https://github.com/Zayne-sprague/To-CoT-or-not-to-CoT." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 81, + 206, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 81, + 206, + 94 + ], + "spans": [ + { + "bbox": [ + 106, + 81, + 206, + 94 + ], + "type": "text", + "content": "1 INTRODUCTION" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 106, + 506, + 184 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 106, + 506, + 184 + ], + "spans": [ + { + "bbox": [ + 104, + 106, + 506, + 184 + ], + "type": "text", + "content": "Chain-of-thought (CoT) (Nye et al., 2022; Wei et al., 2022) has become a widely used prompting technique for eliciting reasoning from language models. CoT can provide human-readable explanations of how problems are solved (Joshi et al., 2023; Lanham et al., 2023), but most frequently it is invoked to improve an LLM's ability to answer complex questions via intermediate computation (Madaan & Yazdanbakhsh, 2022; Wang et al., 2023a; Dziri et al., 2023). Current post-training schemes for LLMs heavily infuse CoT capabilities into models: systems like ChatGPT or Llama 3.1 default to CoT when given reasoning problems (OpenAI, 2023; Dubey et al., 2024)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 189, + 506, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 189, + 506, + 289 + ], + "spans": [ + { + "bbox": [ + 104, + 189, + 506, + 289 + ], + "type": "text", + "content": "CoT has seen widespread usage, but it is most heavily explored in the domain of mathematical reasoning (Zhou et al., 2023a; Fu et al., 2023; Chae et al., 2024; Xu et al., 2024b; Qi et al., 2024). In fact, many \"reasoning\" methods for LLMs are evaluated only in the math domain; for instance, Lightman et al. (2024) frame their paper as \"complex multi-step reasoning\" and Mixtral-Large2's release cited effort \"enhancing the model's reasoning capabilities\", but performance is only reported on GSM8K and MATH. CoT is reported to be effective across a wide range of studies, but many of these studies focus on a narrow slice of the task space. In areas beyond math, results show that CoT is not as useful (Kambhampati et al., 2024a) or can even hurt performance (Wang et al., 2024)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 293, + 506, + 427 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 293, + 506, + 427 + ], + "spans": [ + { + "bbox": [ + 104, + 293, + 506, + 427 + ], + "type": "text", + "content": "In this work, we aim to evaluate where prompt-based CoT helps and why. We begin with a systematic meta-analysis of recent literature that reports performance of CoT versus direct answering (DA). We then augment this picture by conducting experiments on 20 datasets and 14 contemporary LLMs across zero-shot and few-shot prompt settings. Finding 1: CoT only helps substantially on problems requiring mathematical, logical, or algorithmic reasoning. Figure 1 shows this holds both across the literature and our own experiments. We find only a few cases of large gain in other kinds of tasks, and many of these outliers feature some component of symbolic reasoning. 
For instance, on MMLU (Hendrycks et al., 2021a) and MMLU Pro (Wang et al., 2024), we analyze the improvements from CoT and find that CoT only gives benefit on math slices of the dataset. As much as " + }, + { + "bbox": [ + 104, + 293, + 506, + 427 + ], + "type": "inline_equation", + "content": "95\\%" + }, + { + "bbox": [ + 104, + 293, + 506, + 427 + ], + "type": "text", + "content": " of the total performance gain from CoT on MMLU is attributed to questions containing “" + }, + { + "bbox": [ + 104, + 293, + 506, + 427 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 104, + 293, + 506, + 427 + ], + "type": "text", + "content": "” in the question or generated output. For non-math questions, we find no features to indicate when CoT will help." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 431, + 506, + 532 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 431, + 506, + 532 + ], + "spans": [ + { + "bbox": [ + 104, + 431, + 506, + 532 + ], + "type": "text", + "content": "How can we better understand why CoT improves on these questions and only these questions? The math and formal logical reasoning datasets we consider can be broken down into two stages of processing: a planning step (e.g., parsing a problem into equations) and an execution step (building intermediate outputs and working towards a solution) (Ye et al., 2023; Wang et al., 2023b; Sun et al., 2024). Finding 2: CoT primarily helps with the execution step that performs computation and symbolic manipulation, but falls short of what LLMs with tool augmentation can do. We find that LMs prompted with CoT can generate executable formal solution plans and execute those plans better than direct answering. But using LMs to generate a solution plan and then using an external symbolic solver to solve the plan outperforms using CoT for both steps for these tasks." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 536, + 506, + 636 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 536, + 506, + 636 + ], + "spans": [ + { + "bbox": [ + 104, + 536, + 506, + 636 + ], + "type": "text", + "content": "These results paint a picture that CoT's utility is often circumscribed by tool augmentation: on problems where CoT helps, we already have more powerful tools than CoT that we can employ, and on \"soft reasoning\" problems like commonsense where no tools exist, we see limited benefit from CoT. This characterization has two major implications. First, CoT is unnecessary for many problems where it is widely employed: there exist more efficient prompting strategies that yield similar performance for much lower inference cost. Second, we see a critical need to move beyond prompt-based CoT to more sophisticated approaches based on search, interacting agents, or models more heavily fine-tuned for CoT. Future work can explore how intermediate computation can be better used to solve challenging problems outside of the math and symbolic reasoning domains." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 652, + 317, + 665 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 652, + 317, + 665 + ], + "spans": [ + { + "bbox": [ + 105, + 652, + 317, + 665 + ], + "type": "text", + "content": "2 BACKGROUND: CHAIN-OF-THOUGHT" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 677, + 506, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 506, + 712 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 506, + 712 + ], + "type": "text", + "content": "The tasks we consider in this work consist of a question " + }, + { + "bbox": [ + 104, + 677, + 506, + 712 + ], + "type": "inline_equation", + "content": "\\mathbf{q} \\in \\Sigma^{*}" + }, + { + "bbox": [ + 104, + 677, + 506, + 712 + ], + "type": "text", + "content": " for a vocabulary " + }, + { + "bbox": [ + 104, + 677, + 506, + 712 + ], + "type": "inline_equation", + "content": "\\Sigma" + }, + { + "bbox": [ + 104, + 677, + 506, + 712 + ], + "type": "text", + "content": " and an answer " + }, + { + "bbox": [ + 104, + 677, + 506, + 712 + ], + "type": "inline_equation", + "content": "a \\in \\mathcal{L}(\\mathbf{q})" + }, + { + "bbox": [ + 104, + 677, + 506, + 712 + ], + "type": "text", + "content": " for a label set " + }, + { + "bbox": [ + 104, + 677, + 506, + 712 + ], + "type": "inline_equation", + "content": "\\mathcal{L}(\\mathbf{q})" + }, + { + "bbox": [ + 104, + 677, + 506, + 712 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 104, + 677, + 506, + 712 + ], + "type": "inline_equation", + "content": "\\mathcal{L}(\\mathbf{q})" + }, + { + "bbox": [ + 104, + 677, + 506, + 712 + ], + "type": "text", + "content": " can consist of a data type like boolean or integer, classification labels, or problem-dependent labels like names of entities from " + }, + { + "bbox": [ + 104, + 677, + 506, + 712 + ], + "type": "inline_equation", + "content": "\\mathbf{q}" + }, + { + "bbox": [ + 104, + 677, + 506, + 712 + ], + "type": "text", + "content": ". 
One exception that we still" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 720, + 318, + 731 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 720, + 318, + 731 + ], + "spans": [ + { + "bbox": [ + 116, + 720, + 318, + 731 + ], + "type": "text", + "content": "2https://mistral.ai/news/mistral-large-2407/" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "content": "explore is BiGGen Bench (Kim et al., 2024), which instead relies on an LLM-as-a-judge (Dubois et al., 2023; Zheng et al., 2024b) to provide a label for generated long-form responses." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 124, + 504, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 124, + 504, + 180 + ], + "spans": [ + { + "bbox": [ + 104, + 124, + 504, + 180 + ], + "type": "text", + "content": "Prompting and chain-of-thought for reasoning A large language model places distributions over strings " + }, + { + "bbox": [ + 104, + 124, + 504, + 180 + ], + "type": "inline_equation", + "content": "p(\\mathbf{y}) = \\prod_{i=1}^{n} p_{\\mathrm{LM}}(y_i)" + }, + { + "bbox": [ + 104, + 124, + 504, + 180 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 124, + 504, + 180 + ], + "type": "inline_equation", + "content": "\\mathbf{y} \\in \\Sigma^*" + }, + { + "bbox": [ + 104, + 124, + 504, + 180 + ], + "type": "text", + "content": ". In practice, we can interpret these as conditional distributions " + }, + { + "bbox": [ + 104, + 124, + 504, + 180 + ], + "type": "inline_equation", + "content": "p(\\mathbf{y} \\mid \\mathbf{x})" + }, + { + "bbox": [ + 104, + 124, + 504, + 180 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 124, + 504, + 180 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 104, + 124, + 504, + 180 + ], + "type": "text", + "content": " is a user's prompt. 
Typical invocation of an LLM involves forming a prompt " + }, + { + "bbox": [ + 104, + 124, + 504, + 180 + ], + "type": "inline_equation", + "content": "\\mathcal{I}(\\mathbf{q})" + }, + { + "bbox": [ + 104, + 124, + 504, + 180 + ], + "type": "text", + "content": " that wraps the question with additional instruction, then drawing a sample response " + }, + { + "bbox": [ + 104, + 124, + 504, + 180 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{y}} \\sim p(\\mathbf{y} \\mid \\mathcal{I}(\\mathbf{q}))" + }, + { + "bbox": [ + 104, + 124, + 504, + 180 + ], + "type": "text", + "content": ", and finally returning " + }, + { + "bbox": [ + 104, + 124, + 504, + 180 + ], + "type": "inline_equation", + "content": "a = \\text{extract}(\\tilde{\\mathbf{y}})" + }, + { + "bbox": [ + 104, + 124, + 504, + 180 + ], + "type": "text", + "content": " using some kind of answer extractor." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 185, + 506, + 242 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 185, + 506, + 242 + ], + "spans": [ + { + "bbox": [ + 104, + 185, + 506, + 242 + ], + "type": "text", + "content": "For the tasks we consider in this work, the output " + }, + { + "bbox": [ + 104, + 185, + 506, + 242 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{y}}" + }, + { + "bbox": [ + 104, + 185, + 506, + 242 + ], + "type": "text", + "content": " can take one of two forms. A direct answer only contains a string realization of " + }, + { + "bbox": [ + 104, + 185, + 506, + 242 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 104, + 185, + 506, + 242 + ], + "type": "text", + "content": "; e.g., " + }, + { + "bbox": [ + 104, + 185, + 506, + 242 + ], + "type": "inline_equation", + "content": "\\mathbf{y} = (-185,4)" + }, + { + "bbox": [ + 104, + 185, + 506, + 242 + ], + "type": "text", + "content": " which is tokenized as the answer " + }, + { + "bbox": [ + 104, + 185, + 506, + 242 + ], + "type": "inline_equation", + "content": "a = 1854" + }, + { + "bbox": [ + 104, + 185, + 506, + 242 + ], + "type": "text", + "content": ". A chain of thought is a longer sequence " + }, + { + "bbox": [ + 104, + 185, + 506, + 242 + ], + "type": "inline_equation", + "content": "\\mathbf{y}" + }, + { + "bbox": [ + 104, + 185, + 506, + 242 + ], + "type": "text", + "content": " including other tokens beyond the answer, e.g., " + }, + { + "bbox": [ + 104, + 185, + 506, + 242 + ], + "type": "inline_equation", + "content": "\\mathbf{y} = (-185,6,-\\mathrm{minus},-2,-\\mathrm{equals},-185,4)" + }, + { + "bbox": [ + 104, + 185, + 506, + 242 + ], + "type": "text", + "content": ". In both cases, the extract function must parse and detokenize the output; in CoT, there is some extra work to spot where the answer is placed." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 245, + 506, + 368 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 245, + 506, + 368 + ], + "spans": [ + { + "bbox": [ + 104, + 245, + 506, + 368 + ], + "type": "text", + "content": "Our prompts can explicitly encourage use of direct answer or chain of thought as strategies, which we denote as " + }, + { + "bbox": [ + 104, + 245, + 506, + 368 + ], + "type": "inline_equation", + "content": "\\mathcal{I}_{\\mathrm{da}}" + }, + { + "bbox": [ + 104, + 245, + 506, + 368 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 245, + 506, + 368 + ], + "type": "inline_equation", + "content": "\\mathcal{I}_{\\mathrm{cot}}" + }, + { + "bbox": [ + 104, + 245, + 506, + 368 + ], + "type": "text", + "content": ". For eliciting CoT, this includes strategies like telling a model to \"think step by step\" (Kojima et al., 2022). For directly answering a question, a prompt may say \"immediately generate the answer\". We track the average location of the answer in the generated output for both CoT and direct prompts in Appendix F.3 to ensure that direct answer prompts give the answer early in the output. We also ensure that extract can parse answers from the generated output for each model, prompt, and dataset combination used in our experiments, tailoring the extract function as needed to ensure low unparseable rates for each model and task. All prompts and outputs per dataset per model have been uploaded to Huggingface and we include examples of some of our prompts in the Appendix J. We also experiment with few-shot CoT prompts, which we find perform similarly to zero-shot prompts; details about these are given in Appendix E." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 385, + 504, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 385, + 504, + 464 + ], + "spans": [ + { + "bbox": [ + 104, + 385, + 504, + 464 + ], + "type": "text", + "content": "Symbolic reasoning Of key importance to this work is whether problems feature symbolic reasoning or not. We consider a problem to be symbolic if it can be grounded in a natural, well agreed-upon formal system. “" + }, + { + "bbox": [ + 104, + 385, + 504, + 464 + ], + "type": "inline_equation", + "content": "12 \\times 4" + }, + { + "bbox": [ + 104, + 385, + 504, + 464 + ], + "type": "text", + "content": "” is an example of a symbolic problem, which can be grounded in mathematics. Other systems include first-order logic (Saparov & He, 2023; Hua et al., 2024) or planning languages (Liu et al., 2023a; Valmeekam et al., 2023). Formally, for symbolic problems, we define a function " + }, + { + "bbox": [ + 104, + 385, + 504, + 464 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 385, + 504, + 464 + ], + "type": "text", + "content": " that acts as a map that produces some symbolic expression " + }, + { + "bbox": [ + 104, + 385, + 504, + 464 + ], + "type": "inline_equation", + "content": "S = f(\\mathbf{q})" + }, + { + "bbox": [ + 104, + 385, + 504, + 464 + ], + "type": "text", + "content": " from the question. 
" + }, + { + "bbox": [ + 104, + 385, + 504, + 464 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 104, + 385, + 504, + 464 + ], + "type": "text", + "content": " can be used as input for a solver to derive an answer, " + }, + { + "bbox": [ + 104, + 385, + 504, + 464 + ], + "type": "inline_equation", + "content": "\\hat{a} = \\operatorname{solve}(S)" + }, + { + "bbox": [ + 104, + 385, + 504, + 464 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 468, + 504, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 468, + 504, + 525 + ], + "spans": [ + { + "bbox": [ + 104, + 468, + 504, + 525 + ], + "type": "text", + "content": "Conversely, a problem like where on a river can you hold a cup upright to catch water on a sunny day? from CommonsenseQA (Talmor et al., 2019) is non-symbolic by our definition. While this problem could be formalized with some kind of predicate logic (Zhou et al., 2022; Quan et al., 2024; Zhou et al., 2024) or grounded in some kind of physical simulation (Hao et al., 2023; Wong et al., 2023), there is not a natural nor well agreed-upon framework for solving it." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 529, + 506, + 596 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 529, + 506, + 596 + ], + "spans": [ + { + "bbox": [ + 104, + 529, + 506, + 596 + ], + "type": "text", + "content": "We view non-symbolic to symbolic reasoning as a spectrum. MuSR (Sprague et al., 2024) is a \"semisymbolic\" dataset in that it does contain an underlying formal system (e.g., for its murder mysteries portion, the notion that " + }, + { + "bbox": [ + 104, + 529, + 506, + 596 + ], + "type": "inline_equation", + "content": "\\mathrm{motive}(X)\\wedge \\mathrm{means}(X)\\wedge \\mathrm{opportunity}(X)\\Rightarrow \\mathrm{murderer}(X))" + }, + { + "bbox": [ + 104, + 529, + 506, + 596 + ], + "type": "text", + "content": ", but also involves substantial commonsense reasoning that does not map onto a formal system. In these cases, we can still form " + }, + { + "bbox": [ + 104, + 529, + 506, + 596 + ], + "type": "inline_equation", + "content": "S = f(\\mathbf{q})" + }, + { + "bbox": [ + 104, + 529, + 506, + 596 + ], + "type": "text", + "content": ", but " + }, + { + "bbox": [ + 104, + 529, + 506, + 596 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 529, + 506, + 596 + ], + "type": "text", + "content": " must rely heavily on a language model and instantiate new information for " + }, + { + "bbox": [ + 104, + 529, + 506, + 596 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 104, + 529, + 506, + 596 + ], + "type": "text", + "content": " that is not directly represented in " + }, + { + "bbox": [ + 104, + 529, + 506, + 596 + ], + "type": "inline_equation", + "content": "\\mathbf{q}" + }, + { + "bbox": [ + 104, + 529, + 506, + 596 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 614, + 504, + 681 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 614, + 504, + 681 + ], + "spans": [ + { + "bbox": [ + 104, + 614, + 504, + 681 + ], + "type": "text", + "content": "Central claim Figure 1 shows that there are a large number of positive results on CoT reported in the literature. 
Informally, we believe many readers of the literature to hold the following view: " + }, + { + "bbox": [ + 104, + 614, + 504, + 681 + ], + "type": "inline_equation", + "content": "\\mathcal{I}_{\\mathrm{cot}}" + }, + { + "bbox": [ + 104, + 614, + 504, + 681 + ], + "type": "text", + "content": " will outperform " + }, + { + "bbox": [ + 104, + 614, + 504, + 681 + ], + "type": "inline_equation", + "content": "\\mathcal{I}_{\\mathrm{da}}" + }, + { + "bbox": [ + 104, + 614, + 504, + 681 + ], + "type": "text", + "content": " on nearly all reasoning problems, whether those problems involve symbolic or non-symbolic reasoning. Our evidence does not support this conjecture. We will show that this performance boost is strongest for symbolic and semi-symbolic tasks, while giving little to no improvement (or even hurting performance) on non-symbolic tasks." + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 700, + 504, + 733 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 700, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 700, + 504, + 733 + ], + "type": "text", + "content": "3We exclude a number of other \"CoT-like\" approaches in our analysis such as decomposed prompting (Khot et al., 2023; Zheng et al., 2024a) and multi-agent debate (Du et al., 2023; Chen et al., 2024). We focus on single prompt approaches. We deal with tool-augmented approaches in Section 5." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 107, + 100, + 504, + 285 + ], + "blocks": [ + { + "bbox": [ + 141, + 79, + 468, + 92 + ], + "lines": [ + { + "bbox": [ + 141, + 79, + 468, + 92 + ], + "spans": [ + { + "bbox": [ + 141, + 79, + 468, + 92 + ], + "type": "text", + "content": "Table 1: A few categories for experimental comparisons. Full list in Appendix B." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 107, + 100, + 504, + 285 + ], + "lines": [ + { + "bbox": [ + 107, + 100, + 504, + 285 + ], + "spans": [ + { + "bbox": [ + 107, + 100, + 504, + 285 + ], + "type": "table", + "html": "
<table><tr><td>Category</td><td>Description</td></tr>
<tr><td>Symbolic and algorithmic</td><td>Tasks involving symbol manipulation which can be solved by executing a program. This includes entity tracking datasets (e.g., SCONE, Coin Flip) and algorithmic tasks (e.g., BBH word sorting or finding shortest paths in a graph).</td></tr>
<tr><td>Math</td><td>Tasks requiring mathematical reasoning, from grade-school math to advanced mathematics, including physics questions.</td></tr>
<tr><td>Logical reasoning</td><td>Tasks designed to test for logical reasoning, whether deductive (Saparov & He, 2023, PrOntoQA), inductive (Bowen et al., 2024) or analogical (Ma et al., 2024) reasoning, including syllogisms and logical puzzles.</td></tr>
<tr><td>Encyclopedic knowledge</td><td>Tasks requiring expert-level in-depth knowledge beyond mere common-sense, usually in an open-book setting.</td></tr>
<tr><td>Mixed datasets</td><td>Datasets containing a variety of tasks, such as BIG-Bench Hard (BBH) or MMLU.</td></tr>
<tr><td>...</td><td>...</td></tr></table>
", + "image_path": "198d13e636eff5af0e8a7fb6ad15b14555b49b634303b36000808ae121126597.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 307, + 297, + 319 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 307, + 297, + 319 + ], + "spans": [ + { + "bbox": [ + 105, + 307, + 297, + 319 + ], + "type": "text", + "content": "3 RESULTS FROM THE LITERATURE" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 334, + 506, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 334, + 506, + 425 + ], + "spans": [ + { + "bbox": [ + 104, + 334, + 506, + 425 + ], + "type": "text", + "content": "Criteria and Process We investigate all papers from ICLR 2024, a representative ML venue, and two representative NLP venues, EACL 2024 and NAACL 2024 (including Findings and Workshop papers). This resulted in 4,642 papers total that we filtered, using automatic and manual methods, down to papers including experiments comparing chain-of-thought, " + }, + { + "bbox": [ + 104, + 334, + 506, + 425 + ], + "type": "inline_equation", + "content": "\\mathcal{I}_{\\mathrm{cot}}" + }, + { + "bbox": [ + 104, + 334, + 506, + 425 + ], + "type": "text", + "content": ", against direct answering prompts, " + }, + { + "bbox": [ + 104, + 334, + 506, + 425 + ], + "type": "inline_equation", + "content": "\\mathcal{I}_{\\mathrm{direct}}" + }, + { + "bbox": [ + 104, + 334, + 506, + 425 + ], + "type": "text", + "content": ". A total of 110 papers were found that matched our criteria with 1,218 experimental comparisons. We then grouped the comparisons by the types of tasks and datasets being evaluated. More details on our automatic and manual filtering, as well as our categorization, can be found in Appendix A and B." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 439, + 506, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 439, + 506, + 562 + ], + "spans": [ + { + "bbox": [ + 104, + 439, + 506, + 562 + ], + "type": "text", + "content": "Results Figure 2 shows the distribution of CoT deltas (CoT prompt minus the direct answer prompt performance) across our categorization of different task types found in the literature. Compared to Figure 1, we take the mean results per paper per category, indicated by blue dots, showing the trend across papers in the literature. The categories are ranked in order of ascending median CoT delta. The three categories which benefited the most from CoT are symbolic reasoning, math, and logical reasoning, with average improvements of 14.2, 12.3, and 6.9, respectively. Average performance on these top three tasks with CoT was 56.9, whereas performance without CoT was 45.5. For other categories, the average performance with CoT was 56.8, compared to 56.1 without CoT. We do not consider this small improvement a victory for CoT. CoT involves more computation than direct answering, and a truly fair comparison between the methods should match the compute of the two methods, e.g., ensembling across multiple prompts." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 578, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 578, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 578, + 506, + 733 + ], + "type": "text", + "content": "Do any non-math datasets benefit from CoT? 
On the right side of Figure 2, we show the top 10 outliers from our observed trend, namely papers with high CoT deltas averaged across experiments in tasks other than math, symbolic, or logical reasoning. Although not categorized as math or logic, several of these are related to logical, mathematical or symbolic reasoning in some way. From this list, the dataset which benefits the most from CoT is BIG-bench Hard (BBH) (Suzgun et al., 2023), a benchmark consisting largely of problems requiring algorithmic, arithmetic or logical reasoning. For instance, BIG-bench Navigate is a spatial reasoning task, but relies heavily on a mathematical primitive of counting steps taken to derive a final conclusion. Similarly, while BIG-bench Temporal is a temporal reasoning task (answering questions about when certain events could have occurred), it requires deductive reasoning to solve. In addition, Legal Argument Reasoning (SemEval-2024 Task 5) (Bongard et al., 2022) was categorized as context-aware QA, but also requires substantial reasoning ability. Finally, MMLU-Moral Scenarios (Hendrycks et al., 2021a) requires answering two independent questions at once, which essentially involves a symbolic combination of two simpler questions." + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 106, + 89, + 492, + 373 + ], + "blocks": [ + { + "bbox": [ + 106, + 89, + 492, + 373 + ], + "lines": [ + { + "bbox": [ + 106, + 89, + 492, + 373 + ], + "spans": [ + { + "bbox": [ + 106, + 89, + 492, + 373 + ], + "type": "image", + "image_path": "190393a2874564aba0ecc379cb8f7318fa75276598feeed1d23850a9185d0ff8.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 146, + 79, + 422, + 89 + ], + "lines": [ + { + "bbox": [ + 146, + 79, + 422, + 89 + ], + "spans": [ + { + "bbox": [ + 146, + 79, + 422, + 89 + ], + "type": "text", + "content": "CoT Performance Improvement Across Tasks Aggregated by Paper and Category" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 106, + 381, + 504, + 395 + ], + "lines": [ + { + "bbox": [ + 106, + 381, + 504, + 395 + ], + "spans": [ + { + "bbox": [ + 106, + 381, + 504, + 395 + ], + "type": "text", + "content": "Figure 2: Results from our meta-analysis (grey dots) aggregated by paper and category (blue dots)." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 415, + 506, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 415, + 506, + 525 + ], + "spans": [ + { + "bbox": [ + 104, + 415, + 506, + 525 + ], + "type": "text", + "content": "There are a few outliers that less clearly follow the trend. 
ScienceQA (Lu et al., 2022) consists of multiple choice questions across a range of natural and social science disciplines, though it is hard to interpret gains without a breakdown of performance by subject or question type. The dialogue evaluation dataset from Jia et al. (2024) sees large improvements with CoT, but this is a proprietary dataset, and we note that other essay scoring results in our meta-analysis (Li et al., 2024; Stahl et al., 2024) did not show improvements with CoT. Other datasets outside of math, symbolic, or logical reasoning that benefit from CoT are Commitment Bank (de Marneffe et al., 2019) and the task of eliciting verbalized confidence (Xiong et al., 2024). Nevertheless, these are exceptions to the rule. The majority of the reported benefits from using CoT in the NLP and ML literature comes from math or math-related tasks." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 543, + 282, + 555 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 543, + 282, + 555 + ], + "spans": [ + { + "bbox": [ + 105, + 543, + 282, + 555 + ], + "type": "text", + "content": "4 RESULTS FROM EXPERIMENTS" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 567, + 231, + 578 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 567, + 231, + 578 + ], + "spans": [ + { + "bbox": [ + 105, + 567, + 231, + 578 + ], + "type": "text", + "content": "4.1 EXPERIMENTAL SETUP" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 589, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 589, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 589, + 506, + 733 + ], + "type": "text", + "content": "Dataset, Models, Prompts All datasets, models, and prompts we evaluate over can be found in detail in Tables 3, 4, and 5 of Appendix C. We restricted our experiments to English models commonly used and benchmarked on general reasoning datasets. Our datasets include those which are widely used in CoT and reasoning literature, including a mix of non-symbolic, semisymbolic, and symbolic reasoning. They span different formats, including multiple-choice, short-answer, and free-response; however, most of these datasets are multiple choice or short answer, as CoT is not typically used in long-form response settings. We also categorize each dataset into a larger category of reasoning required to solve it: Commonsense, Knowledge, Symbolic, Mathematical, and Soft Reasoning. We define Soft Reasoning as questions relying on commonsense and natural language but going beyond simple inferences about these statements. Finally, we explore several prompting strategies for eliciting reasoning from language models, as past work has emphasized the importance of the prompt (Yang et al., 2024). However, we generally found slight performance differences; see Appendix D for details. We therefore focus on prompts similar to Kojima et al. (2022) and Wei et al."
+ } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 108, + 62, + 492, + 260 + ], + "blocks": [ + { + "bbox": [ + 108, + 62, + 492, + 260 + ], + "lines": [ + { + "bbox": [ + 108, + 62, + 492, + 260 + ], + "spans": [ + { + "bbox": [ + 108, + 62, + 492, + 260 + ], + "type": "image", + "image_path": "e1e540c0c8c5b46aa7692952347b23ada9c5a504bb629430beda49a0a2daf1ba.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 262, + 504, + 307 + ], + "lines": [ + { + "bbox": [ + 104, + 262, + 504, + 307 + ], + "spans": [ + { + "bbox": [ + 104, + 262, + 504, + 307 + ], + "type": "text", + "content": "Figure 3: Left: Performance gain from using CoT for each reasoning category. Right: Performance gain from using CoT for each dataset, averaged across models and broken out across 5 representative models. Red lines indicate median improvement. In both plots we see a consistent trend: most improvements from using CoT are from math and symbolic reasoning." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 331, + 504, + 366 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 331, + 504, + 366 + ], + "spans": [ + { + "bbox": [ + 104, + 331, + 504, + 366 + ], + "type": "text", + "content": "(2022) for zero-shot and few-shot settings, respectively, with alterations to improve the model's ability to produce desired behavior (i.e., formats that allow for easily parsed answers). We upload all our prompts and outputs for each model for each prompting strategy on Huggingface.4." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 380, + 504, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 380, + 504, + 460 + ], + "spans": [ + { + "bbox": [ + 104, + 380, + 504, + 460 + ], + "type": "text", + "content": "Implementation Details We use a high-throughput inference package, vLLM (Kwon et al., 2023), for the model inference process. We use greedy decoding on all models. Our prompts are taken from the Llama 3.1 evaluations when available (Dubey et al., 2024), and minor adjustments are made to unify prompting strategies. For other datasets, we either use the standard prompt for the dataset from the corresponding original paper or implement our own prompt. Our answer parser (extract) is tailored to each dataset and model. Specific details about each dataset, its prompts, and answer extractor can be found in Appendix C." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 475, + 170, + 485 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 475, + 170, + 485 + ], + "spans": [ + { + "bbox": [ + 105, + 475, + 170, + 485 + ], + "type": "text", + "content": "4.2 RESULTS" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 496, + 504, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 496, + 504, + 520 + ], + "spans": [ + { + "bbox": [ + 104, + 496, + 504, + 520 + ], + "type": "text", + "content": "Where does zero-shot CoT improve over direct prompts? On datasets that require math (MATH, GSM8K) or formal logic (ContextHub, MuSR to a lesser degree) to answer the problem." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 525, + 504, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 525, + 504, + 615 + ], + "spans": [ + { + "bbox": [ + 104, + 525, + 504, + 615 + ], + "type": "text", + "content": "Figure 3 on the left shows the average CoT performance improvement for each reasoning category from Figure 1 (right); raw numbers can be found in Table 6 of the Appendix. On the right, Figure 3 shows the performance gain from using CoT for each dataset, averaged across all models and for a selection of individual models. On non-symbolic reasoning categories and datasets, specifically those that contain questions primarily involving commonsense (CSQA, PIQA, SiQA), language understanding (WinoGrande), and reading comprehension (AGI LSAT, ARC-Easy, ARC-Challenge), there is little to no separation between the performance of zero-shot CoT and zero-shot direct answer. Despite these datasets involving reasoning, CoT does not yield improvement." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 618, + 506, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 618, + 506, + 708 + ], + "spans": [ + { + "bbox": [ + 104, + 618, + 506, + 708 + ], + "type": "text", + "content": "By contrast, the mathematical and symbolic categories get larger boosts in improvements alongside symbolic and many semi-symbolic datasets. MATH and GSM8K show gains as large as " + }, + { + "bbox": [ + 104, + 618, + 506, + 708 + ], + "type": "inline_equation", + "content": "41.6\\%" + }, + { + "bbox": [ + 104, + 618, + 506, + 708 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 618, + 506, + 708 + ], + "type": "inline_equation", + "content": "66.9\\%" + }, + { + "bbox": [ + 104, + 618, + 506, + 708 + ], + "type": "text", + "content": ", respectively. The semi-symbolic datasets like ContextHub and MuSR Murder Mysteries show moderate gains. These datasets require the application of logical rules to reach the answer, e.g., first-order logic parsed from simple natural language (ContextHub) or more complex commonsense statements (MuSR Murder Mysteries). All results are shown in the Appendix F.1 as well as a full list of numeric results for both CoT and direct answer prompting in Table 7. We also explored the few-shot setting and found it had little impact on when CoT will help; see Appendix E." 
+ } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 720, + 525, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 720, + 525, + 732 + ], + "spans": [ + { + "bbox": [ + 116, + 720, + 525, + 732 + ], + "type": "text", + "content": "4https://huggingface.co/collections/TAUR-Lab/cot-analysis-project-66bbb9e5e0156e65059895f5" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "content": "Does the answer format impact where CoT will help? Not much. Free response capabilities required for BigGen Bench may not benefit from pre-planning." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 110, + 506, + 189 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 110, + 506, + 189 + ], + "spans": [ + { + "bbox": [ + 104, + 110, + 506, + 189 + ], + "type": "text", + "content": "Many of the commonly-used datasets for problems other than math are multiple choice. We highlight here that CoT has similar performance to direct answer across models for two datasets that are not multiple-choice and contain varying levels of non-symbolic reasoning. First, MuSiQue (Trivedi et al., 2022) is a short-form QA task requiring multi-hop reasoning. We consider this a semi-symbolic dataset as the questions have an explicit multi-hop structure. Because answer spans in MuSiQue can be paraphrased in many different ways, we use GPT-4o to judge if two answer spans are equivalent. Despite being semi-symbolic, we see no overall improvement from CoT." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 193, + 506, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 193, + 506, + 316 + ], + "spans": [ + { + "bbox": [ + 104, + 193, + 506, + 316 + ], + "type": "text", + "content": "Second, BiGGen Bench (Kim et al., 2024) uses free-form responses as the answer to a question, and an LLM-as-a-judge is used to evaluate these responses on a scale of 1 to 5. Because free-form responses blur the lines between CoT and direct answering, we create a new prompt that asks the language model to plan the free response before giving it. We then only pass the free response to the judge (GPT-4o-mini in our case) with the prompt from Kim et al. (2024). We also filter out any questions that explicitly state \"Think step-by-step\". We plot the performance of BiGGen Bench as the number of times a response receives a score of 4 or better. Despite including many reasoning questions (including several categories of math) and other categories, such as planning, we only see a mild improvement here. 
Because previous experiments show CoT helping on similar types of questions in the QA format, the lack of similar improvements here could imply that pre-planning is insufficient for unlocking reasoning capabilities in the LLM. Future work is needed to confirm this." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 326, + 504, + 349 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 326, + 504, + 349 + ], + "spans": [ + { + "bbox": [ + 104, + 326, + 504, + 349 + ], + "type": "text", + "content": "Are the gains in Knowledge, Soft Reasoning, and Commonsense significant? Mostly no, except for MMLU, StrategyQA, and MuSR." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 354, + 506, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 354, + 506, + 486 + ], + "spans": [ + { + "bbox": [ + 104, + 354, + 506, + 486 + ], + "type": "text", + "content": "We tested the significance of the improvements from CoT on the 13 datasets in the Knowledge, Soft Reasoning, and Commonsense reasoning categories using paired bootstrapping to assess whether CoT gives a significant improvement. To account for multiple comparisons, we applied a Bonferroni correction, setting the p-value to 0.00027 to account for the 14 models and 13 datasets. About " + }, + { + "bbox": [ + 104, + 354, + 506, + 486 + ], + "type": "inline_equation", + "content": "32\\%" + }, + { + "bbox": [ + 104, + 354, + 506, + 486 + ], + "type": "text", + "content": " (59) of the comparisons that show a benefit in these three reasoning categories were significant. Nearly half of these comparisons (26) are on MMLU and MMLU Pro. On these datasets, we find that CoT is mainly helping on math-related questions. StrategyQA and MuSR also received a consistent performance boost across 10 and 6 models, respectively. StrategyQA is often used to benchmark reasoning methods and is built specifically to benefit from methods that decompose the question into steps, so a gain in performance is not unprecedented. MuSR, similarly, was built to require multiple steps of complex natural language reasoning, which may benefit from CoT. The remaining significant benefits are spread across the datasets and models." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 497, + 504, + 564 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 497, + 504, + 564 + ], + "spans": [ + { + "bbox": [ + 104, + 497, + 504, + 564 + ], + "type": "text", + "content": "Why do MMLU and MMLU Pro get a boost? MMLU and MMLU Pro contain many different questions requiring different types of reasoning. We separated MMLU and MMLU Pro questions into two bins, those related to math and those not related to math, by checking if the question's text or the generated response from the LLM includes an “=”. Figure 4 shows that a majority of the performance gain seen from MMLU and MMLU Pro is from the math slices of each dataset. See more details in Appendix G."
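The math/non-math split used for MMLU and MMLU Pro reduces to a simple predicate, sketched below. The record layout and aggregation are assumptions for illustration; only the "=" test itself comes from the text.

```python
# Sketch of the binning from Section 4.2: a question is treated as math-related
# iff its text or the model's generated response contains an "=".
def is_math_related(question: str, response: str) -> bool:
    return "=" in question or "=" in response

def cot_deltas_by_bin(records):
    # records: iterable of (question, response, cot_correct, direct_correct)
    bins = {"with =": [], "without =": []}
    for q, r, cot_ok, direct_ok in records:
        key = "with =" if is_math_related(q, r) else "without ="
        bins[key].append(int(cot_ok) - int(direct_ok))
    # Mean CoT-minus-direct accuracy delta per slice, as plotted in Figure 4.
    return {k: sum(v) / len(v) if v else 0.0 for k, v in bins.items()}
```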
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 579, + 455, + 592 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 579, + 455, + 592 + ], + "spans": [ + { + "bbox": [ + 105, + 579, + 455, + 592 + ], + "type": "text", + "content": "5 STRENGTHS AND WEAKNESSES OF COT AT FORMAL REASONING" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 604, + 504, + 660 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 604, + 504, + 660 + ], + "spans": [ + { + "bbox": [ + 104, + 604, + 504, + 660 + ], + "type": "text", + "content": "Previous sections establish that CoT primarily helps with symbolic reasoning tasks, but not why. Many symbolic and semi-symbolic tasks can be broken down into two stages (Ye et al., 2023; Pan et al., 2023; Jiang et al., 2024): planning, producing either a formal or informal specification via prompting (Sun et al., 2024; Wang et al., 2023b), and execution, using the same LM or external solvers. In this section, we attribute the performance gains from CoT on symbolic tasks to these two stages." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 664, + 506, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 664, + 506, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 664, + 506, + 734 + ], + "type": "text", + "content": "Given a question that requires symbolic reasoning, we define the planning stage as extracting all variables from the context into a formal specification and defining their relations. The execution stage uses a solver that takes as input a plan and can be run in an orderly fashion to derive the final answer. Using our notation from Section 2, let " + }, + { + "bbox": [ + 104, + 664, + 506, + 734 + ], + "type": "inline_equation", + "content": "f(\\mathbf{q}) = \\mathcal{I}_{\\mathrm{planning}}^{m}(\\mathbf{q})" + }, + { + "bbox": [ + 104, + 664, + 506, + 734 + ], + "type": "text", + "content": " be a mapping of the question " + }, + { + "bbox": [ + 104, + 664, + 506, + 734 + ], + "type": "inline_equation", + "content": "\\mathbf{q}" + }, + { + "bbox": [ + 104, + 664, + 506, + 734 + ], + "type": "text", + "content": " to a symbolic plan " + }, + { + "bbox": [ + 104, + 664, + 506, + 734 + ], + "type": "inline_equation", + "content": "S_{\\mathrm{plan}}" + }, + { + "bbox": [ + 104, + 664, + 506, + 734 + ], + "type": "text", + "content": " that can be executed by the language model or by an external symbolic solver, " + }, + { + "bbox": [ + 104, + 664, + 506, + 734 + ], + "type": "inline_equation", + "content": "\\hat{a} = \\mathrm{solve}(S_{\\mathrm{plan}})" + }, + { + "bbox": [ + 104, + 664, + 506, + 734 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 664, + 506, + 734 + ], + "type": "inline_equation", + "content": "\\hat{a}" + }, + { + "bbox": [ + 104, + 664, + 506, + 734 + ], + "type": "text", + "content": " is the final answer for " + }, + { + "bbox": [ + 104, + 664, + 506, + 734 + ], + "type": "inline_equation", + "content": "\\mathbf{q}" + }, + { + "bbox": [ + 104, + 664, + 506, + 734 + ], + "type": "text", + "content": "."
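A minimal rendering of this two-stage decomposition is sketched below for the math case, where the plan is a Python program. The hard-coded plan stands in for the LM call I^m_planning and Python's exec stands in for solve; both are assumptions for illustration, using the GSM8K-style question shown in Figure 4.

```python
# Two-stage decomposition from the notation above: planning maps a question q
# to a symbolic plan S_plan; execution solves the plan to obtain the answer.
def plan(question: str) -> str:
    # Stand-in for I_planning^m(q); in the paper an LM generates this program.
    return "people = 48 / 1.2\nanswer = int(people)"

def solve(s_plan: str):
    # External execution of the plan (here, the Python interpreter).
    namespace: dict = {}
    exec(s_plan, namespace)
    return namespace["answer"]

q = "Courtney said there were 48 people, but she overstated the number by 20%."
print(solve(plan(q)))  # -> 40
```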
+ } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 110, + 90, + 496, + 210 + ], + "blocks": [ + { + "bbox": [ + 198, + 73, + 411, + 86 + ], + "lines": [ + { + "bbox": [ + 198, + 73, + 411, + 86 + ], + "spans": [ + { + "bbox": [ + 198, + 73, + 411, + 86 + ], + "type": "text", + "content": "Improvement of CoT over direct on = vs. no =" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 110, + 90, + 496, + 210 + ], + "lines": [ + { + "bbox": [ + 110, + 90, + 496, + 210 + ], + "spans": [ + { + "bbox": [ + 110, + 90, + 496, + 210 + ], + "type": "image", + "image_path": "3a5204f7a54b1eee1699edc2a7d6768cf0cb8700b42d38a92db5a2dc23c86aea.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 110, + 286, + 501, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 286, + 501, + 308 + ], + "spans": [ + { + "bbox": [ + 110, + 286, + 501, + 308 + ], + "type": "text", + "content": "Q: Courtney said that there were 48 people, but Kelly said that Courtney had overstated the number by " + }, + { + "bbox": [ + 110, + 286, + 501, + 308 + ], + "type": "inline_equation", + "content": "20\\%" + }, + { + "bbox": [ + 110, + 286, + 501, + 308 + ], + "type": "text", + "content": ". If Kelly was right, how many people were there?" + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 111, + 308, + 500, + 439 + ], + "blocks": [ + { + "bbox": [ + 104, + 224, + 504, + 270 + ], + "lines": [ + { + "bbox": [ + 104, + 224, + 504, + 270 + ], + "spans": [ + { + "bbox": [ + 104, + 224, + 504, + 270 + ], + "type": "text", + "content": "Figure 4: CoT deltas between MMLU and MMLU Pro performance when a question or generated response contains an “=” (With =) or not (Without =). We filter out any questions that do not result in a final answer (degeneration, etc.). CoT primarily helps on the pairs of questions and generations that contain an “=”, which indicates math-related questions." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 111, + 308, + 500, + 439 + ], + "lines": [ + { + "bbox": [ + 111, + 308, + 500, + 439 + ], + "spans": [ + { + "bbox": [ + 111, + 308, + 500, + 439 + ], + "type": "image", + "image_path": "a174b5c5f50875f9f7bc69fcc6118cec554fc9fed20fb542bddadf8601a330f4.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 449, + 504, + 506 + ], + "lines": [ + { + "bbox": [ + 104, + 449, + 504, + 506 + ], + "spans": [ + { + "bbox": [ + 104, + 449, + 504, + 506 + ], + "type": "text", + "content": "Figure 5: Prompt variants that separate planning and execution for GSM8K. 
For all prompt variants besides direct answer and CoT (not shown), we few-shot prompt an LLM to first generate a Python program as a solution plan. For Plan + Direct Solver, the LLM is prompted to directly give an answer from the plan; for Plan + CoT Solver, the LLM is prompted to solve the plan step-by-step with CoT and give an answer; for Plan + Tool Solver, we feed the plan into a Python interpreter." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 530, + 504, + 576 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 530, + 504, + 576 + ], + "spans": [ + { + "bbox": [ + 104, + 530, + 504, + 576 + ], + "type": "text", + "content": "By separating planning and execution in this way, we can test how much a language model can gain from having a plan alone, to having a plan and solving it with CoT, to having a plan and then solving it with an external symbolic solver. Given a plan " + }, + { + "bbox": [ + 104, + 530, + 504, + 576 + ], + "type": "inline_equation", + "content": "S_{\\mathrm{plan}} \\sim \\mathcal{I}_{\\mathrm{planning}}^{m}(\\mathbf{q})" + }, + { + "bbox": [ + 104, + 530, + 504, + 576 + ], + "type": "text", + "content": ", we compare the performance of the settings below to evaluate at which stage the LM is most effective and where it falls short." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 593, + 229, + 604 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 593, + 229, + 604 + ], + "spans": [ + { + "bbox": [ + 105, + 593, + 229, + 604 + ], + "type": "text", + "content": "5.1 SETTINGS EVALUATED" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 614, + 504, + 638 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 614, + 504, + 638 + ], + "spans": [ + { + "bbox": [ + 104, + 614, + 504, + 638 + ], + "type": "text", + "content": "Settings 1 and 2: Few-shot direct answer and CoT: We use the few-shot direct answer and CoT prompts from Section 4.1 as baselines. Figure 5 includes an example of each setting on GSM8K." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 642, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 642, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 642, + 506, + 733 + ], + "type": "text", + "content": "Settings 3 and 4: Plan + Direct Solver and Plan + CoT Solver: Here we draw inspiration from Xu et al. (2024a) and generate a symbolic plan using the same strategy as Ye et al. (2023). Specifically, we use a few-shot prompt " + }, + { + "bbox": [ + 104, + 642, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathcal{I}_{\\mathrm{planning}}^m" + }, + { + "bbox": [ + 104, + 642, + 506, + 733 + ], + "type": "text", + "content": " to generate a formal specification " + }, + { + "bbox": [ + 104, + 642, + 506, + 733 + ], + "type": "inline_equation", + "content": "S_{\\mathrm{plan}}" + }, + { + "bbox": [ + 104, + 642, + 506, + 733 + ], + "type": "text", + "content": " that should be executable by a symbolic solver. 
In the same prompt LMs are asked to solve their generated specification " + }, + { + "bbox": [ + 104, + 642, + 506, + 733 + ], + "type": "inline_equation", + "content": "S_{\\mathrm{plan}}" + }, + { + "bbox": [ + 104, + 642, + 506, + 733 + ], + "type": "text", + "content": " and derive the final answer " + }, + { + "bbox": [ + 104, + 642, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{y}} \\sim p(\\mathbf{y} \\mid \\mathcal{I}_{\\mathrm{da}}(S_{\\mathrm{plan}}))" + }, + { + "bbox": [ + 104, + 642, + 506, + 733 + ], + "type": "text", + "content": ", either directly giving the answer after generating the specification (" + }, + { + "bbox": [ + 104, + 642, + 506, + 733 + ], + "type": "inline_equation", + "content": "Plan + Direct Solver" + }, + { + "bbox": [ + 104, + 642, + 506, + 733 + ], + "type": "text", + "content": ") or providing step-by-step explanations and tracking of intermediate steps for the derivation (" + }, + { + "bbox": [ + 104, + 642, + 506, + 733 + ], + "type": "inline_equation", + "content": "Plan + CoT Solver" + }, + { + "bbox": [ + 104, + 642, + 506, + 733 + ], + "type": "text", + "content": "). Particularly, " + }, + { + "bbox": [ + 104, + 642, + 506, + 733 + ], + "type": "inline_equation", + "content": "S_{\\mathrm{plan}}" + }, + { + "bbox": [ + 104, + 642, + 506, + 733 + ], + "type": "text", + "content": " is a Python program for math datasets, and is a set of first-order logic specifications for logical reasoning datasets." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 58, + 504, + 266 + ], + "blocks": [ + { + "bbox": [ + 107, + 58, + 504, + 266 + ], + "lines": [ + { + "bbox": [ + 107, + 58, + 504, + 266 + ], + "spans": [ + { + "bbox": [ + 107, + 58, + 504, + 266 + ], + "type": "image", + "image_path": "e6aa2aaa042764a5eb4e3bba9942d2c9afc160099caf5058485deba84fcaa205.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 270, + 504, + 314 + ], + "lines": [ + { + "bbox": [ + 104, + 270, + 504, + 314 + ], + "spans": [ + { + "bbox": [ + 104, + 270, + 504, + 314 + ], + "type": "text", + "content": "Figure 6: Performance of prompt variants that separate planning and execution for math and logical reasoning datasets. Despite outperforming direct answer for solving a formal plan and deriving the final answer, CoT is still limited in performing symbolic computations: there is a large performance boost from Plan + Tool Solver over CoT and Plan + CoT Solver on average across all models." 
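Settings 3 and 4 differ only in the instruction the LM receives once the plan has been generated; a hypothetical prompt-construction sketch follows. The instruction wording and the generate() placeholder are assumptions, not the actual prompts of Ye et al. (2023).

```python
# Sketch of Settings 3 and 4: the same generated plan S_plan is handed back to
# the LM with different solving instructions.
def solver_prompt(s_plan: str, mode: str) -> str:
    assert mode in ("direct", "cot")
    instruction = (
        "Execute this plan and state only the final answer."
        if mode == "direct"
        else "Execute this plan step by step, tracking every intermediate "
             "value, then state the final answer."
    )
    return f"Solution plan:\n{s_plan}\n\n{instruction}"

# Plan + Direct Solver: answer = generate(solver_prompt(s_plan, "direct"))
# Plan + CoT Solver:    answer = generate(solver_prompt(s_plan, "cot"))
# where generate() is any LM call, e.g., the vLLM setup sketched earlier.
```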
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 336, + 504, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 336, + 504, + 392 + ], + "spans": [ + { + "bbox": [ + 104, + 336, + 504, + 392 + ], + "type": "text", + "content": "Setting 5: Plan + Tool Solver: We then evaluate how effective CoT can be at performing symbolic computations compared with external symbolic solvers. Following prior work on augmenting LMs with tools for math and logic questions (Ye et al., 2023; Pan et al., 2023; Gao et al., 2023; Chen et al., 2023), we generate " + }, + { + "bbox": [ + 104, + 336, + 504, + 392 + ], + "type": "inline_equation", + "content": "S_{\\mathrm{plan}}" + }, + { + "bbox": [ + 104, + 336, + 504, + 392 + ], + "type": "text", + "content": " the same way as in CoT Solver, but now feed the plan into a symbolic solver (a Python interpreter or an SMT solver), such that " + }, + { + "bbox": [ + 104, + 336, + 504, + 392 + ], + "type": "inline_equation", + "content": "\\hat{a} = \\operatorname{solve}(S_{\\mathrm{plan}})" + }, + { + "bbox": [ + 104, + 336, + 504, + 392 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 397, + 504, + 442 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 397, + 504, + 442 + ], + "spans": [ + { + "bbox": [ + 104, + 397, + 504, + 442 + ], + "type": "text", + "content": "Evaluation Setup: We compare the performance of each setting on math (GSM8K) and logical reasoning (ContextHub and FOLIO) datasets. We follow Gao et al. (2023) and include GSM8K-Hard, a minimally modified version of GSM8K that replaces its numbers with larger ones, to account for the possibility of recent LLMs overfitting GSM8K through data contamination (Zhang et al., 2024)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 447, + 506, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 447, + 506, + 525 + ], + "spans": [ + { + "bbox": [ + 104, + 447, + 506, + 525 + ], + "type": "text", + "content": "For Plan + Direct solver and Plan + CoT solver, we use the few-shot prompts from Ye et al. (2023). For Plan + Tool solver, we use state-of-the-art tool-augmented prompting methods. Particularly, for GSM8K, we use the Program-aided Language Model (Gao et al., 2023, PAL), which executes the LM-generated plan with a Python interpreter. For logical reasoning datasets, we use the Satisfiability-Aided Language Model (Ye et al., 2023, SatLM), which uses the automated theorem prover Z3 (De Moura & Bjørner, 2008) to solve the generated specifications. If the generated plan cannot be parsed by the tool, we use random guessing when the question is multiple choice, and mark it incorrect otherwise." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 539, + 229, + 550 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 539, + 229, + 550 + ], + "spans": [ + { + "bbox": [ + 105, + 539, + 229, + 550 + ], + "type": "text", + "content": "5.2 EVALUATION RESULTS" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 559, + 504, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 559, + 504, + 583 + ], + "spans": [ + { + "bbox": [ + 104, + 559, + 504, + 583 + ], + "type": "text", + "content": "Figure 6 shows the results across a representative selection of models. 
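For the math datasets, the Plan + Tool Solver setting amounts to running the generated program in a real interpreter, with the fallback protocol described above. The sketch below is PAL-style execution under a stated assumption (plans store their result in an `answer` variable); the SatLM counterpart would dispatch the logical specifications to Z3 instead.

```python
# Sketch of Setting 5 for math: execute the LM-generated plan with the Python
# interpreter; on failure, guess randomly for multiple choice, else mark wrong.
import random

def tool_solve(s_plan: str, choices=None):
    try:
        namespace: dict = {}
        exec(s_plan, namespace)      # external symbolic execution of the plan
        return namespace["answer"]   # assumes plans end by setting `answer`
    except Exception:
        # Unparseable/broken plan: random guess if multiple choice, else None.
        return random.choice(choices) if choices else None
```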
Detailed numerical results, including the unparseable rates of model-generated plans, can be found in Appendix H." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 587, + 504, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 587, + 504, + 643 + ], + "spans": [ + { + "bbox": [ + 104, + 587, + 504, + 643 + ], + "type": "text", + "content": "When comparing direct answer with Plan + Direct solver and Plan + CoT solver, we note that for many datasets and models, only having a plan does not account for most of the performance gain. Compared with direct answer, CoT or Plan + CoT solver is needed for strong performance. Tracking the execution with one of these methods gives the strongest accuracy benefit, especially for math-heavy datasets." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 648, + 504, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 648, + 504, + 682 + ], + "spans": [ + { + "bbox": [ + 104, + 648, + 504, + 682 + ], + "type": "text", + "content": "Despite their strength over direct answer and Plan + Direct solver, CoT and Plan + CoT solver are dominated by Plan + Tool solver in most settings. LLMs are limited by their ability to execute and track steps compared with symbolic solvers." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 687, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 687, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 687, + 505, + 733 + ], + "type": "text", + "content": "We argue that these results provide an explanation of why CoT helps on symbolic tasks. While all tasks could feasibly benefit from a detailed description of how to solve each individual question (e.g., a plan in the context of this section), CoT only outperforms direct answer when these steps require a substantial amount of tracing and computation. In these settings, we can see clear performance" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "type": "text", + "content": "benefit from using symbolic solvers; CoT appears to be a poor (but universal) approximation to such solvers. When possible, LLMs should be paired with symbolic solvers at inference time when solving symbolic tasks to achieve consistently better performance over direct answer and CoT." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 131, + 304, + 144 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 131, + 304, + 144 + ], + "spans": [ + { + "bbox": [ + 105, + 131, + 304, + 144 + ], + "type": "text", + "content": "6 DISCUSSION AND RELATED WORK" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 156, + 506, + 333 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 156, + 506, + 333 + ], + "spans": [ + { + "bbox": [ + 104, + 156, + 506, + 333 + ], + "type": "text", + "content": "Where is CoT helping and why? Our results showing CoT improvements on math and logic align well with early work on CoT for LLMs such as Scratchpads (Nye et al., 2022). As CoT gained popularity, its application broadened to tasks that canonically do not require multiple steps, where it can often yield small improvements over direct answering. We believe this led to the current prevailing sentiment that deliberation should improve performance on any task requiring some type of reasoning (our original claim from Section 2). However, our results show a clear separation between performance on non-symbolic and symbolic tasks. If, in theory, any question could benefit from deliberation, why is CoT only benefiting the questions that can be solved through symbolic manipulation? Our results from Section 5 suggest that the primary benefit of CoT comes from the ability to execute symbolic steps and track their output. Not all tasks have this feature: for example, questions from CommonsenseQA can hardly be translated into formally grounded and executable solution plans. Datasets like StrategyQA may feature multiple steps of reasoning, but executing those steps is not complex, so the benefits of CoT are small. It is unclear whether explicitly instilling models with particular modes of deliberation, like process of elimination for multiple choice questions, might make them more effective for non-symbolic tasks, or whether there's a fundamental limitation imposed by their pre-training data. We leave this distinction for future work." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 342, + 506, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 342, + 506, + 443 + ], + "spans": [ + { + "bbox": [ + 104, + 342, + 506, + 443 + ], + "type": "text", + "content": "Can we improve CoT further? Our work treats chain-of-thought variants that explicitly do not involve multiple inference calls. There is evidence that using additional calls to LLMs can help (Du et al., 2023; Yao et al., 2023; Besta et al., 2023; Chen et al., 2024), but these methods use significantly more computation, and careful benchmarking sometimes reveals that naive techniques are as good as iterative ones (Olausson et al., 2024). However, past theoretical results show that Transformers are augmented in a fundamental way by CoT (Liu et al., 2023b; Merrill & Sabharwal, 2024); we believe this indicates the potential for improving CoT beyond prompt-based CoT. On the other hand, recent methods showing benefit from \"internalizing\" CoT (Deng et al., 2024) may indicate that explicit generation of intermediate tokens is not used to its full potential."
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 453, + 504, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 453, + 504, + 510 + ], + "spans": [ + { + "bbox": [ + 104, + 453, + 504, + 510 + ], + "type": "text", + "content": "Limitations One set of tasks we do not cover in our experiments (except for BiGGen Bench) is long-horizon planning. However, many works in the literature have already discussed the efficacy of planning with CoT. We also do not directly address possible data contamination of these models on our evaluation datasets. We try to mitigate this by including multiple models, datasets (new and old), and our meta-analysis. For more discussion of planning and dataset contamination, see Appendix I." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 524, + 196, + 537 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 524, + 196, + 537 + ], + "spans": [ + { + "bbox": [ + 105, + 524, + 196, + 537 + ], + "type": "text", + "content": "7 CONCLUSION" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 548, + 504, + 627 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 548, + 504, + 627 + ], + "spans": [ + { + "bbox": [ + 104, + 548, + 504, + 627 + ], + "type": "text", + "content": "In this work, we characterize the performance of prompt-based CoT through a meta-analysis of the literature and experiments across different models, datasets, and prompts. We find that CoT predominantly helps on math and formal logic, largely due to its ability to trace the intermediate steps of a problem. But CoT rarely outperforms tool-augmented approaches for these same problems. We believe that CoT remains a powerful technique, but to deliver improvements across a wider range of NLP tasks, research should move beyond prompt-based CoT to new paradigms like search, interacting agents, or better fine-tuned models." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 641, + 204, + 654 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 641, + 204, + 654 + ], + "spans": [ + { + "bbox": [ + 105, + 641, + 204, + 654 + ], + "type": "text", + "content": "REPRODUCIBILITY" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 665, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 665, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 665, + 504, + 733 + ], + "type": "text", + "content": "For our experiments, we provide in-depth details of how we evaluated models on each dataset in Section 4.1 and Appendix C. Furthermore, we release all prompts for every dataset on Huggingface, including per-model outputs and sampling parameters. For our meta-analysis of the literature, we describe our filtering criteria and process of annotating experiments into high-level categories in Section 3 and Appendix B. We also release the full list of papers in our meta-analysis together with extracted experimental comparisons and task category annotations."
+ } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 218, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 218, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 218, + 94 + ], + "type": "text", + "content": "ACKNOWLEDGMENTS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 106, + 506, + 173 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 106, + 506, + 173 + ], + "spans": [ + { + "bbox": [ + 104, + 106, + 506, + 173 + ], + "type": "text", + "content": "We acknowledge George Tsoukalas for providing insightful feedback throughout the project. We also thank Kaj Bostrom and Eunsol Choi for reviewing and providing feedback on drafts of the work. This work was partially supported by NSF CAREER Award IIS-2145280 (to Durrett), NSF CAREER Award 2339729 (to Mahowald), the NSF AI Institute for Foundations of Machine Learning (IFML), the Sloan Foundation via a Sloan Research Fellowship, and a grant from Open Philanthropy." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 188, + 176, + 200 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 188, + 176, + 200 + ], + "spans": [ + { + "bbox": [ + 105, + 188, + 176, + 200 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 206, + 504, + 732 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 106, + 206, + 504, + 383 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 206, + 504, + 383 + ], + "spans": [ + { + "bbox": [ + 106, + 206, + 504, + 383 + ], + "type": "text", + "content": "Marah Abdin, Sam Ade Jacobs, Ammar Ahmad Awan, Jyoti Aneja, Ahmed Awadallah, Hany Hassan Awadalla, Nguyen Bach, Amit Bahree, Arash Bakhtiari, Harkirat Singh Behl, Alon Benhaim, Misha Bilenko, Johan Bjorck, Sébastien Bubeck, Martin Cai, Caio César Teodoro Mendes, Weizhu Chen, Vishrav Chaudhary, Parul Chopra, Allison Del Giorno, Gustavo de Rosa, Matthew Dixon, Ronen Eldan, Dan Iter, Abhishek Goswami, Suriya Gunasekar, Emman Haider, Junheng Hao, Russell J. Hewett, Jamie Huynh, Mojan Javaheripi, Xin Jin, Piero Kauffmann, Nikos Karampatziakis, Dongwoo Kim, Mahmoud Khademi, Lev Kurilenko, James R. Lee, Yin Tat Lee, Yuanzhi Li, Chen Liang, Weishung Liu, Eric Lin, Zeqi Lin, Piyush Madan, Arindam Mitra, Hardik Modi, Anh Nguyen, Brandon Norick, Barun Patra, Daniel Perez-Becker, Thomas Portet, Reid Pryzant, Heyang Qin, Marko Radmilac, Corby Rosset, Sambudha Roy, Olli Saarikivi, Amin Saied, Adil Salim, Michael Santacroce, Shital Shah, Ning Shang, Hiteshi Sharma, Xianmin Song, Olatunji Ruwase, Xin Wang, Rachel Ward, Guanhua Wang, Philipp Witte, Michael Wyatt, Can Xu, Jiahang Xu, Sonali Yadav, Fan Yang, Ziyi Yang, Donghan Yu, Cheng-Yuan Zhang, Cyril Zhang, Jianwen Zhang, Li Lyna Zhang, Yi Zhang, Yunan Zhang, and Xiren Zhou. Phi-3 technical report: A highly capable language model locally on your phone. ArXiv, abs/2404.14219, 2024. URL https://api.semanticscholar.org/CorpusID:269293048." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 389, + 503, + 423 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 389, + 503, + 423 + ], + "spans": [ + { + "bbox": [ + 106, + 389, + 503, + 423 + ], + "type": "text", + "content": "Anthropic. The Claude 3 Model Family: Opus, Sonnet, Haiku. a. URL https://www-cdn.anthropic.com/de8ba9b01c9ab7cbabf5c33b80b7bbc618857627/Model_Card_Claude_3.pdf." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 430, + 502, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 430, + 502, + 453 + ], + "spans": [ + { + "bbox": [ + 107, + 430, + 502, + 453 + ], + "type": "text", + "content": "Anthropic. Claude 3.5 Sonnet Model Card Addendum. b. URL https://www-cdn.anthropic.com/fed9cc193a14b84131812372d8d5857f8f304c52/Model_Card_Claude_3_Addendum.pdf." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 460, + 504, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 460, + 504, + 514 + ], + "spans": [ + { + "bbox": [ + 106, + 460, + 504, + 514 + ], + "type": "text", + "content": "Maciej Besta, Nils Blach, Ales Kubicek, Robert Gerstenberger, Michal Podstawski, Lukas Gianinazzi, Joanna Gajda, Tomasz Lehmann, Hubert Niewiadomski, Piotr Nyczyk, and Torsten Hoefler. Graph of thoughts: Solving elaborate problems with large language models. In AAAI Conference on Artificial Intelligence, 2023. URL https://api.semanticscholar.org/CorpusID:261030303." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 522, + 504, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 522, + 504, + 555 + ], + "spans": [ + { + "bbox": [ + 106, + 522, + 504, + 555 + ], + "type": "text", + "content": "Yonatan Bisk, Rowan Zellers, Ronan Le Bras, Jianfeng Gao, and Yejin Choi. PIQA: Reasoning about physical commonsense in natural language. In AAAI Conference on Artificial Intelligence, 2019. URL https://api.semanticscholar.org/CorpusID:208290939." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 562, + 504, + 629 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 562, + 504, + 629 + ], + "spans": [ + { + "bbox": [ + 106, + 562, + 504, + 629 + ], + "type": "text", + "content": "Leonard Bongard, Lena Held, and Ivan Habernal. The legal argument reasoning task in civil procedure. In Nikolaos Aletras, Ilias Chalkidis, Leslie Barrett, Catalina Goantă, and Daniel Preoțiuc-Pietro (eds.), Proceedings of the Natural Legal Language Processing Workshop 2022, pp. 194-207, Abu Dhabi, United Arab Emirates (Hybrid), December 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.nllp-1.17. 
URL https://aclanthology.org/2022.nllp-1.17." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 635, + 504, + 692 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 635, + 504, + 692 + ], + "spans": [ + { + "bbox": [ + 106, + 635, + 504, + 692 + ], + "type": "text", + "content": "Chen Bowen, Rune Sætre, and Yusuke Miyao. A comprehensive evaluation of inductive reasoning capabilities and problem solving in large language models. In Yvette Graham and Matthew Purver (eds.), Findings of the Association for Computational Linguistics: EACL 2024, pp. 323-339, St. Julian's, Malta, March 2024. Association for Computational Linguistics. URL https://aclanthology.org/2024.findings-eacl.22." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 698, + 504, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 698, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 106, + 698, + 504, + 732 + ], + "type": "text", + "content": "Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh," + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 504, + 732 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 115, + 82, + 504, + 160 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 82, + 504, + 160 + ], + "spans": [ + { + "bbox": [ + 115, + 82, + 504, + 160 + ], + "type": "text", + "content": "Daniel Ziegler, Jeffrey Wu, Clemens Winter, Chris Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. Language models are few-shot learners. In H. Larochelle, M. Ranzato, R. Hadsell, M.F. Balcan, and H. Lin (eds.), Advances in Neural Information Processing Systems, volume 33, pp. 1877-1901. Curran Associates, Inc., 2020. URL https://proceedings.neurips.cc/paper_files/paper/2020/file/1457c0d6bcbd4967418bfb8ac142f64a-Paper.pdf." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 165, + 504, + 211 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 165, + 504, + 211 + ], + "spans": [ + { + "bbox": [ + 105, + 165, + 504, + 211 + ], + "type": "text", + "content": "Hyungjoo Chae, Yeonghyeon Kim, Seungone Kim, Kai Tzu-iunn Ong, Beong-woo Kwak, Moohyeon Kim, Seonghwan Kim, Taeyoon Kwon, Jiwan Chung, Youngjae Yu, et al. Language Models as Compilers: Simulating Pseudocode Execution Improves Algorithmic Reasoning in Language Models. arXiv preprint arXiv:2404.02575, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 216, + 504, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 216, + 504, + 250 + ], + "spans": [ + { + "bbox": [ + 105, + 216, + 504, + 250 + ], + "type": "text", + "content": "Chih Yao Chen, Swarnadeep Saha, and Mohit Bansal. Reconcile: Round-table conference improves reasoning via consensus among diverse LLMs, 2024. URL https://openreview.net/forum?id=Yo16nUVIJD." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 256, + 504, + 300 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 256, + 504, + 300 + ], + "spans": [ + { + "bbox": [ + 105, + 256, + 504, + 300 + ], + "type": "text", + "content": "Wenhu Chen, Xueguang Ma, Xinyi Wang, and William W. Cohen. Program of thoughts prompting: Disentangling computation from reasoning for numerical reasoning tasks. Transactions on Machine Learning Research, 2023. ISSN 2835-8856. URL https://openreview.net/forum?id=YfZ4ZPt8zd." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 307, + 504, + 341 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 307, + 504, + 341 + ], + "spans": [ + { + "bbox": [ + 105, + 307, + 504, + 341 + ], + "type": "text", + "content": "Peter Clark, Isaac Cowhey, Oren Etzioni, Tushar Khot, Ashish Sabharwal, Carissa Schoenick, and Oyvind Tafjord. Think you have Solved Question Answering? Try ARC, the AI2 Reasoning Challenge. arXiv:1803.05457v1, 2018." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 346, + 504, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 346, + 504, + 392 + ], + "spans": [ + { + "bbox": [ + 105, + 346, + 504, + 392 + ], + "type": "text", + "content": "Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, Christopher Hesse, and John Schulman. Training verifiers to solve math word problems. ArXiv, abs/2110.14168, 2021. URL https://api.semanticscholar.org/CorpusID:239998651." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 397, + 504, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 397, + 504, + 430 + ], + "spans": [ + { + "bbox": [ + 105, + 397, + 504, + 430 + ], + "type": "text", + "content": "Marie-Catherine de Marneffe, Mandy Simons, and Judith Tonhauser. The CommitmentBank: Investigating projection in naturally occurring discourse. In Proceedings of Sinn und Bedeutung 23, 2019." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 437, + 504, + 482 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 437, + 504, + 482 + ], + "spans": [ + { + "bbox": [ + 105, + 437, + 504, + 482 + ], + "type": "text", + "content": "Leonardo De Moura and Nikolaj Bjørner. Z3: An efficient SMT solver. In Proceedings of the Theory and Practice of Software, 14th International Conference on Tools and Algorithms for the Construction and Analysis of Systems, TACAS'08/ETAPS'08, pp. 337-340, Berlin, Heidelberg, 2008. Springer-Verlag. ISBN 3540787992." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 487, + 504, + 512 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 487, + 504, + 512 + ], + "spans": [ + { + "bbox": [ + 105, + 487, + 504, + 512 + ], + "type": "text", + "content": "Yuntian Deng, Yejin Choi, and Stuart Shieber. From Explicit CoT to Implicit CoT: Learning to Internalize CoT Step by Step. 
arXiv preprint arXiv:2405.14838, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 517, + 504, + 550 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 517, + 504, + 550 + ], + "spans": [ + { + "bbox": [ + 105, + 517, + 504, + 550 + ], + "type": "text", + "content": "Yilun Du, Shuang Li, Antonio Torralba, Joshua B Tenenbaum, and Igor Mordatch. Improving factuality and reasoning in language models through multiagent debate. arXiv preprint arXiv:2305.14325, 2023." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 556, + 504, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 556, + 504, + 590 + ], + "spans": [ + { + "bbox": [ + 105, + 556, + 504, + 590 + ], + "type": "text", + "content": "Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. The Llama 3 Herd of Models. arXiv preprint arXiv:2407.21783, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 596, + 504, + 630 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 596, + 504, + 630 + ], + "spans": [ + { + "bbox": [ + 105, + 596, + 504, + 630 + ], + "type": "text", + "content": "Yann Dubois, Xuechen Li, Rohan Taori, Tianyi Zhang, Ishaan Gulrajani, Jimmy Ba, Carlos Guestrin, Percy Liang, and Tatsunori B. Hashimoto. AlpacaFarm: A Simulation Framework for Methods that Learn from Human Feedback, 2023." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 635, + 504, + 693 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 635, + 504, + 693 + ], + "spans": [ + { + "bbox": [ + 105, + 635, + 504, + 693 + ], + "type": "text", + "content": "Nouha Dziri, Ximing Lu, Melanie Sclar, Xiang Lorraine Li, Liwei Jiang, Bill Yuchen Lin, Sean Welleck, Peter West, Chandra Bhagavatula, Ronan Le Bras, Jena D. Hwang, Soumya Sanyal, Xiang Ren, Allyson Ettinger, Zaid Harchaoui, and Yejin Choi. Faith and fate: Limits of transformers on compositionality. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. URL https://openreview.net/forum?id=Fkckkr3ya8." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 698, + 504, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 698, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 504, + 732 + ], + "type": "text", + "content": "Yao Fu, Hao Peng, Ashish Sabharwal, Peter Clark, and Tushar Khot. Complexity-based prompting for multi-step reasoning. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=yf1icZHC-19." 
+ } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 732 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 505, + 117 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 505, + 117 + ], + "type": "text", + "content": "Luyu Gao, Aman Madaan, Shuyan Zhou, Uri Alon, Pengfei Liu, Yiming Yang, Jamie Callan, and Graham Neubig. PAL: program-aided language models. In Proceedings of the 40th International Conference on Machine Learning, ICML'23. JMLR.org, 2023." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 125, + 505, + 170 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 125, + 505, + 170 + ], + "spans": [ + { + "bbox": [ + 105, + 125, + 505, + 170 + ], + "type": "text", + "content": "Mor Geva, Daniel Khashabi, Elad Segal, Tushar Khot, Dan Roth, and Jonathan Berant. Did Aristotle use a laptop? A question answering benchmark with implicit reasoning strategies. Transactions of the Association for Computational Linguistics, 9:346-361, February 2021. ISSN 2307-387X. doi: 10.1162/tacl_a_00370." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 178, + 505, + 224 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 178, + 505, + 224 + ], + "spans": [ + { + "bbox": [ + 105, + 178, + 505, + 224 + ], + "type": "text", + "content": "L. Guan, Yifan Zhou, Denis Liu, Yantian Zha, Heni Ben Amor, and Subbarao Kambhampati. \"Task Success\" is not Enough: Investigating the Use of Video-Language Models as Behavior Critics for Catching Undesirable Agent Behaviors. ArXiv, abs/2402.04210, 2024. URL https://api.semanticscholar.org/CorpusID:267500077." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 233, + 505, + 277 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 233, + 505, + 277 + ], + "spans": [ + { + "bbox": [ + 105, + 233, + 505, + 277 + ], + "type": "text", + "content": "Atharva Gundawar, Mudit Verma, L. Guan, Karthik Valmeekam, Siddhant Bhambri, and Subbarao Kambhampati. Robust Planning with LLM-Modulo Framework: Case Study in Travel Planning. ArXiv, abs/2405.20625, 2024. URL https://api.semanticscholar.org/CorpusID:270199944." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 287, + 505, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 287, + 505, + 354 + ], + "spans": [ + { + "bbox": [ + 105, + 287, + 505, + 354 + ], + "type": "text", + "content": "Simeng Han, Hailey Schoelkopf, Yilun Zhao, Zhenting Qi, Martin Riddell, Luke Benson, Lucy Sun, Ekaterina Zubova, Yujie Qiao, Matthew Burtell, David Peng, Jonathan Fan, Yixin Liu, Brian Wong, Malcolm Sailor, Ansong Ni, Linyong Nan, Jungo Kasai, Tao Yu, Rui Zhang, Shafiq Joty, Alexander R. Fabbri, Wojciech Kryscinski, Xi Victoria Lin, Caiming Xiong, and Dragomir Radev. FOLIO: Natural Language Reasoning with First-Order Logic. arXiv preprint arXiv:2209.00840, 2022. URL https://arxiv.org/abs/2209.00840." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 363, + 505, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 363, + 505, + 430 + ], + "spans": [ + { + "bbox": [ + 105, + 363, + 505, + 430 + ], + "type": "text", + "content": "Shibo Hao, Yi Gu, Haodi Ma, Joshua Hong, Zhen Wang, Daisy Wang, and Zhiting Hu. Reasoning with language model is planning with world model. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pp. 8154-8173, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.507. URL https://aclanthology.org/2023.emnlp-main.507." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 439, + 505, + 474 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 439, + 505, + 474 + ], + "spans": [ + { + "bbox": [ + 105, + 439, + 505, + 474 + ], + "type": "text", + "content": "Dan Hendrycks, Collin Burns, Steven Basart, Andy Zou, Mantas Mazeika, Dawn Song, and Jacob Steinhardt. Measuring massive multitask language understanding. Proceedings of the International Conference on Learning Representations (ICLR), 2021a." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 482, + 505, + 516 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 482, + 505, + 516 + ], + "spans": [ + { + "bbox": [ + 105, + 482, + 505, + 516 + ], + "type": "text", + "content": "Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring Mathematical Problem Solving With the MATH Dataset. NeurIPS, 2021b." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 525, + 505, + 548 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 525, + 505, + 548 + ], + "spans": [ + { + "bbox": [ + 105, + 525, + 505, + 548 + ], + "type": "text", + "content": "Hanxu Hu, Hongyuan Lu, Huajian Zhang, Wai Lam, and Yue Zhang. Chain-of-symbol prompting elicits planning in large language models, 2023." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 557, + 505, + 602 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 557, + 505, + 602 + ], + "spans": [ + { + "bbox": [ + 105, + 557, + 505, + 602 + ], + "type": "text", + "content": "Wenyue Hua, Kaijie Zhu, Lingyao Li, Lizhou Fan, Shuhang Lin, Mingyu Jin, Haochen Xue, Zelong Li, Jindong Wang, and Yongfeng Zhang. Disentangling Logic: The Role of Context in Large Language Model Reasoning Capabilities. ArXiv, abs/2406.02787, 2024. URL https://api.semanticscholar.org/CorpusID:270258104." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 611, + 505, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 611, + 505, + 645 + ], + "spans": [ + { + "bbox": [ + 105, + 611, + 505, + 645 + ], + "type": "text", + "content": "Wenlong Huang, Pieter Abbeel, Deepak Pathak, and Igor Mordatch. Language models as zero-shot planners: Extracting actionable knowledge for embodied agents. arXiv preprint arXiv:2201.07207, 2022." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 654, + 505, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 654, + 505, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 654, + 505, + 732 + ], + "type": "text", + "content": "Jinghan Jia, Abi Komma, Timothy Leffel, Xujun Peng, Ajay Nagesh, Tamer Soliman, Aram Galstyan, and Anoop Kumar. Leveraging LLMs for dialogue quality measurement. In Yi Yang, Aida Davani, Avi Sil, and Anoop Kumar (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 6: Industry Track), pp. 359-367, Mexico City, Mexico, June 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.naacl-industry.30. URL https://aclanthology.org/2024.naacl-industry.30." + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 732 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 107, + 81, + 505, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 505, + 138 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 505, + 138 + ], + "type": "text", + "content": "Albert Qiaochu Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de Las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lucile Saulnier, Lélio Renard Lavaud, Marie-Anne Lachaux, Pierre Stock, Teven Le Scao, Thibaut Lavril, Thomas Wang, Timothée Lacroix, and William El Sayed. Mistral 7B. ArXiv, abs/2310.06825, 2023. URL https://api.semanticscholar.org/CorpusID:263830494." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 144, + 506, + 224 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 144, + 506, + 224 + ], + "spans": [ + { + "bbox": [ + 105, + 144, + 506, + 224 + ], + "type": "text", + "content": "Dongwei Jiang, Marcio Fonseca, and Shay B. Cohen. Leanreasoner: Boosting complex logical reasoning with lean. In Kevin Duh, Helena Gómez-Adorno, and Steven Bethard (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), NAACL 2024, Mexico City, Mexico, June 16-21, 2024, pp. 7497-7510. Association for Computational Linguistics, 2024. doi: 10.18653/v1/2024.naacl-long.416. URL https://doi.org/10.18653/v1/2024.naacl-long.416." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 229, + 504, + 275 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 229, + 504, + 275 + ], + "spans": [ + { + "bbox": [ + 105, + 229, + 504, + 275 + ], + "type": "text", + "content": "Brihi Joshi, Ziyi Liu, Sahana Ramnath, Aaron Chan, Zhewei Tong, Shaoliang Nie, Qifan Wang, Yejin Choi, and Xiang Ren. Are Machine Rationales (Not) Useful to Humans? Measuring and Improving Human Utility of Free-text Rationales. ArXiv, abs/2305.07095, 2023. URL https://api.semanticscholar.org/CorpusID:258676376." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 281, + 504, + 316 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 281, + 504, + 316 + ], + "spans": [ + { + "bbox": [ + 105, + 281, + 504, + 316 + ], + "type": "text", + "content": "Subbarao Kambhampati. Can large language models reason and plan? Annals of the New York Academy of Sciences, 1534:15-18, 2024. URL https://api.semanticscholar.org/CorpusID:268249961." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 322, + 504, + 400 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 322, + 504, + 400 + ], + "spans": [ + { + "bbox": [ + 105, + 322, + 504, + 400 + ], + "type": "text", + "content": "Subbarao Kambhampati, Karthik Valmeekam, Lin Guan, Mudit Verma, Kaya Stechly, Siddhant Bhambri, Lucas Paul Saldyt, and Anil B Murthy. Position: LLMs can't plan, but can help planning in LLM-modulo frameworks. In Ruslan Salakhutdinov, Zico Kolter, Katherine Heller, Adrian Weller, Nuria Oliver, Jonathan Scarlett, and Felix Berkenkamp (eds.), Proceedings of the 41st International Conference on Machine Learning, volume 235 of Proceedings of Machine Learning Research, pp. 22895-22907. PMLR, 21-27 Jul 2024a. URL https://proceedings.mlr.press/v235/kambhampati24a.html." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 407, + 504, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 407, + 504, + 453 + ], + "spans": [ + { + "bbox": [ + 105, + 407, + 504, + 453 + ], + "type": "text", + "content": "Subbarao Kambhampati, Karthik Valmeekam, Lin Guan, Mudit Verma, Kaya Stechly, Siddhant Bhambri, Lucas Paul Saldyt, and Anil B Murthy. Position: LLMs can't plan, but can help planning in LLM-modulo frameworks. In Forty-first International Conference on Machine Learning, 2024b. URL https://openreview.net/forum?id=Th8JPEmH4z." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 460, + 504, + 483 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 460, + 504, + 483 + ], + "spans": [ + { + "bbox": [ + 105, + 460, + 504, + 483 + ], + "type": "text", + "content": "Liwei Kang, Zirui Zhao, David Hsu, and Wee Sun Lee. On the empirical complexity of reasoning and planning in LLMs. arXiv preprint arXiv:2404.11041, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 490, + 504, + 523 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 490, + 504, + 523 + ], + "spans": [ + { + "bbox": [ + 105, + 490, + 504, + 523 + ], + "type": "text", + "content": "Marzena Karpinska, Katherine Thai, Kyle Lo, Tanya Goyal, and Mohit Iyyer. One thousand and one pairs: A \"novel\" challenge for long-context language models. ArXiv, abs/2406.16264, 2024. URL https://api.semanticscholar.org/CorpusID:270703648." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 531, + 504, + 576 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 531, + 504, + 576 + ], + "spans": [ + { + "bbox": [ + 105, + 531, + 504, + 576 + ], + "type": "text", + "content": "Tushar Khot, H. Trivedi, Matthew Finlayson, Yao Fu, Kyle Richardson, Peter Clark, and Ashish Sabharwal. Decomposed prompting: A modular approach for solving complex tasks. In The International Conference on Learning Representations, volume abs/2210.02406, 2023. URL https://api.semanticscholar.org/CorpusID:252715485." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 582, + 504, + 628 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 582, + 504, + 628 + ], + "spans": [ + { + "bbox": [ + 105, + 582, + 504, + 628 + ], + "type": "text", + "content": "Seungone Kim, Juyoung Suk, Ji Yong Cho, Shayne Longpre, Chaeun Kim, Dongkeun Yoon, Guijin Son, Yejin Cho, Sheikh Shafayat, Jinheon Baek, et al. The BiGGen Bench: A Principled Benchmark for Fine-grained Evaluation of Language Models with Language Models. arXiv preprint arXiv:2406.05761, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 635, + 504, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 635, + 504, + 679 + ], + "spans": [ + { + "bbox": [ + 105, + 635, + 504, + 679 + ], + "type": "text", + "content": "Takeshi Kojima, Shixiang Shane Gu, Machel Reid, Yutaka Matsuo, and Yusuke Iwasawa. Large language models are zero-shot reasoners. In Proceedings of the 36th International Conference on Neural Information Processing Systems, Red Hook, NY, USA, 2022. Curran Associates Inc. ISBN 9781713871088." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 687, + 504, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 687, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 687, + 504, + 732 + ], + "type": "text", + "content": "Woosuk Kwon, Zhuohan Li, Siyuan Zhuang, Ying Sheng, Lianmin Zheng, Cody Hao Yu, Joseph E. Gonzalez, Hao Zhang, and Ion Stoica. Efficient memory management for large language model serving with PagedAttention. In Proceedings of the ACM SIGOPS 29th Symposium on Operating Systems Principles, 2023." + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 505, + 732 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 107, + 82, + 505, + 148 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 82, + 505, + 148 + ], + "spans": [ + { + "bbox": [ + 107, + 82, + 505, + 148 + ], + "type": "text", + "content": "Brenden M. Lake and Marco Baroni. 
Generalization without systematicity: On the compositional skills of sequence-to-sequence recurrent networks. In Jennifer G. Dy and Andreas Krause (eds.), Proceedings of the 35th International Conference on Machine Learning, ICML 2018, Stockholm, Sweden, July 10-15, 2018, volume 80 of Proceedings of Machine Learning Research, pp. 2879-2888. PMLR, 2018. URL http://proceedings.mlr.press/v80/lake18a.html." + } + ] + } + ], + "index": 1 + },
+ { + "bbox": [ + 105, + 158, + 504, + 192 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 158, + 504, + 192 + ], + "spans": [ + { + "bbox": [ + 105, + 158, + 504, + 192 + ], + "type": "text", + "content": "Tamera Lanham, Anna Chen, Ansh Radhakrishnan, Benoit Steiner, Carson Denison, Danny Hernandez, Dustin Li, Esin Durmus, Evan Hubinger, Jackson Kernion, et al. Measuring faithfulness in chain-of-thought reasoning. arXiv preprint arXiv:2307.13702, 2023." + } + ] + } + ], + "index": 2 + },
+ { + "bbox": [ + 105, + 201, + 504, + 278 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 201, + 504, + 278 + ], + "spans": [ + { + "bbox": [ + 105, + 201, + 504, + 278 + ], + "type": "text", + "content": "Fangyu Lei, Qian Liu, Yiming Huang, Shizhu He, Jun Zhao, and Kang Liu. S3Eval: A synthetic, scalable, systematic evaluation suite for large language models. In Kevin Duh, Helena Gomez, and Steven Bethard (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 1259-1286, Mexico City, Mexico, June 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.naacl-long.69. URL https://aclanthology.org/2024.naacl-long.69." + } + ] + } + ], + "index": 3 + },
+ { + "bbox": [ + 105, + 287, + 504, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 287, + 504, + 365 + ], + "spans": [ + { + "bbox": [ + 105, + 287, + 504, + 365 + ], + "type": "text", + "content": "Tianwen Li, Zhexiong Liu, Lindsay Matsumura, Elaine Wang, Diane Litman, and Richard Correnti. Using large language models to assess young students' writing revisions. In Ekaterina Kochmar, Marie Bexte, Jill Burstein, Andrea Horbach, Ronja Laarmann-Quante, Anaïs Tack, Victoria Yaneva, and Zheng Yuan (eds.), Proceedings of the 19th Workshop on Innovative Use of NLP for Building Educational Applications (BEA 2024), pp. 365–380, Mexico City, Mexico, June 2024. Association for Computational Linguistics. URL https://aclanthology.org/2024.bea-1.30." + } + ] + } + ], + "index": 4 + },
+ { + "bbox": [ + 105, + 374, + 504, + 419 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 374, + 504, + 419 + ], + "spans": [ + { + "bbox": [ + 105, + 374, + 504, + 419 + ], + "type": "text", + "content": "Hunter Lightman, Vineet Kosaraju, Yuri Burda, Harrison Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=v8L0pN6E0i." + } + ] + } + ], + "index": 5 + },
+ { + "bbox": [ + 105, + 428, + 504, + 462 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 428, + 504, + 462 + ], + "spans": [ + { + "bbox": [ + 105, + 428, + 504, + 462 + ], + "type": "text", + "content": "B. Liu, Yuqian Jiang, Xiaohan Zhang, Qian Liu, Shiqi Zhang, Joydeep Biswas, and Peter Stone. LLM+P: Empowering large language models with optimal planning proficiency. ArXiv, abs/2304.11477, 2023a. URL https://api.semanticscholar.org/CorpusID:258298051." + } + ] + } + ], + "index": 6 + },
+ { + "bbox": [ + 105, + 471, + 504, + 506 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 471, + 504, + 506 + ], + "spans": [ + { + "bbox": [ + 105, + 471, + 504, + 506 + ], + "type": "text", + "content": "Bingbin Liu, Jordan T. Ash, Surbhi Goel, Akshay Krishnamurthy, and Cyril Zhang. Transformers learn shortcuts to automata. In The Eleventh International Conference on Learning Representations, 2023b. URL https://openreview.net/forum?id=De4FYqjFueZ." + } + ] + } + ], + "index": 7 + },
+ { + "bbox": [ + 105, + 514, + 504, + 592 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 514, + 504, + 592 + ], + "spans": [ + { + "bbox": [ + 105, + 514, + 504, + 592 + ], + "type": "text", + "content": "Pan Lu, Swaroop Mishra, Tanglin Xia, Liang Qiu, Kai-Wei Chang, Song-Chun Zhu, Oyvind Tafjord, Peter Clark, and Ashwin Kalyan. Learn to explain: Multimodal reasoning via thought chains for science question answering. In Sanmi Koyejo, S. Mohamed, A. Agarwal, Danielle Belgrave, K. Cho, and A. Oh (eds.), Advances in Neural Information Processing Systems 35: Annual Conference on Neural Information Processing Systems 2022, NeurIPS 2022, New Orleans, LA, USA, November 28 - December 9, 2022, 2022. URL http://papers.nips.cc/paper_files/paper/2022/hash/11332b6b6cf4485b84afadb1352d3a9a-Abstract-Conference.html." + } + ] + } + ], + "index": 8 + },
+ { + "bbox": [ + 105, + 601, + 504, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 601, + 504, + 647 + ], + "spans": [ + { + "bbox": [ + 105, + 601, + 504, + 647 + ], + "type": "text", + "content": "Yingwei Ma, Yue Liu, Yue Yu, Yuanliang Zhang, Yu Jiang, Changjian Wang, and Shanshan Li. At Which Training Stage Does Code Data Help LLMs Reasoning? In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net, 2024. URL https://openreview.net/forum?id=KIPJKST4gw." + } + ] + } + ], + "index": 9 + },
+ { + "bbox": [ + 105, + 655, + 504, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 655, + 504, + 689 + ], + "spans": [ + { + "bbox": [ + 105, + 655, + 504, + 689 + ], + "type": "text", + "content": "Aman Madaan and Amir Yazdanbakhsh. Text and patterns: For effective chain of thought, it takes two to tango. ArXiv, abs/2209.07686, 2022. URL https://api.semanticscholar.org/CorpusID:252355328." + } + ] + } + ], + "index": 10 + },
+ { + "bbox": [ + 105, + 698, + 504, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 698, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 504, + 732 + ], + "type": "text", + "content": "William Merrill and Ashish Sabharwal. The expressive power of transformers with chain of thought. In The International Conference on Learning Representations, volume abs/2310.07923, 2024. URL https://api.semanticscholar.org/CorpusID:263909434." + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + },
+ { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 732 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 107, + 81, + 506, + 128 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 506, + 128 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 506, + 128 + ], + "type": "text", + "content": "Maxwell Nye, Anders Johan Andreassen, Guy Gur-Ari, Henryk Michalewski, Jacob Austin, David Bieber, David Dohan, Aitor Lewkowycz, Maarten Bosma, David Luan, Charles Sutton, and Augustus Odena. Show your work: Scratchpads for intermediate computation with language models, 2022. URL https://openreview.net/forum?id=iedYJm92o0a." + } + ] + } + ], + "index": 1 + },
+ { + "bbox": [ + 105, + 133, + 505, + 170 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 133, + 505, + 170 + ], + "spans": [ + { + "bbox": [ + 105, + 133, + 505, + 170 + ], + "type": "text", + "content": "Theo X. Olausson, Jeevana Priya Inala, Chenglong Wang, Jianfeng Gao, and Armando Solar-Lezama. Is self-repair a silver bullet for code generation? In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=y0GJXRungR." + } + ] + } + ], + "index": 2 + },
+ { + "bbox": [ + 105, + 175, + 503, + 200 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 175, + 503, + 200 + ], + "spans": [ + { + "bbox": [ + 105, + 175, + 503, + 200 + ], + "type": "text", + "content": "OpenAI. GPT-4 Technical Report. ArXiv, abs/2303.08774, 2023. URL https://api.semanticscholar.org/CorpusID:257532815." + } + ] + } + ], + "index": 3 + },
+ { + "bbox": [ + 106, + 208, + 505, + 275 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 208, + 505, + 275 + ], + "spans": [ + { + "bbox": [ + 106, + 208, + 505, + 275 + ], + "type": "text", + "content": "Liangming Pan, Alon Albalak, Xinyi Wang, and William Wang. Logic-LM: Empowering large language models with symbolic solvers for faithful logical reasoning. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Findings of the Association for Computational Linguistics: EMNLP 2023, pp. 3806-3824, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.findings-emnlp.248. URL https://aclanthology.org/2023.findings-emnlp.248." + } + ] + } + ], + "index": 4 + },
+ { + "bbox": [ + 106, + 282, + 505, + 351 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 282, + 505, + 351 + ], + "spans": [ + { + "bbox": [ + 106, + 282, + 505, + 351 + ], + "type": "text", + "content": "Xiangyu Peng, Siyan Li, Sarah Wiegreffe, and Mark Riedl. Inferring the reader: Guiding automated story generation with commonsense reasoning. In Yoav Goldberg, Zornitsa Kozareva, and Yue Zhang (eds.), Findings of the Association for Computational Linguistics: EMNLP 2022, pp. 7008-7029, Abu Dhabi, United Arab Emirates, December 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.findings-emnlp.520. URL https://aclanthology.org/2022.findings-emnlp.520." + } + ] + } + ], + "index": 5 + },
+ { + "bbox": [ + 105, + 357, + 504, + 383 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 357, + 504, + 383 + ], + "spans": [ + { + "bbox": [ + 105, + 357, + 504, + 383 + ], + "type": "text", + "content": "Zhenting Qi, Mingyuan Ma, Jiahang Xu, Li Lyna Zhang, Fan Yang, and Mao Yang. Mutual Reasoning Makes Smaller LLMs Stronger Problem-Solvers. arXiv preprint arXiv:2408.06195, 2024." + } + ] + } + ], + "index": 6 + },
+ { + "bbox": [ + 106, + 389, + 505, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 389, + 505, + 456 + ], + "spans": [ + { + "bbox": [ + 106, + 389, + 505, + 456 + ], + "type": "text", + "content": "Xin Quan, Marco Valentino, Louise Dennis, and Andre Freitas. Enhancing ethical explanations of large language models through iterative symbolic refinement. In Yvette Graham and Matthew Purver (eds.), Proceedings of the 18th Conference of the European Chapter of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 1-22, St. Julian's, Malta, March 2024. Association for Computational Linguistics. URL https://aclanthology.org/2024.eacl-long.1." + } + ] + } + ], + "index": 7 + },
+ { + "bbox": [ + 105, + 464, + 504, + 499 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 464, + 504, + 499 + ], + "spans": [ + { + "bbox": [ + 105, + 464, + 504, + 499 + ], + "type": "text", + "content": "Machel Reid et al. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. ArXiv, abs/2403.05530, 2024. URL https://api.semanticscholar.org/CorpusID:268297180." + } + ] + } + ], + "index": 8 + },
+ { + "bbox": [ + 106, + 506, + 505, + 541 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 506, + 505, + 541 + ], + "spans": [ + { + "bbox": [ + 106, + 506, + 505, + 541 + ], + "type": "text", + "content": "David Rein, Betty Li Hou, Asa Cooper Stickland, Jackson Petty, Richard Yuanzhe Pang, Julien Dirani, Julian Michael, and Samuel R. Bowman. GPQA: A Graduate-Level Google-Proof Q&A Benchmark, 2023." + } + ] + } + ], + "index": 9 + },
+ { + "bbox": [ + 105, + 548, + 504, + 573 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 548, + 504, + 573 + ], + "spans": [ + { + "bbox": [ + 105, + 548, + 504, + 573 + ], + "type": "text", + "content": "Gemma Team: Morgane Riviere et al. Gemma 2: Improving open language models at a practical size. 2024. URL https://api.semanticscholar.org/CorpusID:270843326." + } + ] + } + ], + "index": 10 + },
+ { + "bbox": [ + 106, + 580, + 505, + 615 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 580, + 505, + 615 + ], + "spans": [ + { + "bbox": [ + 106, + 580, + 505, + 615 + ], + "type": "text", + "content": "Keisuke Sakaguchi, Ronan Le Bras, Chandra Bhagavatula, and Yejin Choi. WinoGrande: an adversarial winograd schema challenge at scale. Commun. ACM, 64(9):99-106, August 2021. ISSN 0001-0782. doi: 10.1145/3474381. URL https://doi.org/10.1145/3474381."
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 622, + 505, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 622, + 505, + 690 + ], + "spans": [ + { + "bbox": [ + 106, + 622, + 505, + 690 + ], + "type": "text", + "content": "Maarten Sap, Hannah Rashkin, Derek Chen, Ronan Le Bras, and Yejin Choi. Social IQa: Commonsense reasoning about social interactions. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pp. 4463-4473, Hong Kong, China, November 2019. Association for Computational Linguistics. doi: 10.18653/v1/D19-1454. URL https://aclanthology.org/D19-1454." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 698, + 505, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 698, + 505, + 732 + ], + "spans": [ + { + "bbox": [ + 106, + 698, + 505, + 732 + ], + "type": "text", + "content": "Abulhair Saparov and He He. Language models are greedy reasoners: A systematic formal analysis of chain-of-thought. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=qFVVBzXxR2V." + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 731 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 107, + 81, + 505, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 505, + 117 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 505, + 117 + ], + "type": "text", + "content": "Zayne Rea Sprague, Xi Ye, Kaj Bostrom, Swarat Chaudhuri, and Greg Durrett. MuSR: Testing the limits of chain-of-thought with multistep soft reasoning. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=jenyYQzue1." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 124, + 505, + 169 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 124, + 505, + 169 + ], + "spans": [ + { + "bbox": [ + 105, + 124, + 505, + 169 + ], + "type": "text", + "content": "Aarohi Srivastava, Abhinav Rastogi, Abhishek Rao, Abu Awal Md Shoeb, Abubakar Abid, Adam Fisch, Adam R Brown, Adam Santoro, Aditya Gupta, Adrià Garriga-Alonso, et al. Beyond the imitation game: Quantifying and extrapolating the capabilities of language models. arXiv preprint arXiv:2206.04615, 2022." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 178, + 505, + 245 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 178, + 505, + 245 + ], + "spans": [ + { + "bbox": [ + 105, + 178, + 505, + 245 + ], + "type": "text", + "content": "Maja Stahl, Leon Biermann, Andreas Nehring, and Henning Wachsmuth. Exploring LLM prompting strategies for joint essay scoring and feedback generation. In Ekaterina Kochmar, Marie Bexte, Jill Burstein, Andrea Horbach, Ronja Laarmann-Quante, Anaïs Tack, Victoria Yaneva, and Zheng Yuan (eds.), Proceedings of the 19th Workshop on Innovative Use of NLP for Building Educational Applications (BEA 2024), pp. 283–298, Mexico City, Mexico, June 2024. Association for Computational Linguistics. URL https://aclanthology.org/2024.bea-1.23." + } + ] + } + ], + "index": 3 + },
+ { + "bbox": [ + 105, + 254, + 505, + 288 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 254, + 505, + 288 + ], + "spans": [ + { + "bbox": [ + 105, + 254, + 505, + 288 + ], + "type": "text", + "content": "Kaya Stechly, Karthik Valmeekam, and Subbarao Kambhampati. On the self-verification limitations of large language models on reasoning and planning tasks. ArXiv, abs/2402.08115, 2024a. URL https://api.semanticscholar.org/CorpusID:267637077." + } + ] + } + ], + "index": 4 + },
+ { + "bbox": [ + 105, + 297, + 505, + 330 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 297, + 505, + 330 + ], + "spans": [ + { + "bbox": [ + 105, + 297, + 505, + 330 + ], + "type": "text", + "content": "Kaya Stechly, Karthik Valmeekam, and Subbarao Kambhampati. Chain of thoughtlessness? An analysis of CoT in planning. 2024b. URL https://api.semanticscholar.org/CorpusID:269626390." + } + ] + } + ], + "index": 5 + },
+ { + "bbox": [ + 105, + 339, + 505, + 406 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 339, + 505, + 406 + ], + "spans": [ + { + "bbox": [ + 105, + 339, + 505, + 406 + ], + "type": "text", + "content": "Simeng Sun, Yang Liu, Shuohang Wang, Dan Iter, Chenguang Zhu, and Mohit Iyyer. PEARL: Prompting large language models to plan and execute actions over long documents. In Yvette Graham and Matthew Purver (eds.), Proceedings of the 18th Conference of the European Chapter of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 469-486, St. Julian's, Malta, March 2024. Association for Computational Linguistics. URL https://aclanthology.org/2024.eacl-long.29." + } + ] + } + ], + "index": 6 + },
+ { + "bbox": [ + 105, + 415, + 505, + 492 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 415, + 505, + 492 + ], + "spans": [ + { + "bbox": [ + 105, + 415, + 505, + 492 + ], + "type": "text", + "content": "Mirac Suzgun, Nathan Scales, Nathanael Schärli, Sebastian Gehrmann, Yi Tay, Hyung Won Chung, Aakanksha Chowdhery, Quoc Le, Ed Chi, Denny Zhou, and Jason Wei. Challenging BIG-bench tasks and whether chain-of-thought can solve them. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Findings of the Association for Computational Linguistics: ACL 2023, pp. 13003-13051, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.findings-acl.824. URL https://aclanthology.org/2023.findings-acl.824." + } + ] + } + ], + "index": 7 + },
+ { + "bbox": [ + 105, + 502, + 505, + 568 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 502, + 505, + 568 + ], + "spans": [ + { + "bbox": [ + 105, + 502, + 505, + 568 + ], + "type": "text", + "content": "Alon Talmor, Jonathan Herzig, Nicholas Lourie, and Jonathan Berant. CommonsenseQA: A question answering challenge targeting commonsense knowledge. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pp. 4149-4158, Minneapolis, Minnesota, June 2019. Association for Computational Linguistics. doi: 10.18653/v1/N19-1421. URL https://aclanthology.org/N19-1421." + } + ] + } + ], + "index": 8 + },
+ { + "bbox": [ + 105, + 578, + 505, + 731 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 578, + 505, + 731 + ], + "spans": [ + { + "bbox": [ + 105, + 578, + 505, + 731 + ], + "type": "text", + "content": "Hugo Touvron, Louis Martin, Kevin R. Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, Daniel M. Bikel, Lukas Blecher, Cristian Cantón Ferrer, Moya Chen, Guillem Cucurull, David Esiobu, Jude Fernandes, Jeremy Fu, Wenyin Fu, Brian Fuller, Cynthia Gao, Vedanuj Goswami, Naman Goyal, Anthony S. Hartshorn, Saghar Hosseini, Rui Hou, Hakan Inan, Marcin Kardas, Viktor Kerkez, Madian Khabsa, Isabel M. Kloumann, A. V. Korenev, Punit Singh Koura, Marie-Anne Lachaux, Thibaut Lavril, Jenya Lee, Diana Liskovich, Yinghai Lu, Yuning Mao, Xavier Martinet, Todor Mihaylov, Pushkar Mishra, Igor Molybog, Yixin Nie, Andrew Poulton, Jeremy Reizenstein, Rashi Rungta, Kalyan Saladi, Alan Schelten, Ruan Silva, Eric Michael Smith, R. Subramanian, Xia Tan, Binh Tang, Ross Taylor, Adina Williams, Jian Xiang Kuan, Puxin Xu, Zhengxu Yan, Iliyan Zarov, Yuchen Zhang, Angela Fan, Melanie Kambadur, Sharan Narang, Aurelien Rodriguez, Robert Stojnic, Sergey Edunov, and Thomas Scialom. Llama 2: Open foundation and fine-tuned chat models. ArXiv, abs/2307.09288, 2023. URL https://api.semanticscholar.org/CorpusID:259950998." + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + },
+ { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 733 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 107, + 81, + 505, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 505, + 116 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 505, + 116 + ], + "type": "text", + "content": "Harsh Trivedi, Niranjan Balasubramanian, Tushar Khot, and Ashish Sabharwal. MuSiQue: Multi-hop questions via single-hop question composition. Transactions of the Association for Computational Linguistics, 2022." + } + ] + } + ], + "index": 1 + },
+ { + "bbox": [ + 105, + 121, + 505, + 166 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 121, + 505, + 166 + ], + "spans": [ + { + "bbox": [ + 105, + 121, + 505, + 166 + ], + "type": "text", + "content": "Karthik Valmeekam, Matthew Marquez, Sarath Sreedharan, and Subbarao Kambhampati. On the planning abilities of large language models - a critical investigation. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. URL https://openreview.net/forum?id=X6dEqXIsEW." + } + ] + } + ], + "index": 2 + },
+ { + "bbox": [ + 105, + 172, + 505, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 172, + 505, + 217 + ], + "spans": [ + { + "bbox": [ + 105, + 172, + 505, + 217 + ], + "type": "text", + "content": "Karthik Valmeekam, Matthew Marquez, Alberto Olmo, Sarath Sreedharan, and Subbarao Kambhampati. PlanBench: An extensible benchmark for evaluating large language models on planning and reasoning about change. In Proceedings of the 37th International Conference on Neural Information Processing Systems, NIPS '23, Red Hook, NY, USA, 2024. Curran Associates Inc." + } + ] + } + ], + "index": 3 + },
+ { + "bbox": [ + 105, + 222, + 505, + 267 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 222, + 505, + 267 + ], + "spans": [ + { + "bbox": [ + 105, + 222, + 505, + 267 + ], + "type": "text", + "content": "Mudit Verma, Siddhant Bhambri, and Subbarao Kambhampati. Theory of mind abilities of large language models in human-robot interaction: An illusion? Companion of the 2024 ACM/IEEE International Conference on Human-Robot Interaction, 2024. URL https://api.semanticscholar.org/CorpusID:266902529." + } + ] + } + ], + "index": 4 + },
+ { + "bbox": [ + 105, + 273, + 505, + 340 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 273, + 505, + 340 + ], + "spans": [ + { + "bbox": [ + 105, + 273, + 505, + 340 + ], + "type": "text", + "content": "Boshi Wang, Sewon Min, Xiang Deng, Jiaming Shen, You Wu, Luke Zettlemoyer, and Huan Sun. Towards understanding chain-of-thought prompting: An empirical study of what matters. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 2717-2739, Toronto, Canada, July 2023a. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-long.153. URL https://aclanthology.org/2023.acl-long.153." + } + ] + } + ], + "index": 5 + },
+ { + "bbox": [ + 105, + 345, + 505, + 390 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 345, + 505, + 390 + ], + "spans": [ + { + "bbox": [ + 105, + 345, + 505, + 390 + ], + "type": "text", + "content": "Lei Wang, Wanyu Xu, Yihuai Lan, Zhiqiang Hu, Yunshi Lan, Roy Ka-Wei Lee, and Ee-Peng Lim. Plan-and-solve prompting: Improving zero-shot chain-of-thought reasoning by large language models. In Annual Meeting of the Association for Computational Linguistics, 2023b. URL https://api.semanticscholar.org/CorpusID:258558102." + } + ] + } + ], + "index": 6 + },
+ { + "bbox": [ + 105, + 395, + 505, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 395, + 505, + 441 + ], + "spans": [ + { + "bbox": [ + 105, + 395, + 505, + 441 + ], + "type": "text", + "content": "Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc V Le, Ed H. Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. Self-consistency improves chain of thought reasoning in language models. In The Eleventh International Conference on Learning Representations, 2023c. URL https://openreview.net/forum?id=1PL1NIMMrw." + } + ] + } + ], + "index": 7 + },
+ { + "bbox": [ + 105, + 445, + 505, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 445, + 505, + 491 + ], + "spans": [ + { + "bbox": [ + 105, + 445, + 505, + 491 + ], + "type": "text", + "content": "Yubo Wang, Xueguang Ma, Ge Zhang, Yuansheng Ni, Abhranil Chandra, Shiguang Guo, Weiming Ren, Aaran Arulraj, Xuan He, Ziyan Jiang, Tianle Li, Max Ku, Kai Wang, Alex Zhuang, Rongqi Fan, Xiang Yue, and Wenhu Chen. MMLU-Pro: A More Robust and Challenging Multi-Task Language Understanding Benchmark, 2024." + } + ] + } + ], + "index": 8 + },
+ { + "bbox": [ + 105, + 496, + 505, + 531 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 496, + 505, + 531 + ], + "spans": [ + { + "bbox": [ + 105, + 496, + 505, + 531 + ], + "type": "text", + "content": "Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. Chain-of-thought prompting elicits reasoning in large language models. Advances in Neural Information Processing Systems, 35:24824-24837, 2022." + } + ] + } + ], + "index": 9 + },
+ { + "bbox": [ + 105, + 536, + 505, + 581 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 536, + 505, + 581 + ], + "spans": [ + { + "bbox": [ + 105, + 536, + 505, + 581 + ], + "type": "text", + "content": "Li Siang Wong, Gabriel Grand, Alexander K. Lew, Noah D. Goodman, Vikash K. Mansinghka, Jacob Andreas, and Joshua B. Tenenbaum. From word models to world models: Translating from natural language to the probabilistic language of thought. ArXiv, abs/2306.12672, 2023. URL https://api.semanticscholar.org/CorpusID:259224900." + } + ] + } + ], + "index": 10 + },
+ { + "bbox": [ + 105, + 586, + 505, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 586, + 505, + 620 + ], + "spans": [ + { + "bbox": [ + 105, + 586, + 505, + 620 + ], + "type": "text", + "content": "Jian Xie, Kai Zhang, Jiangjie Chen, Tinghui Zhu, Renze Lou, Yuandong Tian, Yanghua Xiao, and Yu Su. TravelPlanner: A Benchmark for Real-World Planning with Language Agents. ArXiv, abs/2402.01622, 2024. URL https://api.semanticscholar.org/CorpusID:267406800." + } + ] + } + ], + "index": 11 + },
+ { + "bbox": [ + 105, + 625, + 505, + 671 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 625, + 505, + 671 + ], + "spans": [ + { + "bbox": [ + 105, + 625, + 505, + 671 + ], + "type": "text", + "content": "Miao Xiong, Zhiyuan Hu, Xinyang Lu, Yifei Li, Jie Fu, Junxian He, and Bryan Hooi. Can LLMs Express Their Uncertainty? An Empirical Evaluation of Confidence Elicitation in LLMs. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net, 2024. URL https://openreview.net/forum?id=gjeQKFxFpZ." + } + ] + } + ], + "index": 12 + },
+ { + "bbox": [ + 105, + 676, + 505, + 733 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 676, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 105, + 676, + 505, + 733 + ], + "type": "text", + "content": "Jundong Xu, Hao Fei, Liangming Pan, Qian Liu, Mong-Li Lee, and Wynne Hsu. Faithful logical reasoning via symbolic chain-of-thought.
In Lun-Wei Ku, Andre Martins, and Vivek Srikumar (eds.), Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 13326-13365, Bangkok, Thailand, August 2024a. Association for Computational Linguistics. URL https://aclanthology.org/2024.acl-long.720." + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 642 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 107, + 81, + 505, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 505, + 116 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 505, + 116 + ], + "type": "text", + "content": "Xiaohan Xu, Chongyang Tao, Tao Shen, Can Xu, Hongbo Xu, Guodong Long, and Jian-Guang Lou. Re-reading improves reasoning in language models, 2024b. URL https://openreview.net/forum?id=3jXCF5dNpC." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 122, + 505, + 157 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 122, + 505, + 157 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 505, + 157 + ], + "type": "text", + "content": "Chengrun Yang, Xuezhi Wang, Yifeng Lu, Hanxiao Liu, Quoc V Le, Denny Zhou, and Xinyun Chen. Large language models as optimizers. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=Bb4VGOWELI." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 163, + 504, + 187 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 163, + 504, + 187 + ], + "spans": [ + { + "bbox": [ + 105, + 163, + 504, + 187 + ], + "type": "text", + "content": "Shunyu Yao, Dian Yu, Jeffrey Zhao, Izhak Shafran, Thomas L. Griffiths, Yuan Cao, and Karthik Narasimhan. Tree of Thoughts: Deliberate problem solving with large language models, 2023." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 193, + 504, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 193, + 504, + 217 + ], + "spans": [ + { + "bbox": [ + 105, + 193, + 504, + 217 + ], + "type": "text", + "content": "Xi Ye, Qiaochu Chen, Isil Dillig, and Greg Durrett. Satisfiability-aided language models using declarative prompting. In Advances in Neural Information Processing Systems, 2023." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 223, + 504, + 268 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 223, + 504, + 268 + ], + "spans": [ + { + "bbox": [ + 105, + 223, + 504, + 268 + ], + "type": "text", + "content": "Hugh Zhang, Jeff Da, Dean Lee, Vaughn Robinson, Catherine Wu, Will Song, Tiffany Zhao, Pranav Raja, Dylan Slack, Qin Lyu, Sean Hendryx, Russell Kaplan, Michele Lunati, and Summer Yue. 
A careful examination of large language model performance on grade school arithmetic. ArXiv, abs/2405.00332, 2024. URL https://api.semanticscholar.org/CorpusID:269484687." + } + ] + } + ], + "index": 5 + },
+ { + "bbox": [ + 105, + 274, + 504, + 320 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 274, + 504, + 320 + ], + "spans": [ + { + "bbox": [ + 105, + 274, + 504, + 320 + ], + "type": "text", + "content": "Huaixiu Steven Zheng, Swaroop Mishra, Xinyun Chen, Heng-Tze Cheng, Ed H. Chi, Quoc V Le, and Denny Zhou. Take a step back: Evoking reasoning via abstraction in large language models. In The Twelfth International Conference on Learning Representations, 2024a. URL https://openreview.net/forum?id=3bq3jsvcQ1." + } + ] + } + ], + "index": 6 + },
+ { + "bbox": [ + 105, + 327, + 504, + 382 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 327, + 504, + 382 + ], + "spans": [ + { + "bbox": [ + 105, + 327, + 504, + 382 + ], + "type": "text", + "content": "Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zi Lin, Zhuohan Li, Dacheng Li, Eric P. Xing, Hao Zhang, Joseph E. Gonzalez, and Ion Stoica. Judging LLM-as-a-judge with MT-bench and Chatbot Arena. In Proceedings of the 37th International Conference on Neural Information Processing Systems, NIPS '23, Red Hook, NY, USA, 2024b. Curran Associates Inc." + } + ] + } + ], + "index": 7 + },
+ { + "bbox": [ + 105, + 389, + 504, + 423 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 389, + 504, + 423 + ], + "spans": [ + { + "bbox": [ + 105, + 389, + 504, + 423 + ], + "type": "text", + "content": "Wanjun Zhong, Ruixiang Cui, Yiduo Guo, Yaobo Liang, Shuai Lu, Yanlin Wang, Amin Saied, Weizhu Chen, and Nan Duan. AGIEval: A Human-Centric Benchmark for Evaluating Foundation Models, 2023." + } + ] + } + ], + "index": 8 + },
+ { + "bbox": [ + 105, + 430, + 504, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 430, + 504, + 498 + ], + "spans": [ + { + "bbox": [ + 105, + 430, + 504, + 498 + ], + "type": "text", + "content": "Ben Zhou, Kyle Richardson, Xiaodong Yu, and Dan Roth. Learning to decompose: Hypothetical question decomposition based on comparable texts. In Yoav Goldberg, Zornitsa Kozareva, and Yue Zhang (eds.), Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pp. 2223-2235, Abu Dhabi, United Arab Emirates, December 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.emnlp-main.142. URL https://aclanthology.org/2022.emnlp-main.142." + } + ] + } + ], + "index": 9 + },
+ { + "bbox": [ + 105, + 504, + 504, + 539 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 504, + 504, + 539 + ], + "spans": [ + { + "bbox": [ + 105, + 504, + 504, + 539 + ], + "type": "text", + "content": "Ben Zhou, Hongming Zhang, Sihao Chen, Dian Yu, Hongwei Wang, Baolin Peng, Dan Roth, and Dong Yu. Conceptual and unbiased reasoning in language models. ArXiv, abs/2404.00205, 2024. URL https://api.semanticscholar.org/CorpusID:268820105." + } + ] + } + ], + "index": 10 + },
+ { + "bbox": [ + 105, + 544, + 504, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 544, + 504, + 590 + ], + "spans": [ + { + "bbox": [ + 105, + 544, + 504, + 590 + ], + "type": "text", + "content": "Denny Zhou, Nathanael Schärli, Le Hou, Jason Wei, Nathan Scales, Xuezhi Wang, Dale Schuurmans, Claire Cui, Olivier Bousquet, Quoc V Le, and Ed H. Chi. Least-to-most prompting enables complex reasoning in large language models. In The Eleventh International Conference on Learning Representations, 2023a. URL https://openreview.net/forum?id=WZH7099tgtM." + } + ] + } + ], + "index": 11 + },
+ { + "bbox": [ + 105, + 597, + 504, + 642 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 597, + 504, + 642 + ], + "spans": [ + { + "bbox": [ + 105, + 597, + 504, + 642 + ], + "type": "text", + "content": "Yongchao Zhou, Andrei Ioan Muresanu, Ziwen Han, Keiran Paster, Silviu Pitis, Harris Chan, and Jimmy Ba. Large Language Models are Human-Level Prompt Engineers. In The Eleventh International Conference on Learning Representations, 2023b. URL https://openreview.net/forum?id=92gvk82DE-." + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + },
+ { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 470, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 470, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 470, + 94 + ], + "type": "text", + "content": "A META-ANALYSIS EXPANDED DETAILS ON CRITERIA AND PROCESS" + } + ] + } + ], + "index": 1 + },
+ { + "bbox": [ + 104, + 106, + 506, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 106, + 506, + 185 + ], + "spans": [ + { + "bbox": [ + 104, + 106, + 506, + 185 + ], + "type": "text", + "content": "Automatic Selection and Paper Filtering We investigate all papers from ICLR 2024, a representative ML venue, and two representative NLP venues, EACL 2024 and NAACL 2024 (including Findings and Workshop papers). We filtered all 4,642 papers (2,259 from ICLR 2024 and 2,383 from the two ACL-affiliated conferences) for those with at least two occurrences of \"CoT\", \"chain-of-thought\", or \"chain of thought\", resulting in 516 papers (a minimal sketch of this keyword filter is given below, after Appendix B). There are conceivably papers using CoT called by another name (e.g., Scratchpads), but we believe these 516 give a representative sample appropriate for systematic analysis." + } + ] + } + ], + "index": 2 + },
+ { + "bbox": [ + 104, + 198, + 506, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 198, + 506, + 275 + ], + "spans": [ + { + "bbox": [ + 104, + 198, + 506, + 275 + ], + "type": "text", + "content": "Manual Paper Filtering and Results Extraction We then filter down to papers that perform a comparison of CoT prompting vs.
direct prompting, whether or not this is core to the paper's research question. We manually filtered the 516 papers in question and extracted the key results from those that remained. We excluded multimodal models, CoT-fine-tuned models, any experiments where the \"CoT\" method involves multiple forward passes (e.g., self-consistency (Wang et al., 2023c) and tree-of-thought (Yao et al., 2023)),5 and systems that augment LLMs with external tools (discussed more in Section 5)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 281, + 506, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 281, + 506, + 357 + ], + "spans": [ + { + "bbox": [ + 104, + 281, + 506, + 357 + ], + "type": "text", + "content": "For each paper passing through these criteria, we manually extracted the results from key tables comparing CoT and direct answer prompts. We only include results where the CoT and direct prompts are run on the same model and same dataset while being on a scale of 0 to 100 (excluding Likert scale evaluations, for example) for a more direct comparison. When papers include various CoT or direct answer prompts (including zero/few-shot variants), we always take the best-performing prompt for both. We focus on key test results where applicable, excluding dev sets if they are reported alongside test and also excluding numbers from ablations or nonstandard subsets of datasets." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 363, + 506, + 397 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 363, + 506, + 397 + ], + "spans": [ + { + "bbox": [ + 104, + 363, + 506, + 397 + ], + "type": "text", + "content": "This resulted in a total of 1,218 experimental comparisons across 110 papers (35 from ICLR and 75 from NAACL and EACL) covering 264 datasets. Details and more information will be available in our GitHub Repo." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 410, + 506, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 410, + 506, + 477 + ], + "spans": [ + { + "bbox": [ + 104, + 410, + 506, + 477 + ], + "type": "text", + "content": "Categorization Given the large number of tasks and datasets being compared, we grouped each task into a set of 14 categories. These categories were determined based on the description (and possibly examples) of the task, not taking into account system performance. These categories abstract over traditional NLP task classifications (e.g., NER, reading comprehension) and take into account both the task format and the kinds of reasoning involved. Definitions for several categories are shown in Table 1 and the full description is given in Appendix B." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 495, + 297, + 507 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 495, + 297, + 507 + ], + "spans": [ + { + "bbox": [ + 105, + 495, + 297, + 507 + ], + "type": "text", + "content": "B QUANTITATIVE META-ANALYSIS" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 520, + 493, + 532 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 520, + 493, + 532 + ], + "spans": [ + { + "bbox": [ + 104, + 520, + 493, + 532 + ], + "type": "text", + "content": "See the full list of categories and their descriptions that we used for the meta-analysis in Table 2." 
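The keyword selection criterion from Appendix A is mechanical enough to restate as code. The following is a minimal Python sketch, not the authors' released tooling; the regular expressions, the case handling, and the assumption that each paper is available as plain text are ours:

import re

# Sketch of the Appendix A selection criterion: keep a paper if its full text
# contains at least two total occurrences of "CoT", "chain-of-thought", or
# "chain of thought". Exact case handling is an assumption, not the paper's spec.
COT_ACRONYM = re.compile(r"\bCoT\b")                       # case-sensitive acronym
COT_PHRASE = re.compile(r"chain[ -]of[ -]thought", re.IGNORECASE)

def keep_paper(full_text: str) -> bool:
    hits = len(COT_ACRONYM.findall(full_text)) + len(COT_PHRASE.findall(full_text))
    return hits >= 2

# Hypothetical usage: papers is a list of (paper_id, full_text) pairs.
def filter_papers(papers):
    return [pid for pid, text in papers if keep_paper(text)]

Applied to the 4,642 candidate papers, a filter of this shape is what yields the 516 papers analyzed above.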
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 548, + 317, + 562 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 548, + 317, + 562 + ], + "spans": [ + { + "bbox": [ + 105, + 548, + 317, + 562 + ], + "type": "text", + "content": "C EXPANDED EXPERIMENTAL DETAILS" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 574, + 506, + 663 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 574, + 506, + 663 + ], + "spans": [ + { + "bbox": [ + 104, + 574, + 506, + 663 + ], + "type": "text", + "content": "A full list of the datasets can be found in Table 4. Each model can be seen in Table 5. We use one answer parser for all datasets of the same answer response format (one for multiple choice, short answer, etc.); however, some datasets require special handling and have edge cases that we handle separately from the rest of the datasets. Similarly, for each model, we use the exact same prompt across them, except when closed source models require different prompts because they do not allow for partial completions (i.e., when we cannot put \"let's think step by step\" to warm-start the assistant's response). All prompts are given in our Huggingface repo, including the model output and what our answer parser extracted as the answer." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 668, + 505, + 691 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 668, + 505, + 691 + ], + "spans": [ + { + "bbox": [ + 104, + 668, + 505, + 691 + ], + "type": "text", + "content": "Experiments were conducted either by invoking APIs or by running open-source models on our own hardware, mostly on a machine with 8 A40s or 4 Quadro RTX 8000s. All locally hosted models were" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 700, + 504, + 733 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 700, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 700, + 504, + 733 + ], + "type": "text", + "content": "5These systems use more compute than direct answer, and there is not a clear comparison to be made here. Moreover, our anecdotal coverage of these methods shows that they are most used for math, coding, and logic settings, for which we already have high representation among reported CoT methods." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 107, + 100, + 504, + 588 + ], + "blocks": [ + { + "bbox": [ + 176, + 79, + 434, + 92 + ], + "lines": [ + { + "bbox": [ + 176, + 79, + 434, + 92 + ], + "spans": [ + { + "bbox": [ + 176, + 79, + 434, + 92 + ], + "type": "text", + "content": "Table 2: Categories and their descriptions for the meta-analysis." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 107, + 100, + 504, + 588 + ], + "lines": [ + { + "bbox": [ + 107, + 100, + 504, + 588 + ], + "spans": [ + { + "bbox": [ + 107, + 100, + 504, + 588 + ], + "type": "table", + "html": "
<table><tr><td>Category</td><td>Description</td></tr>
<tr><td>Symbolic and algorithmic</td><td>Tasks involving symbol manipulation which can be solved by executing a program. This includes entity tracking datasets (e.g., SCONE, Coin Flip) and algorithmic tasks (e.g., BBH word sorting or finding shortest paths in a graph).</td></tr>
<tr><td>Math</td><td>Tasks requiring mathematical reasoning, from grade-school math to advanced mathematics, including physics questions.</td></tr>
<tr><td>Logical reasoning</td><td>Tasks designed to test for logical reasoning, whether deductive (Saparov & He, 2023, PrOntoQA), inductive (Bowen et al., 2024) or analogical (Ma et al., 2024) reasoning, including syllogisms and logical puzzles.</td></tr>
<tr><td>Commonsense reasoning</td><td>Datasets designed to test for commonsense knowledge and reasoning, i.e., world knowledge that most people would have, rather than specialized expert-level knowledge in a discipline acquired after years of study.</td></tr>
<tr><td>Encyclopedic knowledge</td><td>Tasks requiring expert-level in-depth knowledge beyond mere commonsense, usually in an open-book setting.</td></tr>
<tr><td>Spatial and temporal reasoning</td><td>Datasets designed to test for an understanding of space and spatial relations (e.g., navigation) or reasoning involving time and sequences over time.</td></tr>
<tr><td>Multi-hop QA</td><td>Questions involving the composition of multiple steps of reasoning in order to arrive at an answer, such as “What is the capital of the country whose scientist discovered penicillin?”</td></tr>
<tr><td>Context-aware QA</td><td>Tasks such as closed-book QA and reading comprehension involving reasoning about a given text in context. The context is often a short passage, but could also take the form of a knowledge graph (KBQA) or a table. This category also includes information extraction tasks, such as NER or relation extraction.</td></tr>
<tr><td>Entailment</td><td>Tasks involving establishing the inferential relation between two texts, prototypically NLI, but also including fact verification.</td></tr>
<tr><td>Text classification</td><td>Tasks involving the classification of a text into a small set of categories, such as topic or sentiment classification, but also involving tasks such as hate speech detection and misinformation detection.</td></tr>
<tr><td>Generation</td><td>Tasks involving text generation, including machine translation, dialogue, question generation, as well as code generation. Tasks such as SQL execution (Lei et al., 2024) or systematic transformations of data (e.g., SCAN (Lake & Baroni, 2018)) are excluded because they can be solved by executing a program.</td></tr>
<tr><td>Meta-linguistic</td><td>Tasks probing for models' knowledge of linguistics, such as identifying the main subject of a sentence or solving linguistic puzzles.</td></tr>
<tr><td>Mixed datasets</td><td>Datasets containing a variety of tasks, such as BIG-Bench Hard (BBH) or MMLU.</td></tr>
<tr><td>Other</td><td>Tasks which did not fit in any of the other categories, such as evaluating AI safety, eliciting models' verbalized confidence, or melody retrieval.</td></tr></table>
", + "image_path": "481a836f0c5229e7c25ba7aebb1c230dc39c76544624658815990853b3c80d27.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 609, + 504, + 633 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 609, + 504, + 633 + ], + "spans": [ + { + "bbox": [ + 104, + 609, + 504, + 633 + ], + "type": "text", + "content": "hosted with vLLM. All parameters given to the vLLM API endpoint are given in the Huggingface repo as well." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 651, + 292, + 662 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 651, + 292, + 662 + ], + "spans": [ + { + "bbox": [ + 105, + 651, + 292, + 662 + ], + "type": "text", + "content": "OTHER COT PROMPT VARIANTS" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 677, + 376, + 689 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 677, + 376, + 689 + ], + "spans": [ + { + "bbox": [ + 105, + 677, + 376, + 689 + ], + "type": "text", + "content": "D.1 TESTING PERFORMANCE VOLATILITY ACROSS PROMPTS" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "content": "To test the impact of prompt choice on performance, we performed our zero-shot experiment on Llama 3.1 8B with 7 different datasets and 4 different zero-shot CoT prompting strategies common in the literature (Kojima et al., 2022; Wang et al., 2023b; Zhou et al., 2023b; Yang et al., 2024)." + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 175, + 504, + 345 + ], + "blocks": [ + { + "bbox": [ + 104, + 110, + 504, + 166 + ], + "lines": [ + { + "bbox": [ + 104, + 110, + 504, + 166 + ], + "spans": [ + { + "bbox": [ + 104, + 110, + 504, + 166 + ], + "type": "text", + "content": "Table 3: Models, datasets, and prompting strategies used in our experiments. Models marked with " + }, + { + "bbox": [ + 104, + 110, + 504, + 166 + ], + "type": "inline_equation", + "content": "\\dagger" + }, + { + "bbox": [ + 104, + 110, + 504, + 166 + ], + "type": "text", + "content": " are run with a 4k context size window. Note that Gemma has a larger than 4k context size window, but VLLM only supports up to a 4k context size window for it. Models marked with * indicate closed-source models that cannot handle prefixed assistant messages. Datasets marked with " + }, + { + "bbox": [ + 104, + 110, + 504, + 166 + ], + "type": "inline_equation", + "content": "\\triangle" + }, + { + "bbox": [ + 104, + 110, + 504, + 166 + ], + "type": "text", + "content": " do not have a few-shot setting." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 175, + 504, + 345 + ], + "lines": [ + { + "bbox": [ + 106, + 175, + 504, + 345 + ], + "spans": [ + { + "bbox": [ + 106, + 175, + 504, + 345 + ], + "type": "table", + "html": "
Models | Llama 2 7B Chat† (Touvron et al., 2023), Mistral 7B Instruct v0.3 (Jiang et al., 2023), Llama 3.1 8B Instruct (Dubey et al., 2024), Llama 3.1 70B Instruct, Gemma 2 9B It† (Riviere et al., 2024), Phi-3 Small 8k Instruct (Abdin et al., 2024), gpt-4o-mini-2024-07-18*, gpt-4o-2024-08-06*, Gemini 1.5 Flash* (Reid et al., 2024), Gemini 1.5 Pro* (Reid et al., 2024), claude-3-haiku-20240307* (Anthropic, a), claude-3-5-sonnet-20240620* (Anthropic, b)
Datasets | CommonsenseQA (Talmor et al., 2019), StrategyQA (Geva et al., 2021), SiQA△ (Sap et al., 2019), PiQA△ (Bisk et al., 2019), Winogrande△ (Sakaguchi et al., 2021), GPQA (Rein et al., 2023), MuSR (Sprague et al., 2024), ContextHub (Levels 1 and 2 only) (Hua et al., 2024), ARC△ (Clark et al., 2018), AGIEval LSAT (Zhong et al., 2023), MMLU (Hendrycks et al., 2021a), MMLU Pro (Wang et al., 2024), MATH (Hendrycks et al., 2021b), GSM8K (Cobbe et al., 2021), GSM8K-hard (Gao et al., 2023), FOLIO (Han et al., 2022), MuSiQue△ (Trivedi et al., 2022), Big-Bench Hard (Suzgun et al., 2023; Srivastava et al., 2022), BiGGen Bench (Kim et al., 2024)
Prompts | zero-shot direct answer, zero-shot CoT (Kojima et al., 2022), few-shot direct answer (Brown et al., 2020), few-shot CoT (Wei et al., 2022)
", + "image_path": "4901ebf5dcf6c5e3308e4fc8cd8be2bc356075e2cbff12912f2eb88ac42bd641.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 160, + 477, + 451, + 700 + ], + "blocks": [ + { + "bbox": [ + 104, + 412, + 504, + 468 + ], + "lines": [ + { + "bbox": [ + 104, + 412, + 504, + 468 + ], + "spans": [ + { + "bbox": [ + 104, + 412, + 504, + 468 + ], + "type": "text", + "content": "Table 4: List of datasets used in our experiments. We categorize each dataset into one of five categories based on the type of reasoning required: Commonsense, Knowledge, Soft Reasoning, Symbolic, or Mathematical. We also report answer formats. When we use few-shot prompts, we mark how many examples those prompts contain. BiGGen Bench has many categories of questions that explicitly ask for CoTs in the response; we ignore those categories for our evaluation." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 160, + 477, + 451, + 700 + ], + "lines": [ + { + "bbox": [ + 160, + 477, + 451, + 700 + ], + "spans": [ + { + "bbox": [ + 160, + 477, + 451, + 700 + ], + "type": "table", + "html": "
Dataset | Type | Answer Format | m-Shots
CommonsenseQA | Commonsense | Multiple choice | 7
StrategyQA | Commonsense | True or False | 6
SIQA | Commonsense | Multiple choice | 0
PIQA | Commonsense | Multiple choice | 0
Winogrande | Commonsense | Multiple choice | 0
Arc Easy | Knowledge | Multiple choice | 0
Arc Challenge | Knowledge | Multiple choice | 0
AGIEval LSAT | Soft Reasoning | Multiple choice | 3
BiGGen-Bench | Soft Reasoning | Free response | 0
MMLU | Knowledge | Multiple Choice | 5
MMLU Pro | Knowledge | Multiple Choice | 5
BigBench-Hard | Symbolic | Multiple Choice | 0
MuSR | Soft Reasoning | Multiple Choice | 1
GPQA | Mathematical | Multiple Choice | 3
MuSiQue | Soft Reasoning | Short Answer | 0
GSM8K | Mathematical | Short Answer | 8
GSM8K-Hard | Mathematical | Short Answer | 8
FOLIO | Symbolic | True, False, or Unknown | 4
ContextHub | Symbolic | True, False, or Neither | 3
MATH | Mathematical | Short Answer | 4
", + "image_path": "45bf096ee3d131eb1c544ded558b8563d629a7955eedc841e204cab4acca93e4.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 187, + 133, + 424, + 288 + ], + "blocks": [ + { + "bbox": [ + 104, + 79, + 506, + 125 + ], + "lines": [ + { + "bbox": [ + 104, + 79, + 506, + 125 + ], + "spans": [ + { + "bbox": [ + 104, + 79, + 506, + 125 + ], + "type": "text", + "content": "Table 5: List of models for our experiments. We focus on contemporary instruction-tuned models; although pretrained and smaller language models could be used, they are not the focus of our study. Prompts and outputs used for each model are available on Huggingface. * Note that Gemma can accept more than 4k input tokens, but we are restricted to 4k by vLLM." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 187, + 133, + 424, + 288 + ], + "lines": [ + { + "bbox": [ + 187, + 133, + 424, + 288 + ], + "spans": [ + { + "bbox": [ + 187, + 133, + 424, + 288 + ], + "type": "table", + "html": "
Model | Context Length | Is Open Source
Llama 2 7B Chat | 4k | True
Mistral 7B Instruct v0.3 | 8k | True
Llama 3.1 8B Instruct | 128k | True
Llama 3.1 70B Instruct | 128k | True
Gemma 2 9B It | 4k* | True
Qwen 7B Instruct | 131k | True
Qwen 72B Instruct | 131k | True
GPT4o-Mini | 128k | False
GPT4o | 128k | False
Gemini 1.5 Pro | 128k | False
Gemini Flash | 1m | False
Claude 3.5 Sonnet | 200k | False
Claude 3 Haiku | 200k | False
", + "image_path": "3c8cb99a7df5940afd3c610f4dd360299a0612a254a7e621c3907526efa1c63d.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 115, + 301, + 495, + 483 + ], + "blocks": [ + { + "bbox": [ + 115, + 301, + 495, + 483 + ], + "lines": [ + { + "bbox": [ + 115, + 301, + 495, + 483 + ], + "spans": [ + { + "bbox": [ + 115, + 301, + 495, + 483 + ], + "type": "image", + "image_path": "9f95f43776fbd56cd222beea57a2a5f12075b2356a2362dff3912e55246a903a.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 531, + 506, + 588 + ], + "lines": [ + { + "bbox": [ + 104, + 531, + 506, + 588 + ], + "spans": [ + { + "bbox": [ + 104, + 531, + 506, + 588 + ], + "type": "text", + "content": "Figure 7: Performance of multiple prompts commonly used to elicit reasoning through CoT in the zero shot setting. Each prompt starts the assistant completion with a different phrase meant to elicit reasoning. All results are from using Llama 3.1 8B Instruct. For the Kojima variant, we explicitly place \"Let's think step by step.\" in the assistant message. There is very little variation between the CoT prompts on average." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 609, + 504, + 644 + ], + "lines": [ + { + "bbox": [ + 104, + 609, + 504, + 644 + ], + "spans": [ + { + "bbox": [ + 104, + 609, + 504, + 644 + ], + "type": "text", + "content": "Figure 7 shows variation due to prompts is typically small and no prompt gives a consistent gain over the other. For our experiments, this suggests that different prompts have small effects on the overall outcome on average." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 661, + 258, + 673 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 661, + 258, + 673 + ], + "spans": [ + { + "bbox": [ + 105, + 661, + 258, + 673 + ], + "type": "text", + "content": "E FEW-SHOT EXPERIMENTS" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "content": "Compared to a zero-shot prompt, a few-shot prompt additionally contains demonstrations of the relevant reasoning mode on different problem instances " + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\{(v(\\mathbf{q}_i),\\mathbf{y}_i^*)\\}" + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "content": ". Few-shot prompts for direct answer simply encode the answer " + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "inline_equation", + "content": "a_{i}" + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_i^*" + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "content": ", whereas few-shot prompts for chain-of-thought include a reasoning trace ending in the correct answer. 
Now we can define the " + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "content": "-shot direct prompt as" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 109, + 83, + 500, + 295 + ], + "blocks": [ + { + "bbox": [ + 109, + 83, + 500, + 295 + ], + "lines": [ + { + "bbox": [ + 109, + 83, + 500, + 295 + ], + "spans": [ + { + "bbox": [ + 109, + 83, + 500, + 295 + ], + "type": "image", + "image_path": "9436671d072bfe9e1df42bec2c0d8f5bee79dec70bd5f3261c1d86c2c4d1641e.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 327, + 504, + 373 + ], + "lines": [ + { + "bbox": [ + 104, + 327, + 504, + 373 + ], + "spans": [ + { + "bbox": [ + 104, + 327, + 504, + 373 + ], + "type": "text", + "content": "Figure 8: Average performance improvement from using CoT across different models in the zero-shot and few-shot settings. Each bar represents how much CoT improves the accuracy for that specific setting. In general, CoT in the few-shot setting does not change the qualitative performance of CoT versus zero-shot, though it can change the magnitude for symbolic datasets." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 392, + 504, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 392, + 504, + 418 + ], + "spans": [ + { + "bbox": [ + 104, + 392, + 504, + 418 + ], + "type": "inline_equation", + "content": "\\mathcal{I}_{\\mathrm{da}}^{m}(\\mathbf{q}) = v_{\\mathrm{da}}(\\mathbf{q}_{1})\\mathbf{a}_{1}v_{\\mathrm{da}}(\\mathbf{q}_{2})\\mathbf{a}_{2}\\dots v_{\\mathrm{da}}(\\mathbf{q}_{m})\\mathbf{a}_{m}v_{\\mathrm{da}}(\\mathbf{q})" + }, + { + "bbox": [ + 104, + 392, + 504, + 418 + ], + "type": "text", + "content": " and the " + }, + { + "bbox": [ + 104, + 392, + 504, + 418 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 104, + 392, + 504, + 418 + ], + "type": "text", + "content": "-shot cot prompt as " + }, + { + "bbox": [ + 104, + 392, + 504, + 418 + ], + "type": "inline_equation", + "content": "\\mathcal{I}_{\\mathrm{cot}}^{m}(\\mathbf{q}) = v_{\\mathrm{cot}}(\\mathbf{q}_{1})\\mathbf{y}_{1}^{*}v_{\\mathrm{cot}}(\\mathbf{q}_{2})\\mathbf{y}_{2}^{*}\\dots v_{\\mathrm{cot}}(\\mathbf{q}_{m})\\mathbf{y}_{m}^{*}v_{\\mathrm{cot}}(\\mathbf{q})" + }, + { + "bbox": [ + 104, + 392, + 504, + 418 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 421, + 506, + 489 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 421, + 506, + 489 + ], + "spans": [ + { + "bbox": [ + 104, + 421, + 506, + 489 + ], + "type": "text", + "content": "Figure 8 shows the difference between few-shot prompting and the zero-shot setting discussed in the main text of the paper. We see that using CoT in the few-shot setting largely does not change the datasets that benefit from it. Only one dataset, MuSR Team Allocation, starts to improve with few-shot; however, we believe this to be an exception because the final step to derive the answer is complex in the prompt and clearer in the examples. The magnitude of improvement over direct answer prompting when using CoT is also similar to the zero-shot setting." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 503, + 403, + 516 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 503, + 403, + 516 + ], + "spans": [ + { + "bbox": [ + 105, + 503, + 403, + 516 + ], + "type": "text", + "content": "F EXPANDED COT VS DIRECT EXPERIMENTAL RESULTS" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 529, + 249, + 540 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 529, + 249, + 540 + ], + "spans": [ + { + "bbox": [ + 105, + 529, + 249, + 540 + ], + "type": "text", + "content": "F.1 FULL ZERO-SHOT RESULTS" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 108, + 101, + 502, + 276 + ], + "blocks": [ + { + "bbox": [ + 130, + 80, + 479, + 92 + ], + "lines": [ + { + "bbox": [ + 130, + 80, + 479, + 92 + ], + "spans": [ + { + "bbox": [ + 130, + 80, + 479, + 92 + ], + "type": "text", + "content": "Table 6: Direct answer and CoT accuracies for each reasoning category across models." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 108, + 101, + 502, + 276 + ], + "lines": [ + { + "bbox": [ + 108, + 101, + 502, + 276 + ], + "spans": [ + { + "bbox": [ + 108, + 101, + 502, + 276 + ], + "type": "table", + "html": "
Model | Commonsense DA % | Commonsense CoT % | Knowledge DA % | Knowledge CoT % | Mathematical DA % | Mathematical CoT % | Symbolic DA % | Symbolic CoT % | Soft DA % | Soft CoT %
Claude-3 Haiku | 74.3 | 77.2 | 73.0 | 76.1 | 18.1 | 48.2 | 38.6 | 48.7 | 55.9 | 56.6
Claude-3.5 Sonnet | 84.3 | 85.8 | 83.8 | 88.8 | 38.7 | 59.0 | 53.2 | 67.1 | 67.6 | 75.7
GPT-4o Mini | 81.8 | 83.2 | 73.6 | 83.1 | 22.9 | 59.7 | 48.1 | 60.9 | 61.1 | 63.5
Gemini 1.5 Flash | 80.3 | 76.8 | 78.2 | 81.0 | 27.2 | 55.7 | 47.0 | 59.7 | 60.6 | 62.6
Gemini 1.5 Pro | 80.4 | 78.3 | 80.9 | 83.8 | 35.4 | 58.5 | 52.9 | 62.6 | 64.1 | 67.8
Gemma 2 9b | 75.0 | 76.1 | 74.9 | 76.9 | 18.5 | 50.5 | 46.7 | 55.8 | 58.2 | 60.5
Gpt-4o | 87.3 | 87.7 | 82.9 | 88.6 | 36.5 | 63.3 | 55.7 | 68.3 | 65.9 | 74.0
Meta-Llama 2 7b | 51.4 | 50.9 | 44.1 | 46.6 | 9.3 | 17.2 | 22.4 | 35.4 | 37.2 | 37.6
Meta-Llama 3.1 70b | 84.2 | 84.7 | 82.4 | 85.6 | 24.9 | 54.9 | 49.0 | 60.0 | 65.7 | 69.5
Meta-Llama 3.1 8b | 72.9 | 73.4 | 70.1 | 74.1 | 16.0 | 47.8 | 34.8 | 51.6 | 55.0 | 56.2
Mistral 7b | 58.3 | 61.8 | 62.0 | 64.5 | 10.9 | 28.9 | 41.8 | 45.0 | 48.6 | 49.7
Phi-3 Small 8k | 70.8 | 72.5 | 76.1 | 79.7 | 17.8 | 47.1 | 51.2 | 58.7 | 57.9 | 56.4
Qwen 2 72b | 82.9 | 84.9 | 78.6 | 84.6 | 23.9 | 58.5 | 48.2 | 58.7 | 64.2 | 65.1
Qwen 2 7b | 64.0 | 66.1 | 65.2 | 71.3 | 15.9 | 53.5 | 43.8 | 52.3 | 54.4 | 49.4
Average | 74.8 | 75.7 | 73.3 | 77.5 | 22.6 | 50.2 | 45.2 | 56.1 | 58.3 | 60.3
", + "image_path": "ac49b23c021cbc9aca989b032d58bd2ac8d3e4c4ac7556ce13ed7cddeb1c2061.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 135, + 313, + 476, + 725 + ], + "blocks": [ + { + "bbox": [ + 140, + 293, + 469, + 305 + ], + "lines": [ + { + "bbox": [ + 140, + 293, + 469, + 305 + ], + "spans": [ + { + "bbox": [ + 140, + 293, + 469, + 305 + ], + "type": "text", + "content": "Table 7: Zero-shot accuracy for direct answering and CoT prompts on all datasets" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 135, + 313, + 476, + 725 + ], + "lines": [ + { + "bbox": [ + 135, + 313, + 476, + 725 + ], + "spans": [ + { + "bbox": [ + 135, + 313, + 476, + 725 + ], + "type": "table", + "html": "
Dataset | Type | Model | zero-shot CoT accuracy | zero-shot DA accuracy
MuSR Team AllocationsSoft ReasoningLlama 2 7b34.837.2
MuSR Team AllocationsSoft ReasoningMistral 7b38.846.8
MuSR Team AllocationsSoft ReasoningLlama 3.1 8b44.048.0
MuSR Team AllocationsSoft ReasoningLlama 3.1 70b65.266.8
MuSR Team AllocationsSoft ReasoningGemma 2 9b47.244.8
MuSR Team AllocationsSoft ReasoningPhi-3 Small 8k47.261.6
MuSR Team AllocationsSoft ReasoningQwen 2 7b42.049.6
MuSR Team AllocationsSoft ReasoningQwen 2 72b58.066.8
MuSR Team AllocationsSoft ReasoningGPT-4o Mini61.258.4
MuSR Team AllocationsSoft ReasoningGpt-4o64.063.6
MuSR Team AllocationsSoft ReasoningClaude-3 Haiku56.859.2
MuSR Team AllocationsSoft ReasoningClaude-3.5 Sonnet80.463.2
MuSR Team AllocationsSoft ReasoningGemini 1.5 Flash48.855.2
MuSR Team AllocationsSoft ReasoningGemini 1.5 Pro58.462.4
SiQACommonsenseLlama 2 7b53.455.9
SiQACommonsenseMistral 7b35.933.5
SiQACommonsenseLlama 3.1 8b73.573.5
SiQACommonsenseLlama 3.1 70b78.780.9
SiQACommonsenseGemma 2 9b74.976.3
SiQACommonsensePhi-3 Small 8k38.040.4
SiQACommonsenseQwen 2 7b37.339.3
SiQACommonsenseQwen 2 72b80.580.4
SiQACommonsenseGPT-4o Mini79.080.0
SiQACommonsenseGpt-4o81.981.5
SiQACommonsenseClaude-3 Haiku75.474.8
SiQACommonsenseClaude-3.5 Sonnet79.781.0
SiQACommonsenseGemini 1.5 Flash74.579.1
SiQACommonsenseGemini 1.5 Pro73.978.2
MuSiQueSoft ReasoningLlama 2 7b40.136.1
MuSiQueSoft ReasoningMistral 7b47.347.2
MuSiQueSoft ReasoningLlama 3.1 8b62.664.7
MuSiQueSoft ReasoningLlama 3.1 70b74.072.2
MuSiQueSoft ReasoningGemma 2 9b67.768.7
MuSiQueSoft ReasoningPhi-3 Small 8k58.364.3
MuSiQueSoft ReasoningQwen 2 7b60.765.1
MuSiQueSoft ReasoningQwen 2 72b56.369.0
MuSiQueSoft ReasoningGPT-4o Mini71.368.2
MuSiQueSoft ReasoningGpt-4o73.570.1
MuSiQueSoft ReasoningClaude-3 Haiku54.856.0
MuSiQueSoft ReasoningClaude-3.5 Sonnet66.970.4
MuSiQueSoft ReasoningGemini 1.5 Flash69.866.2
MuSiQueSoft ReasoningGemini 1.5 Pro69.871.3
AGIEval LSAT RCSoft ReasoningLlama 2 7b31.236.4
AGIEval LSAT RCSoft ReasoningMistral 7b61.761.0
AGIEval LSAT RCSoft ReasoningLlama 3.1 8b71.068.8
AGIEval LSAT RCSoft ReasoningLlama 3.1 70b84.487.0
AGIEval LSAT RCSoft ReasoningGemma 2 9b75.178.1
AGIEval LSAT RCSoft ReasoningPhi-3 Small 8k68.869.9
AGIEval LSAT RCSoft ReasoningQwen 2 7b61.066.5
AGIEval LSAT RCSoft ReasoningQwen 2 72b83.684.4
", + "image_path": "fb151bdd62ef2d6da214ecee6b4a7e25253dcf4b16b1bcbfb99d38956a707e8a.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 135, + 98, + 476, + 724 + ], + "blocks": [ + { + "bbox": [ + 140, + 78, + 470, + 90 + ], + "lines": [ + { + "bbox": [ + 140, + 78, + 470, + 90 + ], + "spans": [ + { + "bbox": [ + 140, + 78, + 470, + 90 + ], + "type": "text", + "content": "Table 7: Zero-shot accuracy for direct answering and CoT prompts on all datasets" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 135, + 98, + 476, + 724 + ], + "lines": [ + { + "bbox": [ + 135, + 98, + 476, + 724 + ], + "spans": [ + { + "bbox": [ + 135, + 98, + 476, + 724 + ], + "type": "table", + "html": "
Dataset | Type | Model | zero-shot CoT accuracy | zero-shot DA accuracy
AGIEval LSAT RCSoft ReasoningGPT-4o Mini77.374.3
AGIEval LSAT RCSoft ReasoningGpt-4o88.181.4
AGIEval LSAT RCSoft ReasoningClaude-3 Haiku71.765.1
AGIEval LSAT RCSoft ReasoningClaude-3.5 Sonnet90.089.6
AGIEval LSAT RCSoft ReasoningGemini 1.5 Flash78.181.0
AGIEval LSAT RCSoft ReasoningGemini 1.5 Pro82.285.9
CommonsenseQACommonsenseLlama 2 7b49.454.6
CommonsenseQACommonsenseMistral 7b68.068.0
CommonsenseQACommonsenseLlama 3.1 8b68.574.9
CommonsenseQACommonsenseLlama 3.1 70b83.584.4
CommonsenseQACommonsenseGemma 2 9b79.280.1
CommonsenseQACommonsensePhi-3 Small 8k81.880.3
CommonsenseQACommonsenseQwen 2 7b78.579.0
CommonsenseQACommonsenseQwen 2 72b87.487.3
CommonsenseQACommonsenseGPT-4o Mini82.583.9
CommonsenseQACommonsenseGpt-4o86.587.3
CommonsenseQACommonsenseClaude-3 Haiku80.679.0
CommonsenseQACommonsenseClaude-3.5 Sonnet85.184.3
CommonsenseQACommonsenseGemini 1.5 Flash79.782.6
CommonsenseQACommonsenseGemini 1.5 Pro79.982.9
GPQAMathematicalLlama 2 7b28.324.3
GPQAMathematicalMistral 7b23.024.3
GPQAMathematicalLlama 3.1 8b24.125.9
GPQAMathematicalLlama 3.1 70b23.225.9
GPQAMathematicalGemma 2 9b26.321.2
GPQAMathematicalPhi-3 Small 8k22.320.8
GPQAMathematicalQwen 2 7b24.124.6
GPQAMathematicalQwen 2 72b21.018.1
GPQAMathematicalGPT-4o Mini21.024.0
GPQAMathematicalGpt-4o23.725.9
GPQAMathematicalClaude-3 Haiku25.422.3
GPQAMathematicalClaude-3.5 Sonnet25.425.9
GPQAMathematicalGemini 1.5 Flash22.322.8
GPQAMathematicalGemini 1.5 Pro21.023.7
AGIEval LSAT LRSoft ReasoningLlama 2 7b29.433.5
AGIEval LSAT LRSoft ReasoningMistral 7b44.147.8
AGIEval LSAT LRSoft ReasoningLlama 3.1 8b59.053.9
AGIEval LSAT LRSoft ReasoningLlama 3.1 70b81.481.0
AGIEval LSAT LRSoft ReasoningGemma 2 9b64.967.6
AGIEval LSAT LRSoft ReasoningPhi-3 Small 8k64.564.1
AGIEval LSAT LRSoft ReasoningQwen 2 7b50.658.4
AGIEval LSAT LRSoft ReasoningQwen 2 72b77.375.1
AGIEval LSAT LRSoft ReasoningGPT-4o Mini65.368.2
AGIEval LSAT LRSoft ReasoningGpt-4o87.383.9
AGIEval LSAT LRSoft ReasoningClaude-3 Haiku55.754.7
AGIEval LSAT LRSoft ReasoningClaude-3.5 Sonnet83.782.7
AGIEval LSAT LRSoft ReasoningGemini 1.5 Flash70.071.2
AGIEval LSAT LRSoft ReasoningGemini 1.5 Pro79.480.4
PiQACommonsenseLlama 2 7b62.164.7
PiQACommonsenseMistral 7b78.677.7
PiQACommonsenseLlama 3.1 8b85.084.2
PiQACommonsenseLlama 3.1 70b91.890.6
PiQACommonsenseGemma 2 9b84.084.8
PiQACommonsensePhi-3 Small 8k89.185.5
PiQACommonsenseQwen 2 7b84.386.2
PiQACommonsenseQwen 2 72b92.989.1
PiQACommonsenseGPT-4o Mini93.188.6
PiQACommonsenseGpt-4o95.995.5
PiQACommonsenseClaude-3 Haiku85.986.6
PiQACommonsenseClaude-3.5 Sonnet94.694.5
PiQACommonsenseGemini 1.5 Flash84.689.8
PiQACommonsenseGemini 1.5 Pro88.191.3
Arc EasyKnowledgeLlama 2 7b71.169.8
Arc EasyKnowledgeMistral 7b87.586.7
Arc EasyKnowledgeLlama 3.1 8b93.092.5
Arc EasyKnowledgeLlama 3.1 70b97.597.9
Arc EasyKnowledgeGemma 2 9b94.995.8
Arc EasyKnowledgePhi-3 Small 8k96.096.3
Arc EasyKnowledgeQwen 2 7b89.584.7
Arc EasyKnowledgeQwen 2 72b97.997.4
Arc EasyKnowledgeGPT-4o Mini96.894.6
Arc EasyKnowledgeGpt-4o98.998.1
Arc EasyKnowledgeClaude-3 Haiku95.195.4
Arc EasyKnowledgeClaude-3.5 Sonnet98.698.4
Arc EasyKnowledgeGemini 1.5 Flash96.897.2
Arc EasyKnowledgeGemini 1.5 Pro97.294.6
Arc ChallengeKnowledgeLlama 2 7b49.245.2
", + "image_path": "10df7936f14bb883a39ce8192760a300751dc4f64ae68a01bb958b1b04e0fd93.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "text", + "content": "26" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 25 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 135, + 98, + 476, + 726 + ], + "blocks": [ + { + "bbox": [ + 140, + 78, + 470, + 90 + ], + "lines": [ + { + "bbox": [ + 140, + 78, + 470, + 90 + ], + "spans": [ + { + "bbox": [ + 140, + 78, + 470, + 90 + ], + "type": "text", + "content": "Table 7: Zero-shot accuracy for direct answering and CoT prompts on all datasets" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 135, + 98, + 476, + 726 + ], + "lines": [ + { + "bbox": [ + 135, + 98, + 476, + 726 + ], + "spans": [ + { + "bbox": [ + 135, + 98, + 476, + 726 + ], + "type": "table", + "html": "
Dataset | Type | Model | zero-shot CoT accuracy | zero-shot DA accuracy
Arc ChallengeKnowledgeMistral 7b78.376.6
Arc ChallengeKnowledgeLlama 3.1 8b86.082.6
Arc ChallengeKnowledgeLlama 3.1 70b95.093.6
Arc ChallengeKnowledgeGemma 2 9b91.089.6
Arc ChallengeKnowledgePhi-3 Small 8k91.691.0
Arc ChallengeKnowledgeQwen 2 7b83.975.3
Arc ChallengeKnowledgeQwen 2 72b96.394.6
Arc ChallengeKnowledgeGPT-4o Mini93.382.6
Arc ChallengeKnowledgeGpt-4o96.095.3
Arc ChallengeKnowledgeClaude-3 Haiku89.389.3
Arc ChallengeKnowledgeClaude-3.5 Sonnet96.095.3
Arc ChallengeKnowledgeGemini 1.5 Flash92.393.6
Arc ChallengeKnowledgeGemini 1.5 Pro91.690.6
AGIEval LSAT ARSoft ReasoningLlama 2 7b17.017.4
AGIEval LSAT ARSoft ReasoningMistral 7b21.719.1
AGIEval LSAT ARSoft ReasoningLlama 3.1 8b20.426.1
AGIEval LSAT ARSoft ReasoningLlama 3.1 70b32.628.7
AGIEval LSAT ARSoft ReasoningGemma 2 9b24.823.0
AGIEval LSAT ARSoft ReasoningPhi-3 Small 8k28.326.5
AGIEval LSAT ARSoft ReasoningQwen 2 7b27.023.9
AGIEval LSAT ARSoft ReasoningQwen 2 72b29.128.3
AGIEval LSAT ARSoft ReasoningGPT-4o Mini32.223.0
AGIEval LSAT ARSoft ReasoningGpt-4o37.830.0
AGIEval LSAT ARSoft ReasoningClaude-3 Haiku24.823.5
AGIEval LSAT ARSoft ReasoningClaude-3.5 Sonnet38.333.9
AGIEval LSAT ARSoft ReasoningGemini 1.5 Flash27.827.8
AGIEval LSAT ARSoft ReasoningGemini 1.5 Pro30.031.7
BiGGen BenchSoft ReasoningLlama 2 7b61.656.8
BiGGen BenchSoft ReasoningMistral 7b70.168.1
BiGGen BenchSoft ReasoningLlama 3.1 8b66.567.7
BiGGen BenchSoft ReasoningLlama 3.1 70b78.976.9
BiGGen BenchSoft ReasoningGemma 2 9b64.764.5
BiGGen BenchSoft ReasoningPhi-3 Small 8k69.763.0
BiGGen BenchSoft ReasoningQwen 2 7b46.269.9
BiGGen BenchSoft ReasoningQwen 2 72b74.379.9
BiGGen BenchSoft ReasoningGPT-4o Mini70.377.7
BiGGen BenchSoft ReasoningGpt-4o86.082.0
BiGGen BenchSoft ReasoningClaude-3 Haiku80.080.0
BiGGen BenchSoft ReasoningClaude-3.5 Sonnet91.479.3
BiGGen BenchSoft ReasoningGemini 1.5 Flash73.968.5
BiGGen BenchSoft ReasoningGemini 1.5 Pro78.767.1
WinograndeCommonsenseLlama 2 7b49.950.4
WinograndeCommonsenseMistral 7b60.456.5
WinograndeCommonsenseLlama 3.1 8b66.563.3
WinograndeCommonsenseLlama 3.1 70b84.281.2
WinograndeCommonsenseGemma 2 9b68.767.7
WinograndeCommonsensePhi-3 Small 8k81.581.6
WinograndeCommonsenseQwen 2 7b67.160.7
WinograndeCommonsenseQwen 2 72b81.980.7
WinograndeCommonsenseGPT-4o Mini79.271.9
WinograndeCommonsenseGpt-4o89.786.5
WinograndeCommonsenseClaude-3 Haiku70.766.2
WinograndeCommonsenseClaude-3.5 Sonnet89.485.7
WinograndeCommonsenseGemini 1.5 Flash72.574.8
WinograndeCommonsenseGemini 1.5 Pro75.578.3
MMLUKnowledgeLlama 2 7b46.341.7
MMLUKnowledgeMistral 7b60.556.5
MMLUKnowledgeLlama 3.1 8b72.667.5
MMLUKnowledgeLlama 3.1 70b85.083.2
MMLUKnowledgeGemma 2 9b73.871.4
MMLUKnowledgePhi-3 Small 8k76.373.6
MMLUKnowledgeQwen 2 7b67.064.5
MMLUKnowledgeQwen 2 72b81.377.8
MMLUKnowledgeGPT-4o Mini79.974.8
MMLUKnowledgeGpt-4o87.583.4
MMLUKnowledgeClaude-3 Haiku72.268.4
MMLUKnowledgeClaude-3.5 Sonnet87.284.0
MMLUKnowledgeGemini 1.5 Flash76.374.7
MMLUKnowledgeGemini 1.5 Pro81.381.1
StrategyQACommonsenseLlama 2 7b39.531.2
StrategyQACommonsenseMistral 7b66.155.8
StrategyQACommonsenseLlama 3.1 8b73.768.6
StrategyQACommonsenseLlama 3.1 70b85.383.8
StrategyQACommonsenseGemma 2 9b73.766.4
StrategyQACommonsensePhi-3 Small 8k72.366.0
StrategyQACommonsenseQwen 2 7b63.254.8
StrategyQACommonsenseQwen 2 72b81.776.9
", + "image_path": "5ae34891f3c36408bcc6bfbc261276e5c966d281447cb298c6085bf1be1f94d4.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "text", + "content": "27" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 26 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 135, + 98, + 476, + 724 + ], + "blocks": [ + { + "bbox": [ + 140, + 78, + 470, + 90 + ], + "lines": [ + { + "bbox": [ + 140, + 78, + 470, + 90 + ], + "spans": [ + { + "bbox": [ + 140, + 78, + 470, + 90 + ], + "type": "text", + "content": "Table 7: Zero-shot accuracy for direct answering and CoT prompts on all datasets" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 135, + 98, + 476, + 724 + ], + "lines": [ + { + "bbox": [ + 135, + 98, + 476, + 724 + ], + "spans": [ + { + "bbox": [ + 135, + 98, + 476, + 724 + ], + "type": "table", + "html": "
Dataset | Type | Model | zero-shot CoT accuracy | zero-shot DA accuracy
StrategyQACommonsenseGPT-4o Mini82.284.5
StrategyQACommonsenseGpt-4o84.585.5
StrategyQACommonsenseClaude-3 Haiku73.465.0
StrategyQACommonsenseClaude-3.5 Sonnet80.176.3
StrategyQACommonsenseGemini 1.5 Flash72.575.2
StrategyQACommonsenseGemini 1.5 Pro74.071.4
MuSR Object PlacementsSoft ReasoningLlama 2 7b36.330.5
MuSR Object PlacementsSoft ReasoningMistral 7b50.843.4
MuSR Object PlacementsSoft ReasoningLlama 3.1 8b55.553.5
MuSR Object PlacementsSoft ReasoningLlama 3.1 70b65.643.8
MuSR Object PlacementsSoft ReasoningGemma 2 9b63.357.0
MuSR Object PlacementsSoft ReasoningPhi-3 Small 8k53.155.1
MuSR Object PlacementsSoft ReasoningQwen 2 7b48.848.4
MuSR Object PlacementsSoft ReasoningQwen 2 72b61.745.7
MuSR Object PlacementsSoft ReasoningGPT-4o Mini59.055.0
MuSR Object PlacementsSoft ReasoningGpt-4o67.645.3
MuSR Object PlacementsSoft ReasoningClaude-3 Haiku46.952.3
MuSR Object PlacementsSoft ReasoningClaude-3.5 Sonnet69.551.2
MuSR Object PlacementsSoft ReasoningGemini 1.5 Flash61.756.2
MuSR Object PlacementsSoft ReasoningGemini 1.5 Pro66.450.0
FOLIOSymbolicLlama 2 7b36.533.0
FOLIOSymbolicMistral 7b50.741.9
FOLIOSymbolicLlama 3.1 8b58.656.7
FOLIOSymbolicLlama 3.1 70b70.969.0
FOLIOSymbolicGemma 2 9b66.055.7
FOLIOSymbolicPhi-3 Small 8k68.059.6
FOLIOSymbolicQwen 2 7b60.651.2
FOLIOSymbolicQwen 2 72b65.065.0
FOLIOSymbolicGPT-4o Mini65.058.1
FOLIOSymbolicGpt-4o79.862.6
FOLIOSymbolicClaude-3 Haiku61.648.8
FOLIOSymbolicClaude-3.5 Sonnet73.968.5
FOLIOSymbolicGemini 1.5 Flash74.969.5
FOLIOSymbolicGemini 1.5 Pro73.974.4
ContextHub Deductive L2SymbolicLlama 2 7b34.812.6
ContextHub Deductive L2SymbolicMistral 7b48.855.1
ContextHub Deductive L2SymbolicLlama 3.1 8b52.821.5
ContextHub Deductive L2SymbolicLlama 3.1 70b50.041.1
ContextHub Deductive L2SymbolicGemma 2 9b50.043.0
ContextHub Deductive L2SymbolicPhi-3 Small 8k52.449.1
ContextHub Deductive L2SymbolicQwen 2 7b51.339.8
ContextHub Deductive L2SymbolicQwen 2 72b52.844.0
ContextHub Deductive L2SymbolicGPT-4o Mini47.042.0
ContextHub Deductive L2SymbolicGpt-4o54.545.6
ContextHub Deductive L2SymbolicClaude-3 Haiku45.241.8
ContextHub Deductive L2SymbolicClaude-3.5 Sonnet53.046.2
ContextHub Deductive L2SymbolicGemini 1.5 Flash45.039.5
ContextHub Deductive L2SymbolicGemini 1.5 Pro57.343.3
ContextHub Abductive L2SymbolicLlama 2 7b34.331.9
ContextHub Abductive L2SymbolicMistral 7b34.025.7
ContextHub Abductive L2SymbolicLlama 3.1 8b41.337.3
ContextHub Abductive L2SymbolicLlama 3.1 70b51.044.4
ContextHub Abductive L2SymbolicGemma 2 9b41.532.9
ContextHub Abductive L2SymbolicPhi-3 Small 8k44.332.8
ContextHub Abductive L2SymbolicQwen 2 7b37.833.4
ContextHub Abductive L2SymbolicQwen 2 72b45.532.2
ContextHub Abductive L2SymbolicGPT-4o Mini65.055.0
ContextHub Abductive L2SymbolicGpt-4o57.546.8
ContextHub Abductive L2SymbolicClaude-3 Haiku37.031.4
ContextHub Abductive L2SymbolicClaude-3.5 Sonnet56.840.4
ContextHub Abductive L2SymbolicGemini 1.5 Flash53.132.2
ContextHub Abductive L2SymbolicGemini 1.5 Pro53.543.7
MMLU ProKnowledgeLlama 2 7b19.919.6
MMLU ProKnowledgeMistral 7b31.628.4
MMLU ProKnowledgeLlama 3.1 8b44.838.0
MMLU ProKnowledgeLlama 3.1 70b64.955.0
MMLU ProKnowledgeGemma 2 9b48.142.7
MMLU ProKnowledgePhi-3 Small 8k54.843.7
MMLU ProKnowledgeQwen 2 7b45.036.2
MMLU ProKnowledgeQwen 2 72b62.844.3
MMLU ProKnowledgeGPT-4o Mini62.342.6
MMLU ProKnowledgeGpt-4o72.155.0
MMLU ProKnowledgeClaude-3 Haiku47.639.0
MMLU ProKnowledgeClaude-3.5 Sonnet73.457.2
MMLU ProKnowledgeGemini 1.5 Flash58.547.2
MMLU ProKnowledgeGemini 1.5 Pro65.357.4
MuSR Murder MysteriesSoft ReasoningLlama 2 7b50.050.0
", + "image_path": "d428f884b8057ca10febe71ad16d24604f9b8fa1ddf577e95d17b03aaf7d1f11.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "28" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 27 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 135, + 98, + 476, + 725 + ], + "blocks": [ + { + "bbox": [ + 140, + 78, + 470, + 90 + ], + "lines": [ + { + "bbox": [ + 140, + 78, + 470, + 90 + ], + "spans": [ + { + "bbox": [ + 140, + 78, + 470, + 90 + ], + "type": "text", + "content": "Table 7: Zero-shot accuracy for direct answering and CoT prompts on all datasets" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 135, + 98, + 476, + 725 + ], + "lines": [ + { + "bbox": [ + 135, + 98, + 476, + 725 + ], + "spans": [ + { + "bbox": [ + 135, + 98, + 476, + 725 + ], + "type": "table", + "html": "
Dataset | Type | Model | zero-shot CoT accuracy | zero-shot DA accuracy
MuSR Murder MysteriesSoft ReasoningMistral 7b62.855.6
MuSR Murder MysteriesSoft ReasoningLlama 3.1 8b70.457.2
MuSR Murder MysteriesSoft ReasoningLlama 3.1 70b73.669.6
MuSR Murder MysteriesSoft ReasoningGemma 2 9b76.861.6
MuSR Murder MysteriesSoft ReasoningPhi-3 Small 8k61.658.8
MuSR Murder MysteriesSoft ReasoningQwen 2 7b59.253.2
MuSR Murder MysteriesSoft ReasoningQwen 2 72b80.864.4
MuSR Murder MysteriesSoft ReasoningGPT-4o Mini71.263.6
MuSR Murder MysteriesSoft ReasoningGpt-4o87.670.8
MuSR Murder MysteriesSoft ReasoningClaude-3 Haiku62.456.8
MuSR Murder MysteriesSoft ReasoningClaude-3.5 Sonnet85.270.4
MuSR Murder MysteriesSoft ReasoningGemini 1.5 Flash70.858.4
MuSR Murder MysteriesSoft ReasoningGemini 1.5 Pro77.664.0
ContextHub Deductive L1SymbolicLlama 2 7b47.78.3
ContextHub Deductive L1SymbolicMistral 7b50.367.3
ContextHub Deductive L1SymbolicLlama 3.1 8b50.723.3
ContextHub Deductive L1SymbolicLlama 3.1 70b53.840.7
ContextHub Deductive L1SymbolicGemma 2 9b56.339.2
ContextHub Deductive L1SymbolicPhi-3 Small 8k54.850.2
ContextHub Deductive L1SymbolicQwen 2 7b59.343.3
ContextHub Deductive L1SymbolicQwen 2 72b51.544.0
ContextHub Deductive L1SymbolicGPT-4o Mini49.341.5
ContextHub Deductive L1SymbolicGpt-4o59.349.0
ContextHub Deductive L1SymbolicClaude-3 Haiku50.539.7
ContextHub Deductive L1SymbolicClaude-3.5 Sonnet54.547.0
ContextHub Deductive L1SymbolicGemini 1.5 Flash47.338.5
ContextHub Deductive L1SymbolicGemini 1.5 Pro57.346.0
ContextHub Abductive L1SymbolicLlama 2 7b29.416.4
ContextHub Abductive L1SymbolicMistral 7b46.925.8
ContextHub Abductive L1SymbolicLlama 3.1 8b43.624.2
ContextHub Abductive L1SymbolicLlama 3.1 70b55.343.9
ContextHub Abductive L1SymbolicGemma 2 9b61.958.9
ContextHub Abductive L1SymbolicPhi-3 Small 8k62.560.3
ContextHub Abductive L1SymbolicQwen 2 7b52.247.5
ContextHub Abductive L1SymbolicQwen 2 72b61.945.0
ContextHub Abductive L1SymbolicGPT-4o Mini61.142.2
ContextHub Abductive L1SymbolicGpt-4o74.265.6
ContextHub Abductive L1SymbolicClaude-3 Haiku35.322.8
ContextHub Abductive L1SymbolicClaude-3.5 Sonnet80.860.3
ContextHub Abductive L1SymbolicGemini 1.5 Flash66.447.2
ContextHub Abductive L1SymbolicGemini 1.5 Pro62.260.0
Big-Bench HardSymbolicLlama 2 7b29.831.9
Big-Bench HardSymbolicMistral 7b39.335.1
Big-Bench HardSymbolicLlama 3.1 8b62.845.6
Big-Bench HardSymbolicLlama 3.1 70b78.954.8
Big-Bench HardSymbolicGemma 2 9b58.750.8
Big-Bench HardSymbolicPhi-3 Small 8k70.055.1
Big-Bench HardSymbolicQwen 2 7b52.647.6
Big-Bench HardSymbolicQwen 2 72b75.159.0
Big-Bench HardSymbolicGPT-4o Mini77.749.7
Big-Bench HardSymbolicGpt-4o84.664.5
Big-Bench HardSymbolicClaude-3 Haiku62.447.3
Big-Bench HardSymbolicClaude-3.5 Sonnet83.656.9
Big-Bench HardSymbolicGemini 1.5 Flash71.355.4
Big-Bench HardSymbolicGemini 1.5 Pro71.650.3
MATHMathematicalLlama 2 7b4.24.0
MATHMathematicalMistral 7b12.46.1
MATHMathematicalLlama 3.1 8b47.213.8
MATHMathematicalLlama 3.1 70b64.422.8
MATHMathematicalGemma 2 9b45.619.1
MATHMathematicalPhi-3 Small 8k43.218.5
MATHMathematicalQwen 2 7b53.713.3
MATHMathematicalQwen 2 72b63.523.8
MATHMathematicalGPT-4o Mini69.624.3
MATHMathematicalGpt-4o73.335.2
MATHMathematicalClaude-3 Haiku32.717.4
MATHMathematicalClaude-3.5 Sonnet63.834.6
MATHMathematicalGemini 1.5 Flash54.531.3
MATHMathematicalGemini 1.5 Pro62.139.4
GSM8k-HardMathematicalLlama 2 7b6.71.8
GSM8k-HardMathematicalMistral 7b21.03.0
GSM8k-HardMathematicalLlama 3.1 8b34.46.0
GSM8k-HardMathematicalLlama 3.1 70b46.614.0
GSM8k-HardMathematicalGemma 2 9b40.98.8
GSM8k-HardMathematicalPhi-3 Small 8k33.06.9
GSM8k-HardMathematicalQwen 2 7b48.45.0
GSM8k-HardMathematicalQwen 2 72b54.813.7
", + "image_path": "b525d383042b5783dceda3c38de601aba0076e717365bb347ecf929af7e0a671.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "29" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 28 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 135, + 99, + 476, + 270 + ], + "blocks": [ + { + "bbox": [ + 140, + 78, + 470, + 90 + ], + "lines": [ + { + "bbox": [ + 140, + 78, + 470, + 90 + ], + "spans": [ + { + "bbox": [ + 140, + 78, + 470, + 90 + ], + "type": "text", + "content": "Table 7: Zero-shot accuracy for direct answering and CoT prompts on all datasets" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 135, + 99, + 476, + 270 + ], + "lines": [ + { + "bbox": [ + 135, + 99, + 476, + 270 + ], + "spans": [ + { + "bbox": [ + 135, + 99, + 476, + 270 + ], + "type": "table", + "html": "
Dataset | Type | Model | zero-shot CoT accuracy | zero-shot DA accuracy
GSM8k-HardMathematicalGPT-4o Mini53.911.7
GSM8k-HardMathematicalGpt-4o60.326.0
GSM8k-HardMathematicalClaude-3 Haiku45.39.6
GSM8k-HardMathematicalClaude-3.5 Sonnet50.832.3
GSM8k-HardMathematicalGemini 1.5 Flash54.616.2
GSM8k-HardMathematicalGemini 1.5 Pro58.226.2
GSM8kMathematicalLlama 2 7b29.66.9
GSM8kMathematicalMistral 7b59.210.2
GSM8kMathematicalLlama 3.1 8b85.418.5
GSM8kMathematicalLlama 3.1 70b85.637.0
GSM8kMathematicalGemma 2 9b89.224.9
GSM8kMathematicalPhi-3 Small 8k90.024.9
GSM8kMathematicalQwen 2 7b87.920.7
GSM8kMathematicalQwen 2 72b94.640.1
GSM8kMathematicalGPT-4o Mini94.131.8
GSM8kMathematicalGpt-4o95.858.8
GSM8kMathematicalClaude-3 Haiku89.422.9
GSM8kMathematicalClaude-3.5 Sonnet96.162.2
GSM8kMathematicalGemini 1.5 Flash91.438.6
GSM8kMathematicalGemini 1.5 Pro92.752.4
", + "image_path": "40dd47fb0c0340c0e72e770cdc928d678926fc1bdbb82d49d5824dcbe69e1cc0.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 297, + 243, + 308 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 297, + 243, + 308 + ], + "spans": [ + { + "bbox": [ + 105, + 297, + 243, + 308 + ], + "type": "text", + "content": "F.2 FULL FEW-SHOT RESULTS" + } + ] + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 138, + 338, + 472, + 727 + ], + "blocks": [ + { + "bbox": [ + 141, + 318, + 468, + 330 + ], + "lines": [ + { + "bbox": [ + 141, + 318, + 468, + 330 + ], + "spans": [ + { + "bbox": [ + 141, + 318, + 468, + 330 + ], + "type": "text", + "content": "Table 8: Few-shot accuracy for direct answering and CoT prompts on all datasets" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 138, + 338, + 472, + 727 + ], + "lines": [ + { + "bbox": [ + 138, + 338, + 472, + 727 + ], + "spans": [ + { + "bbox": [ + 138, + 338, + 472, + 727 + ], + "type": "table", + "html": "
Dataset | Type | Model | few-shot CoT accuracy | few-shot DA accuracy
AGIEval LSAT RCSoft ReasoningLlama 2 7b33.138.7
AGIEval LSAT RCSoft ReasoningMistral 7b52.457.2
AGIEval LSAT RCSoft ReasoningLlama 3.1 8b60.270.3
AGIEval LSAT RCSoft ReasoningLlama 3.1 70b84.488.8
AGIEval LSAT RCSoft ReasoningGemma 2 9b74.379.2
AGIEval LSAT RCSoft ReasoningPhi-3 Small 8k63.265.1
AGIEval LSAT RCSoft ReasoningQwen 2 7b61.768.8
AGIEval LSAT RCSoft ReasoningQwen 2 72b85.985.9
AGIEval LSAT RCSoft ReasoningGPT-4o Mini77.371.4
AGIEval LSAT RCSoft ReasoningGemini 1.5 Flash79.281.8
AGIEval LSAT LRSoft ReasoningLlama 2 7b33.734.7
AGIEval LSAT LRSoft ReasoningMistral 7b46.148.0
AGIEval LSAT LRSoft ReasoningLlama 3.1 8b55.758.0
AGIEval LSAT LRSoft ReasoningLlama 3.1 70b83.385.1
AGIEval LSAT LRSoft ReasoningGemma 2 9b65.768.2
AGIEval LSAT LRSoft ReasoningPhi-3 Small 8k64.759.2
AGIEval LSAT LRSoft ReasoningQwen 2 7b54.161.2
AGIEval LSAT LRSoft ReasoningQwen 2 72b77.579.6
AGIEval LSAT LRSoft ReasoningGPT-4o Mini68.464.5
AGIEval LSAT LRSoft ReasoningGemini 1.5 Flash68.672.9
GPQAMathematicalMistral 7b23.025.9
GPQAMathematicalLlama 3.1 8b22.127.2
GPQAMathematicalLlama 3.1 70b24.824.3
GPQAMathematicalGemma 2 9b19.922.3
GPQAMathematicalPhi-3 Small 8k23.922.5
GPQAMathematicalQwen 2 7b23.421.2
GPQAMathematicalQwen 2 72b22.819.9
GPQAMathematicalGPT-4o Mini20.020.0
GPQAMathematicalGemini 1.5 Flash21.924.6
CommonsenseQACommonsenseLlama 2 7b18.219.2
CommonsenseQACommonsenseMistral 7b73.670.4
CommonsenseQACommonsenseLlama 3.1 8b74.076.5
CommonsenseQACommonsenseLlama 3.1 70b84.784.6
CommonsenseQACommonsenseGemma 2 9b81.880.8
CommonsenseQACommonsensePhi-3 Small 8k80.880.4
CommonsenseQACommonsenseQwen 2 7b80.372.9
CommonsenseQACommonsenseQwen 2 72b88.487.8
CommonsenseQACommonsenseGPT-4o Mini84.784.7
CommonsenseQACommonsenseGemini 1.5 Flash81.783.3
AGIEval LSAT ARSoft ReasoningLlama 2 7b19.618.7
AGIEval LSAT ARSoft ReasoningMistral 7b20.922.6
AGIEval LSAT ARSoft ReasoningLlama 3.1 8b24.826.1
AGIEval LSAT ARSoft ReasoningLlama 3.1 70b36.130.9
AGIEval LSAT ARSoft ReasoningGemma 2 9b22.228.7
AGIEval LSAT ARSoft ReasoningPhi-3 Small 8k27.820.0
AGIEval LSAT ARSoft ReasoningQwen 2 7b24.323.0
AGIEval LSAT ARSoft ReasoningQwen 2 72b27.030.0
", + "image_path": "4112dcc85423a261e0150879b292cf256cebcbd7047df4ac82f69de0680d3a93.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "30" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 29 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 138, + 98, + 472, + 727 + ], + "blocks": [ + { + "bbox": [ + 141, + 78, + 468, + 90 + ], + "lines": [ + { + "bbox": [ + 141, + 78, + 468, + 90 + ], + "spans": [ + { + "bbox": [ + 141, + 78, + 468, + 90 + ], + "type": "text", + "content": "Table 8: Few-shot accuracy for direct answering and CoT prompts on all datasets" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 138, + 98, + 472, + 727 + ], + "lines": [ + { + "bbox": [ + 138, + 98, + 472, + 727 + ], + "spans": [ + { + "bbox": [ + 138, + 98, + 472, + 727 + ], + "type": "table", + "html": "
Dataset | Type | Model | few-shot CoT accuracy | few-shot DA accuracy
AGIEval LSAT ARSoft ReasoningGPT-4o Mini28.726.1
AGIEval LSAT ARSoft ReasoningGemini 1.5 Flash28.320.4
MMLUKnowledgeLlama 2 7b49.042.8
MMLUKnowledgeMistral 7b63.057.0
MMLUKnowledgeLlama 3.1 8b71.769.3
MMLUKnowledgeLlama 3.1 70b84.383.7
MMLUKnowledgeGemma 2 9b74.772.4
MMLUKnowledgePhi-3 Small 8k77.375.2
MMLUKnowledgeQwen 2 7b69.968.6
MMLUKnowledgeQwen 2 72b82.781.8
MMLUKnowledgeGPT-4o Mini82.377.8
MMLUKnowledgeGemini 1.5 Flash78.179.0
StrategyQACommonsenseLlama 2 7b57.930.9
StrategyQACommonsenseMistral 7b70.772.0
StrategyQACommonsenseLlama 3.1 8b74.465.8
StrategyQACommonsenseLlama 3.1 70b87.184.2
StrategyQACommonsenseGemma 2 9b77.173.3
StrategyQACommonsensePhi-3 Small 8k75.071.1
StrategyQACommonsenseQwen 2 7b71.958.9
StrategyQACommonsenseQwen 2 72b83.280.1
StrategyQACommonsenseGPT-4o Mini83.086.2
StrategyQACommonsenseGemini 1.5 Flash77.080.3
ContextHub Abductive L2SymbolicLlama 2 7b36.235.0
ContextHub Abductive L2SymbolicMistral 7b33.830.0
ContextHub Abductive L2SymbolicLlama 3.1 8b32.736.1
ContextHub Abductive L2SymbolicLlama 3.1 70b54.651.2
ContextHub Abductive L2SymbolicGemma 2 9b44.833.2
ContextHub Abductive L2SymbolicPhi-3 Small 8k49.834.2
ContextHub Abductive L2SymbolicQwen 2 7b39.635.0
ContextHub Abductive L2SymbolicQwen 2 72b54.734.9
ContextHub Abductive L2SymbolicGPT-4o Mini62.060.0
ContextHub Abductive L2SymbolicGemini 1.5 Flash48.647.8
ContextHub Abductive L1SymbolicLlama 2 7b21.416.7
ContextHub Abductive L1SymbolicMistral 7b23.621.7
ContextHub Abductive L1SymbolicLlama 3.1 8b40.036.1
ContextHub Abductive L1SymbolicLlama 3.1 70b62.258.9
ContextHub Abductive L1SymbolicGemma 2 9b48.959.4
ContextHub Abductive L1SymbolicPhi-3 Small 8k59.256.4
ContextHub Abductive L1SymbolicQwen 2 7b48.638.9
ContextHub Abductive L1SymbolicQwen 2 72b53.356.1
ContextHub Abductive L1SymbolicGPT-4o Mini77.259.2
ContextHub Abductive L1SymbolicGemini 1.5 Flash79.768.6
MuSR Murder MysteriesSoft ReasoningMistral 7b62.056.4
MuSR Murder MysteriesSoft ReasoningLlama 3.1 8b61.661.2
MuSR Murder MysteriesSoft ReasoningLlama 3.1 70b73.268.0
MuSR Murder MysteriesSoft ReasoningGemma 2 9b81.662.0
MuSR Murder MysteriesSoft ReasoningPhi-3 Small 8k62.053.6
MuSR Murder MysteriesSoft ReasoningQwen 2 7b56.055.6
MuSR Murder MysteriesSoft ReasoningQwen 2 72b80.466.0
MuSR Murder MysteriesSoft ReasoningGPT-4o Mini76.069.6
MuSR Murder MysteriesSoft ReasoningGemini 1.5 Flash70.066.4
MuSR Team AllocationsSoft ReasoningMistral 7b42.843.2
MuSR Team AllocationsSoft ReasoningLlama 3.1 8b59.651.6
MuSR Team AllocationsSoft ReasoningLlama 3.1 70b89.263.6
MuSR Team AllocationsSoft ReasoningGemma 2 9b48.445.6
MuSR Team AllocationsSoft ReasoningPhi-3 Small 8k66.046.4
MuSR Team AllocationsSoft ReasoningQwen 2 7b34.040.8
MuSR Team AllocationsSoft ReasoningQwen 2 72b56.066.4
MuSR Team AllocationsSoft ReasoningGPT-4o Mini75.660.0
MuSR Team AllocationsSoft ReasoningGemini 1.5 Flash90.054.4
MMLU ProKnowledgeLlama 2 7b21.520.4
MMLU ProKnowledgeMistral 7b34.826.7
MMLU ProKnowledgeLlama 3.1 8b44.738.0
MMLU ProKnowledgeLlama 3.1 70b64.455.1
MMLU ProKnowledgeGemma 2 9b48.542.4
MMLU ProKnowledgePhi-3 Small 8k54.843.2
MMLU ProKnowledgeQwen 2 7b46.639.0
MMLU ProKnowledgeQwen 2 72b62.551.6
MMLU ProKnowledgeGPT-4o Mini63.045.0
MMLU ProKnowledgeGemini 1.5 Flash59.450.6
MuSR Object PlacementsSoft ReasoningMistral 7b55.541.0
MuSR Object PlacementsSoft ReasoningLlama 3.1 8b66.850.4
MuSR Object PlacementsSoft ReasoningLlama 3.1 70b67.257.4
MuSR Object PlacementsSoft ReasoningGemma 2 9b68.058.2
MuSR Object PlacementsSoft ReasoningPhi-3 Small 8k62.151.6
MuSR Object PlacementsSoft ReasoningQwen 2 7b46.943.8
MuSR Object PlacementsSoft ReasoningQwen 2 72b66.443.0
", + "image_path": "e475ef0b9cb04a84d3ae94a8f5ab9b38aadc1893e814332b167f58731fd384f0.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 310, + 760 + ], + "type": "text", + "content": "31" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 30 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 138, + 98, + 472, + 446 + ], + "blocks": [ + { + "bbox": [ + 142, + 78, + 467, + 89 + ], + "lines": [ + { + "bbox": [ + 142, + 78, + 467, + 89 + ], + "spans": [ + { + "bbox": [ + 142, + 78, + 467, + 89 + ], + "type": "text", + "content": "Table 8: Few-shot accuracy for direct answering and CoT prompts on all datasets" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 138, + 98, + 472, + 446 + ], + "lines": [ + { + "bbox": [ + 138, + 98, + 472, + 446 + ], + "spans": [ + { + "bbox": [ + 138, + 98, + 472, + 446 + ], + "type": "table", + "html": "
Dataset | Type | Model | few-shot CoT accuracy | few-shot DA accuracy
MuSR Object PlacementsSoft ReasoningGPT-4o Mini67.047.0
MuSR Object PlacementsSoft ReasoningGemini 1.5 Flash73.054.7
ContextHub Deductive L2SymbolicLlama 2 7b34.715.0
ContextHub Deductive L2SymbolicMistral 7b63.851.4
ContextHub Deductive L2SymbolicLlama 3.1 8b76.127.3
ContextHub Deductive L2SymbolicLlama 3.1 70b82.653.6
ContextHub Deductive L2SymbolicGemma 2 9b61.947.6
ContextHub Deductive L2SymbolicPhi-3 Small 8k61.554.0
ContextHub Deductive L2SymbolicQwen 2 7b55.336.4
ContextHub Deductive L2SymbolicQwen 2 72b80.254.0
ContextHub Deductive L2SymbolicGPT-4o Mini59.041.0
ContextHub Deductive L2SymbolicGemini 1.5 Flash90.242.5
ContextHub Deductive L1SymbolicLlama 2 7b34.716.0
ContextHub Deductive L1SymbolicMistral 7b46.259.2
ContextHub Deductive L1SymbolicLlama 3.1 8b73.023.0
ContextHub Deductive L1SymbolicLlama 3.1 70b67.550.0
ContextHub Deductive L1SymbolicGemma 2 9b66.045.7
ContextHub Deductive L1SymbolicPhi-3 Small 8k74.851.8
ContextHub Deductive L1SymbolicQwen 2 7b58.837.5
ContextHub Deductive L1SymbolicQwen 2 72b70.742.8
ContextHub Deductive L1SymbolicGPT-4o Mini59.244.3
ContextHub Deductive L1SymbolicGemini 1.5 Flash89.349.8
MATHMathematicalLlama 2 7b4.73.9
MATHMathematicalMistral 7b13.77.1
MATHMathematicalLlama 3.1 8b41.214.2
MATHMathematicalLlama 3.1 70b61.924.2
MATHMathematicalGemma 2 9b47.519.8
MATHMathematicalPhi-3 Small 8k42.418.9
MATHMathematicalQwen 2 7b55.015.0
MATHMathematicalQwen 2 72b65.326.2
MATHMathematicalGPT-4o Mini71.724.6
MATHMathematicalGemini 1.5 Flash54.732.3
GSM8KMathematicalLlama 2 7b29.07.7
GSM8KMathematicalMistral 7b56.212.5
GSM8KMathematicalLlama 3.1 8b86.420.1
GSM8KMathematicalLlama 3.1 70b96.139.1
GSM8KMathematicalGemma 2 9b89.224.9
GSM8KMathematicalPhi-3 Small 8k90.424.5
GSM8KMathematicalQwen 2 7b87.621.4
GSM8KMathematicalQwen 2 72b93.240.6
GSM8KMathematicalGPT-4o Mini94.232.8
GSM8KMathematicalGemini 1.5 Flash90.640.4
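To make the deltas behind Table 8 concrete, here is a minimal Python sketch (not the authors' released code) of how a CoT delta and a relative error reduction can be computed from rows of this table; the `rows` sample and field order are illustrative assumptions.

```python
# Illustrative only: compute CoT delta and relative error reduction from
# (dataset, type, model, cot_acc, da_acc) rows like those in Table 8.
rows = [
    ("GSM8K", "Mathematical", "Llama 3.1 70b", 96.1, 39.1),
    ("MMLU Pro", "Knowledge", "Llama 3.1 70b", 64.4, 55.1),
]

for dataset, _task_type, model, cot_acc, da_acc in rows:
    delta = cot_acc - da_acc                    # absolute gain in accuracy points
    err_red = 100.0 * delta / (100.0 - da_acc)  # share of remaining errors removed by CoT
    print(f"{dataset} / {model}: delta={delta:+.1f}, error reduction={err_red:.1f}%")
```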
", + "image_path": "f1ee595f2bae3b379ed01eb3e261d1974f2de31226bb5ac0646fe5244a263ac2.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 475, + 397, + 486 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 475, + 397, + 486 + ], + "spans": [ + { + "bbox": [ + 107, + 475, + 397, + 486 + ], + "type": "text", + "content": "F.3 ANSWER EXTRACTOR AND AVERAGE ANSWER SPAN RESULTS" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 496, + 504, + 551 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 496, + 504, + 551 + ], + "spans": [ + { + "bbox": [ + 107, + 496, + 504, + 551 + ], + "type": "text", + "content": "In this section, we report the number of generations from each model on each dataset that our answer parser could not extract. \"1\" denotes that a model was not run on a certain dataset due to context length limitations in the few-shot setting. We see that these unparseable rates are generally low across the board. The weakest models struggle on some of the most challenging datasets, but unparseable rates are all at or below " + }, + { + "bbox": [ + 107, + 496, + 504, + 551 + ], + "type": "inline_equation", + "content": "15\\%" + }, + { + "bbox": [ + 107, + 496, + 504, + 551 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 557, + 504, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 557, + 504, + 601 + ], + "spans": [ + { + "bbox": [ + 107, + 557, + 504, + 601 + ], + "type": "text", + "content": "We also report the average character index of the beginning of the answer span that the answer parser extracted. Of particular note is that the direct answer prompts all return an answer within the first 60 characters, indicating that the answers are returned almost immediately, as desired. CoT completions are much longer." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 618, + 315, + 629 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 618, + 315, + 629 + ], + "spans": [ + { + "bbox": [ + 107, + 618, + 315, + 629 + ], + "type": "text", + "content": "G ZOOM-IN: MMLU AND MMLU PRO" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 643, + 504, + 731 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 643, + 504, + 731 + ], + "spans": [ + { + "bbox": [ + 107, + 643, + 504, + 731 + ], + "type": "text", + "content": "MMLU and MMLU Pro show gains from adding CoT, but because these datasets are so broad, they defy simple characterization. We explore the performance of CoT on each category of MMLU to understand divergences in CoT performance between these domains. We list the top three categories where CoT gives the largest error reduction for Llama 3.1 8B and 70B on MMLU and MMLU Pro in Table 17. Some of these categories are explicitly mathematical in nature, as we might expect from Figure 3. We can also see that CoT is helping on categories like \"business\"; upon closer inspection, we found that these categories frequently involve math as well (e.g., business questions may involve computations surrounding wealth). 
We need to more carefully characterize MMLU at the instance" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 292, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 292, + 37 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 292, + 37 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 752, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 752, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 752, + 310, + 760 + ], + "type": "text", + "content": "32" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 31 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 116, + 89, + 504, + 160 + ], + "blocks": [ + { + "bbox": [ + 173, + 78, + 438, + 87 + ], + "lines": [ + { + "bbox": [ + 173, + 78, + 438, + 87 + ], + "spans": [ + { + "bbox": [ + 173, + 78, + 438, + 87 + ], + "type": "text", + "content": "CoT vs direct answer prompting in zero-shot setting (sorted by CoT delta)" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 116, + 89, + 504, + 160 + ], + "lines": [ + { + "bbox": [ + 116, + 89, + 504, + 160 + ], + "spans": [ + { + "bbox": [ + 116, + 89, + 504, + 160 + ], + "type": "image", + "image_path": "4555fd6feb79a2952a72299156e54e6ce602be5defc12ec9511a3ee2a0640053.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 116, + 163, + 504, + 227 + ], + "blocks": [ + { + "bbox": [ + 116, + 163, + 504, + 227 + ], + "lines": [ + { + "bbox": [ + 116, + 163, + 504, + 227 + ], + "spans": [ + { + "bbox": [ + 116, + 163, + 504, + 227 + ], + "type": "image", + "image_path": "23092bff0590411ea6a3a6a3b3cc309010f03fcc52eb0dd86e63bde56fae586a.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 116, + 232, + 504, + 294 + ], + "blocks": [ + { + "bbox": [ + 116, + 232, + 504, + 294 + ], + "lines": [ + { + "bbox": [ + 116, + 232, + 504, + 294 + ], + "spans": [ + { + "bbox": [ + 116, + 232, + 504, + 294 + ], + "type": "image", + "image_path": "7ee7c9def64df0116c3d3d3fd0d7f821a92277a84d73722bb4a98810297129e1.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 116, + 298, + 504, + 361 + ], + "blocks": [ + { + "bbox": [ + 116, + 298, + 504, + 361 + ], + "lines": [ + { + "bbox": [ + 116, + 298, + 504, + 361 + ], + "spans": [ + { + "bbox": [ + 116, + 298, + 504, + 361 + ], + "type": "image", + "image_path": "caedee3172006eff41acc4139009516b0772877a4d107fc766aa03a806257c3b.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 116, + 365, + 504, + 428 + ], + "blocks": [ + { + "bbox": [ + 116, + 365, + 504, + 428 + ], + "lines": [ + { + "bbox": [ + 116, + 365, + 504, + 428 + ], + "spans": [ + { + "bbox": [ + 116, + 365, + 504, + 428 + ], + "type": "image", + "image_path": "2dd6a070b54dea85cca67c15249312175b3c234a4d8465f0ff509f7b9bc66f03.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 116, + 432, + 504, + 495 + ], + "blocks": [ + { + "bbox": [ + 116, + 432, + 504, + 495 + ], + "lines": [ + { + 
"bbox": [ + 116, + 432, + 504, + 495 + ], + "spans": [ + { + "bbox": [ + 116, + 432, + 504, + 495 + ], + "type": "image", + "image_path": "b1ceea31a35477784ca2848751bf8f055fef4bf34de33f6883042939667571be.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 116, + 499, + 504, + 563 + ], + "blocks": [ + { + "bbox": [ + 116, + 499, + 504, + 563 + ], + "lines": [ + { + "bbox": [ + 116, + 499, + 504, + 563 + ], + "spans": [ + { + "bbox": [ + 116, + 499, + 504, + 563 + ], + "type": "image", + "image_path": "52f19834dc472e800244344b94f45e6ba78b8e1ce6e5d6f1e74a5077acb42011.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 116, + 567, + 504, + 629 + ], + "blocks": [ + { + "bbox": [ + 116, + 567, + 504, + 629 + ], + "lines": [ + { + "bbox": [ + 116, + 567, + 504, + 629 + ], + "spans": [ + { + "bbox": [ + 116, + 567, + 504, + 629 + ], + "type": "image", + "image_path": "1efd9008ea291cbd03a58815d2537ab80fd48a1a8a742e1048e4fc078b52607b.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 116, + 633, + 504, + 694 + ], + "blocks": [ + { + "bbox": [ + 116, + 633, + 504, + 694 + ], + "lines": [ + { + "bbox": [ + 116, + 633, + 504, + 694 + ], + "spans": [ + { + "bbox": [ + 116, + 633, + 504, + 694 + ], + "type": "image", + "image_path": "b3a9ab83cd4738f25c01c3b5a4c3ac93767c31780316f7283e4097b8757c2499.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 694, + 504, + 728 + ], + "lines": [ + { + "bbox": [ + 104, + 694, + 504, + 728 + ], + "spans": [ + { + "bbox": [ + 104, + 694, + 504, + 728 + ], + "type": "text", + "content": "Figure 9: Performance of zero-shot direct (blue) and zero-shot CoT (orange) across datasets and models. Graphs are sorted in ascending order by median delta (CoT, direct). The datasets benefiting substantially are all symbolic or semi-symbolic in nature." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "33" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 32 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 174, + 504, + 350 + ], + "blocks": [ + { + "bbox": [ + 105, + 132, + 504, + 164 + ], + "lines": [ + { + "bbox": [ + 105, + 132, + 504, + 164 + ], + "spans": [ + { + "bbox": [ + 105, + 132, + 504, + 164 + ], + "type": "text", + "content": "Table 9: Percentage of responses per dataset per model that our answer parser could not extract an answer for in the zero-shot direct answer setting. Prompt modifications were made to decrease these numbers. 
No model is above " + }, + { + "bbox": [ + 105, + 132, + 504, + 164 + ], + "type": "inline_equation", + "content": "15\\%" + }, + { + "bbox": [ + 105, + 132, + 504, + 164 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 174, + 504, + 350 + ], + "lines": [ + { + "bbox": [ + 106, + 174, + 504, + 350 + ], + "spans": [ + { + "bbox": [ + 106, + 174, + 504, + 350 + ], + "type": "table", + "html": "
Zero-shot Direct Answer Unparseable Answer Rate by Percentage
dataset | Llama 2 7b | Mistral 7b | Llama 3.1 8b | Llama 3.1 70b | Gemma 2 9b | Phi-3 Small 8k | Qwen 2 7b | Qwen 2 72b | GPT-4o Mini | GPT-4o | Claude-3 Haiku | Claude-3.5 Sonnet | Gemini 1.5 Flash
CommonsenseQA | 1.9 | 2.5 | 1.1 | 0.0 | 0.8 | 0.1 | 1.6 | 0.7 | 0.0 | 0.0 | 0.1 | 0.0 | 0.2
StrategyQA | 0.0 | 1.9 | 0.1 | 0.0 | 11.7 | 0.5 | 4.9 | 2.7 | 0.0 | 0.0 | 0.0 | 0.0 | 0.2
SiQA | 0.2 | 6.6 | 0.0 | 0.1 | 3.9 | 0.3 | 0.1 | 3.0 | 0.1 | 0.1 | 0.0 | 0.0 | 0.4
PiQA | 0.4 | 6.0 | 0.0 | 0.1 | 3.3 | 2.1 | 0.0 | 5.5 | 0.2 | 0.0 | 0.1 | 0.0 | 0.9
Winogrande | 0.0 | 3.0 | 0.1 | 0.0 | 2.1 | 0.2 | 5.1 | 0.4 | 0.0 | 0.0 | 0.0 | 0.0 | 3.6
Arc Easy | 0.0 | 1.8 | 0.5 | 0.0 | 0.0 | 0.2 | 9.1 | 0.7 | 3.5 | 0.4 | 0.2 | 0.0 | 3.2
Arc Challenge | 0.0 | 2.3 | 1.0 | 0.0 | 0.3 | 0.7 | 10.7 | 0.7 | 10.0 | 0.7 | 0.0 | 0.0 | 5.0
AGIEval LSAT LR | 0.4 | 0.0 | 0.0 | 0.0 | 0.0 | 0.2 | 0.0 | 0.0 | 0.0 | 2.5 | 0.0 | 0.0 | 0.2
AGIEval LSAT AR | 0.4 | 0.0 | 0.0 | 0.0 | 4.3 | 3.9 | 0.0 | 0.0 | 0.0 | 8.7 | 0.0 | 0.0 | 0.0
AGIEval LSAT RC | 0.4 | 0.4 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 9.7 | 0.0 | 0.0 | 0.4
ContextHub Deductive L1 | 0.0 | 0.0 | 0.0 | 0.0 | 1.2 | 0.0 | 2.3 | 0.0 | 0.0 | 0.0 | 0.2 | 0.0 | 0.2
ContextHub Deductive L2 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 2.2 | 1.0 | 0.0 | 0.0 | 2.8 | 0.0 | 0.0
ContextHub Abductive L1 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.3 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0
ContextHub Abductive L2 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.1 | 1.5 | 0.2 | 0.0 | 0.0 | 0.8 | 0.0 | 0.0
MuSR Murder Mysteries | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0
MuSR Team Allocations | 0.0 | 0.0 | 0.0 | 0.0 | 3.6 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 8.4 | 0.4
MuSR Object Placements | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0
MMLU | 0.1 | 0.0 | 0.0 | 0.0 | 0.1 | 0.2 | 3.6 | 1.2 | 0.6 | 0.0 | 1.3 | 0.3 | 0.2
MMLU Pro | 0.7 | 1.3 | 1.0 | 0.3 | 1.0 | 3.7 | 6.8 | 12.2 | 0.4 | 0.3 | 0.3 | 0.4 | 0.6
GPQA | 1.3 | 7.1 | 0.0 | 0.0 | 8.7 | 12.7 | 5.4 | 15.2 | 0.0 | 0.0 | 1.6 | 0.0 | 0.7
MATH | 0.6 | 6.9 | 0.3 | 0.2 | 0.1 | 0.1 | 3.5 | 3.0 | 0.8 | 0.0 | 0.3 | 0.0 | 0.4
GSM8k | 0.2 | 4.1 | 2.5 | 0.0 | 2.7 | 0.0 | 1.7 | 0.2 | 0.0 | 0.0 | 12.7 | 5.5 | 0.0
BigGen Bench | 4.6 | 0.3 | 0.9 | 0.1 | 0.5 | 1.0 | 1.3 | 1.0 | 1.3 | 0.0 | 0.0 | 0.1 | 0.4
GSM8k-Hard | 4.8 | 7.6 | 2.0 | 0.4 | 0.4 | 0.2 | 3.2 | 1.1 | 0.1 | 0.5 | 5.2 | 0.5 | 0.2
MuSiQue | 0.1 | 0.0 | 0.0 | 0.0 | 0.0 | 0.1 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.2 | 0.1
Folio | 4.4 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 3.9 | 0.0 | 0.0 | 12.3 | 0.0 | 0.0 | 0.5
BigBench-Hard | 0.0 | 0.0 | 0.0 | 7.4 | 0.0 | 0.2 | 0.0 | 0.0 | 0.0 | 0.3 | 0.0 | 4.5 | 12.8
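As a rough illustration of how an unparseable rate like those above can be computed, the sketch below uses a single regex in the spirit of the prompts shown in Appendix J ("The answer is therefore ..."); the authors' actual parser patterns are not reproduced here, so this regex is an assumption.

```python
import re

# Hypothetical extractor in the spirit of the paper's answer parser; the
# authors' real patterns are not shown in this appendix. Responses from which
# no answer can be pulled count toward the "unparseable" rate. (In the
# few-shot tables, a -1.0 cell instead means the model was not run on that
# dataset due to context length limits.)
ANSWER_RE = re.compile(r"[Tt]he answer is(?: therefore)?\s*\(?([A-E]|-?[\d.,]+)\)?")

def extract_answer(response: str):
    m = ANSWER_RE.search(response)
    return m.group(1) if m else None

def unparseable_rate(responses) -> float:
    return 100.0 * sum(extract_answer(r) is None for r in responses) / len(responses)
```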
", + "image_path": "702ab985934a532ba974cf9b014e4583b0a5e45b3f3849ee36e39ad0e482ce6d.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 106, + 503, + 504, + 677 + ], + "blocks": [ + { + "bbox": [ + 105, + 460, + 504, + 492 + ], + "lines": [ + { + "bbox": [ + 105, + 460, + 504, + 492 + ], + "spans": [ + { + "bbox": [ + 105, + 460, + 504, + 492 + ], + "type": "text", + "content": "Table 10: Percentage of responses per dataset per model that our answer parser could not extract an answer for in the zero-shot CoT setting. Prompt modifications were made to decrease these numbers. No model is above " + }, + { + "bbox": [ + 105, + 460, + 504, + 492 + ], + "type": "inline_equation", + "content": "15\\%" + }, + { + "bbox": [ + 105, + 460, + 504, + 492 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 503, + 504, + 677 + ], + "lines": [ + { + "bbox": [ + 106, + 503, + 504, + 677 + ], + "spans": [ + { + "bbox": [ + 106, + 503, + 504, + 677 + ], + "type": "table", + "html": "
Zero-shot CoT Unparseable Answer Rate by Percentage
dataset | Llama 2 7b | Mistral 7b | Llama 3.1 8b | Llama 3.1 70b | Gemma 2 9b | Phi-3 Small 8k | Qwen 2 7b | Qwen 2 72b | GPT-4o Mini | GPT-4o | Claude-3 Haiku | Claude-3.5 Sonnet | Gemini 1.5 Flash
CommonsenseQA | 2.9 | 1.3 | 8.6 | 0.0 | 0.6 | 0.1 | 0.0 | 0.0 | 1.6 | 0.0 | 0.2 | 0.3 | 2.4
StrategyQA | 1.0 | 0.1 | 1.1 | 0.8 | 0.3 | 0.4 | 0.3 | 0.0 | 0.0 | 0.0 | 0.0 | 2.1 | 4.4
SiQA | 0.8 | 1.8 | 0.3 | 0.1 | 1.6 | 0.0 | 0.1 | 0.1 | 0.0 | 0.0 | 0.3 | 0.1 | 3.5
PiQA | 1.6 | 1.6 | 0.2 | 0.1 | 2.8 | 0.3 | 0.5 | 0.3 | 0.0 | 0.0 | 1.4 | 0.3 | 4.6
Winogrande | 0.9 | 1.4 | 0.2 | 0.2 | 0.9 | 0.4 | 0.3 | 0.0 | 0.0 | 0.0 | 0.0 | 2.0 | 3.4
Arc Easy | 0.2 | 0.4 | 0.2 | 0.0 | 0.5 | 1.6 | 1.6 | 0.0 | 0.5 | 0.0 | 0.0 | 0.0 | 0.4
Arc Challenge | 0.0 | 0.7 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7
AGIEval LSAT LR | 3.3 | 2.2 | 0.0 | 0.0 | 1.2 | 0.0 | 2.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.2
AGIEval LSAT AR | 4.8 | 7.0 | 6.1 | 2.2 | 5.7 | 5.2 | 4.3 | 0.4 | 1.3 | 1.3 | 0.0 | 0.4 | 1.7
AGIEval LSAT RC | 7.1 | 1.1 | 0.0 | 0.0 | 0.7 | 3.0 | 6.7 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4
ContextHub Deductive L1 | 0.7 | 1.3 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.3
ContextHub Deductive L2 | 0.2 | 0.4 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4
ContextHub Abductive L1 | 0.6 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0
ContextHub Abductive L2 | 0.0 | 0.2 | 0.1 | 0.0 | 0.0 | 0.3 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4
MuSR Murder Mysteries | 0.0 | 0.4 | 0.0 | 0.0 | 0.0 | 11.6 | 0.4 | 0.0 | 0.0 | 0.0 | 0.0 | 6.8 | 3.6
MuSR Team Allocations | 5.2 | 3.2 | 0.8 | 0.0 | 0.8 | 0.4 | 0.4 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0
MuSR Object Placements | 0.0 | 1.6 | 0.0 | 0.0 | 0.4 | 0.8 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 2.0 | 0.4
MMLU | 1.9 | 0.6 | 1.0 | 0.2 | 1.5 | 1.0 | 0.4 | 0.2 | 0.0 | 0.1 | 0.0 | 3.1 | 3.2
MMLU Pro | 4.4 | 5.4 | 13.1 | 3.3 | 12.5 | 3.6 | 5.4 | 2.0 | 2.4 | 1.9 | 0.4 | 5.0 | 4.4
GPQA | 4.5 | 10.3 | 9.4 | 1.6 | 8.5 | 1.8 | 3.8 | 0.7 | 0.0 | 0.0 | 0.0 | 11.8 | 15.0
MATH | 1.6 | 5.5 | 8.2 | 2.5 | 2.3 | 1.6 | 3.0 | 0.4 | 0.4 | 0.5 | 0.9 | 1.7 | 1.0
GSM8k | 1.7 | 1.4 | 0.7 | 10.5 | 0.4 | 0.6 | 0.4 | 0.0 | 0.0 | 0.0 | 0.3 | 0.0 | 0.1
BigGen Bench | 5.0 | 0.4 | 0.5 | 0.1 | 0.5 | 0.4 | 0.3 | 9.5 | 0.0 | 0.0 | 0.0 | 0.1 | 0.4
GSM8k-Hard | 2.1 | 8.7 | 10.2 | 4.5 | 10.7 | 3.2 | 3.5 | 1.0 | 0.8 | 0.5 | 3.0 | 1.8 | 2.7
MuSiQue | 1.4 | 0.0 | 8.3 | 0.1 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7 | 3.1
Folio | 0.0 | 0.0 | 1.5 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 2.0 | 1.5
BigBench-Hard | 3.8 | 5.4 | 1.8 | 0.4 | 1.3 | 0.1 | 0.4 | 0.3 | 0.0 | 0.0 | 0.0 | 1.2 | 0.9
", + "image_path": "a7e3511c7b1a049d571584971e34c53b6185e7d89bd7342771ac75f8702f7813.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "34" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 33 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 107, + 182, + 504, + 342 + ], + "blocks": [ + { + "bbox": [ + 105, + 138, + 504, + 172 + ], + "lines": [ + { + "bbox": [ + 105, + 138, + 504, + 172 + ], + "spans": [ + { + "bbox": [ + 105, + 138, + 504, + 172 + ], + "type": "text", + "content": "Table 11: Percentage of responses per dataset per model that our answer parser could not extract an answer for in the few-shot direct answer setting. Prompt modifications were made to decrease these numbers. No model is above " + }, + { + "bbox": [ + 105, + 138, + 504, + 172 + ], + "type": "inline_equation", + "content": "15\\%" + }, + { + "bbox": [ + 105, + 138, + 504, + 172 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 107, + 182, + 504, + 342 + ], + "lines": [ + { + "bbox": [ + 107, + 182, + 504, + 342 + ], + "spans": [ + { + "bbox": [ + 107, + 182, + 504, + 342 + ], + "type": "table", + "html": "
Few-shot Direct Answer Unparseable Answer Rate by Percentage
dataset | Llama 2 7b | Mistral 7b | Llama 3.1 8b | Llama 3.1 70b | Gemma 2 9b | Phi-3 Small 8k | Qwen 2 7b | Qwen 2 72b | GPT-4o Mini | Gemini 1.5 Flash
CommonsenseQA | 0.0 | 0.1 | 0.2 | 0.0 | 1.3 | 0.9 | 9.9 | 1.3 | 0.0 | 0.6
AGIEval LSAT LR | 6.7 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.2
AGIEval LSAT AR | 2.6 | 0.0 | 0.0 | 0.0 | 3.5 | 5.2 | 0.0 | 0.0 | 0.0 | 0.0
AGIEval LSAT RC | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0
ContextHub Deductive L1 | 0.0 | 2.8 | 0.0 | 0.0 | 0.0 | 10.7 | 0.3 | 0.0 | 0.0 | 0.0
ContextHub Deductive L2 | 0.0 | 0.1 | 0.0 | 0.0 | 0.0 | 0.3 | 0.2 | 0.0 | 0.0 | 0.0
ContextHub Abductive L1 | 0.0 | 2.8 | 0.0 | 0.0 | 0.0 | 0.6 | 0.0 | 0.0 | 0.0 | 0.0
ContextHub Abductive L2 | 0.0 | 2.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0
MuSR Murder Mysteries | -1.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4
MuSR Team Allocations | -1.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0
MuSR Object Placements | -1.0 | 0.0 | 0.0 | 0.0 | 0.4 | 1.2 | 0.0 | 0.0 | 0.0 | 0.0
MMLU | 4.2 | 0.2 | 0.0 | 0.0 | 0.1 | 0.0 | 0.4 | 0.1 | 0.0 | 0.2
MMLU Pro | 5.1 | 1.2 | 2.4 | 0.3 | 1.0 | 9.1 | 0.5 | 2.6 | 0.4 | 0.5
GPQA | -1.0 | 1.3 | 0.0 | 0.0 | 3.6 | 7.4 | 13.4 | 1.1 | 0.0 | 0.0
MATH | 0.3 | 5.9 | 0.3 | 0.2 | 0.1 | 0.1 | 1.6 | 2.2 | 0.0 | 0.3
GSM8k | 0.1 | 0.1 | 0.5 | 0.0 | 0.1 | 2.2 | 0.0 | 0.2 | 0.0 | 0.0
", + "image_path": "ad7d6883efbe38b1043ded572f7e2f2cf8102fdecfbbf57fcc849c6bc0e1feb9.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 107, + 509, + 504, + 670 + ], + "blocks": [ + { + "bbox": [ + 104, + 467, + 504, + 500 + ], + "lines": [ + { + "bbox": [ + 104, + 467, + 504, + 500 + ], + "spans": [ + { + "bbox": [ + 104, + 467, + 504, + 500 + ], + "type": "text", + "content": "Table 12: Percentage of responses per dataset per model that our answer parser could not extract an answer for in the few-shot CoT setting. Prompt modifications were made to decrease these numbers. No model is above " + }, + { + "bbox": [ + 104, + 467, + 504, + 500 + ], + "type": "inline_equation", + "content": "15\\%" + }, + { + "bbox": [ + 104, + 467, + 504, + 500 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 107, + 509, + 504, + 670 + ], + "lines": [ + { + "bbox": [ + 107, + 509, + 504, + 670 + ], + "spans": [ + { + "bbox": [ + 107, + 509, + 504, + 670 + ], + "type": "table", + "html": "
Few-shot CoT Unparseable Answer Rate by Percentage
dataset | Llama 2 7b | Mistral 7b | Llama 3.1 8b | Llama 3.1 70b | Gemma 2 9b | Phi-3 Small 8k | Qwen 2 7b | Qwen 2 72b | GPT-4o Mini | Gemini 1.5 Flash
CommonsenseQA | 0.7 | 0.9 | 1.8 | 0.1 | 0.2 | 0.1 | 0.0 | 0.0 | 0.0 | 3.4
AGIEval LSAT LR | 0.6 | 0.8 | 0.4 | 0.0 | 1.4 | 3.1 | 0.8 | 0.0 | 0.0 | 0.6
AGIEval LSAT AR | 2.2 | 9.1 | 3.9 | 0.9 | 11.7 | 3.0 | 3.5 | 1.7 | 0.0 | 1.3
AGIEval LSAT RC | 7.8 | 5.9 | 0.0 | 0.0 | 1.9 | 9.3 | 2.6 | 0.0 | 0.0 | 2.2
ContextHub Deductive L1 | 0.2 | 0.0 | 0.2 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.3
ContextHub Deductive L2 | 0.9 | 0.0 | 0.2 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.3
ContextHub Abductive L1 | 0.8 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0
ContextHub Abductive L2 | 3.1 | 0.0 | 5.3 | 0.1 | 0.0 | 0.2 | 0.0 | 0.0 | 0.0 | 0.7
MuSR Murder Mysteries | -1.0 | 1.2 | 0.0 | 0.0 | 0.4 | 0.8 | 0.0 | 0.0 | 0.0 | 14.0
MuSR Team Allocations | -1.0 | 2.4 | 0.0 | 0.0 | 0.0 | 0.0 | 0.8 | 0.0 | 0.0 | 0.4
MuSR Object Placements | -1.0 | 0.4 | 0.0 | 0.0 | 1.2 | 0.4 | 0.0 | 0.0 | 0.0 | 0.0
MMLU | 0.6 | 0.8 | 1.1 | 0.2 | 1.5 | 0.7 | 0.3 | 0.2 | 0.2 | 2.5
MMLU Pro | 0.6 | 1.9 | 8.5 | 2.1 | 14.1 | 1.8 | 1.9 | 0.8 | 1.1 | 3.9
GPQA | -1.0 | 12.1 | 10.3 | 0.9 | 12.9 | 6.0 | 5.6 | 3.3 | 0.0 | 13.6
MATH | 1.5 | 6.8 | 8.2 | 2.4 | 11.1 | 2.6 | 2.9 | 1.1 | 0.5 | 1.8
GSM8k | 0.8 | 1.3 | 1.0 | 0.1 | 0.5 | 0.5 | 0.1 | 0.0 | 0.1 | 0.1
", + "image_path": "a6319c7cc10d8b1be07d409b3c70c34b2c8566c8c9137a1231562af89e58c526.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "35" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 34 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 190, + 504, + 357 + ], + "blocks": [ + { + "bbox": [ + 104, + 125, + 504, + 181 + ], + "lines": [ + { + "bbox": [ + 104, + 125, + 504, + 181 + ], + "spans": [ + { + "bbox": [ + 104, + 125, + 504, + 181 + ], + "type": "text", + "content": "Table 13: Average character index of where the answer span begins in a generated response for each dataset and model pair for the zero-shot direct answer setting. We use these numbers as a proxy for the model following instructions (i.e. generating reasoning before an answer). Prompt modifications were made to ensure CoT prompts resulted in longer generations and direct answer prompts led to short generations." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 190, + 504, + 357 + ], + "lines": [ + { + "bbox": [ + 106, + 190, + 504, + 357 + ], + "spans": [ + { + "bbox": [ + 106, + 190, + 504, + 357 + ], + "type": "table", + "html": "
Zero-shot Direct Answer Span Location By Character Index
dataset | Llama 2 7b | Mistral 7b | Llama 3.1 8b | Llama 3.1 70b | Gemma 2 9b | Phi-3 Small 8k | Qwen 2 7b | Qwen 2 72b | GPT-4o Mini | GPT-4o | Claude-3 Haiku | Claude-3.5 Sonnet | Gemini 1.5 Flash
CommonsenseQA98278881088107788
StrategyQA444527444444464488424187
SiQA888888298886688
PiQA788888258884588
Winogrande89888898895488
Arc Easy98888898887788
Arc Challenge88888898887788
AGIEval LSAT LR2524242424242524432125252626
AGIEval LSAT AR2524242424242624482325252627
AGIEval LSAT RC2524242424242524311825252625
ContextHub Deductive L11919191920191919191920201920
ContextHub Deductive L21919191919191919191920201919
ContextHub Abductive L11919191920191919191920201919
ContextHub Abductive L21919191920191919191920201919
MuSR Minder Mysteries882788888886488
MuSR Team Allocations272219192723262288302088
MuSR Object Placements882788888887688
MMLU1918191920181818191919191920
MMLU Pro2019381921191920191920201919
GPQA1919191921191919191920201920
MATH3031282830303333282831292828
GSM8k2229302828372428282829282828
GSM8k-Hard95711219134020788888
Folio3988888311381656870
BigBench-Hard3922252126322926281928281016
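The span-location numbers in these tables can be reproduced, in spirit, by recording where the extracted answer begins in each generation. The sketch below reuses the illustrative ANSWER_RE regex from the extractor sketch earlier and is not the authors' code.

```python
# Character index at which the (illustrative) answer pattern first matches.
# Small values mean the model answered immediately, as desired for direct
# answer prompts; CoT completions push the span much later.
def answer_span_start(response: str):
    m = ANSWER_RE.search(response)
    return m.start() if m else None

def mean_span_start(responses) -> float:
    starts = [s for s in map(answer_span_start, responses) if s is not None]
    return sum(starts) / len(starts) if starts else float("nan")
```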
", + "image_path": "c4e923459f6217aa6b13c4e35c6231eb859eeb4e0beb0ba546cb86c3e215e28b.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 106, + 518, + 504, + 684 + ], + "blocks": [ + { + "bbox": [ + 104, + 453, + 504, + 510 + ], + "lines": [ + { + "bbox": [ + 104, + 453, + 504, + 510 + ], + "spans": [ + { + "bbox": [ + 104, + 453, + 504, + 510 + ], + "type": "text", + "content": "Table 14: Average character index of where the answer span begins in a generated response for each dataset and model pair for the zero-shot CoT setting. We use these numbers as a proxy for the model following instructions (i.e. generating reasoning before an answer). Prompt modifications were made to ensure CoT prompts resulted in longer generations and direct answer prompts led to short generations." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 518, + 504, + 684 + ], + "lines": [ + { + "bbox": [ + 106, + 518, + 504, + 684 + ], + "spans": [ + { + "bbox": [ + 106, + 518, + 504, + 684 + ], + "type": "table", + "html": "
Zero-shot CoT Answer Span Location By Character Index
dataset | Llama 2 7b | Mistral 7b | Llama 3.1 8b | Llama 3.1 70b | Gemma 2 9b | Phi-3 Small 8k | Qwen 2 7b | Qwen 2 72b | GPT-4o Mini | GPT-4o | Claude-3 Haiku | Claude-3.5 Sonnet | Gemini 1.5 Flash
CommonsenseQA441564845123723646657734189910866261103214165
StrategyQA726434996113126746036335869210337541158256195
SiQA56942384196523552847242084710946021016196169
PiQA6994558699142075324473646839355781092200150
Winogrande377324645694187326391298634750408889200173
Arc Easy6845811154131936761053435599012397891222340231
Arc Challenge76364411781316422596571387102012698281240372267
AGIEval LSAT LR205313241163167552468915607689499981561728906886
AGIEval LSAT AR1377179114222182712102718191264123011511202849817871
AGIEval LSAT RC1977103211031575739590117066097310791628786703709
ContextHub Deductive L1694368759711383327539402540580542556320254
ContextHub Deductive L28424721095990614442789585840758777655515503
ContextHub Abductive L1577461747879464440754638788879683594368325
ContextHub Abductive L28616001270122968657197685611151113894894601551
MuSR Murder Mysteries4951592195818471210124612411718196119651671175913491213
MuSR Team Allocations12121845229423101513143320212213256226981479185615961607
MuSR Object Placements917625135412666956419048191593153612101455616429
MMLU834512663622503277497407400461447409630413
MMLU Pro1371513788716640518954699926940590653660774
GPQA10347789179018065001018628541666486472981735
MATH7421118122211797486701189114511251153677675679698
GSM8k57263783471945352170964510481035708680541437
GSM8k-Hard916939102710695557661083105313501266594815605512
Folio72476514791379733668919488128515839071194934492
BigBench-Hard596230876861429349315443877973545863455346
", + "image_path": "989af41e1c80832f033f078ff46f63f0b983872addc5ffc5c0c7e90bfea2e9d1.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "36" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 35 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 107, + 193, + 504, + 354 + ], + "blocks": [ + { + "bbox": [ + 104, + 127, + 504, + 183 + ], + "lines": [ + { + "bbox": [ + 104, + 127, + 504, + 183 + ], + "spans": [ + { + "bbox": [ + 104, + 127, + 504, + 183 + ], + "type": "text", + "content": "Table 15: Average character index of where the answer span begins in a generated response for each dataset and model pair for the few-shot direct answer setting. We use these numbers as a proxy for the model following instructions (i.e. generating reasoning before an answer). Prompt modifications were made to ensure CoT prompts resulted in longer generations and direct answer prompts led to short generations." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 107, + 193, + 504, + 354 + ], + "lines": [ + { + "bbox": [ + 107, + 193, + 504, + 354 + ], + "spans": [ + { + "bbox": [ + 107, + 193, + 504, + 354 + ], + "type": "table", + "html": "
Few-shot Direct Answer Span Location By Character Index
dataset | Llama 2 7b | Mistral 7b | Llama 3.1 8b | Llama 3.1 70b | Gemma 2 9b | Phi-3 Small 8k | Qwen 2 7b | Qwen 2 72b | GPT-4o Mini | Gemini 1.5 Flash
CommonsenseQA8782788810888
AGIEval LSAT LR25242424242424243124
AGIEval LSAT AR25242424242424242724
AGIEval LSAT RC25242424242424242524
ContextHub Deductive L119191919191919191919
ContextHub Deductive L219191919191919191919
ContextHub Abductive L119191919191919191919
ContextHub Abductive L219191919191919191919
MuSR Murder Mysteries-18278888888
MuSR Team Allocations-12119192721272388
MuSR Object Placements-18278888888
MMLU19181919191818181919
MMLU Pro19193819202019191919
GPQA-1191919191919191919
MATH29362929283030412828
GSM8k22232322222322242728
", + "image_path": "f041762e1b1b8186aa50ecc657eda52ca9c55269424cae98517cd3081ef1ef11.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 107, + 521, + 504, + 681 + ], + "blocks": [ + { + "bbox": [ + 104, + 456, + 504, + 511 + ], + "lines": [ + { + "bbox": [ + 104, + 456, + 504, + 511 + ], + "spans": [ + { + "bbox": [ + 104, + 456, + 504, + 511 + ], + "type": "text", + "content": "Table 16: Average character index of where the answer span begins in a generated response for each dataset and model pair for the few-shot CoT setting. We use these numbers as a proxy for the model following instructions (i.e. generating reasoning before an answer). Prompt modifications were made to ensure CoT prompts resulted in longer generations and direct answer prompts led to short generations." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 107, + 521, + 504, + 681 + ], + "lines": [ + { + "bbox": [ + 107, + 521, + 504, + 681 + ], + "spans": [ + { + "bbox": [ + 107, + 521, + 504, + 681 + ], + "type": "table", + "html": "
Few-shot CoT Answer Span Location By Character Index
dataset | Llama 2 7b | Mistral 7b | Llama 3.1 8b | Llama 3.1 70b | Gemma 2 9b | Phi-3 Small 8k | Qwen 2 7b | Qwen 2 72b | GPT-4o Mini | Gemini 1.5 Flash
CommonsenseQA301195470921145192280174219158
AGIEval LSAT LR1037510464539437359530599894523
AGIEval LSAT AR1024124788676857310257508351033670
AGIEval LSAT RC7993781312061641112412051086266
ContextHub Deductive L1383386406376359376388364416366
ContextHub Deductive L2736767829822823855612807884809
ContextHub Abductive L1301386428450431413541447575379
ContextHub Abductive L2709586967754804784829821905815
MuSR Murder Mysteries-1128016931702122513381246171919741419
MuSR Team Allocations-1219520872160162817552181215626321841
MuSR Object Placements-1907110412137069196769631351853
MMLU282266333245265260267243392218
MMLU Pro429397424411516425541325681396
GPQA-1848782774615711662703670594
MATH63070558464074752910748481261553
GSM8k374332352352398372415341651314
", + "image_path": "eb5531d446ad6066758c182b7a8d1196c7c3548d2b180a7ba0af4c2d752f268a.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "37" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 36 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 107, + 133, + 504, + 213 + ], + "blocks": [ + { + "bbox": [ + 104, + 79, + 506, + 125 + ], + "lines": [ + { + "bbox": [ + 104, + 79, + 506, + 125 + ], + "spans": [ + { + "bbox": [ + 104, + 79, + 506, + 125 + ], + "type": "text", + "content": "Table 17: The top 3 slices benefiting the most from CoT across MMLU and MMLU Pro for Llama 3.1 8b and 70b. 6 out of 12 of these top slices directly contain \"math\" or \"mathematics.\" We dive deeper into each category subsequently and observe that the questions leading to improvements in the other categories are mathematical in nature as well." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 107, + 133, + 504, + 213 + ], + "lines": [ + { + "bbox": [ + 107, + 133, + 504, + 213 + ], + "spans": [ + { + "bbox": [ + 107, + 133, + 504, + 213 + ], + "type": "table", + "html": "
MMLU | MMLU Pro
Model | Subject | Direct (%) | CoT (%) | Err. Red. (%) | N | Subject | Direct (%) | CoT (%) | Err. Red. (%) | N
Llama 3.1 8b | elementary mathematics | 46.8 | 88.4 | 78.1 | 378 | math | 23.6 | 44.8 | 27.8 | 1350
Llama 3.1 8b | high_school mathematics | 39.6 | 71.5 | 52.8 | 270 | business | 29.4 | 45.6 | 23.0 | 789
Llama 3.1 8b | miscellaneous | 83.9 | 89.9 | 37.3 | 783 | physics | 27.9 | 41.4 | 18.8 | 1299
Llama 3.1 70b | elementary mathematics | 82.3 | 94.7 | 70.1 | 378 | math | 44.5 | 68.3 | 42.9 | 1351
Llama 3.1 70b | medical_genetics | 93.0 | 97.0 | 57.1 | 100 | business | 44.0 | 67.8 | 42.5 | 789
Llama 3.1 70b | high_school mathematics | 61.5 | 82.2 | 53.8 | 270 | chemistry | 40.5 | 64.0 | 39.6 | 1132
", + "image_path": "bd6f800abf29621ab441123c4bfa60d355e63a3ff1e1385c2ba36722b9a5b404.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 234, + 504, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 234, + 504, + 258 + ], + "spans": [ + { + "bbox": [ + 104, + 234, + 504, + 258 + ], + "type": "text", + "content": "level. In doing so, we can test our hypotheses with much finer granularity than possible by relying on subjective groupings into tasks and categories." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 269, + 504, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 269, + 504, + 346 + ], + "spans": [ + { + "bbox": [ + 104, + 269, + 504, + 346 + ], + "type": "text", + "content": "Breakdown by the presence of equations We aim to design an instance-level classifier to determine if CoT is expected to help on a question or not. That is, we want a function " + }, + { + "bbox": [ + 104, + 269, + 504, + 346 + ], + "type": "inline_equation", + "content": "g: \\mathbf{q} \\to \\{0,1\\}" + }, + { + "bbox": [ + 104, + 269, + 504, + 346 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 269, + 504, + 346 + ], + "type": "inline_equation", + "content": "g(\\mathbf{q})" + }, + { + "bbox": [ + 104, + 269, + 504, + 346 + ], + "type": "text", + "content": " returns 1 if " + }, + { + "bbox": [ + 104, + 269, + 504, + 346 + ], + "type": "inline_equation", + "content": "\\text{extract}(\\tilde{\\mathbf{y}}_{\\text{cot}}) = \\mathbf{y}^*" + }, + { + "bbox": [ + 104, + 269, + 504, + 346 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 269, + 504, + 346 + ], + "type": "inline_equation", + "content": "\\text{extract}(\\tilde{\\mathbf{y}}_{da}) \\neq \\mathbf{y}^*" + }, + { + "bbox": [ + 104, + 269, + 504, + 346 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 269, + 504, + 346 + ], + "type": "inline_equation", + "content": "\\mathbf{y}^*" + }, + { + "bbox": [ + 104, + 269, + 504, + 346 + ], + "type": "text", + "content": " is the gold answer to " + }, + { + "bbox": [ + 104, + 269, + 504, + 346 + ], + "type": "inline_equation", + "content": "\\mathbf{q}" + }, + { + "bbox": [ + 104, + 269, + 504, + 346 + ], + "type": "text", + "content": ". We explored different forms of " + }, + { + "bbox": [ + 104, + 269, + 504, + 346 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 104, + 269, + 504, + 346 + ], + "type": "text", + "content": "; however, we ultimately found it most effective to use a classifier " + }, + { + "bbox": [ + 104, + 269, + 504, + 346 + ], + "type": "inline_equation", + "content": "g: (\\mathbf{q}, \\tilde{\\mathbf{y}}_{\\text{cot}}) \\to \\{0,1\\}" + }, + { + "bbox": [ + 104, + 269, + 504, + 346 + ], + "type": "text", + "content": " which also consults the chain-of-thought produced by the model. This allows us to featurize how the LM solves the problem, particularly whether it uses symbolic reasoning or not." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 351, + 504, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 351, + 504, + 386 + ], + "spans": [ + { + "bbox": [ + 104, + 351, + 504, + 386 + ], + "type": "text", + "content": "We find that " + }, + { + "bbox": [ + 104, + 351, + 504, + 386 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 104, + 351, + 504, + 386 + ], + "type": "text", + "content": " can be implemented with a single feature: does " + }, + { + "bbox": [ + 104, + 351, + 504, + 386 + ], + "type": "inline_equation", + "content": "\\mathbf{q}" + }, + { + "bbox": [ + 104, + 351, + 504, + 386 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 104, + 351, + 504, + 386 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{y}}_{\\mathrm{cot}}" + }, + { + "bbox": [ + 104, + 351, + 504, + 386 + ], + "type": "text", + "content": " contain a “=”? The “=” token very strongly indicates the presence of equations in the problem or its solution, which turn out to be a strong hallmark of symbolic reasoning." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 390, + 505, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 390, + 505, + 479 + ], + "spans": [ + { + "bbox": [ + 104, + 390, + 505, + 479 + ], + "type": "text", + "content": "We plot the overall CoT delta (performance of CoT minus the performance of direct answer) for both MMLU and MMLU Pro across multiple models between two bins according to this classifier " + }, + { + "bbox": [ + 104, + 390, + 505, + 479 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 104, + 390, + 505, + 479 + ], + "type": "text", + "content": ", labeled as \"With =\" and \"Without =\", in Figure 4. We also report the amount of performance gain explained by questions having an \" ==\" vs. not in Appendix G.1. We find that the majority of the performance gain from CoT on MMLU and MMLU Pro comes from questions that have an \" ==\" in the question or generated responses. Because \" ==\" are usually found in math problems, we equate this to CoT primarily benefiting MMLU and MMLU Pro on the math-related questions with very little to no gain (depending on the model) for non-math questions." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 492, + 400, + 504 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 492, + 400, + 504 + ], + "spans": [ + { + "bbox": [ + 104, + 492, + 400, + 504 + ], + "type": "text", + "content": "G.1 PERFORMANCE IMPACTS OF “=” ON MMLU AND MMLU PRO" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 513, + 504, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 513, + 504, + 536 + ], + "spans": [ + { + "bbox": [ + 104, + 513, + 504, + 536 + ], + "type": "text", + "content": "Tables 18 and 19 show the amount of total improvement from using CoT over direct prompting that can be explained by the presence of “=” on MMLU and MMLU Pro over multiple models." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 552, + 477, + 564 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 552, + 477, + 564 + ], + "spans": [ + { + "bbox": [ + 104, + 552, + 477, + 564 + ], + "type": "text", + "content": "H FULL RESULTS OF EVALUATIONS ON FORMAL REASONING DATASETS" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 576, + 504, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 576, + 504, + 654 + ], + "spans": [ + { + "bbox": [ + 104, + 576, + 504, + 654 + ], + "type": "text", + "content": "As discussed in Section 5, we include detailed evaluation results of few-shot direct answer, few-shot CoT, direct answer solver, CoT solver, and tool-augmented prompting in Table 20. The unparseable rate stands for the rate of unparseable model responses that either fail to pass our answer extraction parser (for all methods except tool-augmented prompting) or fail to be executed by symbolic solvers. For FOLIO and ContextHub, we compute the accuracy by making a random guess for the unparseable responses; for GSM8K and GSM8K-Hard, we consider the unparseable responses as incorrect." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 659, + 504, + 695 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 659, + 504, + 695 + ], + "spans": [ + { + "bbox": [ + 104, + 659, + 504, + 695 + ], + "type": "text", + "content": "We note that all models have a low unparseable rate " + }, + { + "bbox": [ + 104, + 659, + 504, + 695 + ], + "type": "inline_equation", + "content": "(< 10\\%)" + }, + { + "bbox": [ + 104, + 659, + 504, + 695 + ], + "type": "text", + "content": " for all methods except tool-augmented prompting. By manually inspecting the outputs, we observe that the high unparseable rate for some models with tool-augmented prompting is caused by these models generating Python programs or" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 700, + 504, + 733 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 700, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 700, + 504, + 733 + ], + "type": "text", + "content": "6We explored implementing " + }, + { + "bbox": [ + 104, + 700, + 504, + 733 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 104, + 700, + 504, + 733 + ], + "type": "text", + "content": " with a logistic regression classifier with tfidf features over the " + }, + { + "bbox": [ + 104, + 700, + 504, + 733 + ], + "type": "inline_equation", + "content": "(\\mathbf{q},\\tilde{\\mathbf{y}}_{\\mathrm{cot}})" + }, + { + "bbox": [ + 104, + 700, + 504, + 733 + ], + "type": "text", + "content": " pairs, trained over a subset of the data from MMLU and MMLU Pro. This classifier actually allowed us to discover the “=” feature, but its accuracy did not exceed the accuracy of that single feature." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "38" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 37 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 107, + 177, + 504, + 335 + ], + "blocks": [ + { + "bbox": [ + 105, + 145, + 504, + 167 + ], + "lines": [ + { + "bbox": [ + 105, + 145, + 504, + 167 + ], + "spans": [ + { + "bbox": [ + 105, + 145, + 504, + 167 + ], + "type": "text", + "content": "Table 18: Total CoT deltas on MMLU broken down by the total gain from questions and responses with an “=” vs. without an “=”" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 107, + 177, + 504, + 335 + ], + "lines": [ + { + "bbox": [ + 107, + 177, + 504, + 335 + ], + "spans": [ + { + "bbox": [ + 107, + 177, + 504, + 335 + ], + "type": "table", + "html": "
Model | Total CoT Delta | CoT delta w/ = | CoT delta w/o = | Perf. Gain w/ = | Fraction of N w/ =
Llama 2 7b | 6.0 | 0.6 | 5.4 | 9.8% | 10.9%
Mistral 7b | 4.1 | 1.2 | 2.9 | 28.6% | 9.8%
Llama 3.1 8b | 5.5 | 2.9 | 2.6 | 52.9% | 9.6%
Llama 3.1 70b | 1.9 | 1.8 | 0.1 | 94.0% | 10.6%
Gemma 2 9b | 2.6 | 2.0 | 0.6 | 78.5% | 10.0%
Phi-3 Small 8k | 3.1 | 1.5 | 1.7 | 47.4% | 8.3%
Qwen 2 7b | 2.5 | 3.0 | -0.5 | 100.0% | 9.8%
Qwen 2 72b | 3.5 | 2.4 | 1.1 | 67.8% | 9.6%
GPT-4o Mini | 5.2 | 3.5 | 1.7 | 66.9% | 10.5%
GPT-4o | 4.2 | 2.4 | 1.8 | 57.6% | 10.3%
Claude-3 Haiku | 3.7 | 2.4 | 1.3 | 64.4% | 9.3%
Claude-3.5 Sonnet | 3.2 | 2.3 | 0.9 | 72.1% | 10.7%
Gemini 1.5 Flash | 3.0 | 1.7 | 1.2 | 59.0% | 10.1%
Gemini 1.5 Pro | 1.9 | 1.0 | 0.9 | 51.9% | 9.6%
", + "image_path": "6febf3ae283fbde75099e8e87cdca9a4231bae7768d586d6e567998e8662d802.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 107, + 505, + 504, + 663 + ], + "blocks": [ + { + "bbox": [ + 105, + 473, + 504, + 495 + ], + "lines": [ + { + "bbox": [ + 105, + 473, + 504, + 495 + ], + "spans": [ + { + "bbox": [ + 105, + 473, + 504, + 495 + ], + "type": "text", + "content": "Table 19: Total CoT deltas on MMLU Pro broken down by the total gain from questions and responses with an “=” vs. without an “=”" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 107, + 505, + 504, + 663 + ], + "lines": [ + { + "bbox": [ + 107, + 505, + 504, + 663 + ], + "spans": [ + { + "bbox": [ + 107, + 505, + 504, + 663 + ], + "type": "table", + "html": "
Model | Total CoT Delta | CoT delta w/ = | CoT delta w/o = | Perf. Gain w/ = | Fraction of N w/ =
Llama 2 7b | 1.6 | 1.3 | 0.3 | 79.6% | 43.6%
Mistral 7b | 3.8 | 1.9 | 1.9 | 50.7% | 41.8%
Llama 3.1 8b | 12.4 | 10.0 | 2.4 | 80.8% | 35.2%
Llama 3.1 70b | 11.4 | 11.1 | 0.3 | 97.6% | 39.6%
Gemma 2 9b | 7.6 | 7.4 | 0.2 | 97.9% | 40.2%
Phi-3 Small 8k | 11.6 | 9.9 | 1.7 | 85.7% | 42.7%
Qwen 2 7b | 10.0 | 8.9 | 1.1 | 88.6% | 41.6%
Qwen 2 72b | 19.0 | 16.1 | 2.9 | 84.7% | 41.4%
GPT-4o Mini | 20.6 | 18.4 | 2.3 | 89.0% | 44.0%
GPT-4o | 17.7 | 17.1 | 0.6 | 96.7% | 44.1%
Claude-3 Haiku | 8.7 | 7.8 | 0.9 | 90.1% | 42.0%
Claude-3.5 Sonnet | 16.2 | 14.8 | 1.3 | 91.9% | 43.4%
Gemini 1.5 Flash | 12.9 | 11.8 | 1.1 | 91.3% | 42.3%
Gemini 1.5 Pro | 10.0 | 8.6 | 1.4 | 85.7% | 41.8%
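The single-feature classifier behind Tables 18 and 19, which predicts that CoT helps iff an "=" appears in the question or in the generated chain of thought, is simple enough to sketch. The record fields below are hypothetical, not the paper's data format.

```python
# Sketch of the "=" feature and the per-bin CoT delta decomposition reported
# in Tables 18 and 19. Each record is assumed to hold the question text, the
# CoT generation, and 0/1 correctness under CoT and direct-answer prompting.
def g(question: str, cot_response: str) -> int:
    return int("=" in question or "=" in cot_response)

def cot_delta_by_bin(records) -> dict:
    deltas = {0: 0.0, 1: 0.0}
    for r in records:
        deltas[g(r["question"], r["cot_response"])] += r["cot_correct"] - r["da_correct"]
    n = len(records)
    # Contributions to the overall CoT delta; the two bins sum to the total.
    return {"with =": 100.0 * deltas[1] / n, "without =": 100.0 * deltas[0] / n}
```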
", + "image_path": "138c71e9121096b8b253e0e24407f383622f4841a8cbdaa5f8eea499d47f6c07.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "39" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 38 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 79, + 504, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 79, + 504, + 157 + ], + "spans": [ + { + "bbox": [ + 104, + 79, + 504, + 157 + ], + "type": "text", + "content": "Table 20: Performance and unparseable rates for few-shot direct answer, few-shot CoT, Plan + Direct Solver, Plan + CoT Solver, and Plan + Tool Solver Solver. \"Acc.\" stands for accuracy and \"% Unp.\" stands for the rate of unparseable model responses that either fail to pass our answer extraction parser (for all methods except Plan + Tool Solver prompting) or fail to be executed by symbolic solvers. For FOLIO and ContextHub, we compute the accuracy by making a random guess for the unparseable responses; for GSM8K and GSM8K-Hard, we consider the unparseable responses as incorrect." + } + ] + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 110, + 166, + 501, + 395 + ], + "blocks": [ + { + "bbox": [ + 110, + 166, + 501, + 395 + ], + "lines": [ + { + "bbox": [ + 110, + 166, + 501, + 395 + ], + "spans": [ + { + "bbox": [ + 110, + 166, + 501, + 395 + ], + "type": "table", + "html": "
Dataset | Method | Mistral 7b | Llama 3.1 8b | Llama 3.1 70b | GPT-4o Mini
 | | Acc. | % Unp. | Acc. | % Unp. | Acc. | % Unp. | Acc. | % Unp.
GSM8K | Direct Answer | 12.5 | 0.1 | 20.1 | 0.5 | 39.1 | 0.0 | 32.8 | 0.0
GSM8K | CoT | 56.2 | 1.4 | 86.4 | 1.0 | 96.1 | 0.1 | 94.2 | 0.1
GSM8K | Plan + CoT Solver | 45.0 | 1.0 | 78.7 | 0.4 | 94.7 | 0.0 | 92.0 | 0.1
GSM8K | Plan + Direct Solver | 10.6 | 0.1 | 19.6 | 0.1 | 42.2 | 0.0 | 39.3 | 0.0
GSM8K | Plan + Tool Solver | 59.8 | 8.6 | 80.3 | 1.3 | 94.4 | 0.4 | 90.5 | 1.5
GSM8K-Hard | Direct Answer | 2.9 | 0.7 | 4.4 | 0.6 | 12.8 | 0.7 | 12.3 | 7.6
GSM8K-Hard | CoT | 20.3 | 5.0 | 32.4 | 9.6 | 47.8 | 4.4 | 52.2 | 0.5
GSM8K-Hard | Plan + CoT Solver | 18.7 | 2.6 | 32.4 | 1.3 | 49.7 | 0.6 | 51.5 | 0.3
GSM8K-Hard | Plan + Direct Solver | 3.0 | 0.5 | 5.5 | 0.8 | 15.8 | 0.1 | 17.4 | 0.3
GSM8K-Hard | Plan + Tool Solver | 44.2 | 8.9 | 57.9 | 1.2 | 68.0 | 0.5 | 70.4 | 1.4
ContextHub Deductive L1 | Direct Answer | 59.2 | 2.8 | 23.0 | 0.0 | 50.0 | 0.0 | 44.3 | 0.0
ContextHub Deductive L1 | CoT | 46.2 | 0.2 | 73.0 | 0.2 | 67.5 | 0.0 | 59.2 | 0.0
ContextHub Deductive L1 | Plan + CoT Solver | 49.5 | 0.0 | 64.8 | 0.0 | 65.5 | 0.0 | 63.2 | 0.0
ContextHub Deductive L1 | Plan + Direct Solver | 45.8 | 3.0 | 55.8 | 0.0 | 53.5 | 0.0 | 56.2 | 0.0
ContextHub Deductive L1 | Plan + Tool Solver | 68.8 | 27.8 | 84.2 | 11.8 | 91.7 | 9.8 | 90.7 | 7.8
ContextHub Abductive L1 | Direct Answer | 21.7 | 2.8 | 36.1 | 0.0 | 58.9 | 0.0 | 59.2 | 0.0
ContextHub Abductive L1 | CoT | 23.9 | 0.0 | 40.0 | 0.0 | 62.2 | 0.0 | 76.9 | 0.0
ContextHub Abductive L1 | Plan + CoT Solver | 38.3 | 0.0 | 42.5 | 0.0 | 65.6 | 0.0 | 74.2 | 0.0
ContextHub Abductive L1 | Plan + Direct Solver | 46.9 | 3.9 | 33.3 | 0.3 | 63.1 | 0.0 | 61.7 | 0.0
ContextHub Abductive L1 | Plan + Tool Solver | 59.2 | 35.8 | 70.8 | 9.7 | 73.9 | 4.2 | 74.7 | 10.3
FOLIO | Direct Answer | 56.2 | 12.3 | 59.6 | 0.0 | 69.5 | 0.0 | 64.0 | 0.0
FOLIO | CoT | 53.7 | 1.5 | 56.7 | 2.5 | 72.4 | 2.0 | 70.4 | 0.0
FOLIO | Plan + CoT Solver | 53.7 | 0.0 | 55.7 | 0.0 | 73.9 | 0.5 | 70.4 | 0.0
FOLIO | Plan + Direct Solver | 52.7 | 0.0 | 54.2 | 0.0 | 72.9 | 0.0 | 63.5 | 0.0
FOLIO | Plan + Tool Solver | 48.8 | 46.8 | 54.2 | 28.6 | 70.0 | 16.7 | 62.6 | 25.1
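The caption's scoring rule for unparseable responses can be written out explicitly. This is a sketch under the stated assumptions (random-guess credit on the fixed-label datasets, zero credit on the math datasets), not the evaluation code itself; the function and argument names are illustrative.

```python
from typing import Optional

# Expected accuracy when unparseable responses are scored per the Table 20
# caption: on FOLIO/ContextHub each one receives 1/num_labels credit (a
# random guess over the label set); on GSM8K/GSM8K-Hard (num_labels=None)
# an unparseable response scores 0.
def adjusted_accuracy(parsed_correct: int, unparseable: int, total: int,
                      num_labels: Optional[int]) -> float:
    if num_labels is None:
        return parsed_correct / total
    return (parsed_correct + unparseable / num_labels) / total
```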
", + "image_path": "96e75529ee65ce957f59c29c1af838549364a5b33f15b4fab321ee40fdc96cd9.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 415, + 504, + 461 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 415, + 504, + 461 + ], + "spans": [ + { + "bbox": [ + 104, + 415, + 504, + 461 + ], + "type": "text", + "content": "formal specifications that fail to follow the format of the formal language (Python or z3) and that lead to execution errors. Such an issue is particularly severe for the smaller models. However, we note that despite the high unparseable rate, the overall accuracy of these models with tool augmentation is still on par with or outperforms other methods." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 475, + 276, + 488 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 475, + 276, + 488 + ], + "spans": [ + { + "bbox": [ + 105, + 475, + 276, + 488 + ], + "type": "text", + "content": "I DISCUSSION OF LIMITATIONS" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 500, + 248, + 511 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 500, + 248, + 511 + ], + "spans": [ + { + "bbox": [ + 105, + 500, + 248, + 511 + ], + "type": "text", + "content": "I.1 LONG HORIZON PLANNING" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 521, + 504, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 521, + 504, + 654 + ], + "spans": [ + { + "bbox": [ + 104, + 521, + 504, + 654 + ], + "type": "text", + "content": "One set of tasks where symbolic reasoning helps substantially that our experiments haven't covered as thoroughly (with the exception of BiGGen-Bench) is long-horizon planning (Valmeekam et al., 2023; Xie et al., 2024; Gundawar et al., 2024; Valmeekam et al., 2024). There are two reasons we don't treat it here. First, we are primarily interested in tasks that are conveyed in language, and we see less complex planning in language-only tasks. Second, there has already been a large debate on the effectiveness of CoT, both pro (Huang et al., 2022; Hu et al., 2023) and against (Valmeekam et al., 2023; Kambhampati, 2024; Kambhampati et al., 2024b; Stechly et al., 2024a; Guan et al., 2024; Verma et al., 2024; Gundawar et al., 2024; Stechly et al., 2024b) using CoT and its derivatives like tree-of-thought (Yao et al., 2023; Kang et al., 2024), that has resulted in complex systems to help solve planning problems better. While story generation and interpretation involve elements of planning with natural language (Peng et al., 2022; Karpinska et al., 2024), such tasks are not conventionally formalized and benchmarked as planning and reasoning." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 667, + 244, + 678 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 667, + 244, + 678 + ], + "spans": [ + { + "bbox": [ + 105, + 667, + 244, + 678 + ], + "type": "text", + "content": "I.2 DATASET CONTAMINATION" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "content": "One limitation of our study is the presence of possible data contamination: it is unknown which benchmarks may have been explicitly pre-trained on by language models. 
If a model had memorized answers to benchmark questions, we would expect direct answering to close some of the gap with CoT, as the model can just reproduce a known answer rather than deriving it from scratch. We argue" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "40" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 39 + }, + { + "para_blocks": [ + { + "bbox": [ + 107, + 82, + 504, + 159 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 82, + 504, + 159 + ], + "spans": [ + { + "bbox": [ + 107, + 82, + 504, + 159 + ], + "type": "text", + "content": "there are four reasons that our general conclusions are still trustworthy. First, we use a range of language model scales, including small models that have less capacity to memorize. Second, datasets with poor direct answering performance like GSM8K-Hard are unlikely to have been substantially memorized. Third, the inclusion of recent datasets such as MuSR (Sprague et al., 2024) and BiGGen Bench (Kim et al., 2024) helps to defray this risk. Fourth, our survey of the literature includes papers that were submitted to conferences in 2023, representing a range of older LLMs trained at various times." + } + ] + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 292, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 292, + 37 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 292, + 37 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 310, + 760 + ], + "type": "text", + "content": "41" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 40 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 228, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 228, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 228, + 94 + ], + "type": "text", + "content": "J EXAMPLE PROMPTS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 198, + 506, + 222 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 198, + 506, + 222 + ], + "spans": [ + { + "bbox": [ + 104, + 198, + 506, + 222 + ], + "type": "text", + "content": "We will release all prompts and model responses on our Huggingface repo. We list a few prompt response pairs here in this section." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 121, + 366, + 369, + 379 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 366, + 369, + 379 + ], + "spans": [ + { + "bbox": [ + 121, + 366, + 369, + 379 + ], + "type": "text", + "content": "AGIEval LSAT AR zero-shot CoT prompt for Llama 3.1 70B" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 121, + 389, + 324, + 402 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 389, + 324, + 402 + ], + "spans": [ + { + "bbox": [ + 121, + 389, + 324, + 402 + ], + "type": "text", + "content": "<|start_header_id|>user<|end_header_id|>" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 119, + 411, + 482, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 411, + 482, + 456 + ], + "spans": [ + { + "bbox": [ + 119, + 411, + 482, + 456 + ], + "type": "text", + "content": "Explain your reasoning step-by-step for each question before answering. Give your final answer in the format \\The answer is therefore " + }, + { + "bbox": [ + 119, + 411, + 482, + 456 + ], + "type": "inline_equation", + "content": "" + }, + { + "bbox": [ + 119, + 411, + 482, + 456 + ], + "type": "text", + "content": "\". Failure to comply with the answer formatting will result in no credit." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 120, + 456, + 484, + 698 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 456, + 484, + 698 + ], + "spans": [ + { + "bbox": [ + 120, + 456, + 484, + 698 + ], + "type": "text", + "content": "Of the eight students\\u2014George, Helen, Irving, Kyle, Lenore, Nina, Olivia, and Robert\\u2014in a seminar, exactly six will give individual oral reports during three consecutive days\\u2014Monday, Tuesday, and Wednesday. Exactly two reports will be given each day\\u2014one in the morning and one in the afternoon\\u2014according to the following conditions: Tuesday is the only day on which George can give a report. Neither Olivia nor Robert can give an afternoon report. If Nina gives a report, then on the next day Helen and Irving must both give reports, unless Nina's report is given on Wednesday.Q: Which one of the following could be the schedule of the students' reports? Answer Choices: (A)Mon. morning: Helen; Mon. afternoon: Robert Tues. morning: Olivia; Tues. afternoon: Irving Wed. morning: Lenore; Wed. afternoon: Kyle (B)Mon. morning: Irving; Mon. afternoon: Olivia Tues. morning: Helen; Tues. afternoon: Kyle Wed. morning: Nina; Wed. afternoon: Lenore (C)Mon. morning: Lenore; Mon. afternoon: Helen Tues. morning: George; Tues. afternoon: Kyle Wed. morning: Robert; Wed. afternoon: Irving (D)Mon. morning: Nina; Mon. afternoon: Helen Tues. morning: Robert; Tues. afternoon: Irving Wed. morning: Olivia; Wed. afternoon: Lenore (E)Mon. morning: Olivia; Mon. afternoon: Nina Tues. morning: Irving; Tues. afternoon: Helen Wed. morning: Kyle; Wed. 
afternoon: George Let's think step by step.<|eot_id|><|start_header_id|>assistant<|end_header_id|>" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 751, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 312, + 761 + ], + "type": "text", + "content": "42" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 41 + }, + { + "para_blocks": [ + { + "bbox": [ + 121, + 83, + 375, + 95 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 83, + 375, + 95 + ], + "spans": [ + { + "bbox": [ + 121, + 83, + 375, + 95 + ], + "type": "text", + "content": "AGIEval LSAT AR zero-shot CoT response for Llama 3.1 70B" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 121, + 106, + 489, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 106, + 489, + 555 + ], + "spans": [ + { + "bbox": [ + 121, + 106, + 489, + 555 + ], + "type": "text", + "content": "To determine which one of the following could be the schedule of the students' reports, let's analyze each answer choice in light of the given conditions. First, let's recall the conditions: 1. Exactly six students will give reports over three days. 2. Tuesday is the only day George can give a report. 3. Neither Olivia nor Robert can give an afternoon report. 4. If Nina gives a report, then on the next day Helen and Irving must both give reports, unless Nina's report is given on Wednesday. Let's evaluate each answer choice: (A) Mon. morning: Helen; Mon. afternoon: Robert Tues. morning: Olivia; Tues. afternoon: Irving Wed. morning: Lenore; Wed. afternoon: Kyle - This choice does not violate any conditions directly. However, we need to check if it aligns with all the rules, especially considering the Nina rule, which isn't directly applicable here since Nina isn't scheduled. George's requirement is also not met since George isn't scheduled on Tuesday. (B) Mon. morning: Irving; Mon. afternoon: Olivia Tues. morning: Helen; Tues. afternoon: Kyle Wed. morning: Nina; Wed. afternoon: Lenore - This choice violates the condition that if Nina gives a report, then on the next day Helen and Irving must both give reports, unless Nina's report is given on Wednesday. Since Nina's report is on Wednesday, this condition is technically not violated, but we need to ensure all other conditions are met. George is not scheduled on Tuesday, which is a requirement. (C) Mon. morning: Lenore; Mon. afternoon: Helen Tues. morning: George; Tues. afternoon: Kyle Wed. morning: Robert; Wed. afternoon: Irving - This choice meets the condition that George gives a report on Tuesday. It also does not violate any other conditions directly. However, we need to ensure that the Nina rule isn't applicable or is met, which in this case, it's not applicable since Nina isn't scheduled. (D) Mon. morning: Nina; Mon. afternoon: Helen Tues. morning: Robert; Tues. afternoon: Irving Wed. morning: Olivia; Wed. afternoon: Lenore - This choice violates the condition that if Nina gives a report, then on the next day Helen and Irving must both give reports. 
Here, Helen is already scheduled on the same day as Nina, and while Irving is scheduled the next day, the condition specifies both Helen and Irving must give reports the next day, which is not the case since Helen's report is on the same day as Nina's. (E) Mon. morning: Olivia; Mon. afternoon: Nina Tues. morning: Irving; Tues. afternoon: Helen Wed. morning: Kyle; Wed. afternoon: George - This choice violates the condition that Tuesday is the only day George can give a report, as George is scheduled on Wednesday. Given the analysis, the only choice that does not directly violate any of the given conditions and meets the specific requirements (especially considering the placement of George and the handling of the Nina rule where applicable) is: The answer is therefore (C)." + } + ] + } + ], + "index": 2 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "43" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 42 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 120, + 105, + 489, + 415 + ], + "blocks": [ + { + "bbox": [ + 121, + 83, + 320, + 95 + ], + "lines": [ + { + "bbox": [ + 121, + 83, + 320, + 95 + ], + "spans": [ + { + "bbox": [ + 121, + 83, + 320, + 95 + ], + "type": "text", + "content": "MATH zero-shot CoT prompt for Llama 3.1 70B" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 120, + 105, + 489, + 415 + ], + "lines": [ + { + "bbox": [ + 120, + 105, + 489, + 415 + ], + "spans": [ + { + "bbox": [ + 120, + 105, + 489, + 415 + ], + "type": "text", + "content": "<|begin_of_text|><|start_header_id|>user<|end_header_id|> \nSolve the following math problem efficiently and clearly: \n- For simple problems (2 steps or fewer):\\nProvide a concise solution with minimal explanation. \n- For complex problems (3 steps or more): \nUse this step-by-step format:\\n## Step 1: [Concise description]\\n[Brief explanation and calculations] \n## Step 2: [Concise description] \n[Brief explanation and calculations] \n... \nRegardless of the approach, always conclude with: \nTherefore, the final answer is: " + }, + { + "bbox": [ + 120, + 105, + 489, + 415 + ], + "type": "inline_equation", + "content": "\\boxed{answer}" + }, + { + "bbox": [ + 120, + 105, + 489, + 415 + ], + "type": "text", + "content": ". I hope it is correct. \nWhere [answer] is just the final number or expression that solves the problem. \nProblem: Chris labels every lattice point in the coordinate plane with the square of the distance from the point to the origin (a lattice point is a point such that both of its coordinates are integers). How many times does he label a point with the number $25$?<|eot_id|><|start_header_id|>assistant" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "code_body" + } + ], + "index": 2, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "type": "code", + "bbox": [ + 120, + 543, + 489, + 720 + ], + "blocks": [ + { + "bbox": [ + 121, + 520, + 326, + 532 + ], + "lines": [ + { + "bbox": [ + 121, + 520, + 326, + 532 + ], + "spans": [ + { + "bbox": [ + 121, + 520, + 326, + 532 + ], + "type": "text", + "content": "MATH zero-shot CoT response for Llama 3.1 70B" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 120, + 543, + 489, + 720 + ], + "lines": [ + { + "bbox": [ + 120, + 543, + 489, + 720 + ], + "spans": [ + { + "bbox": [ + 120, + 543, + 489, + 720 + ], + "type": "text", + "content": "## Step 1: Understand the problem Chris labels each lattice point in the coordinate plane with the square of the distance from the point to the origin. We need to find how many times he labels a point with the number 25. ## Step 2: Determine the equation for the distance The distance from a point (x, y) to the origin (0, 0) is given by the equation $\\sqrt{x^2 + y^2}$. Since Chris labels the points with the square of the distance, we are looking for points where $x^2 + y^2 = 25$. ## Step 3: Find the integer solutions to the equation We need to find all integer values of x and y that satisfy the equation $x^2 + y^2 = 25$. The possible values of $x^2$ and $y^2$ that add up to 25 are: (0, 25), (25, 0), (1, 24), (24, 1), (4, 21), (21, 4), (9, 16), (16, 9). ## Step 4: Count the number of solutions However, we need to consider that some of these pairs have negative counterparts, and we should also count those. For example, (-0, 5) and (0, -5) are also solutions, as well as (-5, 0) and (5, 0). Similarly, (-3, 4), (3, -4), (-4, 3), and (4, -3) are also solutions. So, we have a total of 12 solutions. The final answer is: $\\boxed{12}$."
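A quick, independent check of the final answer in the transcript above (our illustration, not part of the paper's appendix): although the recorded response's intermediate pair list includes non-square pairs, its final count of 12 is correct, as brute-force enumeration confirms.

```python
# Brute-force check of the transcript's final answer: count lattice points with
# x^2 + y^2 = 25. Any solution has |x|, |y| <= 5, so a small window suffices.
points = [(x, y) for x in range(-5, 6) for y in range(-5, 6) if x * x + y * y == 25]
print(sorted(points))  # (0, ±5), (±5, 0), (±3, ±4), (±4, ±3)
print(len(points))     # -> 12
```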
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "code_body" + } + ], + "index": 4, + "sub_type": "code", + "guess_lang": "txt" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "text", + "content": "44" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 43 + }, + { + "para_blocks": [ + { + "bbox": [ + 121, + 83, + 400, + 95 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 83, + 400, + 95 + ], + "spans": [ + { + "bbox": [ + 121, + 83, + 400, + 95 + ], + "type": "text", + "content": "CommonsenseQA zero-shot direct answer prompt for Llama 3.1 70B" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 121, + 106, + 334, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 106, + 334, + 118 + ], + "spans": [ + { + "bbox": [ + 121, + 106, + 334, + 118 + ], + "type": "text", + "content": "<|start_header_id|>system<|end_header_id|>" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 121, + 128, + 483, + 183 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 128, + 483, + 183 + ], + "spans": [ + { + "bbox": [ + 121, + 128, + 483, + 183 + ], + "type": "text", + "content": "You are a helpful AI assistant that will answer reasoning questions. You will always say at the end \\\"Answer: \\\". You must only pick one answer and you must end your response with \\\"Answer: \\\" every time!<|eot_id>|<|start_header_id|>user<|end_header_id|>" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 120, + 193, + 473, + 216 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 193, + 473, + 216 + ], + "spans": [ + { + "bbox": [ + 120, + 193, + 473, + 216 + ], + "type": "text", + "content": "Question: Where would you find magazines along side many other printed works?" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 121, + 227, + 218, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 227, + 218, + 293 + ], + "spans": [ + { + "bbox": [ + 121, + 227, + 218, + 293 + ], + "type": "text", + "content": "Answer Choices: \n(A) doctor \n(B) bookstore \n(C) market \n(D) train station \n(E) mortuary" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 120, + 303, + 487, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 303, + 487, + 348 + ], + "spans": [ + { + "bbox": [ + 120, + 303, + 487, + 348 + ], + "type": "text", + "content": "Only write the answer. Write the answer in the following format: " + }, + { + "bbox": [ + 120, + 303, + 487, + 348 + ], + "type": "inline_equation", + "content": "\"Answer:\"Your answer>" + }, + { + "bbox": [ + 120, + 303, + 487, + 348 + ], + "type": "text", + "content": "\" . You must always give an answer. 
You may only pick one answer choice, if you think multiple are correct only pick the one you think is best.<|eot_id|><|start_header_id|>assistant<|end_header_id|>" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 121, + 387, + 405, + 400 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 387, + 405, + 400 + ], + "spans": [ + { + "bbox": [ + 121, + 387, + 405, + 400 + ], + "type": "text", + "content": "CommonsenseQA zero-shot direct answer response for Llama 3.1 70B" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 122, + 411, + 129, + 419 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 411, + 129, + 419 + ], + "spans": [ + { + "bbox": [ + 122, + 411, + 129, + 419 + ], + "type": "text", + "content": "B" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "45" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 44 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2025/To Code or Not To Code_ Exploring Impact of Code in Pre-training/e6b439cb-3b05-45ee-8c52-561b8f255560_content_list.json b/2025/To Code or Not To Code_ Exploring Impact of Code in Pre-training/e6b439cb-3b05-45ee-8c52-561b8f255560_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..d85319d7b323deda492cffd76d79baace5360aba --- /dev/null +++ b/2025/To Code or Not To Code_ Exploring Impact of Code in Pre-training/e6b439cb-3b05-45ee-8c52-561b8f255560_content_list.json @@ -0,0 +1,2522 @@ +[ + { + "type": "text", + "text": "TO CODE, OR NOT TO CODE? EXPLORING IMPACT OF CODE IN PRE-TRAINING", + "text_level": 1, + "bbox": [ + 171, + 98, + 756, + 148 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Viraat Aryabumi, Yixuan Su, Raymond Ma, Adrien Morisot, Ivan Zhang, Acyr Locatelli, Marzieh Fadaee, Ahmet Üstün, Sara Hooker {viraat, ahmetustun, sarahooker}@cohere.com", + "bbox": [ + 179, + 169, + 702, + 215 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ABSTRACT", + "text_level": 1, + "bbox": [ + 450, + 250, + 547, + 265 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Including code in the pre-training data mixture, even for models not specifically designed for code, has become a common practice in LLMs pre-training. While there has been anecdotal consensus among practitioners that code data plays a vital role in general LLMs' performance, there is only limited work analyzing the precise impact of code on non-code tasks. In this work, we systematically investigate the impact of code data on general performance. We ask \"what is the impact of code data used in pre-training on a large variety of downstream tasks beyond code generation\". We conduct extensive ablations and evaluate across a broad range of natural language reasoning tasks, world knowledge tasks, code benchmarks, and LLM-as-a-judge win-rates for models with sizes ranging from 470M to 2.8B parameters. 
Across settings, we find consistent results that code is a critical building block for generalization far beyond coding tasks and improvements to code quality have an outsized impact across all tasks. In particular, compared to text-only pre-training, the addition of code results in up to a relative increase of $8.2\\%$ in natural language (NL) reasoning, $4.2\\%$ in world knowledge, $6.6\\%$ improvement in generative win-rates, and a $12x$ boost in code performance respectively. Our work suggests investments in code quality and preserving code during pre-training have positive impacts.", + "bbox": [ + 228, + 285, + 769, + 537 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 INTRODUCTION", + "text_level": 1, + "bbox": [ + 173, + 569, + 339, + 585 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The role of data has taken on critical significance in recent breakthroughs. State-of-the-art models highlight the importance of the pre-training data mixture and diversity of data sources (Brown et al., 2020; Longpre et al., 2023; Singh et al., 2024), combined with compute availability, as key drivers of performance (Dubey et al., 2024; Üstün et al., 2024; Team et al., 2023; Aryabumi et al., 2024). A critical question is what properties of data impart the best general performance?", + "bbox": [ + 169, + 603, + 826, + 676 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Perhaps surprisingly, code is often included in pre-training even if a model is not explicitly intended to generate high-quality code. Code datasets differ significantly in terms of structure and textual characteristics from high-quality web datasets (Wikimedia; Raffel et al., 2019). Despite this, several previous generations of LLMs like PaLM (Chowdhery et al., 2022), Gopher (Rae et al., 2022) and Bloom (Workshop et al., 2023) that were not explicitly intended to support code generation included code data together with high-quality natural language data in their pre-training mixture.", + "bbox": [ + 169, + 680, + 826, + 767 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In current state-of-the-art models, it is an accepted norm to not only include code data but further increase the proportion – for instance, Llama 3 (Dubey et al., 2024) has four times more code data in proportion (17% of its pre-training mixture) than Llama 2 (4.5%) (Touvron et al., 2023). While there has been anecdotal consensus among practitioners that code data plays a vital role in LLMs' performance, there has been only limited work analyzing the precise impact of code on non-code tasks. Prior work shows particular side benefits of the inclusion of code data, such as impact on scaling in the limited-data regime (Muennighoff et al., 2023a), entity tracking capabilities (Kim et al., 2024), and mathematical reasoning (Razeghi et al.). However, there has been no exhaustive study to date that systematically investigates the impact of code data on general performance. 
In this work, we ask \"what is the impact of code data used in pre-training on a large variety of downstream tasks beyond code generation?\"", + "bbox": [ + 169, + 771, + 828, + 926 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We embark on an exhaustive set of large-scale controlled pre-training experiments. This includes a consideration of where in the training process adding code is beneficial, code proportions, the role of scaling, and the quality and properties of code added. While a costly endeavor to perform these ablations in a rigorous way, we find consistent and valuable results that code provides critical improvements to non-code performance. In particular, compared to text-only pre-training, for our best variant, the addition of code results in relative increase of $8.2\\%$ in natural language (NL) reasoning, $4.2\\%$ in world knowledge, $6.6\\%$ improvement in generative win-rates, and a 12x boost in code performance respectively. Further performing cooldown with code, improves NL reasoning by $3.7\\%$ , World knowledge by $6.8\\%$ , and code by $20\\%$ , relative to cooldown without code and leads to a $4.1\\%$ additional win-rate increase.", + "bbox": [ + 169, + 103, + 826, + 243 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Here, several factors matter including getting the proportion of code correct, improving the quality of code by including synthetic code and code adjacent data such as commits, and leveraging code across multiple stages of training including during cooldown. Our results suggest code is a critical building block for generalization far beyond coding tasks and improvements to code quality have an outsized impact on performance. We conduct an extensive evaluation on a broad range of benchmarks, which cover world knowledge tasks, natural language reasoning, and code generation, as well as LLM-as-a-judge win-rates. Across experiments on models ranging from 470 million to 2.8 billion parameter models, we find the following detailed results:", + "bbox": [ + 169, + 250, + 826, + 362 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Code provides critical improvements to non-code performance. Initialization with code pretrained models results in improved performance for natural language tasks. In particular, compared to text-only pre-training, for our best variant, the addition of code results in a relative increase of $8.2\\%$ in NL reasoning, $4.2\\%$ in world knowledge, $6.6\\%$ improvement in generative win-rates, and a 12x boost in code performance respectively.", + "2. Code quality and properties matter. Using markup-style programming languages, code-adjacent datasets such as GitHub commits and synthetically generated code improves the performance in pre-training. In particular, training on a higher quality synthetically generated code dataset results in a $9\\%$ and $44\\%$ increase in natural language reasoning and code performance, respectively, compared to web-based code data in pre-training. 
Additionally, continual pre-training from a code model that includes synthetic data results in $1.9\\%$ and $41\\%$ relative increases in natural language reasoning and code performance respectively, compared to initialization from a code model that does not include synthetic code data.", + "3. Code in cooldown enables further improvement across all tasks. Including code data in pretraining cooldown, where high-quality datasets are up-weighted, leads to an increase of $3.6\\%$ in NL reasoning, $10.1\\%$ in world knowledge, and $20\\%$ in code performance relative to no cooldown. More significantly, cooldown with code beats the baseline (no cooldown) by $52.3\\%$ win-rates, where win-rates are $4.1\\%$ higher compared to cooldown without code." + ], + "bbox": [ + 169, + 371, + 825, + 628 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 METHODOLOGY", + "text_level": 1, + "bbox": [ + 171, + 648, + 344, + 664 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We describe the details of our Pre-training Data (§ 2.1), Evaluation (§ 2.2), Training and Model details (§ 2.3). Figure 1 shows the high-level experimental framework. Precise details for each experiment and their results are presented in Section 3.", + "bbox": [ + 169, + 678, + 823, + 722 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1 PRE-TRAINING DATA", + "text_level": 1, + "bbox": [ + 171, + 737, + 362, + 751 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this section, we describe the details of our pre-training and cooldown datasets. We aim to evaluate the role of code in pre-training, following current state-of-the-art practices. Hence, we consider pre-training runs that consist of two phases: 1) continued pre-training and 2) cooldown. Continued pre-training refers to training a model that is initialized from a pre-trained model and trained for a fixed token budget. Cooldown (Team et al., 2023; Parmar et al., 2024) involves up-weighting high-quality datasets and annealing the learning rate for a relatively small number of tokens during the final stages of training. This up-weighting of high-quality datasets for a smaller number of steps at the end of training can significantly boost model quality.", + "bbox": [ + 169, + 763, + 826, + 876 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Text dataset. We use the SlimPajama pre-training corpus (Soboleva et al., 2023) as our source of natural language text data. SlimPajama is a de-duplicated, quality-filtered, multi-corpora, open-source dataset based on RedPajama-1.2T (Computer, 2023). SlimPajama consists of documents", + "bbox": [ + 169, + 881, + 823, + 925 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/a4396e1682e56163e5de9733298ea2cb77f96523c6831e11de756216bfe5ae6c.jpg", + "image_caption": [ + "Figure 1: Overview of our experimental framework: We exhaustively evaluate the impact of code by varying: 1) the proportion of code in pre-training, 2) code quality and properties, 3) model initialization, 4) model scale, and 5) stage of training at which code is introduced. We evaluate the resulting model on a wide-ranging set of tasks, including natural language reasoning, world knowledge, code, and open-ended generations."
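As a companion to the cooldown procedure described in § 2.1 above, a minimal sketch of the two-phase learning-rate schedule follows: cosine decay after a 1325-step warmup (per § 2.3), then linear annealing to a final LR of 1e-6 over a cooldown budget of roughly 10% of training (per § 3.5). The peak LR, the main/cooldown step counts, and the cosine floor below are illustrative assumptions, not the authors' values or code.

```python
import math

# Illustrative sketch of the schedule described above (not the authors' code):
# linear warmup, cosine decay for the main run, then a linear anneal to the
# stated final LR of 1e-6 during cooldown. The peak LR, the main/cooldown step
# counts, and the 10% cosine floor are assumptions for illustration only.
def learning_rate(step: int, peak: float = 3e-4, warmup: int = 1325,
                  main_steps: int = 95_000, cooldown_steps: int = 9_500) -> float:
    floor = 0.1 * peak
    if step < warmup:                                    # linear warmup
        return peak * step / warmup
    if step < main_steps:                                # cosine decay to the floor
        t = (step - warmup) / (main_steps - warmup)
        return floor + 0.5 * (peak - floor) * (1 + math.cos(math.pi * t))
    t = min(1.0, (step - main_steps) / cooldown_steps)   # linear anneal in cooldown
    return (1 - t) * floor + t * 1e-6
```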
+ ], + "image_footnote": [], + "bbox": [ + 248, + 108, + 750, + 324 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "from CommonCrawl, C4, GitHub, Books, ArXiv, Wikipedia, and StackExchange. We filter out all documents from GitHub and StackExchange to remove code and code-adjacent data sources and ensure this is a text-only source. SlimPajama has a total of 627B tokens. After removing all code sources, this results in our text pre-training corpus with a total of 503B tokens.", + "bbox": [ + 169, + 435, + 823, + 492 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Code datasets. To explore the impact of different properties of code data, we use multiple sources of code in our experiments:", + "bbox": [ + 169, + 498, + 823, + 527 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- WEB-BASED CODE DATA: For our main source of code data, we start with the Stack dataset (Kocetkov et al., 2022) that was used to train StarCoder (Li et al., 2023a). The Stack consists of permissively licensed code data scraped from GitHub. We apply quality filters1 and restrict to the top 25 programming languages based on document count2. After all filtering steps, the size of the code-only and markup subset is 139B tokens.", + "- MARKDOWN DATA We also separately process markup-style languages such as Markdown, CSS, and HTML. After all filtering steps, the size of this markup subset is 180B tokens.", + "- SYNTHETIC CODE DATA: To ablate the quality of the code dataset, we use a proprietary synthetically generated code dataset that consists of Python programming problems that have been formally verified. We treat this as a high-quality source of code data (See the details in § 3.4). The final synthetic dataset consists of 3.2B code tokens.", + "- CODE ADJACENT DATA: Finally, to explore different properties of code data, we include a version of the code data which includes auxiliary data such as GitHub commits, jupyter notebooks, StackExchange threads. For GitHub commits, and jupyter notebooks we use the datasets provided as part of the Stack (Kocetkov et al., 2022). We use the version of StackExchange that is part of SlimPajama (Soboleva et al., 2023). In total we have 21.4B tokens of code-adjacent data." + ], + "bbox": [ + 215, + 540, + 823, + 791 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Pre-training cooldown datasets. Cooldown involves up-weighting higher quality datasets for the final steps of pre-training and has been found to improve performance on downstream tasks (Parmar et al., 2024; Team et al., 2023), in particular to impart instruction-following capabilities. 
We choose a cooldown mixture comprising high-quality text, math, code, and instruct-style text datasets.", + "bbox": [ + 169, + 804, + 823, + 861 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 2 + }, + { + "type": "page_footnote", + "text": "See Appendix C.1 for details about quality filters", + "bbox": [ + 194, + 896, + 493, + 909 + ], + "page_idx": 2 + }, + { + "type": "page_footnote", + "text": "2Refer to Appendix C.2, C.3 for the full list of programming and markup-style languages included", + "bbox": [ + 194, + 909, + 777, + 924 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.2 EVALUATION", + "text_level": 1, + "bbox": [ + 171, + 104, + 307, + 118 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Our goal is to systematically understand the impact of code on general performance, which requires a broad evaluation suite that extends to a large variety of downstream tasks beyond code generation. To achieve this, we evaluate models on benchmarks that are reasonable proxies for model ability on 1) world knowledge, 2) natural language reasoning, and 3) code performance. In addition, we report win-rates as evaluated by an LLM-as-a-judge. Table 1 (Appendix A) shows the full evaluation suite and their respective grouping, along with the metric used.", + "bbox": [ + 169, + 128, + 823, + 214 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "For World knowledge, we use benchmarks to measure knowledge memorization, retrieval, and question answering capability given context. We include Natural Questions Open (Kwiatkowski et al., 2019), and TriviaQA (Joshi et al., 2017) as the datasets. Natural language reasoning suite consists of 11 benchmarks that involve natural language based reasoning such as Question Answering, natural language inference (NLI), sentence completion, co-reference resolution, and general intelligence. We include the full list of the constituent benchmarks with references in Table 1. Finally, while our main focus is general performance, we also want to measure any changes to code generation performance. For Code benchmarks, we focus on the function completion task where we use HumanEval-Python (Chen et al., 2022) and MBPP (Austin et al., 2021).", + "bbox": [ + 169, + 220, + 826, + 347 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We evaluate performance at two scales: 470M to 2.8B parameter models. At 470M scale, model capabilities are limited, thus to ensure fair comparisons, we only compare benchmarks for which all models achieve scores above random similar to Muennighoff et al. (2023a); Lozhkov et al. (2024).", + "bbox": [ + 169, + 352, + 823, + 396 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "LLM-as-a-judge win-rates. In addition to task-specific discriminative performance, to allow for a more holistic view across all performance measures, we also evaluate generative performance using LLM-as-a-judge win-rates. This is particularly valuable given recent work that has shown that as performance on open-ended generations improves, there is deterioration in traditional academic tasks (Ustun et al., 2024; Ouyang et al., 2022; Iyer et al., 2022; Muennighoff et al., 2023c). 
The use of LLMs-as-a-Judge benchmarks (Fu et al., 2023; Liu et al., 2023; Chiang & yi Lee, 2023; Shimabucoro et al., 2024) has gained traction as an alternative to performing human evaluation, which tends to be laborious and expensive (Wang et al., 2023; Boubdir et al., 2023). LLMs as evaluators compare two completions based upon detailed prompts and are reasonable proxies aligned with human preference (Ustun et al., 2024; Dubois et al., 2024).", + "bbox": [ + 169, + 401, + 825, + 541 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We use the Dolly-200 English dataset (Ustun et al., 2024; Singh et al., 2024), which consists of 200 hand-picked examples from the Dolly-15K dataset (Conover et al., 2023). These prompts are open-ended and capture general-purpose non-code use cases making them a valuable proxy for how code impacts more fluid and often open-ended tasks. For our win-rate evaluations, we use Command- $R^{+3}$ as the LLM judge. Details about the prompt are provided in Appendix D.", + "bbox": [ + 169, + 547, + 823, + 619 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.3 TRAINING AND MODEL DETAILS", + "text_level": 1, + "bbox": [ + 171, + 633, + 444, + 647 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We use 470M and 2.8B parameters decoder-only auto-regressive Transformer models (Radford et al., 2019) that are trained with a standard language modeling objective. We use parallel attention layers, (Chowdhery et al., 2022; Wang & Komatsuzaki, 2021), SwiGLU activation (Shazeer, 2020), no biases in dense layers, and a byte-pair-encoding tokenizer with a vocabulary size of 256K. All models are pre-trained using AdamW (Loshchilov & Hutter, 2019) with a max sequence length of 8192, batch size of 512 and a cosine LR schedule with a warmup of 1325 steps.", + "bbox": [ + 169, + 660, + 823, + 744 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Infrastructure. We use TPU v5e chips (Jouppi et al., 2017) for training and evaluation. All models are trained using Jax (Bradbury et al., 2018) framework. We pre-train 64 models in total. This is an enormous endeavour given the scale and computational resources required. Each pre-training run for 200B tokens takes 4736 TPU-chip hours for 470M and 13824 TPU-chip-hours for 2.8B parameter models. Each cooldown run for 40B tokens takes 1024 TPU-chip hours for 470M models.", + "bbox": [ + 169, + 750, + 823, + 821 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3 RESULTS AND DISCUSSION", + "text_level": 1, + "bbox": [ + 171, + 840, + 433, + 856 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In this section, we will report descriptions and results for each experimental variants. 
We systematically investigate, (1) initializing an LLM with code pre-trained models (§ 3.1), and (2) the impact of", + "bbox": [ + 169, + 872, + 825, + 902 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 3 + }, + { + "type": "page_footnote", + "text": "3https://huggingface.co/CohereForAI/c4ai-command-r-plus", + "bbox": [ + 194, + 909, + 550, + 924 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/2c8f69e3bc02422673769f7929c8ccbc81d18d4666c5b42ed23fd45dafe7fa69.jpg", + "image_caption": [ + "Figure 2: Impact of initialization using code pre-trained models: Initializing model training with code pre-trained models improves reasoning and code generation compared to text-only models, where the improvement is the most when continued pre-training with high percentage text (Balanced $\\rightarrow$ Text, Code $\\rightarrow$ Text). Note that these variants are designed to isolate the role of initialization, so do not include cooldown." + ], + "image_footnote": [], + "bbox": [ + 222, + 103, + 772, + 263 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "model scale (§ 3.2), (3) varying proportion of code in pre-training data (§ 3.3), (4) quality and properties of the code data (§ 3.4), (5) code data in pre-training cooldown (§ 3.5). Finally, we compare the resulting pre-training recipes (§ 3.6). Figure 1 shows the key levers of our experimental design.", + "bbox": [ + 169, + 372, + 823, + 417 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.1 INITIALIZING AN LLM WITH CODE PRE-TRAINED MODELS", + "text_level": 1, + "bbox": [ + 171, + 431, + 627, + 446 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We explore different initializations of pre-trained models to understand if using an LM with a large portion of code data as initialization improves the performance. These key ablations, along with their token counts, are summarized in Table 2. We briefly describe below:", + "bbox": [ + 169, + 457, + 823, + 501 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Text LM (TEXT-ONLY BASELINE): Pre-trained model from scratch using glorot-normal initialization (Glorot et al., 2011) on the text-only data for 400B tokens.", + "- Balanced LM (BALANCED-ONLY): A model is trained with an equal ratio of code and text data (50% text and 50% code) in pre-training for 400B tokens.", + "- Balance-initialized Text LM (BALANCED $\\rightarrow$ TEXT): This model is initialized with a balanced LM (50% text and 50% code) and further pre-trained using text data for 200B tokens.", + "- Code-initialized Text LM (CODE $\\rightarrow$ TEXT): Different from other variants, this model is initialized with a code-LM which is pre-trained on a code dataset for 200B tokens. The code dataset contains a mixture of $80\\%$ code data and $20\\%$ markup-style code data. We then continually pre-train this model on text for another 200B tokens.4" + ], + "bbox": [ + 215, + 512, + 823, + 666 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Natural Language Reasoning As seen in Figure 2, initializing with $100\\%$ code pre-trained model (code $\\rightarrow$ text) has the best performance for NL Reasoning benchmarks, and is closely followed by the balanced $\\rightarrow$ text model. 
The code $\\rightarrow$ text model and balanced $\\rightarrow$ text model beat the text-only baseline on NL reasoning tasks by $8.8\\%$ and $8.2\\%$ relative improvement respectively. The balanced-only model improves upon the baseline by $3.2\\%$ . This shows that initialization from a pre-trained model with a mix of code has a strong positive effect on NL reasoning tasks. Further using a text mix with a small percentage of code for continual pre-training results in the best performance as evidenced by both the code $\\rightarrow$ text and balanced $\\rightarrow$ text models.", + "bbox": [ + 169, + 676, + 825, + 789 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "World Knowledge For World Knowledge tasks, we see that the balanced $\\rightarrow$ text model has the best performance over all other variants, beating the code $\\rightarrow$ text by $21\\%$ and text-only by $4.1\\%$ relative improvement. This suggests that performance on world knowledge tasks depends on a more balanced data mixture for initialization and a larger proportion of text in the continual pretraining stage. Overall, code data is still beneficial compared to text-only pre-training for world knowledge tasks.", + "bbox": [ + 169, + 796, + 823, + 881 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "4We use a 10% of code in text mixture data during continual pre-training of code-initialized models (balanced→text, code→text) to avoid a full distribution shift and maintain the benefits of code.", + "bbox": [ + 169, + 897, + 823, + 924 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/98ab9c458f60bc63afdf09a9f3a7c56fe711892c17265bf1505e931c5ccaaca6.jpg", + "image_caption": [ + "Figure 3: Impact of model scale on different tasks. We observe that scale provides pronounced gains across tasks of up-to $2.7\\mathrm{x}$ increase, however the overall trend remains the same across scales showing consistency of findings across model sizes." + ], + "image_footnote": [], + "bbox": [ + 225, + 103, + 772, + 262 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Trade-offs between NL tasks and code generation For code generation, balanced-only achieves the best performance, where we see a $46.7\\%$ and $54.5\\%$ relative improvement over balanced $\\rightarrow$ text and code $\\rightarrow$ text. This is expected as balanced-only includes $50\\%$ code throughout pre-training. However, this model trades off better code generation with lower performance in NL tasks. code $\\rightarrow$ text and balanced $\\rightarrow$ text achieves $2.9\\%$ and $2.3\\%$ relative increase in NL reasoning, and $17.3\\%$ and $22.2\\%$ relative increase in world knowledge respectively compared to balanced-only.", + "bbox": [ + 169, + 340, + 823, + 441 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Generative quality win-rates comparison Additionally, we compare the generative performance of each code variant (code $\\rightarrow$ text and balanced-only) against the text-only model. We report win-rates and observe that the presence of code has a strong positive impact on generation quality. Both code $\\rightarrow$ text and balanced-only) models beat the text-only variant by a $6.6\\%$ difference in win-loss rates. 
We again note that Dolly-200-English evaluation set we use for win-rate calculation is curated to reflect open ended questions and is a non-code evaluation. This confirms that code data in the pre-training mix does not only improves reasoning but also helps the model produce better quality generations.", + "bbox": [ + 169, + 446, + 826, + 559 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.2 IMPACT OF SCALE", + "text_level": 1, + "bbox": [ + 171, + 574, + 344, + 588 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To understand if the findings of Section 3.1 transfer to larger models, we train 2.8B parameters models with the same token budget following the same model variants at 470M scale. Figure 3 shows the results of 2.8B models in comparison with 470M results.", + "bbox": [ + 169, + 599, + 823, + 643 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Comparison between 2.8B and 470M models Scaling model size to 2.8B enables higher performance for all model variants in all task categories, compared to 470M results. In terms of average performance across NL reasoning and world knowledge, balanced $\\rightarrow$ text model benefits from scaling-up by a $33.1\\%$ increase relative to the same model with 470M size. The improvement for code $\\rightarrow$ text and balanced-only are $31.7\\%$ and $30\\%$ relative increase.", + "bbox": [ + 169, + 648, + 823, + 720 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We find that the improvements in NL reasoning are relatively modest with $5.3\\%$ , $9.2\\%$ , and $5.2\\%$ relative gains for balanced $\\rightarrow$ text, code $\\rightarrow$ text, and balanced-only respectively. However, world knowledge and code performance nearly triples for all the model variants. In particular, 2.8B balanced $\\rightarrow$ text results increase by $2.7\\mathrm{x}$ in world knowledge and $2.5\\mathrm{x}$ in code evaluation compared to 470M.", + "bbox": [ + 169, + 726, + 823, + 797 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Trends between model variants in 2.8B Notably, in terms of initialization with code pre-trained models, the same trends seen in 470M parameter scale hold at 2.8B models. code $\\rightarrow$ text and balanced $\\rightarrow$ text models improve over balanced models by $6.9\\%$ and $6.1\\%$ relative gain, however, fall significantly behind in code generation performance with $43.1\\%$ and $46.3\\%$ relative drop. These results show that the trade-off between NL tasks and code generation increases with the model size. 
Overall our experiments scaling to a larger size shows that our results hold and are consistent with the trends we observe at 470M parameter ablations.", + "bbox": [ + 169, + 801, + 825, + 902 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 5 + }, + { + "type": "page_footnote", + "text": "We include the extended Win-rates for these experiments in Appendix E.", + "bbox": [ + 194, + 909, + 632, + 924 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/7e24086b373a64314a1087e59d1623c6a838d3d57c9e2acc40df3cf0273d96bd.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 223, + 102, + 419, + 210 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/45f86072ce3adbc7f66d6a67edf2070b009c704bf42dfa4648e58333587db907.jpg", + "image_caption": [ + "Increasing Proportion of Code $\\rightarrow$", + "Figure 4: Impact of the proportion of code in pre-training for different tasks: We observe that as the code proportion of pre-training increases, the performance on code tasks increases linearly. In contrast, there is more sensitivity for NL reasoning and World knowledge tasks and an optimal range of code proportions where benefits are most tangible. Model size is 470M parameters and trained for 200B tokens." + ], + "image_footnote": [], + "bbox": [ + 421, + 102, + 616, + 210 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/0391f65bb231307d946996aa142617c4ed192f040db1142b8318406c3172f3b3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 617, + 102, + 772, + 210 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.3 CODE DATA PROPORTION IN PRE-TRAINING", + "text_level": 1, + "bbox": [ + 171, + 338, + 519, + 351 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In these experiments, we ablate the proportions of code data in the pre-training mixture to understand what is the optimal amount of code to maximize performance on non-code tasks. Here, we focus on the first phase of pre-training with random initialization. We train six models for 200B tokens with increasing code proportions: $0\\%$ , $25\\%$ , $50\\%$ , $75\\%$ , $90\\%$ , and $100\\%$ . The remaining proportion is filled with text data. For each variant, we train a new model independently in order to carefully ablate the impact of varying code proportions.", + "bbox": [ + 169, + 367, + 823, + 450 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Natural Language Reasoning and World Knowledge For NL Reasoning, as the amount of code increases, in Figure 4 we see an increase in performance compared to a text-only (0% code) model. The best performance is from a model with 25% code and 75% text, with a 3.4% relative improvement over a model with 0% code. While performance is maintained up to 75% code, it starts to rapidly erode at higher proportions with a sharp relative drop of 18.3% when the model is trained on 100% code compared to a model with no code.", + "bbox": [ + 169, + 457, + 823, + 542 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "For World Knowledge tasks, we see an inverse relationship with increasing the amount of code. 
As seen in Figure 4 middle inset, there is a slight relative drop of $3.4\\%$ at $25\\%$ code and this relative drop worsens to $31\\%$ at $75\\%$ code compared to the no-code model. The fully code model (100% code) is unable to perform in world knowledge task (86% drop relative to text-only) as there are no data sources to acquire the required knowledge in the pre-training mix.", + "bbox": [ + 169, + 547, + 823, + 619 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Performance on Code For code evaluation, there is a linear increase in performance as the amount of code increases, with the best model being a code-only model. As observable in Figure 4 right inset, the $100\\%$ code leads to a 2.6x increase in the code benchmarks compared to the $25\\%$ code model. As expected, for the model with $0\\%$ code, the average pass@1 score drops to 0.", + "bbox": [ + 169, + 625, + 823, + 681 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.4 INFLUENCE OF CODE QUALITY AND PROPERTIES ON GENERAL PERFORMANCE", + "text_level": 1, + "bbox": [ + 171, + 705, + 763, + 720 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In this section, we investigate the properties of code data by varying its quality and composition. We study this firstly (a) from the perspective of training from scratch, as we want to isolate the exact effects of different properties of code data. Secondly (b), we incorporate the best variant of the code data (high-quality synthetic code), in our continual pre-training experiments to see if the impact of the code quality transfer. We report performance on NL reasoning and Code tasks.", + "bbox": [ + 169, + 734, + 823, + 806 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We study the effect of the following properties: (1) MARKUP-STYLE DATA: we separate markup-style programming languages ( $\\S$ 2.1) from the rest of web-based code (Appendix C.3). We replace $20\\%$ of code-only tokens with markup-style tokens. (2) CODE ADJACENT DATA: Instead of using purely web-based code data, we replaced $15\\%$ percentage of code tokens with code-adjacent datasets - GitHub issues ( $5\\%$ ), StackExchange ( $5\\%$ ) and Jupyter Notebooks ( $5\\%$ ), resulting in a code-adjacent model. (3) CODE QUALITY: To control the quality of the code, we replaced $10\\%$ of existing code tokens with a synthetically generated high-quality code dataset. The remaining proportions of web-based code data are kept the same, resulting in a code-synth model.", + "bbox": [ + 169, + 811, + 823, + 925 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/bfe1105936533411e23b23642a672fa16cffe114e17aca190b7e14798646b3d1.jpg", + "image_caption": [ + "(a) Code-only Pre-training" + ], + "image_footnote": [], + "bbox": [ + 184, + 103, + 529, + 223 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/697f623b23d26d3d291cb46d4ef3c5bdb5b3f1be7f8936b1aeba94d76a428934.jpg", + "image_caption": [ + "(b)Continual Pre-training", + "Figure 5: Impact of using different properties of code data: (a) As the most impactful code data source, synthetically generated high-quality code improves NL reasoning and code performance for code pre-training. 
(b) These improvements with synthetically generated high-quality code data also transfer the continual pre-training setting. All models are of size 470M parameters." + ], + "image_footnote": [], + "bbox": [ + 537, + 102, + 813, + 223 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Code-only pre-training We compare the above variants to a model that is trained only on web-based code data (code) from the stack dataset (Kocetkov et al., 2022), which forms our baseline model. All the variants are pre-trained using the same amount of tokens (200B) for fair comparison.", + "bbox": [ + 169, + 340, + 823, + 383 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In Figure 5a, we evaluate the impact of code quality and code composition. We observe that across all variants, including diverse code sources and also synthetic code leads to gains in natural language performance relative to code, however, only synthetically generated code improves the code benchmarks. We relate this to our code evaluation where we measure performance in python, thus different programming languages or code-adjacent data slightly decrease the results. Here, code+markup and code+adjacent leads to $2.8\\%$ and $6.3\\%$ relative improvement in NL reasoning compared to code (web-code-only), but cause $15.7\\%$ and $9.4\\%$ drop in code evaluation.", + "bbox": [ + 169, + 388, + 823, + 488 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Our synthetic code data (code+synth) is the most impactful ablation. It is particularly impressive given its relatively small share of the overall dataset. Despite a small weighting of $10\\%$ , the inclusion of synthetic data leads to relative improvements of $9\\%$ on NL reasoning, and $44.9\\%$ on code benchmarks compared to the baseline of web-code-only. We note that the lifts observed for synthetic data are even more impressive given the limited amount of synthetic data available compared to code-adjacent data (3.2B tokens vs 21.4B tokens) or code+markup data (3.2B tokens vs 40B tokens), and the weighting during pre-training allocation ( $10\\%$ vs $15\\%$ vs $20\\%$ for synthetic data, code-adjacent, code-markup respectively). This suggests a key future lever of improvement in increasing the proportion of such high-quality code data sources.", + "bbox": [ + 169, + 494, + 823, + 619 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Continual pre-training Here, based on the findings from code-only pre-training, we incorporated code + synth into our best continual pre-training variant (balanced + synth $\\rightarrow$ text). We compare this against the same variant without synthetic code data (balanced $\\rightarrow$ text) to evaluate the benefits of synthetic data. We use the same amount of code and text tokens in these experiments.", + "bbox": [ + 169, + 626, + 823, + 681 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "As shown in Figure 5b, balanced+synth→text achieves $2\\%$ and $35\\%$ relative improvement over balanced→text in NL reasoning and code, respectively. 
This further confirms that even a small percentage of a high-quality code data, not only improves performance in code pre-training but also increases code and non-code performance after continual pre-training with text data.", + "bbox": [ + 169, + 688, + 823, + 746 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "3.5 CODE IN PRE-TRAINING COOLDOWN", + "text_level": 1, + "bbox": [ + 171, + 763, + 472, + 777 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this section, we evaluate the impact of code at the final stage of pre-training. Here, we consider cooldown, where we up-weight high-quality text, math, code, and instruct-style datasets. We change the learning rate schedule from cosine-based to linear annealing with a final learning rate of $1e - 6$ . We evaluate the impact of code in cooldown by comparing 3 models: a pre-trained model before cooldown, cooldown without code data, and cooldown with $20\\%$ code data. For our pre-trained model, we use balanced-text as it is our best pre-trained variant. We preserve the same token budget across variants - 40B tokens which is $10\\%$ of the token budget of the pre-trained model.", + "bbox": [ + 169, + 791, + 823, + 888 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Impact of code used during cooldown in different tasks In Figure 6a, we evaluate the impact of code in cooldown on model performance in NL Reasoning, world knowledge, and code benchmarks.", + "bbox": [ + 169, + 895, + 823, + 924 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/da623e2c7e25504e3b4dff71e5b5195640f97b335a0d0bcf47f61048a5f7f540.jpg", + "image_caption": [ + "(a) Downstream tasks" + ], + "image_footnote": [], + "bbox": [ + 176, + 102, + 524, + 204 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/bea05cec28c33b6ca910fd2883b14f385de8c7ef47fcf40a189caea3262e1abb.jpg", + "image_caption": [ + "(b) Generative win-rates", + "Figure 6: Impact of code data in pre-training cooldown: Including code data in the cooldown phase improves downstream relative to cooldown with no code. All cooldown variants benefit for downstream tasks and especially generative quality. We find the largest gains from cooldown with code, with the highest win-rates of $52.3\\%$ over a model with no cooldown." + ], + "image_footnote": [], + "bbox": [ + 540, + 106, + 810, + 203 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Across tasks, we find that a cooldown with code data is most beneficial with $3.6\\%$ , $10.1\\%$ , and $20\\%$ in NL reasoning, world knowledge, and code relative to the model without cooldown.", + "bbox": [ + 169, + 319, + 823, + 348 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In contrast, we find that cooldown without code does not provide any increases for both NL reasoning and Code, while providing a relative improvement of $3.1\\%$ in World Knowledge tasks compared to no cooldown, showing the critical role of code data in also cooldown phase of pre-training.", + "bbox": [ + 169, + 354, + 823, + 397 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Generative win-rates after cooldown As expected, cooldown has a significant impact on generative performance as measured by win-rates (seen in Figure 6b). 
+ { + "type": "text", + "text": "Generative win-rates after cooldown As expected, cooldown has a significant impact on generative performance as measured by win-rates (seen in Figure 6b). This is because we up-weight high-quality data sources in the pre-training mix, including instruction-style datasets such as Dolly v2 (Conover et al., 2023). Both cooldown variants (cooldown w/o code, cooldown w/ code) beat the no-cooldown model by large win-rates (48.2% and 52.3%), as seen in Figure 6b. Comparing the cooldown variants, including code leads to an additional 4.1% in generative win-rate against no-cooldown compared to cooldown without code.", + "bbox": [ + 169, + 402, + 823, + 501 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "3.6 COMPARING PRE-TRAINING RECIPES", + "text_level": 1, + "bbox": [ + 171, + 518, + 475, + 531 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Considering all our experiments, we summarize our findings and recommend recipes for pre-training with code data. Table 2 (Appendix B) shows the different variants along with the pre-training phases.", + "bbox": [ + 169, + 542, + 823, + 573 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Best recipe for natural language tasks As seen in Sections 3.1, 3.3, and 3.5, including code in all phases of pre-training provides improvements across all task categories. Looking at the final recipes, we find that the balanced $\\rightarrow$ text model followed by a cooldown that includes code data achieves the best overall performance on natural language tasks, considering NL reasoning, world knowledge, and generative performance. Notably, this model achieves the highest generative win-rate, $37.7\\%$ vs $33.7\\%$ against text-only, as shown in Figure 7.", + "bbox": [ + 169, + 579, + 823, + 662 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Best recipe for code performance Among the complete recipes shown in Table 2, balanced-only provides the best performance on code benchmarks. This model achieves a $20\\%$ relative gain compared to the second-best code $\\rightarrow$ text and a $55\\%$ relative gain compared to balanced $\\rightarrow$ text. However, balanced-only falls behind in natural language performance by a $2.5\\%$ relative difference and a $5.0\\%$ win-rate difference (vs text-only), both compared to balanced $\\rightarrow$ text.", + "bbox": [ + 169, + 670, + 823, + 739 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Including code in all phases of pre-training is beneficial across our three task categories and for generative performance. Our recommendation for the best overall performance is to include a balanced mixture of code and text data during pre-training from scratch (§ 3.3), use a relatively lower code percentage during continual pre-training (§ 3.1), and include code data in the cooldown mixture. Further, we recommend including high-quality code data during all phases of pre-training (§ 3.4).", + "bbox": [ + 169, + 746, + 823, + 816 + ], + "page_idx": 8 + },
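As a compact restatement of this recommendation, the sketch below encodes per-phase mixtures; the 20% code share in cooldown is stated in Section 3.5, while the other splits are illustrative assumptions standing in for "balanced" and "a relatively lower code percentage".

```python
# Illustrative summary of the recommended recipe, not the paper's exact
# configuration. The 20% code share in cooldown is stated in Section 3.5;
# the pre-training and continual pre-training splits are assumptions.
RECIPE = {
    "pretraining_from_scratch": {"code": 0.50, "text": 0.50},               # assumed "balanced"
    "continual_pretraining":    {"code": 0.25, "text": 0.75},               # assumed lower code share
    "cooldown":                 {"code": 0.20, "text+math+instruct": 0.80}, # per Section 3.5
}

for phase, mix in RECIPE.items():
    assert abs(sum(mix.values()) - 1.0) < 1e-9, phase
    print(f"{phase}: " + ", ".join(f"{k}={v:.0%}" for k, v in mix.items()))
```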
+ { + "type": "text", + "text": "4 RELATED WORK", + "text_level": 1, + "bbox": [ + 171, + 837, + 341, + 852 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Understanding the impact of pre-training mixes Several works have studied the effects of the age, quality, toxicity, and domain of pre-training data (Longpre et al., 2023; Üstün et al., 2024). Others have looked at the impact of filtering (Raffel et al., 2020; Rae et al., 2021; Penedo et al., 2023), de-duplication (Zhang et al., 2022), and data pruning (Lozhkov et al., 2024; Marion et al., 2023;", + "bbox": [ + 169, + 867, + 823, + 925 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/0acead19bcc93dc191f7b4b05f59a204988bbe81cefebf3380086b79102e5615.jpg", + "image_caption": [ + "Figure 7: Generative performance as measured by win-rates for variants with full cooldown." + ], + "image_footnote": [], + "bbox": [ + 223, + 102, + 408, + 237 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/717a60ebef812534c77f398e4b5a40fba0b77fa12b96fb613eb94aff0608eedb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 426, + 103, + 598, + 236 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/57d292212ebabb7a14ce6e8158b991527af35051c5e192939ade8e652c7d273e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 617, + 103, + 772, + 236 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Chimoto et al., 2024; Boubdir et al., 2023). Furthermore, several works have considered the role of synthetic data in improving performance (Shimabucoro et al., 2024; Dang et al., 2024; Aakanksha et al., 2024) and in helping bridge the performance gap between open-weights and proprietary models (Gunasekar et al., 2023; Li et al., 2023b). In contrast to our work, which focuses explicitly on understanding the role of code, these studies focus on characteristics of the training data as a whole.", + "bbox": [ + 169, + 291, + 826, + 362 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Understanding the role of code Including code in the pre-training data mixture, even for models not specifically designed for code, has been a common practice in LLM pre-training (Dubey et al., 2024; Gemini-Team et al., 2024; Groeneveld et al., 2024). In addition to serving the popular use case of code completion and generation (Chen et al., 2021), previous studies suggest that the addition of code improves the performance of LLMs on various NLP tasks, such as entity linking (Kim et al., 2024), commonsense reasoning (Madaan et al., 2022b), mathematical reasoning tasks (Liang et al., 2022; Madaan et al., 2022a; Gao et al., 2023; Shao et al., 2024), and general reasoning capabilities (Muennighoff et al., 2023a; Fu & Khot, 2022; Ma et al., 2023). Muennighoff et al. (2023b) demonstrated that Python code data can be used to improve pre-training performance. They focused on a low-resource pre-training regime with limited data and an evaluation set-up limited to perplexity evaluations. Zhang et al. (2024) investigated the impact of code on LLMs' internal reasoning capability across various tasks and model families. They only focus on the effect of code in the supervised fine-tuning (SFT) stage, primarily measuring the impact on reasoning. Zhu et al. (2024) report the performance of their DeepSeek-Coder-V2 models on general natural language benchmarks. 
They compare chat and instruct models, and do not investigate different phases of pre-training or properties of code.", + "bbox": [ + 169, + 368, + 826, + 592 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "To the best of our knowledge, this work is the first study that presents a thorough investigation of the impact of code in pre-training on non-code tasks. Our experiments span several axes and an exhaustive evaluation suite, with costly ablations at scale, including model initialization strategies, different proportions and properties of code data, and model scales.", + "bbox": [ + 169, + 597, + 826, + 654 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "5 CONCLUSION", + "text_level": 1, + "bbox": [ + 171, + 674, + 320, + 690 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "We perform a first-of-its-kind systematic study to answer \"what is the impact of code data used in pre-training on a large variety of downstream tasks beyond code generation\". We focus not just on code performance but also on downstream natural language performance, as well as generative quality using LLM-as-a-judge win-rates. We conduct ablations that look at initialization, proportions of code, quality and properties of code, and the role of code in pre-training cooldown. We find across all scales of experiments that code provides critical improvements to performance on non-code tasks. Compared to text-only pre-training, for our best variant, the addition of code results in relative increases of $8.2\\%$ in natural language (NL) reasoning and $4.2\\%$ in world knowledge, a $6.6\\%$ improvement in generative win-rates, and a 12x boost in code performance. Further, performing cooldown with code improves NL reasoning, world knowledge, and code by $3.6\\%$, $10.1\\%$, and $20\\%$ relative to the model before cooldown, and leads to $52.3\\%$ generative win-rates. Finally, we find that adding a small amount of high-quality synthetic data can have an outsized impact on both NL reasoning ($9\\%$ relative increase) and code performance ($44.9\\%$ relative increase).", + "bbox": [ + 169, + 705, + 826, + 887 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 946, + 509, + 959 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 173, + 102, + 287, + 117 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Aakanksha, Arash Ahmadian, Beyza Ermis, Seraphina Goldfarb-Tarrant, Julia Kreutzer, Marzieh Fadaee, and Sara Hooker. The multilingual alignment prism: Aligning global and local preferences to reduce harm, 2024. URL https://arxiv.org/abs/2406.18682.", + "Viraat Aryabumi, John Dang, Dwarak Talupuru, Saurabh Dash, David Cairuz, Hangyu Lin, Bharat Venkitesh, Madeline Smith, Jon Ander Campos, Yi Chern Tan, Kelly Marchisio, Max Bartolo, Sebastian Ruder, Acyr Locatelli, Julia Kreutzer, Nick Frosst, Aidan Gomez, Phil Blunsom, Marzieh Fadaee, Ahmet Üstün, and Sara Hooker. Aya 23: Open weight releases to further multilingual progress, 2024. URL https://arxiv.org/abs/2405.15032.", + "Jacob Austin, Augustus Odena, Maxwell Nye, Maarten Bosma, Henryk Michalewski, David Dohan, Ellen Jiang, Carrie Cai, Michael Terry, Quoc Le, and Charles Sutton. Program synthesis with large language models, 2021. 
URL https://arxiv.org/abs/2108.07732.", + "Meriem Boubdir, Edward Kim, Beyza Ermis, Marzieh Fadaee, and Sara Hooker. Which prompts make the difference? data prioritization for efficient human llm evaluation, 2023. URL https://arxiv.org/abs/2310.14424.", + "James Bradbury, Roy Frostig, Peter Hawkins, Matthew James Johnson, Chris Leary, Dougal Maclaurin, George Necula, Adam Paszke, Jake VanderPlas, Skye Wanderman-Milne, and Qiao Zhang. JAX: composable transformations of Python+NumPy programs, 2018. URL http://github.com/google/jax.", + "Tom B. Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel M. Ziegler, Jeffrey Wu, Clemens Winter, Christopher Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. Language models are few-shot learners. arXiv, abs/2005.14165, 2020.", + "Mark Chen, Jerry Tworek, Heewoo Jun, Qiming Yuan, Henrique Ponde de Oliveira Pinto, Jared Kaplan, Harri Edwards, Yuri Burda, Nicholas Joseph, Greg Brockman, Alex Ray, Raul Puri, Gretchen Krueger, Michael Petrov, Heidy Khlaaf, Girish Sastry, Pamela Mishkin, Brooke Chan, Scott Gray, Nick Ryder, Mikhail Pavlov, Alethea Power, Lukasz Kaiser, Mohammad Bavarian, Clemens Winter, Philippe Tillet, Felipe Petroski Such, Dave Cummings, Matthias Plappert, Fotios Chantzis, Elizabeth Barnes, Ariel Herbert-Voss, William Hebgen Guss, Alex Nichol, Alex Paino, Nikolas Tezak, Jie Tang, Igor Babuschkin, Suchir Balaji, Shantanu Jain, William Saunders, Christopher Hesse, Andrew N. Carr, Jan Leike, Josh Achiam, Vedant Misra, Evan Morikawa, Alec Radford, Matthew Knight, Miles Brundage, Mira Murati, Katie Mayer, Peter Welinder, Bob McGrew, Dario Amodei, Sam McCandlish, Ilya Sutskever, and Wojciech Zaremba. Evaluating large language models trained on code, 2021. URL https://arxiv.org/abs/2107.03374.", + "Zixiang Chen, Yihe Deng, Yue Wu, Quanquan Gu, and Yuanzhi Li. Towards understanding the mixture-of-experts layer in deep learning. In Alice H. Oh, Alekh Agarwal, Danielle Belgrave, and Kyunghyun Cho (eds.), Advances in Neural Information Processing Systems, 2022. URL https://openreview.net/forum?id=MaYzugDmQV.", + "Cheng-Han Chiang and Hung-yi Lee. Can large language models be an alternative to human evaluations?, 2023.", + "Everlyn Asiko Chimoto, Jay Gala, Orevaoghene Ahia, Julia Kreutzer, Bruce A. Bassett, and Sara Hooker. Critical learning periods: Leveraging early training dynamics for efficient data pruning, 2024. URL https://arxiv.org/abs/2405.19462.", + "Eunsol Choi, He He, Mohit Iyyer, Mark Yatskar, Wen-tau Yih, Yejin Choi, Percy Liang, and Luke Zettlemoyer. QuAC: Question answering in context. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pp. 2174-2184, Brussels, Belgium, October-November 2018. Association for Computational Linguistics. doi: 10.18653/v1/D18-1241. URL https://aclanthology.org/D18-1241." 
+ ], + "bbox": [ + 171, + 126, + 825, + 924 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 948, + 506, + 959 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Aakanksha Chowdhery, Sharan Narang, Jacob Devlin, Maarten Bosma, Gaurav Mishra, Adam Roberts, Paul Barham, Hyung Won Chung, Charles Sutton, Sebastian Gehrmann, Parker Schuh, Kensen Shi, Sasha Tsvyashchenko, Joshua Maynez, Abhishek Rao, Parker Barnes, Yi Tay, Noam Shazeer, Vinodkumar Prabhakaran, Emily Reif, Nan Du, Ben Hutchinson, Reiner Pope, James Bradbury, Jacob Austin, Michael Isard, Guy Gur-Ari, Pengcheng Yin, Toju Duke, Anselm Levskaya, Sanjay Ghemawat, Sunipa Dev, Henryk Michalewski, Xavier Garcia, Vedant Misra, Kevin Robinson, Liam Fedus, Denny Zhou, Daphne Ippolito, David Luan, Hyeontaek Lim, Barret Zoph, Alexander Spiridonov, Ryan Sepassi, David Dohan, Shivani Agrawal, Mark Omernick, Andrew M. Dai, Thanumalayan Sankaranarayana Pillai, Marie Pellat, Aitor Lewkowycz, Erica Moreira, Rewon Child, Oleksandr Polozov, Katherine Lee, Zongwei Zhou, Xuezhi Wang, Brennan Saeta, Mark Diaz, Orhan Firat, Michele Catasta, Jason Wei, Kathy Meier-Hellstern, Douglas Eck, Jeff Dean, Slav Petrov, and Noah Fiedel. Palm: Scaling language modeling with pathways, 2022. URL https://arxiv.org/abs/2204.02311.", + "Christopher Clark, Kenton Lee, Ming-Wei Chang, Tom Kwiatkowski, Michael Collins, and Kristina Toutanova. BoolQ: Exploring the surprising difficulty of natural yes/no questions. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pp. 2924–2936, Minneapolis, Minnesota, June 2019. Association for Computational Linguistics. doi: 10.18653/v1/N19-1300. URL https://aclanthology.org/N19-1300.", + "Peter Clark, Isaac Cowhey, Oren Etzioni, Tushar Khot, Ashish Sabharwal, Carissa Schoenick, and Oyvind Tafjord. Think you have solved question answering? try arc, the ai2 reasoning challenge. arXiv:1803.05457v1, 2018.", + "Together Computer. Redpajama: an open dataset for training large language models, 2023. URL https://github.com/togethercomputer/RedPajama-Data.", + "Mike Conover, Matt Hayes, Ankit Mathur, Jianwei Xie, Jun Wan, Sam Shah, Ali Ghodsi, Patrick Wendell, Matei Zaharia, and Reynold Xin. Free dolly: Introducing the world's first truly open instruction-tuned llm, 2023. URL https://www.databricks.com/blog/2023/04/12/dolly-first-open-commercially-viable-instruction-tuned-llm.", + "John Dang, Arash Ahmadian, Kelly Marchisio, Julia Kreutzer, Ahmet Üstün, and Sara Hooker. Rlhf can speak many languages: Unlocking multilingual preference optimization for llms, 2024. URL https://arxiv.org/abs/2407.02552.", + "Marie-Catherine de Marneffe, Mandy Simons, and Judith Tonhauser. 
The commitmentbank: Investigating projection in naturally occurring discourse, 2019.", + "Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. The llama 3 herd of models, 2024. URL https://arxiv.org/abs/2407.21783.", + "Yann Dubois, Xuechen Li, Rohan Taori, Tianyi Zhang, Ishaan Gulrajani, Jimmy Ba, Carlos Guestrin, Percy Liang, and Tatsunori B. Hashimoto. Alpacafarm: A simulation framework for methods that learn from human feedback, 2024. URL https://arxiv.org/abs/2305.14387.", + "Yao Fu, Hao Peng, and Tushar Khot. How does gpt obtain its ability? tracing emergent abilities of language models to their sources. Yao Fu's Notion, Dec 2022. URL https://yaofu.notion.site/b9a57ac0acf74f30a1ab9e3e36fa1dc1?pvs=25.", + "Jinlan Fu, See-Kiong Ng, Zhengbao Jiang, and Pengfei Liu. Gptscore: Evaluate as you desire, 2023.", + "Yifu Gao, Yongquan He, Zhigang Kan, Yi Han, Linbo Qiao, and Dongsheng Li. Learning joint structural and temporal contextualized knowledge embeddings for temporal knowledge graph completion. In Findings of the Association for Computational Linguistics: ACL 2023, pp. 417-430, Toronto, Canada, July 2023. Association for Computational Linguistics. URL https://aclanthology.org/2023.findings-acl.28.", + "Gemini-Team, Rohan Anil, Sebastian Borgeaud, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M. Dai, Anja Hauth, Katie Millican, et al. Gemini: A family of highly capable multimodal models, 2024.", + "Xavier Glorot, Antoine Bordes, and Yoshua Bengio. Deep sparse rectifier neural networks. In Proceedings of the fourteenth international conference on artificial intelligence and statistics, pp. 315-323, 2011.", + "Dirk Groeneveld, Iz Beltagy, Pete Walsh, Akshita Bhagia, Rodney Kinney, Oyvind Tafjord, Ananya Harsh Jha, Hamish Ivison, Ian Magnusson, Yizhong Wang, et al. Olmo: Accelerating the science of language models. arXiv preprint arXiv:2402.00838, 2024.", + "Suriya Gunasekar, Yi Zhang, Jyoti Aneja, Caio Cesar Teodoro Mendes, Allie Del Giorno, Sivakanth Gopi, Mojan Javaheripi, Piero Kauffmann, Gustavo de Rosa, Olli Saarikivi, et al. Textbooks are all you need. arXiv preprint arXiv:2306.11644, 2023.", + "Srinivasan Iyer, Xi Victoria Lin, Ramakanth Pasunuru, Todor Mihaylov, Daniel Simig, Ping Yu, Kurt Shuster, Tianlu Wang, Qing Liu, Punit Singh Koura, et al. Opt-iml: Scaling language model instruction meta learning through the lens of generalization. arXiv preprint arXiv:2212.12017, 2022.", + "Mandar Joshi, Eunsol Choi, Daniel Weld, and Luke Zettlemoyer. TriviaQA: A large scale distantly supervised challenge dataset for reading comprehension. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 1601-1611, Vancouver, Canada, July 2017. Association for Computational Linguistics. doi: 10.18653/v1/P17-1147. URL https://aclanthology.org/P17-1147.", + "Norman P. Jouppi, Cliff Young, Nishant Patil, David A. Patterson, Gaurav Agrawal, Raminder Bajwa, Sarah Bates, Suresh Bhatia, Nan Boden, Al Borchers, Rick Boyle, Pierre-luc Cantin, Clifford Chao, Chris Clark, Jeremy Coriell, Mike Daley, Matt Dau, Jeffrey Dean, Ben Gelb, Tara Vazir Ghaemmaghami, Rajendra Gottipati, William Gulland, Robert Hagmann, C. Richard Ho, Doug Hogberg, John Hu, Robert Hundt, Dan Hurt, Julian Ibarz, Aaron Jaffey, Alek Jaworski, Alexander Kaplan, Harshit Khaitan, Daniel Killebrew, Andy Koch, Naveen Kumar, Steve Lacy, James Laudon, James Law, Diemthu Le, Chris Leary, Zhuyuan Liu, Kyle Lucke, Alan Lundin, Gordon MacKean, Adriana Maggiore, Maire Mahony, Kieran Miller, Rahul Nagarajan, Ravi Narayanaswami, Ray Ni, Kathy Nix, Thomas Norrie, Mark Omernick, Narayana Penukonda, Andy Phelps, Jonathan Ross, Matt Ross, Amir Salek, Emad Samadiani, Chris Severn, Gregory Sizikov, Matthew Snelham, Jed Souter, Dan Steinberg, Andy Swing, Mercedes Tan, Gregory Thorson, Bo Tian, Horia Toma, Erick Tuttle, Vijay Vasudevan, Richard Walter, Walter Wang, Eric Wilcox, and Doe Hyun Yoon. In-Datacenter Performance Analysis of a Tensor Processing Unit. In Proceedings of the 44th Annual International Symposium on Computer Architecture ISCA, 2017.", + "Najoung Kim, Sebastian Schuster, and Shubham Toshniwal. Code pretraining improves entity tracking abilities of language models. 
arXiv preprint arXiv:2405.21068, 2024.", + "Denis Kocetkov, Raymond Li, Loubna Ben Allal, Jia Li, Chenghao Mou, Carlos Muñoz Ferrandis, Yacine Jernite, Margaret Mitchell, Sean Hughes, Thomas Wolf, Dzmitry Bahdanau, Leandro von Werra, and Harm de Vries. The stack: 3 tb of permissively licensed source code. Preprint, 2022." + ], + "bbox": [ + 171, + 103, + 823, + 924 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 490, + 948, + 508, + 959 + ], + "page_idx": 17 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Tom Kwiatkowski, Jennimaria Palomaki, Olivia Redfield, Michael Collins, Ankur Parikh, Chris Alberti, Danielle Epstein, Illia Polosukhin, Jacob Devlin, Kenton Lee, Kristina Toutanova, Llion Jones, Matthew Kelcey, Ming-Wei Chang, Andrew M. Dai, Jakob Uszkoreit, Quoc Le, and Slav Petrov. Natural questions: A benchmark for question answering research. Transactions of the Association for Computational Linguistics, 7:453-466, 2019. doi: 10.1162/tacl_a_00276. URL https://doi.org/10.1162/tacl_a_00276.", + "Kenton Lee, Ming-Wei Chang, and Kristina Toutanova. Latent retrieval for weakly supervised open domain question answering. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pp. 6086-6096, Florence, Italy, July 2019. Association for Computational Linguistics. doi: 10.18653/v1/P19-1612. URL https://www.aclweb.org/anthology/P19-1612.", + "Raymond Li, Loubna Ben Allal, Yangtian Zi, Niklas Muennighoff, Denis Kocetkov, Chenghao Mou, Marc Marone, Christopher Akiki, Jia Li, Jenny Chim, et al. Starcoder: may the source be with you! arXiv preprint arXiv:2305.06161, 2023a.", + "Yuanzhi Li, Sebastien Bubeck, Ronen Eldan, Allie Del Giorno, Suriya Gunasekar, and Yin Tat Lee. Textbooks are all you need ii: phi-1.5 technical report. arXiv preprint arXiv:2309.05463, 2023b.", + "Percy Liang, Rishi Bommasani, Tony Lee, Dimitris Tsipras, Dilara Soylu, Michihiro Yasunaga, Yian Zhang, Deepak Narayanan, Yuhuai Wu, Ananya Kumar, et al. Holistic evaluation of language models. arXiv preprint arXiv:2211.09110, 2022.", + "Yang Liu, Dan Iter, Yichong Xu, Shuohang Wang, Ruochen Xu, and Chenguang Zhu. G-eval: Nlg evaluation using gpt-4 with better human alignment, 2023.", + "Shayne Longpre, Gregory Yauney, Emily Reif, Katherine Lee, Adam Roberts, Barret Zoph, Denny Zhou, Jason Wei, Kevin Robinson, David Mimno, and Daphne Ippolito. A pretrainer's guide to training data: Measuring the effects of data age, domain coverage, quality, & toxicity. arXiv, abs/2305.13169, 2023.", + "Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization, 2019. URL https://arxiv.org/abs/1711.05101.", + "Anton Lozhkov, Loubna Ben Allal, Leandro von Werra, and Thomas Wolf. Fineweb-edu, May 2024. URL https://huggingface.co/datasets/HuggingFaceFW/fineweb-edu.", + "Yingwei Ma, Yue Liu, Yue Yu, Yuanliang Zhang, Yu Jiang, Changjian Wang, and Shanshan Li. At which training stage does code data help llms reasoning? arXiv preprint arXiv:2309.16298, 2023.", + "Aman Madaan, Dheeraj Rajagopal, Niket Tandon, Yiming Yang, and Antoine Bosselut. Conditional set generation using seq2seq models. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pp. 4874-4896, Abu Dhabi, United Arab Emirates, December 2022a. Association for Computational Linguistics. 
URL https://aclanthology.org/2022.emnlp-main.324.", + "Aman Madaan, Shuyan Zhou, Uri Alon, Yiming Yang, and Graham Neubig. Language models of code are few-shot commonsense learners. arXiv preprint arXiv:2210.07128, 2022b.", + "Max Marion, Ahmet Üstün, Luiza Pozzobon, Alex Wang, Marzieh Fadaee, and Sara Hooker. When less is more: Investigating data pruning for pretraining llms at scale, 2023. URL https://arxiv.org/abs/2309.04564.", + "Nasrin Mostafazadeh, Nathanael Chambers, Xiaodong He, Devi Parikh, Dhruv Batra, Lucy Vanderwende, Pushmeet Kohli, and James Allen. A corpus and cloze evaluation for deeper understanding of commonsense stories. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pp. 839-849, San Diego, California, June 2016. Association for Computational Linguistics. doi: 10.18653/v1/N16-1098. URL https://aclanthology.org/N16-1098.", + "Niklas Muennighoff, Alexander M Rush, Boaz Barak, Teven Le Scao, Aleksandra Piktus, Nouamane Tazi, Sampo Pyysalo, Thomas Wolf, and Colin Raffel. Scaling data-constrained language models. arXiv preprint arXiv:2305.16264, 2023a." + ], + "bbox": [ + 171, + 102, + 826, + 924 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Niklas Muennighoff, Alexander M. Rush, Boaz Barak, Teven Le Scao, Aleksandra Piktus, Nouamane Tazi, Sampo Pyysalo, Thomas Wolf, and Colin Raffel. Scaling data-constrained language models, 2023b. URL https://arxiv.org/abs/2305.16264.", + "Niklas Muennighoff, Thomas Wang, Lintang Sutawika, Adam Roberts, Stella Biderman, Teven Le Scao, M Saiful Bari, Sheng Shen, Zheng Xin Yong, Hailey Schoelkopf, Xiangru Tang, Dragomir Radev, Alham Fikri Aji, Khalid Almubarak, Samuel Albanie, Zaid Alyafeai, Albert Webson, Edward Raff, and Colin Raffel. Crosslingual generalization through multitask finetuning. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 15991-16111, Toronto, Canada, July 2023c. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-long.891. URL https://aclanthology.org/2023.acl-long.891.", + "Long Ouyang, Jeffrey Wu, Xu Jiang, Diogo Almeida, Carroll Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, et al. Training language models to follow instructions with human feedback. Advances in Neural Information Processing Systems, 35: 27730-27744, 2022.", + "Jupinder Parmar, Shrimai Prabhumoye, Joseph Jennings, Mostofa Patwary, Sandeep Subramanian, Dan Su, Chen Zhu, Deepak Narayanan, Aastha Jhunjhunwala, Ayush Dattagupta, Vibhu Jawa, Jiwei Liu, Ameya Mahabaleshwarkar, Osvald Nitski, Annika Brundyn, James Maki, Miguel Martinez, Jiaxuan You, John Kamalu, Patrick LeGresley, Denys Fridman, Jared Casper, Ashwath Aithal, Oleksii Kuchaiev, Mohammad Shoeybi, Jonathan Cohen, and Bryan Catanzaro. Nemotron-4 15b technical report, 2024. URL https://arxiv.org/abs/2402.16819.", + "Guilherme Penedo, Quentin Malartic, Daniel Hesslow, Ruxandra Cojocaru, Alessandro Cappelli, Hamza Alobeidli, Baptiste Pannier, Ebtesam Almazrouei, and Julien Launay. 
The refinedweb dataset for falcon llm: Outperforming curated corpora with web data, and web data only, 2023.", + "Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, Ilya Sutskever, et al. Language models are unsupervised multitask learners. OpenAI blog, 1(8):9, 2019.", + "Jack W. Rae, Sebastian Borgeaud, Trevor Cai, Katie Millican, Jordan Hoffmann, Francis Song, John Aslanides, Sarah Henderson, Roman Ring, Susannah Young, et al. Scaling language models: Methods, analysis & insights from training gopher. arXiv preprint arXiv:2112.11446, 2021.", + "Jack W. Rae, Sebastian Borgeaud, Trevor Cai, Katie Millican, Jordan Hoffmann, Francis Song, John Aslanides, Sarah Henderson, Roman Ring, Susannah Young, Eliza Rutherford, Tom Hennigan, Jacob Menick, Albin Cassirer, Richard Powell, George van den Driessche, Lisa Anne Hendricks, Maribeth Rauh, Po-Sen Huang, Amelia Glaese, Johannes Welbl, Sumanth Dathathri, Saffron Huang, Jonathan Uesato, John Mellor, Irina Higgins, Antonia Creswell, Nat McAleese, Amy Wu, Erich Olsen, Siddhant Jayakumar, Elena Buchatskaya, David Budden, Esme Sutherland, Karen Simonyan, Michela Paganini, Laurent Sifre, Lena Martens, Xiang Lorraine Li, Adhiguna Kuncoro, Aida Nematzadeh, Elena Gribovskaya, Domenic Donato, Angeliki Lazaridou, Arthur Mensch, Jean-Baptiste Lespiau, Maria Tsimpoukelli, Nikolai Grigorev, Doug Fritz, Thibault Sottiaux, Mantas Pajarskas, Toby Pohlen, Zhitao Gong, Daniel Toyama, Cyprien de Masson d'Autume, Yujia Li, Tayfun Terzi, Vladimir Mikulik, Igor Babuschkin, Aidan Clark, Diego de Las Casas, Aurelia Guy, Chris Jones, James Bradbury, Matthew Johnson, Blake Hechtman, Laura Weidinger, Iason Gabriel, William Isaac, Ed Lockhart, Simon Osindero, Laura Rimell, Chris Dyer, Oriol Vinyals, Kareem Ayoub, Jeff Stanway, Lorrayne Bennett, Demis Hassabis, Koray Kavukcuoglu, and Geoffrey Irving. Scaling language models: Methods, analysis & insights from training gopher, 2022. URL https://arxiv.org/abs/2112.11446.", + "Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J. Liu. Exploring the limits of transfer learning with a unified text-to-text transformer. arXiv e-prints, 2019.", + "Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J. Liu. Exploring the limits of transfer learning with a unified text-to-text transformer. arXiv e-prints, abs/1910.10683, 2020." + ], + "bbox": [ + 171, + 102, + 826, + 924 + ], + "page_idx": 19 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 946, + 509, + 959 + ], + "page_idx": 19 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Yasaman Razeghi, Hamish Ivison, Sameer Singh, and Yanai Elazar. Backtracking mathematical reasoning of language models to the pretraining data. In The Second Tiny Papers Track at ICLR 2024.", + "Keisuke Sakaguchi, Ronan Le Bras, Chandra Bhagavatula, and Yejin Choi. Winogrande: An adversarial winograd schema challenge at scale, 2019.", + "Maarten Sap, Hannah Rashkin, Derek Chen, Ronan LeBras, and Yejin Choi. Socialiqa: Commonsense reasoning about social interactions. arXiv, abs/1904.09728, 2019.", + "Minjoon Seo, Tom Kwiatkowski, Ankur Parikh, Ali Farhadi, and Hannaneh Hajishirzi. Phrase-indexed question answering: A new challenge for scalable document comprehension. 
In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pp. 559-564, Brussels, Belgium, October-November 2018. Association for Computational Linguistics. doi: 10.18653/v1/D18-1052. URL https://aclanthology.org/D18-1052.", + "Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Mingchuan Zhang, YK Li, Yu Wu, and Daya Guo. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024.", + "Noam Shazeer. Glu variants improve transformer, 2020. URL https://arxiv.org/abs/2002.05202.", + "Luísa Shimabucoro, Sebastian Ruder, Julia Kreutzer, Marzieh Fadaee, and Sara Hooker. Llm see, llm do: Guiding data generation to target non-differentiable objectives, 2024. URL https://arxiv.org/abs/2407.01490.", + "Shivalika Singh, Freddie Vargus, Daniel Dsouza, Borje F. Karlsson, Abinaya Mahendiran, Wei-Yin Ko, Herumb Shandilya, Jay Patel, Deividas Mataciunas, Laura OMahony, Mike Zhang, Ramith Hettiarachchi, Joseph Wilson, Marina Machado, Luisa Souza Moura, Dominik Krzeminski, Hakimeh Fadaei, Irem Ergun, Ifeoma Okoh, Aisha Alaagib, Oshan Mudannayake, Zaid Alyafeai, Vu Minh Chien, Sebastian Ruder, Surya Guthikonda, Emad A. Alghamdi, Sebastian Gehrmann, Niklas Muennighoff, Max Bartolo, Julia Kreutzer, Ahmet Üstün, Marzieh Fadaee, and Sara Hooker. Aya dataset: An open-access collection for multilingual instruction tuning. arXiv preprint arXiv:2402.06619, 2024.", + "Daria Soboleva, Faisal Al-Khateeb, Robert Myers, Jacob R Steeves, Joel Hestness, and Nolan Dey. SlimPajama: A 627B token cleaned and deduplicated version of RedPajama, 2023. URL https://huggingface.co/datasets/cerebras/SlimPajama-627B.", + "Gemini Team, Rohan Anil, Sebastian Borgeaud, Yonghui Wu, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, et al. Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805, 2023.", + "Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, Dan Bikel, Lukas Blecher, Cristian Canton Ferrer, Moya Chen, Guillem Cucurull, David Esiobu, Jude Fernandes, Jeremy Fu, Wenyin Fu, Brian Fuller, Cynthia Gao, Vedanuj Goswami, Naman Goyal, Anthony Hartshorn, Saghar Hosseini, Rui Hou, Hakan Inan, Marcin Kardas, Viktor Kerkez, Madian Khabsa, Isabel Kloumann, Artem Korenev, Punit Singh Koura, Marie-Anne Lachaux, Thibaut Lavril, Jenya Lee, Diana Liskovich, Yinghai Lu, Yuning Mao, Xavier Martinet, Todor Mihaylov, Pushkar Mishra, Igor Molybog, Yixin Nie, Andrew Poulton, Jeremy Reizenstein, Rashi Rungta, Kalyan Saladi, Alan Schelten, Ruan Silva, Eric Michael Smith, Ranjan Subramanian, Xiaqing Ellen Tan, Binh Tang, Ross Taylor, Adina Williams, Jian Xiang Kuan, Puxin Xu, Zheng Yan, Iliyan Zarov, Yuchen Zhang, Angela Fan, Melanie Kambadur, Sharan Narang, Aurelien Rodriguez, Robert Stojnic, Sergey Edunov, and Thomas Scialom. Llama 2: Open foundation and fine-tuned chat models. arXiv, abs/2307.09288, 2023.", + "Alex Wang, Yada Pruksachatkun, Nikita Nangia, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R. Bowman. Superglue: A stickier benchmark for general-purpose language understanding systems, 2020. URL https://arxiv.org/abs/1905.00537." 
+ ], + "bbox": [ + 171, + 102, + 826, + 924 + ], + "page_idx": 20 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 20 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Ben Wang and Aran Komatsuzaki. GPT-J-6B: A 6 Billion Parameter Autoregressive Language Model. https://github.com/kingofflolz/mesh-transformer-jax, May 2021.", + "Yizhong Wang, Yeganeh Kordi, Swaroop Mishra, Alisa Liu, Noah A. Smith, Daniel Khashabi, and Hannaneh Hajishirzi. Self-instruct: Aligning language models with self-generated instructions. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 13484-13508, Toronto, Canada, July 2023. Association for Computational Linguistics. URL https://aclanthology.org/2023.acl-long.754.", + "Johannes Welbl, Nelson F Liu, and Matt Gardner. Crowdsourcing multiple choice science questions. pp. 94-106, September 2017. doi: 10.18653/v1/W17-4413. URL https://aclanthology.org/W17-4413.", + "Wikipedia. Wikipedia downloads. URL https://dumps.wikipedia.org.", + "BigScience Workshop: Teven Le Scao, Angela Fan, Christopher Akiki, Ellie Pavlick, Suzana Ilic, Daniel Hesslow, Roman Castagne, Alexandra Sasha Luccioni, François Yvon, Matthias Galle, Jonathan Tow, Alexander M. Rush, Stella Biderman, Albert Webson, Pawan Sasanka Ammanamchi, Thomas Wang, Benoit Sagot, Niklas Muennighoff, et al. Bloom: A 176b-parameter open-access multilingual language model, 2023. URL https://arxiv.org/abs/2211.05100." + ], + "bbox": [ + 171, + 103, + 825, + 924 + ], + "page_idx": 21 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 21 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Rowan Zellers, Ari Holtzman, Yonatan Bisk, Ali Farhadi, and Yejin Choi. Hellaswag: Can a machine really finish your sentence? arXiv, abs/1905.07830, 2019.", + "Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen, Christopher Dewan, Mona Diab, Xian Li, Xi Victoria Lin, Todor Mihaylov, Myle Ott, Sam Shleifer, Kurt Shuster, Daniel Simig, Punit Singh Koura, Anjali Sridhar, Tianlu Wang, and Luke Zettlemoyer. Opt: Open pre-trained transformer language models, 2022.", + "Xinlu Zhang, Zhiyu Zoey Chen, Xi Ye, Xianjun Yang, Lichang Chen, William Yang Wang, and Linda Ruth Petzold. Unveiling the impact of coding data instruction fine-tuning on large language models reasoning. arXiv preprint arXiv:2405.20535, 2024.", + "Qihao Zhu, Daya Guo, Zhihong Shao, Dejian Yang, Peiyi Wang, Runxin Xu, Y Wu, Yukun Li, Huazuo Gao, Shirong Ma, et al. Deepseek-coder-v2: Breaking the barrier of closed-source models in code intelligence. arXiv preprint arXiv:2406.11931, 2024.", + "Ahmet Üstün, Viraat Aryabumi, Zheng-Xin Yong, Wei-Yin Ko, Daniel D'souza, Gbemileke Onilude, Neel Bhandari, Shivalika Singh, Hui-Lee Ooi, Amr Kayid, Freddie Vargus, Phil Blunsom, Shayne Longpre, Niklas Muennighoff, Marzieh Fadaee, Julia Kreutzer, and Sara Hooker. Aya model: An instruction finetuned open-access multilingual language model, 2024." + ], + "bbox": [ + 171, + 487, + 823, + 750 + ], + "page_idx": 22 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "ETHICS STATEMENT AND LIMITATIONS", + "text_level": 1, + "bbox": [ + 171, + 102, + 503, + 118 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "While we systematically study the impact of code data on downstream natural language tasks, we do not study its impact on safety and bias. 
Additionally, given the nature of pre-training and the number of ablations we have conducted, we were limited in the scale of models we could train due to prohibitive compute costs.", + "bbox": [ + 169, + 133, + 826, + 191 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "REPRODUCIBILITY", + "text_level": 1, + "bbox": [ + 171, + 209, + 333, + 224 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "We provide details about our data mixture (Section 2.1), data filtering (Appendix C.1, C.2, C.3), evaluation (Section 2.2, Appendix A) and training (Section 2.3) setups. We believe these details provide a clear picture of how to reproduce our data setup, model ablations and evaluation results.", + "bbox": [ + 169, + 241, + 823, + 284 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "A EVALUATION DETAILS", + "text_level": 1, + "bbox": [ + 171, + 303, + 398, + 319 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "We briefly describe the details of our evaluation benchmarks and the composite datasets used for each category below:", + "bbox": [ + 169, + 334, + 823, + 363 + ], + "page_idx": 23 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. World knowledge. These benchmarks aim to measure world knowledge, testing knowledge memorization, retrieval, and question answering capability given context. We include Natural Questions Open (Kwiatkowski et al., 2019) and TriviaQA (Joshi et al., 2017) as the datasets. We report the average exact match scores for both of these benchmarks.", + "2. Natural language reasoning. The Natural language (NL) reasoning suite consists of 11 benchmarks that involve natural language based reasoning such as Question Answering (Clark et al., 2019; Seo et al., 2018; Welbl et al., 2017; Sap et al., 2019; Choi et al., 2018), natural language inference (NLI) (Wang et al., 2020; de Marneffe et al., 2019), sentence completion (Mostafazadeh et al., 2016; Zellers et al., 2019), co-reference resolution (Sakaguchi et al., 2019) and general intelligence (Clark et al., 2018). We include a full list of the constituent benchmarks in Table 1. We report the average accuracy scores across all benchmarks.", + "3. Code. While our main focus is general performance, we also want to measure any changes to code generation performance. For code benchmarks, we focus on the function completion task. We evaluate on HumanEval-Python (Chen et al., 2021) and MBPP (Austin et al., 2021). We report the average pass@1 scores of these benchmarks; a reference sketch of this estimator is given below."
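For concreteness, the following is a minimal sketch (not taken from the paper's codebase) of the standard unbiased pass@k estimator of Chen et al. (2021), of which the pass@1 averages reported above are an instance; the per-problem sample counts in `results` are hypothetical:

```python
import numpy as np

def pass_at_k(n: int, c: int, k: int) -> float:
    """Unbiased pass@k (Chen et al., 2021): 1 - C(n-c, k) / C(n, k),
    computed stably as a running product. For k=1 this reduces to c/n."""
    if n - c < k:
        return 1.0
    return float(1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1)))

# Hypothetical per-problem (num_samples, num_correct) counts for a code benchmark.
results = {"problem_0": (10, 3), "problem_1": (10, 0)}
avg_pass_at_1 = sum(pass_at_k(n, c, 1) for n, c in results.values()) / len(results)
print(avg_pass_at_1)  # 0.15
```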
+ ], + "bbox": [ + 210, + 375, + 826, + 607 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "B SUMMARY RESULTS FOR PRE-TRAINING RECIPES", + "text_level": 1, + "bbox": [ + 171, + 630, + 625, + 646 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Summary results are shown in Table 2.", + "bbox": [ + 171, + 661, + 429, + 676 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "C CODE-DATASETS FILTERING", + "text_level": 1, + "bbox": [ + 171, + 696, + 442, + 712 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "C.1 QUALITY FILTERS", + "text_level": 1, + "bbox": [ + 171, + 727, + 344, + 742 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "In addition to the deduplication and quality filtering applied on the GitHub scrapes by StarCoder for The Stack dataset (Li et al., 2023a), we apply filters to remove documents that contain more than 1000 float numbers, that contain instances of the string 0x, that are lists of top-level domains, or that contain 'generated by' in the first 400 characters. A rough sketch of these heuristics follows Table 4.", + "bbox": [ + 169, + 753, + 826, + 810 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "C.2 PROGRAMMING LANGUAGES PRESENT IN WEB-BASED CODE DATASET", + "text_level": 1, + "bbox": [ + 171, + 825, + 705, + 840 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Programming languages included in our version of The Stack dataset are listed in Table 3.", + "bbox": [ + 171, + 852, + 769, + 867 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "C.3 MARKUP-STYLE PROGRAMMING LANGUAGES PRESENT IN WEB-BASED CODE DATASET", + "text_level": 1, + "bbox": [ + 171, + 883, + 821, + 898 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Markup-style languages included in our version of The Stack dataset are listed in Table 4.", + "bbox": [ + 171, + 909, + 718, + 924 + ], + "page_idx": 23 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 23 + }, + { + "type": "table", + "img_path": "images/370a2828b37def0568aff41c3df22bc6420744c2a2dc57342146cd3cfb410a08.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Task | Dataset | Metric
WORLD KNOWLEDGE TASKS
Question Answering | TriviaQA (Joshi et al., 2017) | 0-shot Acc.
 | NaturalQuestionsOpen (Lee et al., 2019) | 0-shot Acc.
NATURAL LANGUAGE REASONING
Question Answering | BoolQ (Clark et al., 2019) | 0-shot Acc.
 | PiQA (Seo et al., 2018) | 0-shot Acc.
 | SciQ (Welbl et al., 2017) | 0-shot Acc.
 | SocialQA (Sap et al., 2019) | 0-shot Acc.
 | QUAC (Choi et al., 2018) | 0-shot Acc.
Natural Language Inference | SuperGLUE-CB (Wang et al., 2020; de Marneffe et al., 2019) | 0-shot Acc.
 | SuperGLUE-COPA (Wang et al., 2020) | 0-shot Acc.
Sentence Completion | StoryCloze (Mostafazadeh et al., 2016) | 0-shot Acc.
 | HellaSwag (Zellers et al., 2019) | 0-shot Acc.
Coreference Resolution | Winogrande (Sakaguchi et al., 2019) | 0-shot Acc.
General Intelligence | ARC-Easy (Clark et al., 2018) | 0-shot Acc.
TEXT GENERATION
Open-Ended Generation | Dolly-200 (English) (Singh et al., 2024) | 0-shot win-rate
CODE GENERATION
Function completion | HumanEval (Chen et al., 2021) | 0-shot pass@1
 | MBPP (Austin et al., 2021) | 0-shot pass@1
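The paper does not describe its evaluation harness; a common way to compute 0-shot Acc. entries like those above is to score each answer option by model log-likelihood and take the argmax. Below is a minimal sketch with a toy stand-in scorer (in practice `logprob` would be backed by the actual LM's token log-probabilities):

```python
from typing import Callable, Sequence

def zero_shot_accuracy(
    examples: Sequence[dict],
    logprob: Callable[[str, str], float],
) -> float:
    """Score each answer option given the prompt and count a hit when the
    highest-likelihood option is the gold one (0-shot accuracy)."""
    hits = 0
    for ex in examples:
        scores = [logprob(ex["prompt"], opt) for opt in ex["options"]]
        hits += scores.index(max(scores)) == ex["gold"]
    return hits / len(examples)

# Toy stand-in scorer; a real harness would query the model instead.
toy = [{"prompt": "2+2=", "options": ["4", "5"], "gold": 0}]
print(zero_shot_accuracy(toy, lambda p, o: 0.0 if o == "4" else -1.0))  # 1.0
```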
", + "bbox": [ + 207, + 99, + 794, + 349 + ], + "page_idx": 24 + }, + { + "type": "table", + "img_path": "images/e436392147d7301cae44171b952a680b8907968533a98587efe33a7df44cfcdc.jpg", + "table_caption": [ + "Table 1: Datasets considered for evaluation: We conduct extensive evaluations across benchmarks detailed above. These provide valuable proxies for performance in natural language reasoning, world knowledge, open ended text generation, and code generation tasks." + ], + "table_footnote": [], + "table_body": "
Model Variant | Recipe | Text Tokens | Code Tokens | Reason. | Know. | NL Avg. | Code | Total Avg.
TEXT-ONLY | Pre-training | 400B | - | 49.0 | 9.5 | 29.2 | 0.4 | 19.6
 | Cooldown | +32B | +8B | 54.1 | 11.1 | 32.6 | 4.4 | 23.2
BALANCED-ONLY | Pre-training | 200B | 200B | 51.8 | 8.1 | 30.0 | 9.0 | 23.0
 | Cooldown | +32B | +8B | 53.2 | 11.1 | 32.1 | 8.4 | 24.2
BALANCED→TEXT | Pre-training Init. | 100B | 100B | 52.0 | 7.4 | 29.6 | 7.8 | 22.4
 | Continue Pre-train. | +180B | +20B | 53.0 | 9.9 | 31.5 | 4.8 | 22.6
 | Cooldown | +32B | +8B | 54.9 | 10.9 | 32.9 | 5.8 | 23.9
CODE→TEXT | Pre-training Init. | - | 200B | 44.7 | 1.5 | 23.1 | 15.5 | 20.6
 | Continue Pre-train. | +180B | +20B | 53.3 | 9.5 | 31.4 | 4.1 | 22.3
 | Cooldown | +32B | +8B | 52.1 | 10.3 | 31.2 | 7.5 | 23.3
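The NL Avg. and Total Avg. columns above appear to be simple means over the task-category scores; a small sketch recomputing a few rows (values copied from the table, so minor rounding differences are possible):

```python
# (NL reasoning, NL knowledge, code) scores copied from Table 2.
rows = {
    "TEXT-ONLY / Pre-training": (49.0, 9.5, 0.4),
    "BALANCED-ONLY / Pre-training": (51.8, 8.1, 9.0),
    "BALANCED→TEXT / Cooldown": (54.9, 10.9, 5.8),
}
for name, (reason, know, code) in rows.items():
    nl_avg = (reason + know) / 2            # the "NL Avg." column
    total_avg = (reason + know + code) / 3  # the "Total Avg." column
    print(f"{name}: NL avg {nl_avg:.2f}, total avg {total_avg:.2f}")
```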
", + "bbox": [ + 207, + 415, + 792, + 583 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Table 2: Model variants with the corresponding pre-training recipes: Pre-training recipes include initial pre-training, continued pre-training, and cooldown phases. Balanced $\\rightarrow$ Text achieves the best NL performance while Balanced-only performs significantly better in code generation.", + "bbox": [ + 169, + 592, + 823, + 636 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "D LLM JUDGE PROMPT AND PREAMBLE FOR WIN-RATES", + "text_level": 1, + "bbox": [ + 171, + 660, + 666, + 676 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Preamble", + "text_level": 1, + "bbox": [ + 171, + 691, + 243, + 705 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "You are a helpful following assistant whose goal is to select the preferred", + "bbox": [ + 169, + 714, + 812, + 742 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "(least wrong) output for a given instruction.", + "bbox": [ + 174, + 742, + 612, + 757 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Prompt", + "text_level": 1, + "bbox": [ + 171, + 763, + 232, + 777 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Which of the following answers is the best one for the given instruction.", + "bbox": [ + 169, + 784, + 761, + 811 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "A good answer should follow these rules:", + "bbox": [ + 171, + 813, + 563, + 825 + ], + "page_idx": 24 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1) It should have correct reasoning,", + "2) It should answer the request in the instruction,", + "3) It should be factually correct and semantically comprehensible,", + "4) It should be grammatically correct and fluent." + ], + "bbox": [ + 171, + 827, + 816, + 882 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Instruction: instruction", + "bbox": [ + 171, + 896, + 419, + 910 + ], + "page_idx": 24 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 24 + }, + { + "type": "table", + "img_path": "images/f4a3e903a014576d8d0554a3c729ae6487310106d13cba7cbf9076c780ed64d3.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Language Name | Proportion of total code documents (%)
java | 15.54
javascript | 15.29
php | 12.46
python | 9.60
c-sharp | 8.30
typescript | 7.92
c | 6.63
cpp | 4.91
go | 3.49
ruby | 2.69
shell | 1.82
kotlin | 1.76
Swift | 1.52
Vue | 1.48
rust | 1.00
scala | 0.94
JSX | 0.83
sql | 0.74
dart | 0.72
makefile | 0.53
lua | 0.47
haskell | 0.45
smalltalk | 0.43
tex | 0.37
clojure | 0.10
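The proportions above are percentages of document counts in the filtered corpus. A tiny sketch of how such a breakdown is computed, using hypothetical per-document language labels:

```python
from collections import Counter

# Hypothetical per-document language labels for a filtered code corpus.
doc_langs = ["java", "python", "java", "javascript", "php", "java", "python"]

counts = Counter(doc_langs)
total = sum(counts.values())
for lang, n in counts.most_common():
    # Proportion of total code documents, as a percentage (cf. Table 3).
    print(f"{lang}\t{100 * n / total:.2f}")
```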
", + "bbox": [ + 323, + 101, + 674, + 439 + ], + "page_idx": 25 + }, + { + "type": "table", + "img_path": "images/64918e1134cae412b8c538546ecc7dffba51409b6391f52ca845a844ccf1fdce.jpg", + "table_caption": [ + "Table 3: Programming languages included in our version of The Stack dataset" + ], + "table_footnote": [], + "table_body": "
Language Name | Proportion of total code documents (%)
markdown | 54.23
yaml | 10.77
json | 9.97
html | 8.57
css | 6.86
SCSS | 5.84
restructuredtext | 2.26
TOML | 1.25
rmarkdown | 0.02
Sass | 0.22
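As referenced in Appendix C.1, the quality filters are simple document-level heuristics. The sketch below is a rough re-implementation, not the paper's actual code; thresholds and detection logic left unspecified in the text (e.g., for "0x" instances and top-level-domain lists) are assumptions:

```python
import re

FLOAT_RE = re.compile(r"[-+]?\d*\.\d+")
HEX_RE = re.compile(r"0x[0-9a-fA-F]+")
# Rough TLD set for the "lists of top-level domains" filter (an assumption).
TLDS = (".com", ".org", ".net", ".io", ".edu")

def keep_document(text: str) -> bool:
    """Return True if a code document passes Appendix C.1-style filters."""
    if len(FLOAT_RE.findall(text)) > 1000:  # more than 1000 float literals
        return False
    if len(HEX_RE.findall(text)) > 1000:    # many "0x" strings (threshold assumed)
        return False
    lines = [l.strip() for l in text.splitlines() if l.strip()]
    if lines and all(l.endswith(TLDS) for l in lines):
        return False                        # document is a list of top-level domains
    if "generated by" in text[:400].lower():
        return False                        # likely an auto-generated file
    return True

print(keep_document("def add(a, b):\n    return a + b\n"))  # True
```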
", + "bbox": [ + 303, + 494, + 694, + 645 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Table 4: Markup-style languages included in our version of The Stack dataset", + "bbox": [ + 241, + 655, + 756, + 672 + ], + "page_idx": 25 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "Answer (A): completion_a \nAnswer (B): completion_b \nFIRST provide a concise comparison of the two answers which explains \nwhich answer you prefer and why. \nSECOND, on a new line, state exactly one of 'Preferred: Answer (A)' or 'Preferred: Answer (B)' to indicate your choice of preferred response. \nYour response should use the format: \nComparison: \nPreferred: <'Answer (A)' or 'Answer (B)'>", + "guess_lang": "txt", + "bbox": [ + 169, + 715, + 803, + 926 + ], + "page_idx": 25 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 25 + }, + { + "type": "page_number", + "text": "26", + "bbox": [ + 488, + 946, + 509, + 960 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "E GENERATIVE WIN-RATES FOR IMPACT OF INITIALIZATION", + "text_level": 1, + "bbox": [ + 171, + 102, + 689, + 118 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/010a96056f4b958d13db52d5e1b4d70ef0db1a2846ea9f5553ec924432c3772b.jpg", + "image_caption": [ + "Figure 8: Impact of initialization on generative quality as judged by LLM-as-a-judge." + ], + "image_footnote": [], + "bbox": [ + 240, + 138, + 519, + 315 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/1f4ab37c52750ad830097a2cfd0cc3e97ed46650431d831913e3ea67803dac32.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 522, + 140, + 754, + 314 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "F EVALUATION OF 470M COOLDOWN MODELS ON GSM8K", + "text_level": 1, + "bbox": [ + 171, + 375, + 683, + 390 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/937a3c8cec3999d07174f00929dff46a62dc5ae9893d57e4b42b002b5eea7a9d.jpg", + "image_caption": [ + "Mathematical Evaluation" + ], + "image_footnote": [], + "bbox": [ + 362, + 431, + 635, + 617 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/2198d10d441a490dce596eb6cef60a5843dc892d8c541979cc1eb9043d2ab53d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 313, + 625, + 354, + 640 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "text $\\rightarrow$ no-code cooldown", + "bbox": [ + 362, + 626, + 578, + 640 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/ed8bfb10c083e92fdfdb6076cad5798956f4b34fe94d82ba856ec5a1e41f9886.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 315, + 643, + 354, + 659 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "text $\\rightarrow$ cooldown w/ code", + "bbox": [ + 362, + 645, + 576, + 659 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/60c004598d25c1f7ce4d49f2b2dce9ad16115bea7b6b2420dd89f392de91625b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 315, + 664, + 354, + 678 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "balanced $\\rightarrow$ text $\\rightarrow$ no-code cooldown", + "bbox": [ + 362, + 664, + 679, + 678 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/bcdd78c6a09e68b0aa568fb31013bf1df36f16e3ac2d7d2ab259b146a97c8ece.jpg", + "image_caption": [ + "Figure 9: Evaluation of 470M cooldown models on GSM8K Including 
code in any stage of the pre-training improves performance compared to the model where no code has been seen in any of the training stages: pre-training, continual pre-training and cooldown. The most performant model in this comparison has seen code in all stages including cooldown, where it leads to a significant improvement (from 2.9 to 4.12, +42% relative gain)." + ], + "image_footnote": [], + "bbox": [ + 315, + 683, + 354, + 698 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "balanced $\\rightarrow$ text $\\rightarrow$ cooldown w/ code", + "bbox": [ + 362, + 683, + 679, + 698 + ], + "page_idx": 26 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 26 + }, + { + "type": "page_number", + "text": "27", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 26 + } +] \ No newline at end of file diff --git a/2025/To Code or Not To Code_ Exploring Impact of Code in Pre-training/e6b439cb-3b05-45ee-8c52-561b8f255560_model.json b/2025/To Code or Not To Code_ Exploring Impact of Code in Pre-training/e6b439cb-3b05-45ee-8c52-561b8f255560_model.json new file mode 100644 index 0000000000000000000000000000000000000000..2304e9471b2efdf6e2be1c84b5023088be77bb63 --- /dev/null +++ b/2025/To Code or Not To Code_ Exploring Impact of Code in Pre-training/e6b439cb-3b05-45ee-8c52-561b8f255560_model.json @@ -0,0 +1,3620 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.099, + 0.757, + 0.149 + ], + "angle": 0, + "content": "TO CODE, OR NOT TO CODE? EXPLORING IMPACT OF CODE IN PRE-TRAINING" + }, + { + "type": "text", + "bbox": [ + 0.181, + 0.17, + 0.703, + 0.217 + ], + "angle": 0, + "content": "Viraat Aryabumi, Yixuan Su, Raymond Ma, Adrien Morisot, Ivan Zhang, Acyr Locatelli, Marzieh Fadaee, Ahmet Üstün, Sara Hooker {viraat, ahmetustun, sarahooker}@cohere.com" + }, + { + "type": "title", + "bbox": [ + 0.451, + 0.251, + 0.548, + 0.266 + ], + "angle": 0, + "content": "ABSTRACT" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.286, + 0.77, + 0.538 + ], + "angle": 0, + "content": "Including code in the pre-training data mixture, even for models not specifically designed for code, has become a common practice in LLM pre-training. While there has been anecdotal consensus among practitioners that code data plays a vital role in general LLMs' performance, there is only limited work analyzing the precise impact of code on non-code tasks. In this work, we systematically investigate the impact of code data on general performance. We ask \"what is the impact of code data used in pre-training on a large variety of downstream tasks beyond code generation?\". We conduct extensive ablations and evaluate across a broad range of natural language reasoning tasks, world knowledge tasks, code benchmarks, and LLM-as-a-judge win-rates for models with sizes ranging from 470M to 2.8B parameters. Across settings, we find consistent results that code is a critical building block for generalization far beyond coding tasks and improvements to code quality have an outsized impact across all tasks. 
In particular, compared to text-only pre-training, the addition of code results in up to relative increase of \\(8.2\\%\\) in natural language (NL) reasoning, \\(4.2\\%\\) in world knowledge, \\(6.6\\%\\) improvement in generative win-rates, and a \\(12x\\) boost in code performance respectively. Our work suggests investments in code quality and preserving code during pre-training have positive impacts." + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.57, + 0.341, + 0.587 + ], + "angle": 0, + "content": "1 INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.604, + 0.827, + 0.677 + ], + "angle": 0, + "content": "The role of data has taken on critical significance in recent breakthroughs. State-of-the-art models highlight the importance of the pre-training data mixture, diversity of data sources (Brown et al., 2020; Longpre et al., 2023; Singh et al., 2024) combined with compute availability as key drivers on performance (Dubey et al., 2024; Üstün et al., 2024; Team et al., 2023; Aryabumi et al., 2024). A critical question is what properties of data impart the best general performance?" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.681, + 0.828, + 0.768 + ], + "angle": 0, + "content": "Perhaps surprisingly, code is often included in pre-training even if a model is not explicitly intended to generate high-quality code. Code datasets differ significantly in terms of structure and textural characteristics from high-quality web datasets (Wikimedia; Raffel et al., 2019). Despite this, several previous generations of LLMs like PaLM (Chowdhery et al., 2022), Gopher (Rae et al., 2022) and Bloom (Workshop et al., 2023) that were not explicitly intended to support code generation, included code data together with high-quality natural language data in their pre-training mixture." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.772, + 0.829, + 0.927 + ], + "angle": 0, + "content": "In current state-of-the-art models, it is an accepted norm to not only include code data but further increase the proportion – for instance, Llama 3 (Dubey et al., 2024) has four times more code data in proportion (17%), of its pre-training mixture than Llama 2 (4.5%) (Touvron et al., 2023). While there has been consensus anecdotally among practitioners that code data plays a vital role in LLMs' performance, there has been only limited work analyzing the precise impact of code on non-code tasks. Prior work shows particular side benefits of the inclusion of code data, such as impact on scaling in limited data regime (Muennighoff et al., 2023a), entity tracking capabilities (Kim et al., 2024), and mathematical reasoning (Razeghi et al.). However, there has been no exhaustive study to date that systematically investigates the impact of code data on general performance. In this work, we ask \"what is the impact of code data used in pre-training on a large variety of downstream tasks beyond code generation?\"" + }, + { + "type": "page_number", + "bbox": [ + 0.495, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.244 + ], + "angle": 0, + "content": "We embark on an exhaustive set of large-scale controlled pre-training experiments. 
This includes a consideration of where in the training process adding code is beneficial, code proportions, the role of scaling, and the quality and properties of code added. While a costly endeavor to perform these ablations in a rigorous way, we find consistent and valuable results that code provides critical improvements to non-code performance. In particular, compared to text-only pre-training, for our best variant, the addition of code results in relative increase of \\(8.2\\%\\) in natural language (NL) reasoning, \\(4.2\\%\\) in world knowledge, \\(6.6\\%\\) improvement in generative win-rates, and a 12x boost in code performance respectively. Further performing cooldown with code, improves NL reasoning by \\(3.7\\%\\), World knowledge by \\(6.8\\%\\), and code by \\(20\\%\\), relative to cooldown without code and leads to a \\(4.1\\%\\) additional win-rate increase." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.25, + 0.828, + 0.363 + ], + "angle": 0, + "content": "Here, several factors matter including getting the proportion of code correct, improving the quality of code by including synthetic code and code adjacent data such as commits, and leveraging code across multiple stages of training including during cooldown. Our results suggest code is a critical building block for generalization far beyond coding tasks and improvements to code quality have an outsized impact on performance. We conduct an extensive evaluation on a broad range of benchmarks, which cover world knowledge tasks, natural language reasoning, and code generation, as well as LLM-as-a-judge win-rates. Across experiments on models ranging from 470 million to 2.8 billion parameter models, we find the following detailed results:" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.372, + 0.825, + 0.442 + ], + "angle": 0, + "content": "1. Code provides critical improvements to non-code performance. Initialization with code pretrained models results in improved performance for natural language tasks. In particular, compared to text-only pre-training, for our best variant, the addition of code results in a relative increase of \\(8.2\\%\\) in NL reasoning, \\(4.2\\%\\) in world knowledge, \\(6.6\\%\\) improvement in generative win-rates, and a 12x boost in code performance respectively." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.445, + 0.826, + 0.557 + ], + "angle": 0, + "content": "2. Code quality and properties matter. Using markup-style programming languages, code-adjacent datasets such as GitHub commits and synthetically generated code improves the performance in pre-training. In particular, training on a higher quality synthetically generated code dataset results in a \\(9\\%\\) and \\(44\\%\\) increase in natural language reasoning and code performance, respectively, compared to web-based code data in pre-training. Additionally, continual pre-training from a code model that includes synthetic data results in \\(1.9\\%\\) and \\(41\\%\\) relative increases in natural language reasoning and code performance respectively, compared to initialization from a code model that does not include code data." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.56, + 0.825, + 0.63 + ], + "angle": 0, + "content": "3. Code in cooldown enables further improvement across all tasks. Including code data in pretraining cooldown, where high-quality datasets are up-weighted, leads to an increase of \\(3.6\\%\\) in NL reasoning, \\(10.1\\%\\) in world knowledge, and \\(20\\%\\) in code performance relative to no cooldown. 
More significantly, cooldown with code beats the baseline (no cooldown) by \\(52.3\\%\\) win-rates, where win-rates are \\(4.1\\%\\) higher compared to cooldown without code." + }, + { + "type": "list", + "bbox": [ + 0.171, + 0.372, + 0.826, + 0.63 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.65, + 0.345, + 0.665 + ], + "angle": 0, + "content": "2 METHODOLOGY" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.679, + 0.825, + 0.723 + ], + "angle": 0, + "content": "We describe the details of our Pre-training Data (§ 2.1), Evaluation (§ 2.2), Training and Model details (§ 2.3). Figure 1 shows the high-level experimental framework. Precise details for each experiment and their results are presented in Section 3." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.738, + 0.363, + 0.752 + ], + "angle": 0, + "content": "2.1 PRE-TRAINING DATA" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.764, + 0.827, + 0.877 + ], + "angle": 0, + "content": "In this section, we describe the details of our pre-training and cooldown datasets. We aim to evaluate the role of code in pre-training, following current state-of-art practices. Hence, we consider pretraining runs that consist of two phases: 1) continued pretraining and 2) cooldown. Continued pre-training refers to training a model that is initialized from a pre-trained model and trained for a fixed token budget. cooldown (Team et al., 2023; Parmar et al., 2024) involves up-weighting high-quality datasets and annealing the learning rate for a relatively small number of tokens during the final stages of training. This up-weighting of high-quality datasets for a smaller amount of steps at the end of training can significantly boost model quality." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.882, + 0.825, + 0.926 + ], + "angle": 0, + "content": "Text dataset. We use the SlimPajama pre-training corpus (Soboleva et al., 2023) as our source of natural language text data. SlimPajama is a de-duplicated, quality-filtered, multi-corpora, open-source dataset based on RedPajama-1.2T (Computer, 2023). SlimPajama consists of documents" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "image", + "bbox": [ + 0.249, + 0.109, + 0.75, + 0.325 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.339, + 0.828, + 0.41 + ], + "angle": 0, + "content": "Figure 1: Overview of our experimental framework: We exhaustively evaluate the impact of code by varying: 1) the proportion of code in pre-training, 2) code quality and properties, 3) model initialization, 4) model scale, and 5) stage of training at which code is introduced. We evaluate the resulting model on a wide-ranging set of tasks, including natural language reasoning, world knowledge, code, and open-ended generations." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.436, + 0.825, + 0.493 + ], + "angle": 0, + "content": "from CommonCrawl, C4, GitHub, Books, ArXiv, Wikipedia, and StackExchange. We filter out all documents from GitHub and StackExchange to remove code and code-adjacent data sources and ensure this is a text-only source. SlimPajama has a total of 627B tokens. After removing all code sources, this results in our text pre-training corpus with a total of 503B tokens." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.499, + 0.825, + 0.528 + ], + "angle": 0, + "content": "Code datasets. To explore the impact of different properties of code data, we use multiple sources of code in our experiments:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.541, + 0.825, + 0.611 + ], + "angle": 0, + "content": "- WEB-BASED CODE DATA: For our main source of code data, we start with the Stack dataset (Kocetkov et al., 2022) that was used to train StarCoder (Li et al., 2023a). The Stack consists of permissively licensed code data scraped from GitHub. We apply quality filters1 and restrict to the top 25 programming languages based on document count2. After all filtering steps, the size of the code-only and markup subset is 139B tokens." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.615, + 0.825, + 0.645 + ], + "angle": 0, + "content": "- MARKDOWN DATA We also separately process markup-style languages such as Markdown, CSS, and HTML. After all filtering steps, the size of this markup subset is 180B tokens." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.648, + 0.825, + 0.704 + ], + "angle": 0, + "content": "- SYNTHETIC CODE DATA: To ablate the quality of the code dataset, we use a proprietary synthetically generated code dataset that consists of Python programming problems that have been formally verified. We treat this as a high-quality source of code data (See the details in § 3.4). The final synthetic dataset consists of 3.2B code tokens." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.708, + 0.825, + 0.792 + ], + "angle": 0, + "content": "- CODE ADJACENT DATA: Finally, to explore different properties of code data, we include a version of the code data which includes auxiliary data such as GitHub commits, jupyter notebooks, StackExchange threads. For GitHub commits, and jupyter notebooks we use the datasets provided as part of the Stack (Kocetkov et al., 2022). We use the version of StackExchange that is part of SlimPajama (Soboleva et al., 2023). In total we have 21.4B tokens of code-adjacent data." + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.541, + 0.825, + 0.792 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.805, + 0.825, + 0.862 + ], + "angle": 0, + "content": "Pre-training cooldown datasets. Cooldown involves up-weighting higher quality datasets for the final steps of pre-training and has been found to improve performance on downstream tasks (Parmar et al., 2024; Team et al., 2023), in particular to impart instruction-following capabilities. We choose a cooldown mixture comprising high-quality text, math, code, and instruct-style text datasets." 
+ }, + { + "type": "page_footnote", + "bbox": [ + 0.195, + 0.897, + 0.495, + 0.91 + ], + "angle": 0, + "content": "See Appendix C.1 for details about quality filters" + }, + { + "type": "page_footnote", + "bbox": [ + 0.195, + 0.91, + 0.779, + 0.925 + ], + "angle": 0, + "content": "2Refer to Appendix C.2, C.3 for the full list of programming and markup-style languages included" + }, + { + "type": "list", + "bbox": [ + 0.195, + 0.897, + 0.779, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.105, + 0.308, + 0.119 + ], + "angle": 0, + "content": "2.2 EVALUATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.13, + 0.825, + 0.215 + ], + "angle": 0, + "content": "Our goal is to systematically understand the impact of code on general performance, which requires a broad evaluation suite that extends to a large variety of downstream tasks beyond code generation. To achieve this, we evaluate models on benchmarks that are reasonable proxies for model ability on 1) world knowledge, 2) natural language reasoning, and 3) code performance. In addition, we report win-rates as evaluated by an LLM-as-a-judge. Table 1 (Appendix A) shows the full evaluation suite and their respective grouping, along with the metric used." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.221, + 0.827, + 0.348 + ], + "angle": 0, + "content": "For World knowledge, we use benchmarks to measure knowledge memorization, retrieval, and question answering capability given context. We include Natural Questions Open (Kwiatkowski et al., 2019), and TriviaQA (Joshi et al., 2017) as the datasets. Natural language reasoning suite consists of 11 benchmarks that involve natural language based reasoning such as Question Answering, natural language inference (NLI), sentence completion, co-reference resolution, and general intelligence. We include the full list of the constituent benchmarks with references in Table 1. Finally, while our main focus is general performance, we also want to measure any changes to code generation performance. For Code benchmarks, we focus on the function completion task where we use HumanEval-Python (Chen et al., 2022) and MBPP (Austin et al., 2021)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.353, + 0.825, + 0.397 + ], + "angle": 0, + "content": "We evaluate performance at two scales: 470M to 2.8B parameter models. At 470M scale, model capabilities are limited, thus to ensure fair comparisons, we only compare benchmarks for which all models achieve scores above random similar to Muennighoff et al. (2023a); Lozhkov et al. (2024)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.402, + 0.826, + 0.542 + ], + "angle": 0, + "content": "LLM-as-a-judge win-rates. In addition to task-specific discriminative performance, to allow for a more holistic view across all performance measures, we also evaluate generative performance using LLM-as-a-judge win-rates. This is particularly valuable given recent work that has shown that as performance on open-ended generations improves, there is deterioration in traditional academic tasks (Ustun et al., 2024; Ouyang et al., 2022; Iyer et al., 2022; Muennighoff et al., 2023c). 
The use of LLMs-as-a-Judge benchmarks (Fu et al., 2023; Liu et al., 2023; Chiang & Lee, 2023; Shimabucoro et al., 2024) has gained traction as an alternative to performing human evaluation, which tends to be laborious and expensive (Wang et al., 2023; Boubdir et al., 2023). LLMs as evaluators compare two completions based upon detailed prompts and are reasonable proxies aligned with human preference (Ustun et al., 2024; Dubois et al., 2024)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.548, + 0.825, + 0.62 + ], + "angle": 0, + "content": "We use the Dolly-200 English dataset (Ustun et al., 2024; Singh et al., 2024), which consists of 200 hand-picked examples from the Dolly-15K dataset (Conover et al., 2023). These prompts are open-ended and capture general-purpose non-code use cases, making them a valuable proxy for how code impacts more fluid and often open-ended tasks. For our win-rate evaluations, we use Command-R+\\(^{3}\\) as the LLM judge. Details about the prompt are provided in Appendix D." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.635, + 0.445, + 0.648 + ], + "angle": 0, + "content": "2.3 TRAINING AND MODEL DETAILS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.661, + 0.825, + 0.745 + ], + "angle": 0, + "content": "We use 470M and 2.8B parameter decoder-only auto-regressive Transformer models (Radford et al., 2019) that are trained with a standard language modeling objective. We use parallel attention layers (Chowdhery et al., 2022; Wang & Komatsuzaki, 2021), SwiGLU activation (Shazeer, 2020), no biases in dense layers, and a byte-pair-encoding tokenizer with a vocabulary size of 256K. All models are pre-trained using AdamW (Loshchilov & Hutter, 2019) with a max sequence length of 8192, a batch size of 512, and a cosine LR schedule with a warmup of 1325 steps." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.751, + 0.825, + 0.822 + ], + "angle": 0, + "content": "Infrastructure. We use TPU v5e chips (Jouppi et al., 2017) for training and evaluation. All models are trained using the Jax (Bradbury et al., 2018) framework. We pre-train 64 models in total. This is an enormous endeavour given the scale and computational resources required. Each pre-training run for 200B tokens takes 4736 TPU-chip-hours for 470M and 13824 TPU-chip-hours for 2.8B parameter models. Each cooldown run for 40B tokens takes 1024 TPU-chip-hours for 470M models." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.842, + 0.434, + 0.857 + ], + "angle": 0, + "content": "3 RESULTS AND DISCUSSION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.873, + 0.826, + 0.903 + ], + "angle": 0, + "content": "In this section, we report descriptions and results for each experimental variant. 
We systematically investigate, (1) initializing an LLM with code pre-trained models (§ 3.1), and (2) the impact of" + }, + { + "type": "page_footnote", + "bbox": [ + 0.195, + 0.91, + 0.551, + 0.925 + ], + "angle": 0, + "content": "3https://huggingface.co/CohereForAI/c4ai-command-r-plus" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "image", + "bbox": [ + 0.223, + 0.104, + 0.774, + 0.264 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.276, + 0.825, + 0.348 + ], + "angle": 0, + "content": "Figure 2: Impact of initialization using code pre-trained models: Initializing model training with code pre-trained models improves reasoning and code generation compared to text-only models, where the improvement is the most when continued pre-training with high percentage text (Balanced \\(\\rightarrow\\) Text, Code \\(\\rightarrow\\) Text). Note that these variants are designed to isolate the role of initialization, so do not include cooldown." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.373, + 0.825, + 0.418 + ], + "angle": 0, + "content": "model scale (§ 3.2), (3) varying proportion of code in pre-training data (§ 3.3), (4) quality and properties of the code data (§ 3.4), (5) code data in pre-training cooldown (§ 3.5). Finally, we compare the resulting pre-training recipes (§ 3.6). Figure 1 shows the key levers of our experimental design." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.432, + 0.629, + 0.447 + ], + "angle": 0, + "content": "3.1 INITIALIZING AN LLM WITH CODE PRE-TRAINED MODELS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.458, + 0.825, + 0.502 + ], + "angle": 0, + "content": "We explore different initializations of pre-trained models to understand if using an LM with a large portion of code data as initialization improves the performance. These key ablations, along with their token counts, are summarized in Table 2. We briefly describe below:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.513, + 0.825, + 0.541 + ], + "angle": 0, + "content": "- Text LM (TEXT-ONLY BASELINE): Pre-trained model from scratch using glorot-normal initialization (Glorot et al., 2011) on the text-only data for 400B tokens." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.545, + 0.825, + 0.573 + ], + "angle": 0, + "content": "- Balanced LM (BALANCED-ONLY): A model is trained with an equal ratio of code and text data (50% text and 50% code) in pre-training for 400B tokens." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.578, + 0.825, + 0.606 + ], + "angle": 0, + "content": "- Balance-initialized Text LM (BALANCED \\(\\rightarrow\\) TEXT): This model is initialized with a balanced LM (50% text and 50% code) and further pre-trained using text data for 200B tokens." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.61, + 0.825, + 0.667 + ], + "angle": 0, + "content": "- Code-initialized Text LM (CODE \\(\\rightarrow\\) TEXT): Different from other variants, this model is initialized with a code-LM which is pre-trained on a code dataset for 200B tokens. The code dataset contains a mixture of \\(80\\%\\) code data and \\(20\\%\\) markup-style code data. 
We then continually pre-train this model on text for another 200B tokens.4" + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.513, + 0.825, + 0.667 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.678, + 0.826, + 0.79 + ], + "angle": 0, + "content": "Natural Language Reasoning As seen in Figure 2, initializing with \\(100\\%\\) code pre-trained model (code \\(\\rightarrow\\) text) has the best performance for NL Reasoning benchmarks, and is closely followed by the balanced \\(\\rightarrow\\) text model. The code \\(\\rightarrow\\) text model and balanced \\(\\rightarrow\\) text model beat the text-only baseline on NL reasoning tasks by \\(8.8\\%\\) and \\(8.2\\%\\) relative improvement respectively. The balanced-only model improves upon the baseline by \\(3.2\\%\\). This shows that initialization from a pre-trained model with a mix of code has a strong positive effect on NL reasoning tasks. Further using a text mix with a small percentage of code for continual pre-training results in the best performance as evidenced by both the code \\(\\rightarrow\\) text and balanced \\(\\rightarrow\\) text models." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.797, + 0.825, + 0.882 + ], + "angle": 0, + "content": "World Knowledge For World Knowledge tasks, we see that the balanced \\(\\rightarrow\\) text model has the best performance over all other variants, beating the code \\(\\rightarrow\\) text by \\(21\\%\\) and text-only by \\(4.1\\%\\) relative improvement. This suggests that performance on world knowledge tasks depends on a more balanced data mixture for initialization and a larger proportion of text in the continual pretraining stage. Overall, code data is still beneficial compared to text-only pre-training for world knowledge tasks." + }, + { + "type": "page_footnote", + "bbox": [ + 0.171, + 0.898, + 0.825, + 0.925 + ], + "angle": 0, + "content": "4We use a 10% of code in text mixture data during continual pre-training of code-initialized models (balanced→text, code→text) to avoid a full distribution shift and maintain the benefits of code." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "image", + "bbox": [ + 0.227, + 0.104, + 0.773, + 0.263 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.274, + 0.825, + 0.318 + ], + "angle": 0, + "content": "Figure 3: Impact of model scale on different tasks. We observe that scale provides pronounced gains across tasks of up-to \\(2.7\\mathrm{x}\\) increase, however the overall trend remains the same across scales showing consistency of findings across model sizes." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.342, + 0.825, + 0.442 + ], + "angle": 0, + "content": "Trade-offs between NL tasks and code generation For code generation, balanced-only achieves the best performance, where we see a \\(46.7\\%\\) and \\(54.5\\%\\) relative improvement over balanced \\(\\rightarrow\\) text and code \\(\\rightarrow\\) text. This is expected as balanced-only includes \\(50\\%\\) code throughout pre-training. However, this model trades off better code generation with lower performance in NL tasks. 
code \\(\\rightarrow\\) text and balanced \\(\\rightarrow\\) text achieve \\(2.9\\%\\) and \\(2.3\\%\\) relative increases in NL reasoning, and \\(17.3\\%\\) and \\(22.2\\%\\) relative increases in world knowledge respectively, compared to balanced-only." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.447, + 0.827, + 0.56 + ], + "angle": 0, + "content": "Generative quality win-rates comparison Additionally, we compare the generative performance of each code variant (code \\(\\rightarrow\\) text and balanced-only) against the text-only model. We report win-rates and observe that the presence of code has a strong positive impact on generation quality. Both the code \\(\\rightarrow\\) text and balanced-only models beat the text-only variant by a \\(6.6\\%\\) difference in win-loss rates. We again note that the Dolly-200-English evaluation set we use for win-rate calculation is curated to reflect open-ended questions and is a non-code evaluation. This confirms that code data in the pre-training mix not only improves reasoning but also helps the model produce better quality generations." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.575, + 0.345, + 0.589 + ], + "angle": 0, + "content": "3.2 IMPACT OF SCALE" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.601, + 0.825, + 0.644 + ], + "angle": 0, + "content": "To understand if the findings of Section 3.1 transfer to larger models, we train 2.8B parameter models with the same token budget, following the same model variants as at the 470M scale. Figure 3 shows the results of the 2.8B models in comparison with the 470M results." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.65, + 0.825, + 0.721 + ], + "angle": 0, + "content": "Comparison between 2.8B and 470M models Scaling model size to 2.8B enables higher performance for all model variants in all task categories, compared to the 470M results. In terms of average performance across NL reasoning and world knowledge, the balanced \\(\\rightarrow\\) text model benefits from scaling up by a \\(33.1\\%\\) increase relative to the same model at 470M size. The improvements for code \\(\\rightarrow\\) text and balanced-only are \\(31.7\\%\\) and \\(30\\%\\) relative increases." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.727, + 0.825, + 0.798 + ], + "angle": 0, + "content": "We find that the improvements in NL reasoning are relatively modest with \\(5.3\\%\\), \\(9.2\\%\\), and \\(5.2\\%\\) relative gains for balanced \\(\\rightarrow\\) text, code \\(\\rightarrow\\) text, and balanced-only respectively. However, world knowledge and code performance nearly triple for all the model variants. In particular, 2.8B balanced \\(\\rightarrow\\) text results increase by \\(2.7\\mathrm{x}\\) in world knowledge and \\(2.5\\mathrm{x}\\) in code evaluation compared to 470M." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.803, + 0.826, + 0.903 + ], + "angle": 0, + "content": "Trends between model variants in 2.8B Notably, in terms of initialization with code pre-trained models, the same trends seen at the 470M parameter scale hold for 2.8B models. code \\(\\rightarrow\\) text and balanced \\(\\rightarrow\\) text models improve over balanced models by \\(6.9\\%\\) and \\(6.1\\%\\) relative gains; however, they fall significantly behind in code generation performance with \\(43.1\\%\\) and \\(46.3\\%\\) relative drops. These results show that the trade-off between NL tasks and code generation increases with the model size. 
Overall, our experiments at larger scale show that our results hold and are consistent with the trends we observe in the 470M parameter ablations." + }, + { + "type": "page_footnote", + "bbox": [ + 0.195, + 0.91, + 0.633, + 0.925 + ], + "angle": 0, + "content": "We include the extended win-rates for these experiments in Appendix E." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "image", + "bbox": [ + 0.225, + 0.103, + 0.42, + 0.212 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.423, + 0.103, + 0.617, + 0.212 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.435, + 0.212, + 0.595, + 0.221 + ], + "angle": 0, + "content": "Increasing Proportion of Code \\(\\rightarrow\\)" + }, + { + "type": "image", + "bbox": [ + 0.619, + 0.103, + 0.774, + 0.212 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.234, + 0.825, + 0.304 + ], + "angle": 0, + "content": "Figure 4: Impact of the proportion of code in pre-training for different tasks: We observe that as the code proportion of pre-training increases, the performance on code tasks increases linearly. In contrast, NL reasoning and world knowledge tasks are more sensitive, with an optimal range of code proportions where the benefits are most tangible. Models are 470M parameters, trained for 200B tokens." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.339, + 0.521, + 0.352 + ], + "angle": 0, + "content": "3.3 CODE DATA PROPORTION IN PRE-TRAINING" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.368, + 0.825, + 0.452 + ], + "angle": 0, + "content": "In these experiments, we ablate the proportions of code data in the pre-training mixture to understand the optimal amount of code for maximizing performance on non-code tasks. Here, we focus on the first phase of pre-training with random initialization. We train six models for 200B tokens with increasing code proportions: \\(0\\%\\), \\(25\\%\\), \\(50\\%\\), \\(75\\%\\), \\(90\\%\\), and \\(100\\%\\). The remaining proportion is filled with text data. For each variant, we train a new model independently in order to carefully ablate the impact of varying code proportions." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.458, + 0.825, + 0.543 + ], + "angle": 0, + "content": "Natural Language Reasoning and World Knowledge For NL Reasoning, as the amount of code increases, we see in Figure 4 an increase in performance compared to a text-only (0% code) model. The best performance is from a model with 25% code and 75% text, with a 3.4% relative improvement over a model with 0% code. While performance is maintained up to 75% code, it starts to rapidly erode at higher proportions, with a sharp relative drop of 18.3% when the model is trained on 100% code compared to a model with no code." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.549, + 0.825, + 0.62 + ], + "angle": 0, + "content": "For World Knowledge tasks, we see an inverse relationship as the amount of code increases. As seen in the middle inset of Figure 4, there is a slight relative drop of \\(3.4\\%\\) at \\(25\\%\\) code, and this drop worsens to \\(31\\%\\) at \\(75\\%\\) code compared to the no-code model.
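For reference, the proportion sweep described above amounts to a simple run grid; the sketch below assumes a hypothetical run-configuration format rather than the authors' actual tooling:

```python
# Section 3.3 ablation grid: six independent 470M models, 200B tokens each,
# with the non-code share of the mixture filled by text.
CODE_PROPORTIONS = [0.0, 0.25, 0.5, 0.75, 0.9, 1.0]

runs = [
    {
        "model_size": "470M",
        "tokens": 200_000_000_000,
        "mixture": {"code": p, "text": round(1.0 - p, 2)},
    }
    for p in CODE_PROPORTIONS
]
```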
The fully-code model (100% code) is unable to perform on world knowledge tasks (an 86% drop relative to text-only), as there are no data sources in the pre-training mix from which to acquire the required knowledge." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.626, + 0.825, + 0.682 + ], + "angle": 0, + "content": "Performance on Code For code evaluation, there is a linear increase in performance as the amount of code increases, with the best model being a code-only model. As seen in the right inset of Figure 4, the \\(100\\%\\) code model leads to a 2.6x increase on the code benchmarks compared to the \\(25\\%\\) code model. As expected, for the model with \\(0\\%\\) code, the average pass@1 score drops to 0." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.707, + 0.764, + 0.722 + ], + "angle": 0, + "content": "3.4 INFLUENCE OF CODE QUALITY AND PROPERTIES ON GENERAL PERFORMANCE" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.736, + 0.825, + 0.807 + ], + "angle": 0, + "content": "In this section, we investigate the properties of code data by varying its quality and composition. We study this firstly (a) from the perspective of training from scratch, as we want to isolate the exact effects of different properties of code data. Secondly (b), we incorporate the best variant of the code data (high-quality synthetic code) in our continual pre-training experiments to see if the impact of code quality transfers. We report performance on NL reasoning and Code tasks." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.813, + 0.825, + 0.926 + ], + "angle": 0, + "content": "We study the effect of the following properties: (1) MARKUP-STYLE DATA: we separate markup-style programming languages (\\(\\S\\) 2.1) from the rest of web-based code (Appendix C.3). We replace \\(20\\%\\) of code-only tokens with markup-style tokens. (2) CODE-ADJACENT DATA: Instead of using purely web-based code data, we replace \\(15\\%\\) of code tokens with code-adjacent datasets - GitHub issues (\\(5\\%\\)), StackExchange (\\(5\\%\\)), and Jupyter Notebooks (\\(5\\%\\)), resulting in a code-adjacent model. (3) CODE QUALITY: To control the quality of the code, we replace \\(10\\%\\) of existing code tokens with a synthetically generated high-quality code dataset. The remaining proportions of web-based code data are kept the same, resulting in a code-synth model." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "image", + "bbox": [ + 0.186, + 0.104, + 0.531, + 0.224 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.28, + 0.231, + 0.44, + 0.245 + ], + "angle": 0, + "content": "(a) Code-only Pre-training" + }, + { + "type": "image", + "bbox": [ + 0.539, + 0.103, + 0.815, + 0.224 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.598, + 0.231, + 0.754, + 0.245 + ], + "angle": 0, + "content": "(b) Continual Pre-training" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.256, + 0.825, + 0.312 + ], + "angle": 0, + "content": "Figure 5: Impact of using different properties of code data: (a) As the most impactful code data source, synthetically generated high-quality code improves NL reasoning and code performance for code pre-training.
(b) These improvements with synthetically generated high-quality code data also transfer to the continual pre-training setting. All models are of size 470M parameters." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.341, + 0.825, + 0.385 + ], + "angle": 0, + "content": "Code-only pre-training We compare the above variants to a model that is trained only on web-based code data (code) from the Stack dataset (Kocetkov et al., 2022), which forms our baseline model. All the variants are pre-trained using the same amount of tokens (200B) for a fair comparison." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.39, + 0.825, + 0.489 + ], + "angle": 0, + "content": "In Figure 5a, we evaluate the impact of code quality and code composition. We observe that across all variants, including diverse code sources as well as synthetic code leads to gains in natural language performance relative to code; however, only synthetically generated code improves the code benchmarks. We relate this to our code evaluation, where we measure performance in Python; thus, different programming languages or code-adjacent data slightly decrease the results. Here, code+markup and code+adjacent lead to \\(2.8\\%\\) and \\(6.3\\%\\) relative improvements in NL reasoning compared to code (web-code-only), but cause \\(15.7\\%\\) and \\(9.4\\%\\) drops in code evaluation." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.495, + 0.825, + 0.621 + ], + "angle": 0, + "content": "Our synthetic code data (code+synth) is the most impactful ablation. It is particularly impressive given its relatively small share of the overall dataset. Despite a small weighting of \\(10\\%\\), the inclusion of synthetic data leads to relative improvements of \\(9\\%\\) on NL reasoning and \\(44.9\\%\\) on code benchmarks compared to the baseline of web-code-only. We note that the lifts observed for synthetic data are even more impressive given the limited amount of synthetic data available compared to code-adjacent data (3.2B tokens vs 21.4B tokens) or code+markup data (3.2B tokens vs 40B tokens), and the weighting during pre-training allocation (\\(10\\%\\) vs \\(15\\%\\) vs \\(20\\%\\) for synthetic data, code-adjacent, and code-markup respectively). This suggests that increasing the proportion of such high-quality code data sources is a key future lever of improvement." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.627, + 0.825, + 0.683 + ], + "angle": 0, + "content": "Continual pre-training Here, based on the findings from code-only pre-training, we incorporate code + synth into our best continual pre-training variant (balanced + synth \\(\\rightarrow\\) text). We compare this against the same variant without synthetic code data (balanced \\(\\rightarrow\\) text) to evaluate the benefits of synthetic data. We use the same amount of code and text tokens in these experiments." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.689, + 0.825, + 0.747 + ], + "angle": 0, + "content": "As shown in Figure 5b, balanced+synth→text achieves \\(2\\%\\) and \\(35\\%\\) relative improvements over balanced→text in NL reasoning and code, respectively. This further confirms that even a small percentage of high-quality code data not only improves performance in code pre-training but also increases code and non-code performance after continual pre-training with text data."
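To summarize the composition ablations of this section, a minimal sketch of the code sub-mixtures follows; the dictionary structure and source keys are our own shorthand, and only the percentages come from the text above:

```python
# Code sub-mixtures ablated in Section 3.4 (shares of the code portion only).
WEB_CODE = "web-code"  # the Stack (Kocetkov et al., 2022)

code_mixtures = {
    "code":          {WEB_CODE: 1.00},                        # baseline
    "code+markup":   {WEB_CODE: 0.80, "markup": 0.20},
    "code+adjacent": {WEB_CODE: 0.85, "github-issues": 0.05,
                      "stackexchange": 0.05, "jupyter-notebooks": 0.05},
    "code+synth":    {WEB_CODE: 0.90, "synthetic-high-quality": 0.10},
}

# Sanity check: every mixture's shares sum to 1.
for name, mix in code_mixtures.items():
    assert abs(sum(mix.values()) - 1.0) < 1e-9, f"{name} does not sum to 1"
```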
+ }, + { + "type": "title", + "bbox": [ + 0.173, + 0.765, + 0.473, + 0.779 + ], + "angle": 0, + "content": "3.5 CODE IN PRE-TRAINING COOLDOWN" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.792, + 0.825, + 0.89 + ], + "angle": 0, + "content": "In this section, we evaluate the impact of code at the final stage of pre-training. Here, we consider cooldown, where we up-weight high-quality text, math, code, and instruct-style datasets. We change the learning rate schedule from cosine-based to linear annealing with a final learning rate of \\(1e-6\\). We evaluate the impact of code in cooldown by comparing three models: a pre-trained model before cooldown, cooldown without code data, and cooldown with \\(20\\%\\) code data. For our pre-trained model, we use balanced \\(\\rightarrow\\) text as it is our best pre-trained variant. We preserve the same token budget across variants - 40B tokens, which is \\(10\\%\\) of the token budget of the pre-trained model." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.825, + 0.925 + ], + "angle": 0, + "content": "Impact of code used during cooldown in different tasks In Figure 6a, we evaluate the impact of code in cooldown on model performance in NL reasoning, world knowledge, and code benchmarks." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "image", + "bbox": [ + 0.177, + 0.103, + 0.526, + 0.205 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.293, + 0.212, + 0.424, + 0.225 + ], + "angle": 0, + "content": "(a) Downstream tasks" + }, + { + "type": "image", + "bbox": [ + 0.542, + 0.107, + 0.811, + 0.204 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.609, + 0.212, + 0.756, + 0.225 + ], + "angle": 0, + "content": "(b) Generative win-rates" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.237, + 0.825, + 0.294 + ], + "angle": 0, + "content": "Figure 6: Impact of code data in pre-training cooldown: Including code data in the cooldown phase improves downstream performance relative to cooldown with no code. All cooldown variants benefit downstream tasks and especially generative quality. We find the largest gains from cooldown with code, with the highest win-rate of \\(52.3\\%\\) over a model with no cooldown." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.32, + 0.825, + 0.349 + ], + "angle": 0, + "content": "Across tasks, we find that a cooldown with code data is most beneficial, with relative gains of \\(3.6\\%\\), \\(10.1\\%\\), and \\(20\\%\\) in NL reasoning, world knowledge, and code over the model without cooldown." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.355, + 0.825, + 0.398 + ], + "angle": 0, + "content": "In contrast, we find that cooldown without code does not provide any increase in either NL reasoning or code, while providing a relative improvement of \\(3.1\\%\\) in world knowledge tasks compared to no cooldown, showing the critical role of code data also in the cooldown phase of pre-training." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.404, + 0.825, + 0.502 + ], + "angle": 0, + "content": "Generative win-rates after cooldown As expected, cooldown has a significant impact on generative performance as measured by win-rates (seen in Figure 6b).
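The cooldown schedule described above (a cosine-based schedule for the main run, switched to linear annealing down to \(1e-6\) over the final 10% of the token budget) can be sketched as follows. The peak and end-of-main-phase learning rates are illustrative assumptions; the text only specifies the final value:

```python
import math

PEAK_LR = 3e-4       # assumed peak learning rate for the main phase
COSINE_FLOOR = 3e-5  # assumed LR reached when cooldown begins
FINAL_LR = 1e-6      # final LR after linear annealing (from the text)

def learning_rate(progress: float, cooldown_start: float = 0.9) -> float:
    """LR given `progress`, the fraction of total tokens consumed, in [0, 1]."""
    if progress <= cooldown_start:
        # cosine decay from PEAK_LR down to COSINE_FLOOR over the main phase
        t = progress / cooldown_start
        return COSINE_FLOOR + 0.5 * (PEAK_LR - COSINE_FLOOR) * (1 + math.cos(math.pi * t))
    # linear annealing from COSINE_FLOOR to FINAL_LR over the cooldown tokens
    t = (progress - cooldown_start) / (1.0 - cooldown_start)
    return COSINE_FLOOR + t * (FINAL_LR - COSINE_FLOOR)
```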
This strong impact is expected because we up-weight high-quality data sources in the pre-training mix, including instruction-style datasets such as Dolly v2 (Conover et al., 2023). Both cooldown variants (cooldown w/o code, cooldown w/ code) beat the no-cooldown model by large win-rates (48.2% and 52.3%), as seen in Figure 6b. Comparing the cooldown variants, including code leads to an additional 4.1% in generative win-rates against no-cooldown, compared to cooldown without code." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.519, + 0.476, + 0.532 + ], + "angle": 0, + "content": "3.6 COMPARING PRE-TRAINING RECIPES" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.544, + 0.825, + 0.574 + ], + "angle": 0, + "content": "Considering all our experiments, we summarize our findings and recommend recipes for pre-training with code data. Table 2 (Appendix B) shows the different variants along with the pre-training phases." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.58, + 0.825, + 0.664 + ], + "angle": 0, + "content": "Best recipe for natural language tasks As seen in Sections 3.1, 3.3, and 3.5, including code in all phases of pre-training provides improvements across all task categories. When looking at the final recipes, we find that the balanced \\(\\rightarrow\\) text model followed by a cooldown that includes code data corresponds to the best overall performance in natural language tasks, considering NL reasoning, world knowledge, and generative performance. Notably, this model achieves the highest generative win-rates, with \\(37.7\\%\\) vs \\(33.7\\%\\) against text-only, as shown in Figure 7." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.671, + 0.825, + 0.741 + ], + "angle": 0, + "content": "Best recipe for code performance Among the complete recipes shown in Table 2, balanced-only provides the best performance in code benchmarks. This model achieves a \\(20\\%\\) relative gain compared to the second-best code \\(\\rightarrow\\) text and a \\(55\\%\\) relative gain compared to balanced \\(\\rightarrow\\) text. However, balanced-only falls behind in natural language performance by a \\(2.5\\%\\) relative difference and a \\(5.0\\%\\) win-rate difference (vs text-only), both compared to balanced \\(\\rightarrow\\) text." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.747, + 0.825, + 0.817 + ], + "angle": 0, + "content": "Including code in all phases of pre-training is beneficial across our three task categories and for generative performance. Our recommendation for the best overall performance is to include a balanced mixture of code and text data during pre-training from scratch (§ 3.3), use a relatively lower code percentage during continual pre-training (§ 3.1), and include code data in the cooldown mixture. Further, we recommend including high-quality code data during all phases of pre-training (§ 3.4)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.838, + 0.342, + 0.853 + ], + "angle": 0, + "content": "4 RELATED WORK" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.868, + 0.825, + 0.926 + ], + "angle": 0, + "content": "Understanding the impact of pre-training mixes Several works have studied the effects of data age, quality, toxicity, and domain of pre-training data (Longpre et al., 2023; Üstün et al., 2024).
Several works have looked at the impact of filtering (Raffel et al., 2020; Rae et al., 2021; Penedo et al., 2023), de-duping (Zhang et al., 2022), and data pruning (Lozhkov et al., 2024; Marion et al., 2023;" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "image", + "bbox": [ + 0.225, + 0.103, + 0.41, + 0.238 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.427, + 0.104, + 0.599, + 0.237 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.619, + 0.104, + 0.774, + 0.237 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.194, + 0.25, + 0.802, + 0.266 + ], + "angle": 0, + "content": "Figure 7: Generative performance as measured by win-rates for variants with full cooldown." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.292, + 0.827, + 0.363 + ], + "angle": 0, + "content": "Chimoto et al., 2024; Boubdir et al., 2023). Furthermore, several works have considered the role of synthetic data in improving performance (Shimabucoro et al., 2024; Dang et al., 2024; Aakanksha et al., 2024) and helping bridge the gap in performance between open-weight and proprietary models (Gunasekar et al., 2023; Li et al., 2023b). In contrast to our work, which focuses explicitly on understanding the role of code, these studies focus on characteristics of training data as a whole." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.369, + 0.828, + 0.593 + ], + "angle": 0, + "content": "Understanding the role of code Including code in the pre-training data mixture, even for models not specifically designed for code, has been a common practice in LLM pre-training (Dubey et al., 2024; Gemini-Team et al., 2024; Groeneveld et al., 2024). In addition to serving the popular use case of code completion and generation (Chen et al., 2021), previous studies suggest that the addition of code improves the performance of LLMs on various NLP tasks, such as entity linking (Kim et al., 2024) and commonsense reasoning (Madaan et al., 2022b), mathematical reasoning tasks (Liang et al., 2022; Madaan et al., 2022a; Gao et al., 2023; Shao et al., 2024), and general reasoning capabilities (Muennighoff et al., 2023a; Fu & Khot, 2022; Ma et al., 2023). Muennighoff et al. (2023b) demonstrated that Python code data can be used to improve pre-training performance. They focused on a low-resource pre-training regime with limited data and an evaluation set-up limited to perplexity evaluations. Zhang et al. (2024) investigated the impact of code on LLMs' internal reasoning capability across various tasks and model families. They focus only on the effect of code in the supervised fine-tuning (SFT) stage, primarily measuring the impact on reasoning. Zhu et al. (2024) report the performance of their DeepSeek-Coder-V2 models on general natural language benchmarks. They compare chat and instruct models, and do not investigate different phases of pre-training or properties of code." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.598, + 0.828, + 0.655 + ], + "angle": 0, + "content": "To the best of our knowledge, this work is the first study that presents a thorough investigation of the impact of code in pre-training on non-code tasks.
Our experiments span several axes and an exhaustive evaluation suite, with costly ablations at scale, including model initialization strategies, different proportions and properties of code data, and model scales." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.675, + 0.321, + 0.691 + ], + "angle": 0, + "content": "5 CONCLUSION" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.706, + 0.827, + 0.888 + ], + "angle": 0, + "content": "We perform a first-of-its-kind systematic study to answer \"what is the impact of code data used in pre-training on a large variety of downstream tasks beyond code generation\". We focus not just on code performance but also on downstream natural language performance, as well as generative quality using LLM-as-a-judge win-rates. We conduct ablations that look at initialization, proportions of code, quality and properties of code, and the role of code in pre-training cooldown. We find across all scales of experiments that code provides critical improvements to performance on non-code tasks. Compared to text-only pre-training, for our best variant, the addition of code results in relative increases of \\(8.2\\%\\) in natural language (NL) reasoning and \\(4.2\\%\\) in world knowledge, a \\(6.6\\%\\) improvement in generative win-rates, and a 12x boost in code performance. Further, performing cooldown with code improves performance by \\(3.6\\%\\), \\(10.1\\%\\), and \\(20\\%\\) in NL reasoning, world knowledge, and code relative to the model before cooldown, and leads to \\(52.3\\%\\) generative win-rates. Finally, we find that adding a small amount of high-quality synthetic data can have an outsized impact on both NL reasoning (\\(9\\%\\) relative increase) and code performance (\\(44.9\\%\\) relative increase)." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.103, + 0.289, + 0.118 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.127, + 0.826, + 0.17 + ], + "angle": 0, + "content": "Aakanksha, Arash Ahmadian, Beyza Ermis, Seraphina Goldfarb-Tarrant, Julia Kreutzer, Marzieh Fadaee, and Sara Hooker. The multilingual alignment prism: Aligning global and local preferences to reduce harm, 2024. URL https://arxiv.org/abs/2406.18682." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.178, + 0.826, + 0.25 + ], + "angle": 0, + "content": "Viraat Aryabumi, John Dang, Dwarak Talupuru, Saurabh Dash, David Cairuz, Hangyu Lin, Bharat Venkitesh, Madeline Smith, Jon Ander Campos, Yi Chern Tan, Kelly Marchisio, Max Bartolo, Sebastian Ruder, Acyr Locatelli, Julia Kreutzer, Nick Frosst, Aidan Gomez, Phil Blunsom, Marzieh Fadaee, Ahmet Üstün, and Sara Hooker. Aya 23: Open weight releases to further multilingual progress, 2024. URL https://arxiv.org/abs/2405.15032." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.258, + 0.826, + 0.301 + ], + "angle": 0, + "content": "Jacob Austin, Augustus Odena, Maxwell Nye, Maarten Bosma, Henryk Michalewski, David Dohan, Ellen Jiang, Carrie Cai, Michael Terry, Quoc Le, and Charles Sutton. Program synthesis with large language models, 2021. URL https://arxiv.org/abs/2108.07732."
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.31, + 0.826, + 0.353 + ], + "angle": 0, + "content": "Meriem Boubdir, Edward Kim, Beyza Ermis, Marzieh Fadaee, and Sara Hooker. Which prompts make the difference? data prioritization for efficient human llm evaluation, 2023. URL https://arxiv.org/abs/2310.14424." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.362, + 0.826, + 0.42 + ], + "angle": 0, + "content": "James Bradbury, Roy Frostig, Peter Hawkins, Matthew James Johnson, Chris Leary, Dougal Maclaurin, George Necula, Adam Paszke, Jake VanderPlas, Skye Wanderman-Milne, and Qiao Zhang. JAX: composable transformations of Python+NumPy programs, 2018. URL http://github.com/google/jax." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.428, + 0.826, + 0.526 + ], + "angle": 0, + "content": "Tom B. Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel M. Ziegler, Jeffrey Wu, Clemens Winter, Christopher Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. Language models are few-shot learners. arXiv, abs/2005.14165, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.535, + 0.826, + 0.689 + ], + "angle": 0, + "content": "Mark Chen, Jerry Tworek, Heewoo Jun, Qiming Yuan, Henrique Ponde de Oliveira Pinto, Jared Kaplan, Harri Edwards, Yuri Burda, Nicholas Joseph, Greg Brockman, Alex Ray, Raul Puri, Gretchen Krueger, Michael Petrov, Heidy Khlaaf, Girish Sastry, Pamela Mishkin, Brooke Chan, Scott Gray, Nick Ryder, Mikhail Pavlov, Alethea Power, Lukasz Kaiser, Mohammad Bavarian, Clemens Winter, Philippe Tillet, Felipe Petroski Such, Dave Cummings, Matthias Plappert, Fotios Chantzis, Elizabeth Barnes, Ariel Herbert-Voss, William Hebgen Guss, Alex Nichol, Alex Paino, Nikolas Tezak, Jie Tang, Igor Babuschkin, Suchir Balaji, Shantanu Jain, William Saunders, Christopher Hesse, Andrew N. Carr, Jan Leike, Josh Achiam, Vedant Misra, Evan Morikawa, Alec Radford, Matthew Knight, Miles Brundage, Mira Murati, Katie Mayer, Peter Welinder, Bob McGrew, Dario Amodei, Sam McCandlish, Ilya Sutskever, and Wojciech Zaremba. Evaluating large language models trained on code, 2021. URL https://arxiv.org/abs/2107.03374." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.698, + 0.826, + 0.755 + ], + "angle": 0, + "content": "Zixiang Chen, Yihe Deng, Yue Wu, Quanquan Gu, and Yuanzhi Li. Towards understanding the mixture-of-experts layer in deep learning. In Alice H. Oh, Alekh Agarwal, Danielle Belgrave, and Kyunghyun Cho (eds.), Advances in Neural Information Processing Systems, 2022. URL https://openreview.net/forum?id=MaYzugDmQV." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.763, + 0.826, + 0.793 + ], + "angle": 0, + "content": "Cheng-Han Chiang and Hung-yi Lee. Can large language models be an alternative to human evaluations?, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.802, + 0.826, + 0.845 + ], + "angle": 0, + "content": "Everlyn Asiko Chimoto, Jay Gala, Orevaoghene Ahia, Julia Kreutzer, Bruce A. Bassett, and Sara Hooker. Critical learning periods: Leveraging early training dynamics for efficient data pruning, 2024. URL https://arxiv.org/abs/2405.19462."
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.854, + 0.826, + 0.925 + ], + "angle": 0, + "content": "Eunsol Choi, He He, Mohit Iyyer, Mark Yatskar, Wen-tau Yih, Yejin Choi, Percy Liang, and Luke Zettlemoyer. QuAC: Question answering in context. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pp. 2174-2184, Brussels, Belgium, October-November 2018. Association for Computational Linguistics. doi: 10.18653/v1/D18-1241. URL https://aclanthology.org/D18-1241." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.127, + 0.826, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.104, + 0.827, + 0.285 + ], + "angle": 0, + "content": "Aakanksha Chowdhery, Sharan Narang, Jacob Devlin, Maarten Bosma, Gaurav Mishra, Adam Roberts, Paul Barham, Hyung Won Chung, Charles Sutton, Sebastian Gehrmann, Parker Schuh, Kensen Shi, Sasha Tsvyashchenko, Joshua Maynez, Abhishek Rao, Parker Barnes, Yi Tay, Noam Shazeer, Vinodkumar Prabhakaran, Emily Reif, Nan Du, Ben Hutchinson, Reiner Pope, James Bradbury, Jacob Austin, Michael Isard, Guy Gur-Ari, Pengcheng Yin, Toju Duke, Anselm Levskaya, Sanjay Ghemawat, Sunipa Dev, Henryk Michalewski, Xavier Garcia, Vedant Misra, Kevin Robinson, Liam Fedus, Denny Zhou, Daphne Ippolito, David Luan, Hyeontaek Lim, Barret Zoph, Alexander Spiridonov, Ryan Sepassi, David Dohan, Shivani Agrawal, Mark Omernick, Andrew M. Dai, Thanumalayan Sankaranarayana Pillai, Marie Pellat, Aitor Lewkowycz, Erica Moreira, Rewon Child, Oleksandr Polozov, Katherine Lee, Zongwei Zhou, Xuezhi Wang, Brennan Saeta, Mark Diaz, Orhan Firat, Michele Catasta, Jason Wei, Kathy Meier-Hellstern, Douglas Eck, Jeff Dean, Slav Petrov, and Noah Fiedel. Palm: Scaling language modeling with pathways, 2022. URL https://arxiv.org/abs/2204.02311." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.294, + 0.827, + 0.38 + ], + "angle": 0, + "content": "Christopher Clark, Kenton Lee, Ming-Wei Chang, Tom Kwiatkowski, Michael Collins, and Kristina Toutanova. BoolQ: Exploring the surprising difficulty of natural yes/no questions. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pp. 2924–2936, Minneapolis, Minnesota, June 2019. Association for Computational Linguistics. doi: 10.18653/v1/N19-1300. URL https://aclanthology.org/N19-1300." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.387, + 0.825, + 0.43 + ], + "angle": 0, + "content": "Peter Clark, Isaac Cowhey, Oren Etzioni, Tushar Khot, Ashish Sabharwal, Carissa Schoenick, and Oyvind Tafjord. Think you have solved question answering? try arc, the ai2 reasoning challenge. arXiv:1803.05457v1, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.439, + 0.825, + 0.469 + ], + "angle": 0, + "content": "Together Computer. Redpajama: an open dataset for training large language models, 2023. URL https://github.com/togethercomputer/RedPajama-Data." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.477, + 0.825, + 0.534 + ], + "angle": 0, + "content": "Mike Conover, Matt Hayes, Ankit Mathur, Jianwei Xie, Jun Wan, Sam Shah, Ali Ghodsi, Patrick Wendell, Matei Zaharia, and Reynold Xin. Free dolly: Introducing the world's first truly open instruction-tuned llm, 2023. URL https://www.databricks.com/blog/2023/04/12/dolly-first-open-commercially-viable-instruction-tuned-llm." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.543, + 0.825, + 0.586 + ], + "angle": 0, + "content": "John Dang, Arash Ahmadian, Kelly Marchisio, Julia Kreutzer, Ahmet Üstün, and Sara Hooker. Rlhf can speak many languages: Unlocking multilingual preference optimization for llms, 2024. URL https://arxiv.org/abs/2407.02552." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.595, + 0.825, + 0.624 + ], + "angle": 0, + "content": "Marie-Catherine de Marneffe, Mandy Simons, and Judith Tonhauser. The commitmentbank: Investigating projection in naturally occurring discourse, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.633, + 0.827, + 0.925 + ], + "angle": 0, + "content": "Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. The llama 3 herd of models, 2024. URL https://arxiv.org/abs/2407.21783." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.104, + 0.827, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.511, + 0.96 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.034, + 0.478, + 0.047 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.492, + 0.949, + 0.507, + 0.96 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.048 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.254, + 0.825, + 0.31 + ], + "angle": 0, + "content": "Yann Dubois, Xuechen Li, Rohan Taori, Tianyi Zhang, Ishaan Gulrajani, Jimmy Ba, Carlos Guestrin, Percy Liang, and Tatsunori B. Hashimoto. Alpacafarm: A simulation framework for methods that learn from human feedback, 2024. URL https://arxiv.org/abs/2305.14387." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.32, + 0.825, + 0.364 + ], + "angle": 0, + "content": "Yao Fu, Hao Peng, and Tushar Khot. How does gpt obtain its ability? tracing emergent abilities of language models to their sources. Yao Fu's Notion, Dec 2022. URL https://yaofu.notion.site/b9a57ac0acf74f30a1ab9e3e36fa1dc1?pvs=25." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.373, + 0.825, + 0.39 + ], + "angle": 0, + "content": "Jinlan Fu, See-Kiong Ng, Zhengbao Jiang, and Pengfei Liu. Gptscore: Evaluate as you desire, 2023."
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.399, + 0.825, + 0.47 + ], + "angle": 0, + "content": "Yifu Gao, Yongquan He, Zhigang Kan, Yi Han, Linbo Qiao, and Dongsheng Li. Learning joint structural and temporal contextualized knowledge embeddings for temporal knowledge graph completion. In Findings of the Association for Computational Linguistics: ACL 2023, pp. 417-430, Toronto, Canada, July 2023. Association for Computational Linguistics. URL https://aclanthology.org/2023-findings-acl.28." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.479, + 0.826, + 0.925 + ], + "angle": 0, + "content": "Gemini-Team, Rohan Anil, Sebastian Borgeaud, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M. Dai, Anja Hauth, Katie Millican, David Silver, Melvin Johnson, Ioannis Antonoglou, Julian Schrittwieser, Amelia Glaese, Jilin Chen, Emily Pitler, Timothy Lillicrap, Angeliki Lazaridou, Orhan Firat, James Molloy, Michael Isard, Paul R. Barham, Tom Hennigan, Benjamin Lee, Fabio Viola, Malcolm Reynolds, Yuanzhong Xu, Ryan Doherty, Eli Collins, Clemens Meyer, Eliza Rutherford, Erica Moreira, Kareem Ayoub, Megha Goel, Jack Krawczyk, Cosmo Du, Ed Chi, Heng-Tze Cheng, Eric Ni, Purvi Shah, Patrick Kane, Betty Chan, Manaal Faruqui, Aliaksei Severyn, et al. Gemini: A family of highly capable multimodal models, 2024. URL https://arxiv.org/abs/2312.11805." + }, + { + "type": "list", + "bbox": [ + 0.172, + 0.104, + 0.826, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.034, + 0.478, + 0.047 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.492, + 0.949, + 0.507, + 0.96 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.034, + 0.479, + 0.047 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.492, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.034, + 0.479, + 0.047 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }
Nguyen Joe Kelley Aroma Mahendru Andrea Hu Joshua Howland Ben Vargas Jeffrey Hui Kshitij Bansal,Vikram Rao Rakesh Ghiya Emma Wang Ke Ye Jean Michel Sarr Melanie Moranski Preston Madeleine Elish Steve Li Aakash Kaku Jigar Gupta Ice Pasupat Da-Cheng Juan Milan Someswar Tejvi M., Xinyun Chen Aida Amini Alex Fabrikant Eric Chu Xuanyi Dong Amrutta Muthal Senaka Buthpitiya Sarthak Jauhari Nan Hua Urvashi Khandelwal Ayal Hitron Jie Ren Larissa Rinaldi Shahar Drath Avigail Dabush" + }, + { + "type": "page_number", + "bbox": [ + 0.492, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "ref_text", + "bbox": [ + 0.189, + 0.104, + 0.825, + 0.313 + ], + "angle": 0, + "content": "Nan-Jiang Jiang, Harshal Godhia, Uli Sachs, Anthony Chen, Yicheng Fan, Hagai Taitelbaum, Hila Noga, Zhuyun Dai, James Wang, Chen Liang, Jenny Hamer, Chun-Sung Ferng, Chenel Elkind, Aviel Atias, Paulina Lee, Vít Listík, Mathias Carlen, Jan van de Kerkhof, Marcin Pikus, Krunoslav Zaher, Paul Müller, Sasha Zykova, Richard Stefanec, Vitaly Gatsko, Christoph Hirnschall, Ashwin Sethi, Xingyu Federico Xu, Chetan Ahuja, Beth Tsai, Anca Stefanoiu, Bo Feng, Keshav Dhandhania, Manish Katyal, Akshay Gupta, Atharva Parulekar, Divya Pitta, Jing Zhao, Vivaan Bhatia, Yashodha Bhavnani, Omar Alhadlaq, Xiaolin Li, Peter Danenberg, Dennis Tu, Alex Pine, Vera Filippova, Abhipso Ghosh, Ben Limonchik, Bhargava Urala, Chaitanya Krishna Lanka, Derik Clive, Yi Sun, Edward Li, Hao Wu, Kevin Hongtongsak, Ianna Li, Kalind Thakkar, Kanyush Omarov, Kushal Majmundar, Michael Alverson, Michael Kucharski, Mohak Patel, Mudit Jain, Maksim Zabelin, Paolo Pelagatti, Rohan Kohli, Saurabh Kumar, Joseph Kim, Swetha Sankar, Vineet Shah, Lakshmi Ramachandruni, Xiangkai Zeng, Ben Bariach, Laura Weidinger, Amar Subramanya, Sissie Hsiao, Demis Hassabis, Koray Kavukcuoglu, Adam Sadovsky, Quoc Le, Trevor Strohman, Yonghui Wu, Slav Petrov, Jeffrey Dean, and Oriol Vinyals. Gemini: A family of highly capable multimodal models, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.322, + 0.825, + 0.365 + ], + "angle": 0, + "content": "Xavier Glorot, Antoine Bordes, and Yoshua Bengio. Deep sparse rectifier neural networks. In Proceedings of the fourteenth international conference on artificial intelligence and statistics, pp. 315-323, 2011." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.374, + 0.825, + 0.418 + ], + "angle": 0, + "content": "Dirk Groeneveld, Iz Beltagy, Pete Walsh, Akshita Bhagia, Rodney Kinney, Oyvind Tafjord, Ananya Harsh Jha, Hamish Ivison, Ian Magnusson, Yizhong Wang, et al. Olmo: Accelerating the science of language models. arXiv preprint arXiv:2402.00838, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.426, + 0.825, + 0.47 + ], + "angle": 0, + "content": "Suriya Gunasekar, Yi Zhang, Jyoti Aneja, Caio Cesar Teodoro Mendes, Allie Del Giorno, Sivakanth Gopi, Mojan Javaheripi, Piero Kauffmann, Gustavo de Rosa, Olli Saarikivi, et al. Textbooks are all you need. arXiv preprint arXiv:2306.11644, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.478, + 0.825, + 0.535 + ], + "angle": 0, + "content": "Srinivasan Iyer, Xi Victoria Lin, Ramakanth Pasunuru, Todor Mihaylov, Daniel Simig, Ping Yu, Kurt Shuster, Tianlu Wang, Qing Liu, Punit Singh Koura, et al. 
Opt-iml: Scaling language model instruction meta learning through the lens of generalization. arXiv preprint arXiv:2212.12017, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.545, + 0.825, + 0.616 + ], + "angle": 0, + "content": "Mandar Joshi, Eunsol Choi, Daniel Weld, and Luke Zettlemoyer. TriviaQA: A large scale distantly supervised challenge dataset for reading comprehension. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 1601-1611, Vancouver, Canada, July 2017. Association for Computational Linguistics. doi: 10.18653/v1/P17-1147. URL https://aclanthology.org/P17-1147." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.625, + 0.825, + 0.834 + ], + "angle": 0, + "content": "Norman P. Jouppi, Cliff Young, Nishant Patil, David A. Patterson, Gaurav Agrawal, Raminder Bajwa, Sarah Bates, Suresh Bhatia, Nan Boden, Al Borchers, Rick Boyle, Pierre-luc Cantin, Clifford Chao, Chris Clark, Jeremy Coriell, Mike Daley, Matt Dau, Jeffrey Dean, Ben Gelb, Tara Vazir Ghaemmaghami, Rajendra Gottipati, William Gulland, Robert Hagmann, C. Richard Ho, Doug Hogberg, John Hu, Robert Hundt, Dan Hurt, Julian Ibarz, Aaron Jaffey, Alek Jaworski, Alexander Kaplan, Harshit Khaitan, Daniel Killebrew, Andy Koch, Naveen Kumar, Steve Lacy, James Laudon, James Law, Diemthu Le, Chris Leary, Zhuyuan Liu, Kyle Lucke, Alan Lundin, Gordon MacKean, Adriana Maggiore, Maire Mahony, Kieran Miller, Rahul Nagarajan, Ravi Narayanaswami, Ray Ni, Kathy Nix, Thomas Norrie, Mark Omernick, Narayana Penukonda, Andy Phelps, Jonathan Ross, Matt Ross, Amir Salek, Emad Samadiani, Chris Severn, Gregory Sizikov, Matthew Snelham, Jed Souter, Dan Steinberg, Andy Swing, Mercedes Tan, Gregory Thorson, Bo Tian, Horia Toma, Erick Tuttle, Vijay Vasudevan, Richard Walter, Walter Wang, Eric Wilcox, and Doe Hyun Yoon. In-Datacenter Performance Analysis of a Tensor Processing Unit. In Proceedings of the 44th Annual International Symposium on Computer Architecture ISCA, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.843, + 0.825, + 0.873 + ], + "angle": 0, + "content": "Najoung Kim, Sebastian Schuster, and Shubham Toshniwal. Code pretraining improves entity tracking abilities of language models. arXiv preprint arXiv:2405.21068, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.882, + 0.825, + 0.925 + ], + "angle": 0, + "content": "Denis Kocetkov, Raymond Li, Loubna Ben Allal, Jia Li, Chenghao Mou, Carlos Muñoz Ferrandis, Yacine Jernite, Margaret Mitchell, Sean Hughes, Thomas Wolf, Dzmitry Bahdanau, Leandro von Werra, and Harm de Vries. The stack: 3 tb of permissively licensed source code. Preprint, 2022." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.104, + 0.825, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.828, + 0.19 + ], + "angle": 0, + "content": "Tom Kwiatkowski, Jennimaria Palomaki, Olivia Redfield, Michael Collins, Ankur Parikh, Chris Alberti, Danielle Epstein, Illia Polosukhin, Jacob Devlin, Kenton Lee, Kristina Toutanova, Llion Jones, Matthew Kelcey, Ming-Wei Chang, Andrew M. Dai, Jakob Uszkoreit, Quoc Le, and Slav Petrov. 
Natural questions: A benchmark for question answering research. Transactions of the Association for Computational Linguistics, 7:453-466, 2019. doi: 10.1162/tacl_a_00276. URL https://doi.org/10.1162/tacl_a_00276." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.196, + 0.827, + 0.268 + ], + "angle": 0, + "content": "Kenton Lee, Ming-Wei Chang, and Kristina Toutanova. Latent retrieval for weakly supervised open domain question answering. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pp. 6086-6096, Florence, Italy, July 2019. Association for Computational Linguistics. doi: 10.18653/v1/P19-1612. URL https://www.aclweb.org/anthology/P19-1612." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.274, + 0.825, + 0.319 + ], + "angle": 0, + "content": "Raymond Li, Loubna Ben Allal, Yangtian Zi, Niklas Muennighoff, Denis Kocetkov, Chenghao Mou, Marc Marone, Christopher Akiki, Jia Li, Jenny Chim, et al. Starcoder: may the source be with you! arXiv preprint arXiv:2305.06161, 2023a." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.324, + 0.824, + 0.356 + ], + "angle": 0, + "content": "Yuanzhi Li, Sebastien Bubeck, Ronen Eldan, Allie Del Giorno, Suriya Gunasekar, and Yin Tat Lee. Textbooks are all you need ii: phi-1.5 technical report. arXiv preprint arXiv:2309.05463, 2023b." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.362, + 0.825, + 0.405 + ], + "angle": 0, + "content": "Percy Liang, Rishi Bommasani, Tony Lee, Dimitris Tsipras, Dilara Soylu, Michihiro Yasunaga, Yian Zhang, Deepak Narayanan, Yuhuai Wu, Ananya Kumar, et al. Holistic evaluation of language models. arXiv preprint arXiv:2211.09110, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.412, + 0.825, + 0.442 + ], + "angle": 0, + "content": "Yang Liu, Dan Iter, Yichong Xu, Shuohang Wang, Ruochen Xu, and Chenguang Zhu. G-eval: Nlg evaluation using gpt-4 with better human alignment, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.449, + 0.825, + 0.506 + ], + "angle": 0, + "content": "Shayne Longpre, Gregory Yauney, Emily Reif, Katherine Lee, Adam Roberts, Barret Zoph, Denny Zhou, Jason Wei, Kevin Robinson, David Mimno, and Daphne Ippolito. A pretrainer's guide to training data: Measuring the effects of data age, domain coverage, quality, & toxicity. arXiv, abs/2305.13169, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.513, + 0.822, + 0.543 + ], + "angle": 0, + "content": "Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization, 2019. URL https://arxiv.org/abs/1711.05101." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.55, + 0.825, + 0.581 + ], + "angle": 0, + "content": "Anton Lozhkov, Loubna Ben Allal, Leandro von Werra, and Thomas Wolf. Fineweb-edu, May 2024. URL https://huggingface.co/datasets/HuggingFaceFW/fineweb-edu." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.586, + 0.825, + 0.617 + ], + "angle": 0, + "content": "Yingwei Ma, Yue Liu, Yue Yu, Yuanliang Zhang, Yu Jiang, Changjian Wang, and Shanshan Li. At which training stage does code data help llms reasoning? arXiv preprint arXiv:2309.16298, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.623, + 0.825, + 0.695 + ], + "angle": 0, + "content": "Aman Madaan, Dheeraj Rajagopal, Niket Tandon, Yiming Yang, and Antoine Bosselut. Conditional set generation using seq2seq models. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pp. 
4874-4896, Abu Dhabi, United Arab Emirates, December 2022a. Association for Computational Linguistics. URL https://aclanthology.org/2022.emnlp-main.324." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.702, + 0.825, + 0.732 + ], + "angle": 0, + "content": "Aman Madaan, Shuyan Zhou, Uri Alon, Yiming Yang, and Graham Neubig. Language models of code are few-shot commonsense learners. arXiv preprint arXiv:2210.07128, 2022b." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.738, + 0.825, + 0.782 + ], + "angle": 0, + "content": "Max Marion, Ahmet Üstün, Luiza Pozzobon, Alex Wang, Marzieh Fadaee, and Sara Hooker. When less is more: Investigating data pruning for pretraining llms at scale, 2023. URL https:// arxiv.org/abs/2309.04564." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.79, + 0.825, + 0.874 + ], + "angle": 0, + "content": "Nasrin Mostafazadeh, Nathanael Chambers, Xiaodong He, Devi Parikh, Dhruv Batra, Lucy Vanderwende, Pushmeet Kohli, and James Allen. A corpus and cloze evaluation for deeper understanding of commonsense stories. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pp. 839-849, San Diego, California, June 2016. Association for Computational Linguistics. doi: 10.18653/v1/N16-1098. URL https://aclanthology.org/N16-1098." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.882, + 0.825, + 0.925 + ], + "angle": 0, + "content": "Niklas Muennighoff, Alexander M Rush, Boaz Barak, Teven Le Scao, Aleksandra Piktus, Nouamane Tazi, Sampo Pyysalo, Thomas Wolf, and Colin Raffel. Scaling data-constrained language models. arXiv preprint arXiv:2305.16264, 2023a." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.828, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.826, + 0.148 + ], + "angle": 0, + "content": "Niklas Muennighoff, Alexander M. Rush, Boaz Barak, Teven Le Scao, Aleksandra Piktus, Nouamane Tazi, Sampo Pyysalo, Thomas Wolf, and Colin Raffel. Scaling data-constrained language models, 2023b. URL https://arxiv.org/abs/2305.16264." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.156, + 0.828, + 0.283 + ], + "angle": 0, + "content": "Niklas Muennighoff, Thomas Wang, Lintang Sutawika, Adam Roberts, Stella Biderman, Teven Le Scao, M Saiful Bari, Sheng Shen, Zheng Xin Yong, Hailey Schoelkopf, Xiangru Tang, Dragomir Radev, Alham Fikri Aji, Khalid Almubarak, Samuel Albanie, Zaid Alyafeai, Albert Webson, Edward Raff, and Colin Raffel. Crosslingual generalization through multitask finetuning. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 15991-16111, Toronto, Canada, July 2023c. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-long.891. URL https://aclanthology.org/2023.acl-long.891." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.292, + 0.825, + 0.348 + ], + "angle": 0, + "content": "Long Ouyang, Jeffrey Wu, Xu Jiang, Diogo Almeida, Carroll Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, et al. 
Training language models to follow instructions with human feedback. Advances in Neural Information Processing Systems, 35: 27730-27744, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.359, + 0.825, + 0.444 + ], + "angle": 0, + "content": "Jupinder Parmar, Shrimai Prabhumoye, Joseph Jennings, Mostofa Patwary, Sandeep Subramanian, Dan Su, Chen Zhu, Deepak Narayanan, Aastha Jhunjunwala, Ayush Dattagupta, Vibhu Jawa, Jiwei Liu, Ameya Mahabaleshwarkar, Osvald Nitski, Annika Brundyn, James Maki, Miguel Martinez, Jiaxuan You, John Kamalu, Patrick LeGresley, Denys Fridman, Jared Casper, Ashwath Aithal, Oleksii Kuchaiev, Mohammad Shoeybi, Jonathan Cohen, and Bryan Catanzaro. Nemotron-4 15b technical report, 2024. URL https://arxiv.org/abs/2402.16819." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.453, + 0.825, + 0.496 + ], + "angle": 0, + "content": "Guilherme Penedo, Quentin Malartic, Daniel Hesslow, Ruxandra Cojocaru, Alessandro Cappelli, Hamza Alobeidli, Baptiste Pannier, Ebtesam Almazrouei, and Julien Launay. The refined web dataset for falcon llm: Outperforming curated corpora with web data, and web data only, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.505, + 0.825, + 0.536 + ], + "angle": 0, + "content": "Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, Ilya Sutskever, et al. Language models are unsupervised multitask learners. OpenAI blog, 1(8):9, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.544, + 0.825, + 0.587 + ], + "angle": 0, + "content": "Jack W Rae, Sebastian Borgeaud, Trevor Cai, Katie Millican, Jordan Hoffmann, Francis Song, John Aslanides, Sarah Henderson, Roman Ring, Susannah Young, et al. Scaling language models: Methods, analysis & insights from training gopher. arXiv preprint arXiv:2112.11446, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.596, + 0.826, + 0.821 + ], + "angle": 0, + "content": "Jack W. Rae, Sebastian Borgeaud, Trevor Cai, Katie Millican, Jordan Hoffmann, Francis Song, John Aslanides, Sarah Henderson, Roman Ring, Susannah Young, Eliza Rutherford, Tom Hennigan, Jacob Menick, Albin Cassirer, Richard Powell, George van den Driessche, Lisa Anne Hendricks, Maribeth Rauh, Po-Sen Huang, Amelia Glaese, Johannes Welbl, Sumanth Dathathri, Saffron Huang, Jonathan Uesato, John Mellor, Irina Higgins, Antonia Creswell, Nat McAleese, Amy Wu, Erich Olsen, Siddhant Jayakumar, Elena Buchatskaya, David Budden, Esme Sutherland, Karen Simonyan, Michela Paganini, Laurent Sifre, Lena Martens, Xiang Lorraine Li, Adhiguna Kuncoro, Aida Nematzadeh, Elena Gribovskaya, Domenic Donato, Angeliki Lazaridou, Arthur Mensch, Jean-Baptiste Lespiau, Maria Tsimpoukelli, Nikolai Grigorev, Doug Fritz, Thibault Sottiaux, Mantas Pajarskas, Toby Pohlen, Zhitao Gong, Daniel Toyama, Cyprien de Masson d'Autume, Yu-jia Li, Tayfun Terzi, Vladimir Mikulik, Igor Babuschkin, Aidan Clark, Diego de Las Casas, Aurelia Guy, Chris Jones, James Bradbury, Matthew Johnson, Blake Hechtman, Laura Weidinger, Iason Gabriel, William Isaac, Ed Lockhart, Simon Osindero, Laura Rimell, Chris Dyer, Oriol Vinyals, Kareem Ayoub, Jeff Stanway, Lorrayne Bennett, Demis Hassabis, Koray Kavukcuoglu, and Geoffrey Irving. Scaling language models: Methods, analysis & insights from training gopher, 2022. URL https://arxiv.org/abs/2112.11446." 
+ },
 { "type": "ref_text", "bbox": [ 0.173, 0.829, 0.825, 0.872 ], "angle": 0, "content": "Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J. Liu. Exploring the limits of transfer learning with a unified text-to-text transformer. arXiv e-prints, 2019." },
 { "type": "ref_text", "bbox": [ 0.173, 0.882, 0.825, 0.925 ], "angle": 0, "content": "Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J. Liu. Exploring the limits of transfer learning with a unified text-to-text transformer. arXiv e-prints, abs/1910.10683, 2020." },
 { "type": "list", "bbox": [ 0.173, 0.103, 0.828, 0.925 ], "angle": 0, "content": null },
 { "type": "page_number", "bbox": [ 0.49, 0.948, 0.51, 0.96 ], "angle": 0, "content": "20" }
 ],
 [
 { "type": "header", "bbox": [ 0.173, 0.033, 0.48, 0.049 ], "angle": 0, "content": "Published as a conference paper at ICLR 2025" },
 { "type": "ref_text", "bbox": [ 0.175, 0.103, 0.828, 0.147 ], "angle": 0, "content": "Yasaman Razeghi, Hamish Ivison, Sameer Singh, and Yanai Elazar. Backtracking mathematical reasoning of language models to the pretraining data. In The Second Tiny Papers Track at ICLR 2024." },
 { "type": "ref_text", "bbox": [ 0.173, 0.157, 0.826, 0.187 ], "angle": 0, "content": "Keisuke Sakaguchi, Ronan Le Bras, Chandra Bhagavatula, and Yejin Choi. Winogrande: An adversarial winograd schema challenge at scale, 2019." },
 { "type": "ref_text", "bbox": [ 0.174, 0.197, 0.825, 0.226 ], "angle": 0, "content": "Maarten Sap, Hannah Rashkin, Derek Chen, Ronan LeBras, and Yejin Choi. Socialiqa: Commonsense reasoning about social interactions. arXiv, abs/1904.09728, 2019." },
 { "type": "ref_text", "bbox": [ 0.174, 0.236, 0.826, 0.307 ], "angle": 0, "content": "Minjoon Seo, Tom Kwiatkowski, Ankur Parikh, Ali Farhadi, and Hannaneh Hajishirzi. Phrase-indexed question answering: A new challenge for scalable document comprehension. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pp. 559-564, Brussels, Belgium, October-November 2018. Association for Computational Linguistics. doi: 10.18653/v1/D18-1052. URL https://aclanthology.org/D18-1052." },
 { "type": "ref_text", "bbox": [ 0.173, 0.316, 0.825, 0.36 ], "angle": 0, "content": "Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Mingchuan Zhang, YK Li, Yu Wu, and Daya Guo. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024." },
 { "type": "ref_text", "bbox": [ 0.173, 0.37, 0.825, 0.398 ], "angle": 0, "content": "Noam Shazeer. Glu variants improve transformer, 2020. URL https://arxiv.org/abs/2002.05202." },
 { "type": "ref_text", "bbox": [ 0.174, 0.409, 0.825, 0.452 ], "angle": 0, "content": "Luísa Shimabucoro, Sebastian Ruder, Julia Kreutzer, Marzieh Fadaee, and Sara Hooker. Llm see, llm do: Guiding data generation to target non-differentiable objectives, 2024. URL https://arxiv.org/abs/2407.01490." },
 { "type": "ref_text", "bbox": [ 0.174, 0.462, 0.826, 0.574 ], "angle": 0, "content": "Shivalika Singh, Freddie Vargus, Daniel Dsouza, Borje F.
Karlsson, Abinaya Mahendiran, Wei-Yin Ko, Herumb Shandilya, Jay Patel, Deividas Mataciunas, Laura OMahony, Mike Zhang, Ramith Hettiarachchi, Joseph Wilson, Marina Machado, Luisa Souza Moura, Dominik Krzeminski, Hakimeh Fadaei, Irem Ergun, Ifeoma Okoh, Aisha Alaagib, Oshan Mudannayake, Zaid Alyafeai, Vu Minh Chien, Sebastian Ruder, Surya Guthikonda, Emad A. Alghamdi, Sebastian Gehrmann, Niklas Muennighoff, Max Bartolo, Julia Kreutzer, Ahmet Üstün, Marzieh Fadaee, and Sara Hooker. Aya dataset: An open-access collection for multilingual instruction tuning. arXiv preprint arXiv:2402.06619, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.585, + 0.825, + 0.628 + ], + "angle": 0, + "content": "Daria Soboleva, Faisal Al-Khateeb, Robert Myers, Jacob R Steeves, Joel Hestness, and Nolan Dey. SlimPajama: A 627B token cleaned and deduplicated version of RedPajama, 2023. URL https://huggingface.co/datasets/cerebras/SlimPajama-627B." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.637, + 0.825, + 0.681 + ], + "angle": 0, + "content": "Gemini Team, Rohan Anil, Sebastian Borgeaud, Yonghui Wu, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, et al. Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.69, + 0.826, + 0.872 + ], + "angle": 0, + "content": "Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, Dan Bikel, Lukas Blecher, Cristian Canton Ferrer, Moya Chen, Guillem Cucurull, David Esiobu, Jude Fernandes, Jeremy Fu, Wenyin Fu, Brian Fuller, Cynthia Gao, Vedanuj Goswami, Naman Goyal, Anthony Hartshorn, Saghar Hosseini, Rui Hou, Hakan Inan, Marcin Kardas, Viktor Kerkez, Madian Khabsa, Isabel Kloumann, Artem Korenev, Punit Singh Koura, Marie-Anne Lachaux, Thibaut Lavril, Jenya Lee, Diana Liskovich, Yinghai Lu, Yuning Mao, Xavier Martinet, Todor Mihaylov, Pushkar Mishra, Igor Molybog, Yixin Nie, Andrew Poulton, Jeremy Reizenstein, Rashi Rungta, Kalyan Saladi, Alan Schelten, Ruan Silva, Eric Michael Smith, Ranjan Subramanian, Xiaqing Ellen Tan, Binh Tang, Ross Taylor, Adina Williams, Jian Xiang Kuan, Puxin Xu, Zheng Yan, Iliyan Zarov, Yuchen Zhang, Angela Fan, Melanie Kambadur, Sharan Narang, Aurelien Rodriguez, Robert Stojnic, Sergey Edunov, and Thomas Scialom. Llama 2: Open foundation and fine-tuned chat models. arXiv, abs/2307.09288, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.882, + 0.826, + 0.925 + ], + "angle": 0, + "content": "Alex Wang, Yada Pruksachatkun, Nikita Nangia, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R. Bowman. Superglue: A stickier benchmark for general-purpose language understanding systems, 2020. URL https://arxiv.org/abs/1905.00537." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.828, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.104, + 0.826, + 0.134 + ], + "angle": 0, + "content": "Ben Wang and Aran Komatsuzaki. GPT-J-6B: A 6 Billion Parameter Autoregressive Language Model. https://github.com/kingofflolz/mesh-transformer-jax, May 2021." 
+ },
 { "type": "ref_text", "bbox": [ 0.173, 0.142, 0.826, 0.213 ], "angle": 0, "content": "Yizhong Wang, Yeganeh Kordi, Swaroop Mishra, Alisa Liu, Noah A. Smith, Daniel Khashabi, and Hannaneh Hajishirzi. Self-instruct: Aligning language models with self-generated instructions. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 13484-13508, Toronto, Canada, July 2023. Association for Computational Linguistics. URL https://aclanthology.org/2023.acl-long.754." },
 { "type": "ref_text", "bbox": [ 0.173, 0.223, 0.825, 0.264 ], "angle": 0, "content": "Johannes Welbl, Nelson F Liu, and Matt Gardner. Crowdsourcing multiple choice science questions. In Proceedings of the 3rd Workshop on Noisy User-generated Text, pp. 94-106, September 2017. doi: 10.18653/v1/W17-4413. URL https://aclanthology.org/W17-4413." },
 { "type": "ref_text", "bbox": [ 0.173, 0.275, 0.72, 0.29 ], "angle": 0, "content": "Wikipedia. Wikipedia downloads. URL https://dumps.wikipedia.org." },
 { "type": "ref_text", "bbox": [ 0.173, 0.299, 0.826, 0.925 ], "angle": 0, "content": "BigScience Workshop: Teven Le Scao, Angela Fan, Christopher Akiki, Ellie Pavlick, Suzana Ilić, Daniel Hesslow, Roman Castagné, Alexandra Sasha Luccioni, François Yvon, Matthias Gallé, Jonathan Tow, Alexander M. Rush, Stella Biderman, Albert Webson, Pawan Sasanka Ammanamchi, Thomas Wang, Benoît Sagot, Niklas Muennighoff, Albert Villanova del Moral, Olatunji Ruwase, Rachel Bawden, Stas Bekman, Angelina McMillan-Major, Iz Beltagy, Huu Nguyen, Lucile Saulnier, Samson Tan, Pedro Ortiz Suarez, Victor Sanh, Hugo Laurençon, Yacine Jernite, Julien Launay, Margaret Mitchell, Colin Raffel, et al. Bloom: A 176b-parameter open-access multilingual language model, 2023. URL https://arxiv.org/abs/2211.05100." },
 { "type": "list", "bbox": [ 0.173, 0.104, 0.826, 0.925 ], "angle": 0, "content": null },
 { "type": "page_number", "bbox": [ 0.49, 0.948, 0.509, 0.96 ], "angle": 0, "content": "22" }
 ],
 [
 { "type": "header", "bbox": [ 0.173, 0.033, 0.48, 0.049 ], "angle": 0, "content": "Published as a conference paper at ICLR 2025" },
 { "type": "ref_text", "bbox": [ 0.173, 0.488, 0.825, 0.517 ], "angle": 0, "content": "Rowan Zellers, Ari Holtzman, Yonatan Bisk, Ali Farhadi, and Yejin Choi. Hellaswag: Can a machine really finish your sentence? arXiv, abs/1905.07830, 2019." },
 { "type": "ref_text", "bbox": [ 0.173, 0.526, 0.825, 0.582 ], "angle": 0, "content": "Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen, Christopher Dewan, Mona Diab, Xian Li, Xi Victoria Lin, Todor Mihaylov, Myle Ott, Sam Shleifer, Kurt Shuster, Daniel Simig, Punit Singh Koura, Anjali Sridhar, Tianlu Wang, and Luke Zettlemoyer. Opt: Open pre-trained transformer language models, 2022." },
 { "type": "ref_text", "bbox": [ 0.173, 0.591, 0.825, 0.634 ], "angle": 0, "content": "Xinlu Zhang, Zhiyu Zoey Chen, Xi Ye, Xianjun Yang, Lichang Chen, William Yang Wang, and Linda Ruth Petzold. Unveiling the impact of coding data instruction fine-tuning on large language models reasoning. arXiv preprint arXiv:2405.20535, 2024." },
 { "type": "ref_text", "bbox": [ 0.173, 0.643, 0.825, 0.685 ], "angle": 0, "content": "Qihao Zhu, Daya Guo, Zhihong Shao, Dejian Yang, Peiyi Wang, Runxin Xu, Y Wu, Yukun Li, Huazuo Gao, Shirong Ma, et al. Deepseek-coder-v2: Breaking the barrier of closed-source models in code intelligence. arXiv preprint arXiv:2406.11931, 2024." },
 { "type": "ref_text", "bbox": [ 0.173, 0.693, 0.825, 0.751 ], "angle": 0, "content": "Ahmet Üstün, Viraat Aryabumi, Zheng-Xin Yong, Wei-Yin Ko, Daniel D'souza, Gbemileke Onilude, Neel Bhandari, Shivalika Singh, Hui-Lee Ooi, Amr Kayid, Freddie Vargus, Phil Blunsom, Shayne Longpre, Niklas Muennighoff, Marzieh Fadaee, Julia Kreutzer, and Sara Hooker. Aya model: An instruction finetuned open-access multilingual language model, 2024."
+ },
 { "type": "list", "bbox": [ 0.173, 0.488, 0.825, 0.751 ], "angle": 0, "content": null },
 { "type": "page_number", "bbox": [ 0.49, 0.948, 0.509, 0.96 ], "angle": 0, "content": "23" }
 ],
 [
 { "type": "header", "bbox": [ 0.173, 0.033, 0.48, 0.049 ], "angle": 0, "content": "Published as a conference paper at ICLR 2025" },
 { "type": "title", "bbox": [ 0.172, 0.103, 0.504, 0.119 ], "angle": 0, "content": "ETHICS STATEMENT AND LIMITATIONS" },
 { "type": "text", "bbox": [ 0.171, 0.134, 0.827, 0.193 ], "angle": 0, "content": "While we systematically study the impact of code data on downstream natural language tasks, we do not study its impact on safety and bias. Additionally, given the nature of pre-training and the number of ablations we have conducted, we were limited in the scale of model sizes we could explore due to prohibitive compute costs." },
 { "type": "title", "bbox": [ 0.173, 0.21, 0.334, 0.226 ], "angle": 0, "content": "REPRODUCIBILITY" },
 { "type": "text", "bbox": [ 0.171, 0.242, 0.825, 0.285 ], "angle": 0, "content": "We provide details about our data mixture (Section 2.1), data filtering (Appendix C.1, C.2, C.3), evaluation (Section 2.2, Appendix A), and training (Section 2.3) setups. We believe these details provide a clear picture of how to reproduce our data setup, model ablations, and evaluation results." },
 { "type": "title", "bbox": [ 0.173, 0.304, 0.4, 0.32 ], "angle": 0, "content": "A EVALUATION DETAILS" },
 { "type": "text", "bbox": [ 0.171, 0.335, 0.825, 0.364 ], "angle": 0, "content": "We briefly describe the details of our evaluation benchmarks and the composite datasets used for each category below:" },
 { "type": "text", "bbox": [ 0.211, 0.375, 0.825, 0.432 ], "angle": 0, "content": "1. World knowledge. These benchmarks aim to measure world knowledge, testing knowledge memorization, retrieval, and question answering capability given context. We include Natural Questions Open (Kwiatkowski et al., 2019) and TriviaQA (Joshi et al., 2017) as the datasets. We report the average exact match scores for both of these benchmarks." },
 { "type": "text", "bbox": [ 0.211, 0.435, 0.827, 0.547 ], "angle": 0, "content": "2. Natural language reasoning. The natural language (NL) reasoning suite consists of 11 benchmarks that involve natural language-based reasoning, such as question answering (Clark et al., 2019; Seo et al., 2018; Welbl et al., 2017; Sap et al., 2019; Choi et al., 2018), natural language inference (NLI) (Wang et al., 2020; de Marneffe et al., 2019), sentence completion (Mostafazadeh et al., 2016; Zellers et al., 2019), co-reference resolution (Sakaguchi et al., 2019), and general intelligence (Clark et al., 2018). We include a full list of the constituent benchmarks in Table 1. We report the average accuracy scores across all benchmarks." },
 { "type": "text", "bbox": [ 0.211, 0.55, 0.827, 0.608 ], "angle": 0, "content": "3. Code. While our main focus is general performance, we also want to measure any changes to code generation performance. For code benchmarks, we focus on the function completion task. We evaluate on HumanEval-Python (Chen et al., 2022) and MBPP (Austin et al., 2021). We report the average pass@1 scores of these benchmarks."
+ },
 { "type": "list", "bbox": [ 0.211, 0.375, 0.827, 0.608 ], "angle": 0, "content": null },
 { "type": "title", "bbox": [ 0.172, 0.631, 0.627, 0.647 ], "angle": 0, "content": "B SUMMARY RESULTS FOR PRE-TRAINING RECIPES" },
 { "type": "text", "bbox": [ 0.172, 0.662, 0.431, 0.677 ], "angle": 0, "content": "Summary results are shown in Table 2." },
 { "type": "title", "bbox": [ 0.172, 0.697, 0.444, 0.713 ], "angle": 0, "content": "C CODE-DATASETS FILTERING" },
 { "type": "title", "bbox": [ 0.172, 0.728, 0.345, 0.743 ], "angle": 0, "content": "C.1 QUALITY FILTERS" },
 { "type": "text", "bbox": [ 0.171, 0.754, 0.827, 0.811 ], "angle": 0, "content": "In addition to the deduplication and quality filtering applied to the GitHub scrapes by StarCoder for The Stack dataset (Li et al., 2023a), we apply filters to remove documents with greater than 1000 float numbers, documents with instances of the string '0x' that are lists of top-level domains, and documents with 'generated by' in the first 400 characters." },
 { "type": "title", "bbox": [ 0.172, 0.827, 0.706, 0.842 ], "angle": 0, "content": "C.2 PROGRAMMING LANGUAGES PRESENT IN WEB-BASED CODE DATASET" },
 { "type": "text", "bbox": [ 0.172, 0.853, 0.771, 0.868 ], "angle": 0, "content": "Programming languages included in our version of The Stack dataset are listed in Table 3." },
 { "type": "title", "bbox": [ 0.172, 0.884, 0.822, 0.899 ], "angle": 0, "content": "C.3 MARKUP-STYLE PROGRAMMING LANGUAGES PRESENT IN WEB-BASED CODE DATASET" },
 { "type": "text", "bbox": [ 0.172, 0.91, 0.72, 0.925 ], "angle": 0, "content": "Markup-style languages included in our version of The Stack dataset are listed in Table 4." },
 { "type": "page_number", "bbox": [ 0.49, 0.948, 0.509, 0.96 ], "angle": 0, "content": "24" }
 ],
 [
 { "type": "header", "bbox": [ 0.173, 0.033, 0.48, 0.049 ], "angle": 0, "content": "Published as a conference paper at ICLR 2025" },
 { "type": "table", "bbox": [ 0.208, 0.101, 0.795, 0.351 ], "angle": 0, "content": "
Task | Dataset | Metric
WORLD KNOWLEDGE TASKS
Question Answering | TriviaQA (Joshi et al., 2017) | 0-shot Acc.
 | NaturalQuestions Open (Lee et al., 2019) | 0-shot Acc.
NATURAL LANGUAGE REASONING
Question Answering | BoolQ (Clark et al., 2019) | 0-shot Acc.
 | PiQA (Seo et al., 2018) | 0-shot Acc.
 | SciQ (Welbl et al., 2017) | 0-shot Acc.
 | SocialQA (Sap et al., 2019) | 0-shot Acc.
 | QuAC (Choi et al., 2018) | 0-shot Acc.
Natural Language Inference | SuperGLUE-CB (Wang et al., 2020; de Marneffe et al., 2019) | 0-shot Acc.
 | SuperGLUE-COPA (Wang et al., 2020) | 0-shot Acc.
Sentence Completion | StoryCloze (Mostafazadeh et al., 2016) | 0-shot Acc.
 | HellaSwag (Zellers et al., 2019) | 0-shot Acc.
Coreference Resolution | Winogrande (Sakaguchi et al., 2019) | 0-shot Acc.
General Intelligence | ARC-Easy (Clark et al., 2018) | 0-shot Acc.
TEXT GENERATION
Open-Ended Generation | Dolly-200 (English) (Singh et al., 2024) | 0-shot win-rate
CODE GENERATION
Function completion | HumanEval (Chen et al., 2021) | 0-shot pass@1
 | MBPP (Austin et al., 2021) | 0-shot pass@1
" },
 { "type": "table_caption", "bbox": [ 0.17, 0.359, 0.825, 0.403 ], "angle": 0, "content": "Table 1: Datasets considered for evaluation: We conduct extensive evaluations across the benchmarks detailed above. These provide valuable proxies for performance in natural language reasoning, world knowledge, open-ended text generation, and code generation tasks." },
 { "type": "table", "bbox": [ 0.208, 0.416, 0.793, 0.584 ], "angle": 0, "content": "
Model Variant | Recipe | Text Tokens | Code Tokens | NL Reason. | NL Know. | NL Avg. | Code | Total Avg.
TEXT-ONLY | Pre-training | 400B | - | 49.0 | 9.5 | 29.2 | 0.4 | 19.6
 | Cooldown | +32B | +8B | 54.1 | 11.1 | 32.6 | 4.4 | 23.2
BALANCED-ONLY | Pre-training | 200B | 200B | 51.8 | 8.1 | 30.0 | 9.0 | 23.0
 | Cooldown | +32B | +8B | 53.2 | 11.1 | 32.1 | 8.4 | 24.2
BALANCED→TEXT | Pre-training Init. | 100B | 100B | 52.0 | 7.4 | 29.6 | 7.8 | 22.4
 | Continue Pre-train. | +180B | +20B | 53.0 | 9.9 | 31.5 | 4.8 | 22.6
 | Cooldown | +32B | +8B | 54.9 | 10.9 | 32.9 | 5.8 | 23.9
CODE→TEXT | Pre-training Init. | - | 200B | 44.7 | 1.5 | 23.1 | 15.5 | 20.6
 | Continue Pre-train. | +180B | +20B | 53.3 | 9.5 | 31.4 | 4.1 | 22.3
 | Cooldown | +32B | +8B | 52.1 | 10.3 | 31.2 | 7.5 | 23.3
" + }, + { + "type": "table_caption", + "bbox": [ + 0.17, + 0.593, + 0.825, + 0.637 + ], + "angle": 0, + "content": "Table 2: Model variants with the corresponding pre-training recipes: Pre-training recipes include initial pre-training, continued pre-training, and cooldown phases. Balanced \\(\\rightarrow\\) Text achieves the best NL performance while Balanced-only performs significantly better in code generation." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.661, + 0.667, + 0.677 + ], + "angle": 0, + "content": "D LLM JUDGE PROMPT AND PREAMBLE FOR WIN-RATES" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.693, + 0.245, + 0.707 + ], + "angle": 0, + "content": "Preamble" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.715, + 0.813, + 0.743 + ], + "angle": 0, + "content": "You are a helpful following assistant whose goal is to select the preferred" + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.743, + 0.614, + 0.758 + ], + "angle": 0, + "content": "(least wrong) output for a given instruction." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.764, + 0.233, + 0.779 + ], + "angle": 0, + "content": "Prompt" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.785, + 0.763, + 0.813 + ], + "angle": 0, + "content": "Which of the following answers is the best one for the given instruction." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.814, + 0.565, + 0.827 + ], + "angle": 0, + "content": "A good answer should follow these rules:" + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.828, + 0.528, + 0.842 + ], + "angle": 0, + "content": "1) It should have correct reasoning," + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.842, + 0.672, + 0.855 + ], + "angle": 0, + "content": "2) It should answer the request in the instruction," + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.856, + 0.818, + 0.869 + ], + "angle": 0, + "content": "3) It should be factually correct and semantically comprehensible," + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.87, + 0.653, + 0.883 + ], + "angle": 0, + "content": "4) It should be grammatically correct and fluent." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.828, + 0.818, + 0.883 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.897, + 0.421, + 0.911 + ], + "angle": 0, + "content": "Instruction: instruction" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "25" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "table", + "bbox": [ + 0.325, + 0.102, + 0.675, + 0.44 + ], + "angle": 0, + "content": "
Language Name | Proportion of total code documents (%)
java | 15.54
javascript | 15.29
php | 12.46
python | 9.60
c-sharp | 8.30
typescript | 7.92
c | 6.63
cpp | 4.91
go | 3.49
ruby | 2.69
shell | 1.82
kotlin | 1.76
swift | 1.52
vue | 1.48
rust | 1.00
scala | 0.94
jsx | 0.83
sql | 0.74
dart | 0.72
makefile | 0.53
lua | 0.47
haskell | 0.45
smalltalk | 0.43
tex | 0.37
clojure | 0.10
" + }, + { + "type": "table_caption", + "bbox": [ + 0.242, + 0.449, + 0.757, + 0.466 + ], + "angle": 0, + "content": "Table 3: Programming languages included in our version of The Stack dataset" + }, + { + "type": "table", + "bbox": [ + 0.304, + 0.496, + 0.695, + 0.646 + ], + "angle": 0, + "content": "
Language Name | Proportion of total code documents (%)
markdown | 54.23
yaml | 10.77
json | 9.97
html | 8.57
css | 6.86
scss | 5.84
restructuredtext | 2.26
toml | 1.25
rmarkdown | 0.02
sass | 0.22
" + }, + { + "type": "table_caption", + "bbox": [ + 0.242, + 0.656, + 0.757, + 0.673 + ], + "angle": 0, + "content": "Table 4: Markup-style languages included in our version of The Stack dataset" + }, + { + "type": "code", + "bbox": [ + 0.171, + 0.716, + 0.805, + 0.927 + ], + "angle": 0, + "content": "Answer (A): completion_a \nAnswer (B): completion_b \nFIRST provide a concise comparison of the two answers which explains \nwhich answer you prefer and why. \nSECOND, on a new line, state exactly one of 'Preferred: Answer (A)' or 'Preferred: Answer (B)' to indicate your choice of preferred response. \nYour response should use the format: \nComparison: \nPreferred: <'Answer (A)' or 'Answer (B)'>" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.961 + ], + "angle": 0, + "content": "26" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.103, + 0.691, + 0.119 + ], + "angle": 0, + "content": "E GENERATIVE WIN-RATES FOR IMPACT OF INITIALIZATION" + }, + { + "type": "image", + "bbox": [ + 0.241, + 0.139, + 0.52, + 0.316 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.524, + 0.141, + 0.756, + 0.315 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.201, + 0.329, + 0.796, + 0.346 + ], + "angle": 0, + "content": "Figure 8: Impact of initialization on generative quality as judged by LLM-as-a-judge." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.375, + 0.684, + 0.391 + ], + "angle": 0, + "content": "F EVALUATION OF 470M COOLDOWN MODELS ON GSM8K" + }, + { + "type": "image_caption", + "bbox": [ + 0.396, + 0.415, + 0.639, + 0.431 + ], + "angle": 0, + "content": "Mathematical Evaluation" + }, + { + "type": "image", + "bbox": [ + 0.364, + 0.432, + 0.637, + 0.618 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.315, + 0.625, + 0.355, + 0.641 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.364, + 0.627, + 0.58, + 0.641 + ], + "angle": 0, + "content": "text \\(\\rightarrow\\) no-code cooldown" + }, + { + "type": "image", + "bbox": [ + 0.316, + 0.645, + 0.355, + 0.66 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.364, + 0.646, + 0.577, + 0.66 + ], + "angle": 0, + "content": "text \\(\\rightarrow\\) cooldown w/ code" + }, + { + "type": "image", + "bbox": [ + 0.316, + 0.665, + 0.355, + 0.679 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.364, + 0.665, + 0.681, + 0.679 + ], + "angle": 0, + "content": "balanced \\(\\rightarrow\\) text \\(\\rightarrow\\) no-code cooldown" + }, + { + "type": "image", + "bbox": [ + 0.316, + 0.684, + 0.355, + 0.699 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.364, + 0.684, + 0.68, + 0.699 + ], + "angle": 0, + "content": "balanced \\(\\rightarrow\\) text \\(\\rightarrow\\) cooldown w/ code" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.72, + 0.828, + 0.79 + ], + "angle": 0, + "content": "Figure 9: Evaluation of 470M cooldown models on GSM8K Including code in any stage of the pre-training improves performance compared to the model where no code has been seen in any of the training stages: pre-training, continual pre-training and cooldown. 
The most performant model in this comparison has seen code in all stages, including cooldown, where it leads to a significant improvement (from 2.9 to 4.12, +42% relative gain)." },
 { "type": "page_number", "bbox": [ 0.49, 0.948, 0.509, 0.96 ], "angle": 0, "content": "27" }
 ]
]
\ No newline at end of file
diff --git a/2025/To Code or Not To Code_ Exploring Impact of Code in Pre-training/e6b439cb-3b05-45ee-8c52-561b8f255560_origin.pdf b/2025/To Code or Not To Code_ Exploring Impact of Code in Pre-training/e6b439cb-3b05-45ee-8c52-561b8f255560_origin.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..8eaa2a5ecb1be562f50e469e584432196d1dab3f
--- /dev/null
+++ b/2025/To Code or Not To Code_ Exploring Impact of Code in Pre-training/e6b439cb-3b05-45ee-8c52-561b8f255560_origin.pdf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0779f8250e8940b5029009c6deeea602f1294e2940724568af370949db32d07e
+size 1978501
diff --git a/2025/To Code or Not To Code_ Exploring Impact of Code in Pre-training/full.md b/2025/To Code or Not To Code_ Exploring Impact of Code in Pre-training/full.md
new file mode 100644
index 0000000000000000000000000000000000000000..c44b5d854b1838500df471e660fc535ca9db1b76
--- /dev/null
+++ b/2025/To Code or Not To Code_ Exploring Impact of Code in Pre-training/full.md
@@ -0,0 +1,408 @@
+# TO CODE, OR NOT TO CODE? EXPLORING IMPACT OF CODE IN PRE-TRAINING

Viraat Aryabumi, Yixuan Su, Raymond Ma, Adrien Morisot, Ivan Zhang, Acyr Locatelli, Marzieh Fadaee, Ahmet Üstün, Sara Hooker {viraat, ahmetustun, sarahooker}@cohere.com

# ABSTRACT

Including code in the pre-training data mixture, even for models not specifically designed for code, has become a common practice in LLM pre-training. While there has been anecdotal consensus among practitioners that code data plays a vital role in general LLMs' performance, there is only limited work analyzing the precise impact of code on non-code tasks. In this work, we systematically investigate the impact of code data on general performance. We ask "what is the impact of code data used in pre-training on a large variety of downstream tasks beyond code generation?" We conduct extensive ablations and evaluate across a broad range of natural language reasoning tasks, world knowledge tasks, code benchmarks, and LLM-as-a-judge win-rates for models with sizes ranging from 470M to 2.8B parameters. Across settings, we find consistent results that code is a critical building block for generalization far beyond coding tasks, and that improvements to code quality have an outsized impact across all tasks. In particular, compared to text-only pre-training, the addition of code results in up to a relative increase of $8.2\%$ in natural language (NL) reasoning, $4.2\%$ in world knowledge, a $6.6\%$ improvement in generative win-rates, and a $12x$ boost in code performance, respectively. Our work suggests that investments in code quality and preserving code during pre-training have positive impacts.

# 1 INTRODUCTION

The role of data has taken on critical significance in recent breakthroughs. State-of-the-art models highlight the importance of the pre-training data mixture and the diversity of data sources (Brown et al., 2020; Longpre et al., 2023; Singh et al., 2024), combined with compute availability, as key drivers of performance (Dubey et al., 2024; Üstün et al., 2024; Team et al., 2023; Aryabumi et al., 2024). A critical question is: what properties of data impart the best general performance?
+

Perhaps surprisingly, code is often included in pre-training even if a model is not explicitly intended to generate high-quality code. Code datasets differ significantly in structure and textual characteristics from high-quality web datasets (Wikimedia; Raffel et al., 2019). Despite this, several previous generations of LLMs, such as PaLM (Chowdhery et al., 2022), Gopher (Rae et al., 2022), and Bloom (Workshop et al., 2023), that were not explicitly intended to support code generation included code data together with high-quality natural language data in their pre-training mixtures.

In current state-of-the-art models, it is an accepted norm not only to include code data but to further increase its proportion: for instance, Llama 3 (Dubey et al., 2024) devotes four times more of its pre-training mixture to code (17%) than Llama 2 (4.5%) (Touvron et al., 2023). While there has been anecdotal consensus among practitioners that code data plays a vital role in LLMs' performance, there has been only limited work analyzing its precise impact on non-code tasks. Prior work shows particular side benefits of the inclusion of code data, such as its impact on scaling in the limited-data regime (Muennighoff et al., 2023a), entity tracking capabilities (Kim et al., 2024), and mathematical reasoning (Razeghi et al., 2024). However, there has been no exhaustive study to date that systematically investigates the impact of code data on general performance. In this work, we ask "what is the impact of code data used in pre-training on a large variety of downstream tasks beyond code generation?"

We embark on an exhaustive set of large-scale controlled pre-training experiments. This includes a consideration of where in the training process adding code is beneficial, code proportions, the role of scaling, and the quality and properties of the code added. While it is a costly endeavor to perform these ablations rigorously, we find consistent and valuable results showing that code provides critical improvements to non-code performance. In particular, compared to text-only pre-training, for our best variant, the addition of code results in a relative increase of $8.2\%$ in natural language (NL) reasoning, $4.2\%$ in world knowledge, a $6.6\%$ improvement in generative win-rates, and a 12x boost in code performance, respectively. Further, performing cooldown with code improves NL reasoning by $3.7\%$, world knowledge by $6.8\%$, and code by $20\%$ relative to cooldown without code, and leads to an additional $4.1\%$ increase in win-rates.

Here, several factors matter, including getting the proportion of code right, improving the quality of code by including synthetic code and code-adjacent data such as commits, and leveraging code across multiple stages of training, including during cooldown. Our results suggest code is a critical building block for generalization far beyond coding tasks, and improvements to code quality have an outsized impact on performance. We conduct an extensive evaluation on a broad range of benchmarks, which cover world knowledge tasks, natural language reasoning, and code generation, as well as LLM-as-a-judge win-rates. Across experiments on models ranging from 470 million to 2.8 billion parameters, we find the following detailed results:

1. Code provides critical improvements to non-code performance. Initialization with code pre-trained models results in improved performance on natural language tasks.
In particular, compared to text-only pre-training, for our best variant, the addition of code results in a relative increase of $8.2\%$ in NL reasoning and $4.2\%$ in world knowledge, a $6.6\%$ improvement in generative win-rates, and a 12x boost in code performance.
2. Code quality and properties matter. Using markup-style programming languages, code-adjacent datasets such as GitHub commits, and synthetically generated code improves performance in pre-training. In particular, training on a higher-quality synthetically generated code dataset results in a $9\%$ and $44\%$ increase in natural language reasoning and code performance, respectively, compared to web-based code data in pre-training. Additionally, continual pre-training from a code model that includes synthetic data results in $1.9\%$ and $41\%$ relative increases in natural language reasoning and code performance respectively, compared to initialization from a code model that does not include synthetic data.
3. Code in cooldown enables further improvement across all tasks. Including code data in the pre-training cooldown, where high-quality datasets are up-weighted, leads to an increase of $3.6\%$ in NL reasoning, $10.1\%$ in world knowledge, and $20\%$ in code performance relative to no cooldown. More significantly, cooldown with code beats the baseline (no cooldown) with a $52.3\%$ win-rate, which is $4.1\%$ higher than cooldown without code.

# 2 METHODOLOGY

We describe the details of our pre-training data (§ 2.1), evaluation (§ 2.2), and training and model details (§ 2.3). Figure 1 shows the high-level experimental framework. Precise details for each experiment and their results are presented in Section 3.

# 2.1 PRE-TRAINING DATA

In this section, we describe the details of our pre-training and cooldown datasets. We aim to evaluate the role of code in pre-training, following current state-of-the-art practices. Hence, we consider pre-training runs that consist of two phases: 1) continued pre-training and 2) cooldown. Continued pre-training refers to training a model that is initialized from a pre-trained model and trained for a fixed token budget. Cooldown (Team et al., 2023; Parmar et al., 2024) involves up-weighting high-quality datasets and annealing the learning rate for a relatively small number of tokens during the final stages of training. This up-weighting of high-quality datasets for a smaller number of steps at the end of training can significantly boost model quality.

Text dataset. We use the SlimPajama pre-training corpus (Soboleva et al., 2023) as our source of natural language text data. SlimPajama is a de-duplicated, quality-filtered, multi-corpora, open-source dataset based on RedPajama-1.2T (Computer, 2023). SlimPajama consists of documents from CommonCrawl, C4, GitHub, Books, ArXiv, Wikipedia, and StackExchange. We filter out all documents from GitHub and StackExchange to remove code and code-adjacent data sources and ensure this is a text-only source. SlimPajama has a total of 627B tokens; after removing all code sources, this results in our text pre-training corpus with a total of 503B tokens.

![](images/a4396e1682e56163e5de9733298ea2cb77f96523c6831e11de756216bfe5ae6c.jpg)
Figure 1: Overview of our experimental framework: We exhaustively evaluate the impact of code by varying: 1) the proportion of code in pre-training, 2) code quality and properties, 3) model initialization, 4) model scale, and 5) the stage of training at which code is introduced. We evaluate the resulting models on a wide-ranging set of tasks, including natural language reasoning, world knowledge, code, and open-ended generations.
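As a concrete illustration of the filtering step above, a minimal sketch that keeps only non-code SlimPajama documents. It assumes the Hugging Face release `cerebras/SlimPajama-627B`, whose examples carry a `meta["redpajama_set_name"]` source tag; the names here are our illustration, not the authors' actual pipeline.

```python
# Minimal sketch of building a text-only corpus from SlimPajama.
# Assumes the Hugging Face release `cerebras/SlimPajama-627B` and its
# `meta["redpajama_set_name"]` source tag; not the authors' pipeline.
from datasets import load_dataset

CODE_SOURCES = {"RedPajamaGithub", "RedPajamaStackExchange"}

def is_text_only(example):
    """Keep a document only if it is not from a code or code-adjacent source."""
    return example["meta"]["redpajama_set_name"] not in CODE_SOURCES

slimpajama = load_dataset("cerebras/SlimPajama-627B", split="train", streaming=True)
text_corpus = slimpajama.filter(is_text_only)  # ~503B of the original 627B tokens
```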
Code datasets. To explore the impact of different properties of code data, we use multiple sources of code in our experiments:

- WEB-BASED CODE DATA: For our main source of code data, we start with the Stack dataset (Kocetkov et al., 2022) that was used to train StarCoder (Li et al., 2023a). The Stack consists of permissively licensed code data scraped from GitHub. We apply quality filters and restrict the data to the top 25 programming languages based on document count. After all filtering steps, the size of this code-only subset is 139B tokens.
- MARKUP-STYLE DATA: We also separately process markup-style languages such as Markdown, CSS, and HTML. After all filtering steps, the size of this markup subset is 180B tokens.
- SYNTHETIC CODE DATA: To ablate the quality of the code dataset, we use a proprietary synthetically generated code dataset that consists of formally verified Python programming problems. We treat this as a high-quality source of code data (see the details in § 3.4). The final synthetic dataset consists of 3.2B code tokens.
- CODE-ADJACENT DATA: Finally, to explore different properties of code data, we include a version of the code data with auxiliary data such as GitHub commits, Jupyter notebooks, and StackExchange threads. For GitHub commits and Jupyter notebooks, we use the datasets provided as part of the Stack (Kocetkov et al., 2022). We use the version of StackExchange that is part of SlimPajama (Soboleva et al., 2023). In total, we have 21.4B tokens of code-adjacent data.

Pre-training cooldown datasets. Cooldown involves up-weighting higher-quality datasets for the final steps of pre-training and has been found to improve performance on downstream tasks (Parmar et al., 2024; Team et al., 2023), in particular to impart instruction-following capabilities. We choose a cooldown mixture comprising high-quality text, math, code, and instruct-style text datasets.

# 2.2 EVALUATION

Our goal is to systematically understand the impact of code on general performance, which requires a broad evaluation suite that extends to a large variety of downstream tasks beyond code generation. To achieve this, we evaluate models on benchmarks that are reasonable proxies for model ability on 1) world knowledge, 2) natural language reasoning, and 3) code performance. In addition, we report win-rates as evaluated by an LLM-as-a-judge. Table 1 (Appendix A) shows the full evaluation suite and the respective groupings, along with the metrics used.

For world knowledge, we use benchmarks that measure knowledge memorization, retrieval, and question answering capability given context. We include Natural Questions Open (Kwiatkowski et al., 2019) and TriviaQA (Joshi et al., 2017) as the datasets. The natural language reasoning suite consists of 11 benchmarks that involve natural-language-based reasoning such as question answering, natural language inference (NLI), sentence completion, co-reference resolution, and general intelligence. We include the full list of the constituent benchmarks with references in Table 1. Finally, while our main focus is general performance, we also want to measure any changes to code generation performance. For code benchmarks, we focus on the function completion task, where we use HumanEval-Python (Chen et al., 2021) and MBPP (Austin et al., 2021).
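HumanEval and MBPP are scored with pass@k. For reference, a sketch of the unbiased pass@k estimator from Chen et al. (2021), to which the pass@1 numbers reported later reduce when a single sample is drawn per problem; this is our illustration, not the authors' evaluation harness.

```python
# Unbiased pass@k estimator from Chen et al. (2021): 1 - C(n-c, k) / C(n, k),
# where n = samples drawn per problem and c = samples that pass the unit tests.
# A reference sketch, not the authors' evaluation harness.
from math import comb

def pass_at_k(n: int, c: int, k: int) -> float:
    """Probability that at least one of k sampled completions passes."""
    if n - c < k:
        return 1.0  # every size-k draw contains at least one passing sample
    return 1.0 - comb(n - c, k) / comb(n, k)

# Example: 3 of 20 samples pass -> pass@1 equals the raw pass fraction, 3/20.
assert abs(pass_at_k(20, 3, 1) - 3 / 20) < 1e-12
```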
We evaluate performance at two scales: 470M and 2.8B parameter models. At the 470M scale, model capabilities are limited; thus, to ensure fair comparisons, we only compare benchmarks for which all models achieve scores above random, similar to Muennighoff et al. (2023a); Lozhkov et al. (2024).

LLM-as-a-judge win-rates. In addition to task-specific discriminative performance, and to allow for a more holistic view across all performance measures, we also evaluate generative performance using LLM-as-a-judge win-rates. This is particularly valuable given recent work showing that as performance on open-ended generations improves, performance on traditional academic tasks can deteriorate (Üstün et al., 2024; Ouyang et al., 2022; Iyer et al., 2022; Muennighoff et al., 2023c). The use of LLM-as-a-judge benchmarks (Fu et al., 2023; Liu et al., 2023; Chiang & Lee, 2023; Shimabucoro et al., 2024) has gained traction as an alternative to human evaluation, which tends to be laborious and expensive (Wang et al., 2023; Boubdir et al., 2023). LLMs as evaluators compare two completions based upon detailed prompts and are reasonable proxies aligned with human preference (Üstün et al., 2024; Dubois et al., 2024).

We use the Dolly-200 English dataset (Üstün et al., 2024; Singh et al., 2024), which consists of 200 hand-picked examples from the Dolly-15K dataset (Conover et al., 2023). These prompts are open-ended and capture general-purpose non-code use cases, making them a valuable proxy for how code impacts more fluid and often open-ended tasks. For our win-rate evaluations, we use Command R+ as the LLM judge. Details about the prompt are provided in Appendix D.

# 2.3 TRAINING AND MODEL DETAILS

We use 470M and 2.8B parameter decoder-only auto-regressive Transformer models (Radford et al., 2019) that are trained with a standard language modeling objective. We use parallel attention layers (Chowdhery et al., 2022; Wang & Komatsuzaki, 2021), SwiGLU activation (Shazeer, 2020), no biases in dense layers, and a byte-pair-encoding tokenizer with a vocabulary size of 256K. All models are pre-trained using AdamW (Loshchilov & Hutter, 2019) with a max sequence length of 8192, a batch size of 512, and a cosine LR schedule with a warmup of 1325 steps.

Infrastructure. We use TPU v5e chips (Jouppi et al., 2017) for training and evaluation. All models are trained using the Jax (Bradbury et al., 2018) framework. We pre-train 64 models in total. This is an enormous endeavor given the scale and computational resources required. Each pre-training run for 200B tokens takes 4736 TPU-chip-hours for 470M and 13824 TPU-chip-hours for 2.8B parameter models. Each cooldown run for 40B tokens takes 1024 TPU-chip-hours for 470M models.

# 3 RESULTS AND DISCUSSION

In this section, we report descriptions and results for each experimental variant. We systematically investigate (1) initializing an LLM with code pre-trained models (§ 3.1), (2) the impact of model scale (§ 3.2), (3) varying the proportion of code in the pre-training data (§ 3.3), (4) the quality and properties of the code data (§ 3.4), and (5) code data in the pre-training cooldown (§ 3.5). Finally, we compare the resulting pre-training recipes (§ 3.6). Figure 1 shows the key levers of our experimental design.

![](images/2c8f69e3bc02422673769f7929c8ccbc81d18d4666c5b42ed23fd45dafe7fa69.jpg)
Figure 2: Impact of initialization using code pre-trained models: Initializing model training with code pre-trained models improves reasoning and code generation compared to text-only models, with the largest improvement when continuing pre-training with a high percentage of text (Balanced $\rightarrow$ Text, Code $\rightarrow$ Text). Note that these variants are designed to isolate the role of initialization, so they do not include cooldown.
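Before turning to results, a small sketch of how the pairwise judge verdicts behind these win-rates reduce to the numbers reported below. This is our illustration of the metric from § 2.2; the judging prompt itself is described in Appendix D, and the example counts are made up.

```python
# How pairwise LLM-as-a-judge verdicts reduce to win-rates. `verdicts` holds
# one entry per Dolly-200 prompt: "A" if the judge preferred model A's
# completion, "B" for model B, "tie" otherwise. Illustration only; the actual
# judging prompt is described in Appendix D.
from collections import Counter

def win_rates(verdicts: list[str]) -> dict[str, float]:
    counts = Counter(verdicts)
    total = len(verdicts)
    return {
        "win_rate_A": counts["A"] / total,
        "win_rate_B": counts["B"] / total,
        "tie_rate": counts["tie"] / total,
        "win_loss_diff": (counts["A"] - counts["B"]) / total,
    }

# Made-up example: 106 wins, 80 losses, 14 ties -> 53.0% win-rate for A.
print(win_rates(["A"] * 106 + ["B"] * 80 + ["tie"] * 14))
```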
# 3.1 INITIALIZING AN LLM WITH CODE PRE-TRAINED MODELS

We explore different initializations of pre-trained models to understand whether using an LM trained on a large portion of code data as the initialization improves performance. These key ablations, along with their token counts, are summarized in Table 2. We briefly describe them below:

- Text LM (TEXT-ONLY BASELINE): A model pre-trained from scratch using Glorot-normal initialization (Glorot et al., 2011) on the text-only data for 400B tokens.
- Balanced LM (BALANCED-ONLY): A model trained with an equal ratio of code and text data (50% text and 50% code) in pre-training for 400B tokens.
- Balance-initialized Text LM (BALANCED $\rightarrow$ TEXT): This model is initialized from a balanced LM (50% text and 50% code) and further pre-trained on text data for 200B tokens.
- Code-initialized Text LM (CODE $\rightarrow$ TEXT): Different from the other variants, this model is initialized from a code LM which is pre-trained on a code dataset for 200B tokens. The code dataset contains a mixture of $80\%$ code data and $20\%$ markup-style code data. We then continually pre-train this model on text for another 200B tokens.

Natural Language Reasoning. As seen in Figure 2, initializing with a $100\%$ code pre-trained model (code $\rightarrow$ text) gives the best performance on the NL reasoning benchmarks, closely followed by the balanced $\rightarrow$ text model. The code $\rightarrow$ text and balanced $\rightarrow$ text models beat the text-only baseline on NL reasoning tasks by $8.8\%$ and $8.2\%$ relative improvement, respectively. The balanced-only model improves upon the baseline by $3.2\%$. This shows that initialization from a pre-trained model with a mix of code has a strong positive effect on NL reasoning tasks. Further, using a text mix with a small percentage of code for continual pre-training results in the best performance, as evidenced by both the code $\rightarrow$ text and balanced $\rightarrow$ text models.

World Knowledge. For world knowledge tasks, we see that the balanced $\rightarrow$ text model has the best performance among all variants, beating code $\rightarrow$ text by $21\%$ and text-only by $4.1\%$ relative improvement. This suggests that performance on world knowledge tasks depends on a more balanced data mixture for initialization and a larger proportion of text in the continual pre-training stage. Overall, code data is still beneficial compared to text-only pre-training for world knowledge tasks.

![](images/98ab9c458f60bc63afdf09a9f3a7c56fe711892c17265bf1505e931c5ccaaca6.jpg)
Figure 3: Impact of model scale on different tasks. We observe that scale provides pronounced gains across tasks of up to a $2.7\mathrm{x}$ increase; however, the overall trend remains the same across scales, showing the consistency of our findings across model sizes.

Trade-offs between NL tasks and code generation. For code generation, balanced-only achieves the best performance, with a $46.7\%$ and $54.5\%$ relative improvement over balanced $\rightarrow$ text and code $\rightarrow$ text. This is expected, as balanced-only includes $50\%$ code throughout pre-training. However, this model trades off better code generation for lower performance on NL tasks. code $\rightarrow$ text and balanced $\rightarrow$ text achieve $2.9\%$ and $2.3\%$ relative increases in NL reasoning, and $17.3\%$ and $22.2\%$ relative increases in world knowledge, respectively, compared to balanced-only.
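To keep the four variants straight, the sketch below encodes each phase schedule from the list above and derives the overall share of code tokens each variant sees. It paraphrases Table 2 for illustration; the dictionary keys are our own shorthand.

```python
# Phase schedules of the four §3.1 variants, from the token budgets above.
# Each phase is (tokens_in_billions, code_fraction_of_mixture). A paraphrase
# of Table 2 for illustration only.
VARIANTS = {
    "text-only":      [(400, 0.0)],
    "balanced-only":  [(400, 0.5)],
    "balanced->text": [(200, 0.5), (200, 0.0)],  # balanced init, then text
    "code->text":     [(200, 1.0), (200, 0.0)],  # code init (incl. markup), then text
}

def overall_code_share(phases):
    """Fraction of all training tokens that were code (incl. markup)."""
    total = sum(tokens for tokens, _ in phases)
    code = sum(tokens * frac for tokens, frac in phases)
    return code / total

for name, phases in VARIANTS.items():
    print(f"{name:>14}: {overall_code_share(phases):.0%} code over 400B tokens")
```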
Generative quality win-rates comparison. Additionally, we compare the generative performance of each code variant (code $\rightarrow$ text and balanced-only) against the text-only model. We report win-rates and observe that the presence of code has a strong positive impact on generation quality. Both the code $\rightarrow$ text and balanced-only models beat the text-only variant by a $6.6\%$ difference in win-loss rates. We again note that the Dolly-200-English evaluation set we use for win-rate calculation is curated to reflect open-ended questions and is a non-code evaluation. This confirms that code data in the pre-training mix not only improves reasoning but also helps the model produce better quality generations.

# 3.2 IMPACT OF SCALE

To understand whether the findings of Section 3.1 transfer to larger models, we train 2.8B parameter models with the same token budget, following the same model variants as at the 470M scale. Figure 3 shows the results of the 2.8B models in comparison with the 470M results.

Comparison between 2.8B and 470M models. Scaling model size to 2.8B enables higher performance for all model variants in all task categories compared to the 470M results. In terms of average performance across NL reasoning and world knowledge, the balanced $\rightarrow$ text model benefits from scaling up by a $33.1\%$ increase relative to the same model at 470M size. The improvements for code $\rightarrow$ text and balanced-only are $31.7\%$ and $30\%$ relative increases.

We find that the improvements in NL reasoning are relatively modest, with $5.3\%$, $9.2\%$, and $5.2\%$ relative gains for balanced $\rightarrow$ text, code $\rightarrow$ text, and balanced-only respectively. However, world knowledge and code performance nearly triple for all model variants. In particular, 2.8B balanced $\rightarrow$ text results increase by $2.7\mathrm{x}$ in world knowledge and $2.5\mathrm{x}$ in code evaluation compared to 470M.

Trends between model variants at 2.8B. Notably, in terms of initialization with code pre-trained models, the same trends seen at the 470M parameter scale hold for the 2.8B models. code $\rightarrow$ text and balanced $\rightarrow$ text models improve over the balanced-only model by $6.9\%$ and $6.1\%$ relative gains; however, they fall significantly behind in code generation performance, with $43.1\%$ and $46.3\%$ relative drops. These results show that the trade-off between NL tasks and code generation increases with model size. Overall, our experiments at a larger scale show that our results hold and are consistent with the trends we observe in the 470M parameter ablations.

![](images/7e24086b373a64314a1087e59d1623c6a838d3d57c9e2acc40df3cf0273d96bd.jpg)

![](images/45f86072ce3adbc7f66d6a67edf2070b009c704bf42dfa4648e58333587db907.jpg)

![](images/0391f65bb231307d946996aa142617c4ed192f040db1142b8318406c3172f3b3.jpg)
Figure 4: Impact of the proportion of code in pre-training on different tasks (x-axis: increasing proportion of code): We observe that as the code proportion of pre-training increases, the performance on code tasks increases linearly. In contrast, there is more sensitivity for NL reasoning and world knowledge tasks, and an optimal range of code proportions where benefits are most tangible. Model size is 470M parameters, trained for 200B tokens.
# 3.3 CODE DATA PROPORTION IN PRE-TRAINING

In these experiments, we ablate the proportion of code data in the pre-training mixture to understand the optimal amount of code for maximizing performance on non-code tasks. Here, we focus on the first phase of pre-training with random initialization. We train six models for 200B tokens with increasing code proportions: $0\%$, $25\%$, $50\%$, $75\%$, $90\%$, and $100\%$. The remaining proportion is filled with text data. For each variant, we train a new model independently in order to carefully ablate the impact of varying code proportions.

Natural Language Reasoning and World Knowledge. For NL reasoning, as the amount of code increases, we see in Figure 4 an increase in performance compared to a text-only ($0\%$ code) model. The best performance comes from the model with $25\%$ code and $75\%$ text, with a $3.4\%$ relative improvement over the model with $0\%$ code. While performance is maintained up to $75\%$ code, it starts to rapidly erode at higher proportions, with a sharp relative drop of $18.3\%$ when the model is trained on $100\%$ code compared to a model with no code.

For world knowledge tasks, we see an inverse relationship with increasing amounts of code. As seen in the middle inset of Figure 4, there is a slight relative drop of $3.4\%$ at $25\%$ code, and this drop worsens to $31\%$ at $75\%$ code compared to the no-code model. The fully code model ($100\%$ code) is unable to perform on world knowledge tasks ($86\%$ drop relative to text-only), as there are no data sources in the pre-training mix from which to acquire the required knowledge.

Performance on Code. For code evaluation, there is a linear increase in performance as the amount of code increases, with the best model being a code-only model. As observable in the right inset of Figure 4, $100\%$ code leads to a 2.6x increase on the code benchmarks compared to the $25\%$ code model. As expected, for the model with $0\%$ code, the average pass@1 score drops to 0.

# 3.4 INFLUENCE OF CODE QUALITY AND PROPERTIES ON GENERAL PERFORMANCE

In this section, we investigate the properties of code data by varying its quality and composition. We study this first (a) from the perspective of training from scratch, as we want to isolate the exact effects of different properties of code data. Second (b), we incorporate the best variant of the code data (high-quality synthetic code) into our continual pre-training experiments to see whether the impact of code quality transfers. We report performance on NL reasoning and code tasks.

We study the effect of the following properties, and sketch the resulting mixtures below: (1) MARKUP-STYLE DATA: we separate markup-style programming languages (§ 2.1) from the rest of the web-based code (Appendix C.3) and replace $20\%$ of code-only tokens with markup-style tokens. (2) CODE-ADJACENT DATA: instead of using purely web-based code data, we replace $15\%$ of code tokens with code-adjacent datasets: GitHub issues ($5\%$), StackExchange ($5\%$), and Jupyter notebooks ($5\%$), resulting in a code-adjacent model. (3) CODE QUALITY: to control the quality of the code, we replace $10\%$ of existing code tokens with a synthetically generated high-quality code dataset. The remaining proportions of web-based code data are kept the same, resulting in a code-synth model.
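As referenced above, a compact rendering of the three ablation mixtures as sampling weights over code sources. The percentages come from the text; the source names and the representation are our own illustration, with the baseline `code` model being 100% web-based code.

```python
# The three §3.4 ablations as sampling weights over code sources, rendered
# from the percentages in the text. Source names are illustrative; the
# baseline `code` model is 100% web-based code.
MIXTURES = {
    "code":          {"web_code": 1.00},
    "code+markup":   {"web_code": 0.80, "markup": 0.20},
    "code+adjacent": {"web_code": 0.85, "github_issues": 0.05,
                      "stackexchange": 0.05, "jupyter": 0.05},
    "code+synth":    {"web_code": 0.90, "synthetic": 0.10},
}

# Sanity check: weights within each mixture sum to 1.
for name, weights in MIXTURES.items():
    assert abs(sum(weights.values()) - 1.0) < 1e-9, name
```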
![](images/bfe1105936533411e23b23642a672fa16cffe114e17aca190b7e14798646b3d1.jpg)
(a) Code-only pre-training

![](images/697f623b23d26d3d291cb46d4ef3c5bdb5b3f1be7f8936b1aeba94d76a428934.jpg)
(b) Continual pre-training
Figure 5: Impact of using different properties of code data: (a) As the most impactful code data source, synthetically generated high-quality code improves NL reasoning and code performance for code pre-training. (b) These improvements from synthetically generated high-quality code data also transfer to the continual pre-training setting. All models are of size 470M parameters.

Code-only pre-training. We compare the above variants to a model trained only on web-based code data (code) from the Stack dataset (Kocetkov et al., 2022), which forms our baseline model. All variants are pre-trained on the same number of tokens (200B) for a fair comparison.

In Figure 5a, we evaluate the impact of code quality and code composition. We observe that across all variants, including diverse code sources and synthetic code leads to gains in natural language performance relative to code; however, only synthetically generated code improves the code benchmarks. We relate this to our code evaluation, which measures performance in Python; thus, different programming languages or code-adjacent data slightly decrease the results. Here, code+markup and code+adjacent lead to $2.8\%$ and $6.3\%$ relative improvements in NL reasoning compared to code (web-code-only), but cause $15.7\%$ and $9.4\%$ drops in code evaluation.

Our synthetic code data (code+synth) is the most impactful ablation. It is particularly impressive given its relatively small share of the overall dataset. Despite a small weighting of $10\%$, the inclusion of synthetic data leads to relative improvements of $9\%$ on NL reasoning and $44.9\%$ on code benchmarks compared to the web-code-only baseline. We note that the lifts observed for synthetic data are even more impressive given the limited amount of synthetic data available compared to code-adjacent data (3.2B tokens vs 21.4B tokens) or markup data (3.2B tokens vs 40B tokens), and the weighting during pre-training allocation ($10\%$ vs $15\%$ vs $20\%$ for synthetic, code-adjacent, and markup data respectively). This suggests a key future lever of improvement: increasing the proportion of such high-quality code data sources.

Continual pre-training. Here, based on the findings from code-only pre-training, we incorporate code+synth into our best continual pre-training variant (balanced+synth $\rightarrow$ text). We compare this against the same variant without synthetic code data (balanced $\rightarrow$ text) to evaluate the benefits of synthetic data. We use the same amounts of code and text tokens in these experiments.

As shown in Figure 5b, balanced+synth $\rightarrow$ text achieves $2\%$ and $35\%$ relative improvements over balanced $\rightarrow$ text in NL reasoning and code, respectively. This further confirms that even a small percentage of high-quality code data not only improves performance in code pre-training but also increases code and non-code performance after continual pre-training with text data.

# 3.5 CODE IN PRE-TRAINING COOLDOWN

In this section, we evaluate the impact of code at the final stage of pre-training. Here, we consider cooldown, where we up-weight high-quality text, math, code, and instruct-style datasets. We change the learning rate schedule from cosine-based decay to linear annealing with a final learning rate of $1e-6$; a sketch of both schedules follows.
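A minimal sketch of the two schedules as used here: cosine decay after the 1325-step warmup from § 2.3 for the main run, then linear annealing down to $1e-6$ during cooldown. The warmup length and the final floor come from the text; the peak learning rate is an illustrative placeholder, not the authors' value.

```python
# Cosine decay with warmup (main run, §2.3) and linear annealing (cooldown,
# §3.5). Warmup length and the 1e-6 floor come from the text; the peak
# learning rate is an illustrative placeholder, not the authors' value.
import math

WARMUP_STEPS = 1325
PEAK_LR = 3e-4   # illustrative placeholder
FINAL_LR = 1e-6  # cooldown floor quoted in the text

def cosine_lr(step: int, total_steps: int) -> float:
    if step < WARMUP_STEPS:
        return PEAK_LR * step / WARMUP_STEPS  # linear warmup to the peak
    progress = (step - WARMUP_STEPS) / (total_steps - WARMUP_STEPS)
    return 0.5 * PEAK_LR * (1.0 + math.cos(math.pi * progress))

def cooldown_lr(step: int, total_steps: int, start_lr: float) -> float:
    # Linear anneal from the LR at the start of cooldown down to FINAL_LR.
    return start_lr + (FINAL_LR - start_lr) * step / total_steps

print(cosine_lr(1325, 100_000))          # peak right after warmup
print(cooldown_lr(5_000, 10_000, 1e-4))  # halfway through a cooldown run
```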
We evaluate the impact of code in cooldown by comparing three models: a pre-trained model before cooldown, cooldown without code data, and cooldown with $20\%$ code data. For the pre-trained model, we use balanced $\rightarrow$ text, as it is our best pre-trained variant. We preserve the same token budget across variants: 40B tokens, which is $10\%$ of the token budget of the pre-trained model.

Impact of code used during cooldown on different tasks. In Figure 6a, we evaluate the impact of code in cooldown on model performance on NL reasoning, world knowledge, and code benchmarks.

![](images/da623e2c7e25504e3b4dff71e5b5195640f97b335a0d0bcf47f61048a5f7f540.jpg)
(a) Downstream tasks

![](images/bea05cec28c33b6ca910fd2883b14f385de8c7ef47fcf40a189caea3262e1abb.jpg)
(b) Generative win-rates
Figure 6: Impact of code data in pre-training cooldown: Including code data in the cooldown phase improves downstream performance relative to cooldown with no code. All cooldown variants benefit on downstream tasks and especially in generative quality. We find the largest gains from cooldown with code, with the highest win-rate of $52.3\%$ over a model with no cooldown.

Across tasks, we find that cooldown with code data is most beneficial, with $3.6\%$, $10.1\%$, and $20\%$ improvements in NL reasoning, world knowledge, and code relative to the model without cooldown.

In contrast, we find that cooldown without code does not provide any increase for either NL reasoning or code, while providing a relative improvement of $3.1\%$ on world knowledge tasks compared to no cooldown, showing the critical role of code data in the cooldown phase of pre-training as well.

Generative win-rates after cooldown. As expected, cooldown has a significant impact on generative performance as measured by win-rates (seen in Figure 6b). This is because we up-weight high-quality data sources in the pre-training mix, including instruction-style datasets such as Dolly v2 (Conover et al., 2023). Both cooldown variants (cooldown w/o code, cooldown w/ code) beat the no-cooldown model by large win-rates ($48.2\%$ and $52.3\%$), as seen in Figure 6b. Comparing the cooldown variants, including code leads to an additional $4.1\%$ in generative win-rates against no-cooldown compared to cooldown without code.

# 3.6 COMPARING PRE-TRAINING RECIPES

Considering all our experiments, we summarize our findings and recommend recipes for pre-training with code data; the two complete recipes discussed here are also sketched below. Table 2 (Appendix B) shows the different variants along with the pre-training phases.

Best recipe for natural language tasks. As seen in Sections 3.1, 3.3, and 3.5, including code in all phases of pre-training provides improvements across all task categories. Looking at the final recipes, we find that the balanced $\rightarrow$ text model followed by a cooldown that includes code data corresponds to the best overall performance on natural language tasks, considering NL reasoning, world knowledge, and generative performance. Notably, this model achieves the highest generative win-rate, $37.7\%$ vs $33.7\%$ against text-only, as shown in Figure 7.

Best recipe for code performance. Among the complete recipes shown in Table 2, balanced-only provides the best performance on code benchmarks. This model achieves a $20\%$ relative gain compared to the second-best code $\rightarrow$ text and a $55\%$ relative gain compared to balanced $\rightarrow$ text. However, balanced-only falls behind in natural language performance by a $2.5\%$ relative difference and a $5.0\%$ win-rate difference (vs text-only), both compared to balanced $\rightarrow$ text.
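As referenced above, the two complete recipes in compact form. The phase budgets and mixtures are our reading of §§ 3.1 and 3.5; the structure and names are illustrative, not a released training configuration.

```python
# The two complete recipes of §3.6 in compact form. Phase budgets and
# mixtures are our reading of §§3.1 and 3.5; an illustration, not a
# released training config.
BEST_FOR_NL = [
    # (phase,               tokens, mixture)
    ("pretrain-balanced",   "200B", {"text": 0.5, "code": 0.5}),
    ("continual-pretrain",  "200B", {"text": 1.0}),
    ("cooldown",            "40B",  {"high_quality_text_math_instruct": 0.8,
                                     "code": 0.2}),
]

BEST_FOR_CODE = [
    ("pretrain-balanced",   "400B", {"text": 0.5, "code": 0.5}),
]
```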
Including code in all phases of pre-training is beneficial across our three task categories and for generative performance. Our recommendation for the best overall performance is to include a balanced mixture of code and text data during pre-training from scratch (§ 3.3), use a relatively lower code percentage during continual pre-training (§ 3.1), and include code data in the cooldown mixture (§ 3.5). Further, we recommend including high-quality code data during all phases of pre-training (§ 3.4).

# 4 RELATED WORK

Understanding the impact of pre-training mixes. Several works have studied the effects of the age, quality, toxicity, and domain of pre-training data (Longpre et al., 2023; Üstün et al., 2024). Several works have looked at the impact of filtering (Raffel et al., 2020; Rae et al., 2021; Penedo et al., 2023), de-duplication (Zhang et al., 2022), and data pruning (Lozhkov et al., 2024; Marion et al., 2023; Chimoto et al., 2024; Boubdir et al., 2023). Furthermore, several works have considered the role of synthetic data in improving performance (Shimabucoro et al., 2024; Dang et al., 2024; Aakanksha et al., 2024) and helping bridge the gap in performance between open-weight and proprietary models (Gunasekar et al., 2023; Li et al., 2023b). In contrast to our work, which focuses explicitly on understanding the role of code, these studies focus on characteristics of training data as a whole.

![](images/0acead19bcc93dc191f7b4b05f59a204988bbe81cefebf3380086b79102e5615.jpg)

![](images/717a60ebef812534c77f398e4b5a40fba0b77fa12b96fb613eb94aff0608eedb.jpg)

![](images/57d292212ebabb7a14ce6e8158b991527af35051c5e192939ade8e652c7d273e.jpg)
Figure 7: Generative performance as measured by win-rates for variants with full cooldown.

Understanding the role of code. Including code in the pre-training data mixture, even for models not specifically designed for code, has been a common practice in LLM pre-training (Dubey et al., 2024; Gemini-Team et al., 2024; Groeneveld et al., 2024). In addition to serving the popular use case of code completion and generation (Chen et al., 2021), previous studies suggest that the addition of code improves the performance of LLMs on various NLP tasks, such as entity linking (Kim et al., 2024), commonsense reasoning (Madaan et al., 2022b), mathematical reasoning (Liang et al., 2022; Madaan et al., 2022a; Gao et al., 2023; Shao et al., 2024), and general reasoning capabilities (Muennighoff et al., 2023a; Fu & Khot, 2022; Ma et al., 2023). Muennighoff et al. (2023b) demonstrated that Python code data can be used to improve pre-training performance; they focused on a low-resource pre-training regime with limited data and an evaluation set-up limited to perplexity evaluations. Zhang et al. (2024) investigated the impact of code on LLMs' internal reasoning capability across various tasks and model families; they only focus on the effect of code in the supervised fine-tuning (SFT) stage, primarily measuring the impact on reasoning. Zhu et al. (2024) report the performance of their DeepSeek-Coder-V2 models on general natural language benchmarks; they compare chat and instruct models, and do not investigate different phases of pre-training or properties of code.

To the best of our knowledge, this work is the first study that presents a thorough investigation of the impact of code in pre-training on non-code tasks.
Our experiments span several axes and an exhaustive evaluation suite, with costly ablations at scale, including model initialization strategies, different proportions and properties of code data, and model scales.

# 5 CONCLUSION

We perform a first-of-its-kind systematic study to answer "what is the impact of code data used in pre-training on a large variety of downstream tasks beyond code generation?" We focus not just on code performance but on downstream natural language performance, as well as generative quality using LLM-as-a-judge win-rates. We conduct ablations that look at initialization, proportions of code, the quality and properties of code, and the role of code in the pre-training cooldown. We find across all scales of experiments that code provides critical improvements to performance on non-code tasks. Compared to text-only pre-training, for our best variant, the addition of code results in a relative increase of $8.2\%$ in natural language (NL) reasoning and $4.2\%$ in world knowledge, a $6.6\%$ improvement in generative win-rates, and a 12x boost in code performance. Further, performing cooldown with code improves NL reasoning, world knowledge, and code by $3.6\%$, $10.1\%$, and $20\%$ respectively, relative to the model before cooldown, and leads to a $52.3\%$ generative win-rate. Finally, we find that adding a small amount of high-quality synthetic data can have an outsized impact on both NL reasoning ($9\%$ relative increase) and code performance ($44.9\%$ relative increase).

# REFERENCES

Aakanksha, Arash Ahmadian, Beyza Ermis, Seraphina Goldfarb-Tarrant, Julia Kreutzer, Marzieh Fadaee, and Sara Hooker. The multilingual alignment prism: Aligning global and local preferences to reduce harm, 2024. URL https://arxiv.org/abs/2406.18682.

Viraat Aryabumi, John Dang, Dwarak Talupuru, et al. Aya 23: Open weight releases to further multilingual progress, 2024. URL https://arxiv.org/abs/2405.15032.

Jacob Austin, Augustus Odena, Maxwell Nye, Maarten Bosma, Henryk Michalewski, David Dohan, Ellen Jiang, Carrie Cai, Michael Terry, Quoc Le, and Charles Sutton. Program synthesis with large language models, 2021. URL https://arxiv.org/abs/2108.07732.

Meriem Boubdir, Edward Kim, Beyza Ermis, Marzieh Fadaee, and Sara Hooker. Which prompts make the difference? Data prioritization for efficient human LLM evaluation, 2023. URL https://arxiv.org/abs/2310.14424.

James Bradbury, Roy Frostig, Peter Hawkins, Matthew James Johnson, Chris Leary, Dougal Maclaurin, George Necula, Adam Paszke, Jake VanderPlas, Skye Wanderman-Milne, and Qiao Zhang. JAX: Composable transformations of Python+NumPy programs, 2018. URL http://github.com/google/jax.

Tom B. Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, et al. Language models are few-shot learners. arXiv, abs/2005.14165, 2020.
Mark Chen, Jerry Tworek, Heewoo Jun, Qiming Yuan, et al. Evaluating large language models trained on code, 2021. URL https://arxiv.org/abs/2107.03374.

Zixiang Chen, Yihe Deng, Yue Wu, Quanquan Gu, and Yuanzhi Li. Towards understanding the mixture-of-experts layer in deep learning. In Alice H. Oh, Alekh Agarwal, Danielle Belgrave, and Kyunghyun Cho (eds.), Advances in Neural Information Processing Systems, 2022. URL https://openreview.net/forum?id=MaYzugDmQV.

Cheng-Han Chiang and Hung-yi Lee. Can large language models be an alternative to human evaluations?, 2023.

Everlyn Asiko Chimoto, Jay Gala, Orevaoghene Ahia, Julia Kreutzer, Bruce A. Bassett, and Sara Hooker. Critical learning periods: Leveraging early training dynamics for efficient data pruning, 2024. URL https://arxiv.org/abs/2405.19462.

Eunsol Choi, He He, Mohit Iyyer, Mark Yatskar, Wen-tau Yih, Yejin Choi, Percy Liang, and Luke Zettlemoyer. QuAC: Question answering in context. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pp. 2174–2184, Brussels, Belgium, October–November 2018. Association for Computational Linguistics. doi: 10.18653/v1/D18-1241. URL https://aclanthology.org/D18-1241.

Aakanksha Chowdhery, Sharan Narang, Jacob Devlin, et al. PaLM: Scaling language modeling with pathways, 2022. URL https://arxiv.org/abs/2204.02311.

Christopher Clark, Kenton Lee, Ming-Wei Chang, Tom Kwiatkowski, Michael Collins, and Kristina Toutanova. BoolQ: Exploring the surprising difficulty of natural yes/no questions. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pp. 2924–2936, Minneapolis, Minnesota, June 2019. Association for Computational Linguistics. doi: 10.18653/v1/N19-1300. URL https://aclanthology.org/N19-1300.
Peter Clark, Isaac Cowhey, Oren Etzioni, Tushar Khot, Ashish Sabharwal, Carissa Schoenick, and Oyvind Tafjord. Think you have solved question answering? Try ARC, the AI2 reasoning challenge. arXiv:1803.05457v1, 2018.

Together Computer. RedPajama: An open dataset for training large language models, 2023. URL https://github.com/togethercomputer/RedPajama-Data.

Mike Conover, Matt Hayes, Ankit Mathur, Jianwei Xie, Jun Wan, Sam Shah, Ali Ghodsi, Patrick Wendell, Matei Zaharia, and Reynold Xin. Free Dolly: Introducing the world's first truly open instruction-tuned LLM, 2023. URL https://www.databricks.com/blog/2023/04/12/dolly-first-open-commercially-viable-instruction-tuned-llm.

John Dang, Arash Ahmadian, Kelly Marchisio, Julia Kreutzer, Ahmet Üstün, and Sara Hooker. RLHF can speak many languages: Unlocking multilingual preference optimization for LLMs, 2024. URL https://arxiv.org/abs/2407.02552.

Marie-Catherine de Marneffe, Mandy Simons, and Judith Tonhauser. The CommitmentBank: Investigating projection in naturally occurring discourse, 2019.

Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, et al. The Llama 3 herd of models, 2024. URL https://arxiv.org/abs/2407.21783.

Yann Dubois, Xuechen Li, Rohan Taori, Tianyi Zhang, Ishaan Gulrajani, Jimmy Ba, Carlos Guestrin, Percy Liang, and Tatsunori B. Hashimoto. AlpacaFarm: A simulation framework for methods that learn from human feedback, 2024. URL https://arxiv.org/abs/2305.14387.

Yao Fu, Hao Peng, and Tushar Khot. How does GPT obtain its ability? Tracing emergent abilities of language models to their sources. Yao Fu's Notion, Dec 2022. URL https://yaofu.notion.site/b9a57ac0acf74f30a1ab9e3e36fa1dc1?pvs=25.

Jinlan Fu, See-Kiong Ng, Zhengbao Jiang, and Pengfei Liu. GPTScore: Evaluate as you desire, 2023.

Yifu Gao, Yongquan He, Zhigang Kan, Yi Han, Linbo Qiao, and Dongsheng Li. Learning joint structural and temporal contextualized knowledge embeddings for temporal knowledge graph completion. In Findings of the Association for Computational Linguistics: ACL 2023, pp. 417–430, Toronto, Canada, July 2023. Association for Computational Linguistics. URL https://aclanthology.org/2023.findings-acl.28.

Gemini-Team, Rohan Anil, Sebastian Borgeaud, et al. Gemini: A family of highly capable multimodal models, 2024. URL https://arxiv.org/abs/2312.11805.
Choquette-Choo Yunjie Li TJ Lu Abe Ittycheriah Prakash Shroff Mani Varadarajan Sanaz Bahargam Rob Willoughby David Gaddy Guillaume Desjardins Marco Cornero Brona Robenegek,Bhavishya Mittal Ben Albrecht Ashish Shenoy Fedor Moiseev Henrik Jacobsson Alireza Ghaffarkhah,Morgane Riviere Alanna Walton Clément Crepy Alicia Parrish Zongwei ZhouClement Farabet Carey Radebaugh Praveen Srinivasan Claudia van der Salm Andreas Fidjeland Salvatore Scellato Eri Latorre-Chimoto Hanna Klimczak-Plucinska David Bridson Dario de Cesare Tom Hudson Piermaria Mendolicchio Lexi Walker Alex Morris Matthew Mauger Alexey Guseynov Alison Reid Seth Odoom Lucia Loher Victor Cotruta Madhavi Yenugula Dominik Grewe Anastasia Petrushkina Tom Duerig Antonio Sanchez Steve YadlowskyAmy Shen Amir Globerson,Lynette Webb Sahil Dua Dong Li Surya BhupatirajuDan Hurt Haroon Qureshi Ananth Agarwal Tomer Shani Matan Eyal Anuj Khare Shreyas Rammohan Belle Lei Wang Chetan Tekur Mihir Sanjay Kale Jinliang Wei Ruoxin Sang Brennan Saeta Tyler Liechty Yi Sun Yao Zhao Stephan Lee Pandu Nayak Doug Fritz Manish Reddy Vuyyyuru John Aslanides Nidhi Vyas Martin Wicke Xiao Ma Evgenii Eltsychev Nina Martin Hardie Cate James Manyika Keyvan Amiri Yelin Kim Xi Xiong Kai Kang Florian Luisier Nilesh Tripuraneni David Madras Mandy Guo Austin Waters Oliver Wang Joshua Ainslie Jason Baldridge Han Zhang Garima Pruthi Jakob Bauer Feng Yang Riham Mansour Jason Gelman Yang XuGeorge Polovets Ji Liu Honglong CaiWarren ChenXiangHai Sheng Emily Xue Sherjil Ozair Christof Angermueller Xiaowei Li Anoop Sinha Weiren Wang Julia Wiesinger Emmanueloul Koukoumidis Yuan Tian Anand Iyer Madhu Gurumurthy Mark Goldenson Parashar Shah MK Blake Hongkun Yu Anthony Urbanowicz Jennimaria Palomaki Chrisantha Fernando Ken Durden Harsh Mehta Nikola Momchev Elahe Rahimtoroghi Maria Georgaki Amit Raul Sebastian Ruder Morgan Redshaw Jinhyuk Lee Denny Zhou Komal Jalan Dinghua Li Blake Hechtman Parker Schuh Milad Nasr Kieran Milan Vladimir Mikulik Juliana Franco Tim Green Nam Nguyen Joe Kelley Aroma Mahendru Andrea Hu Joshua Howland Ben Vargas Jeffrey Hui Kshitij Bansal,Vikram Rao Rakesh Ghiya Emma Wang Ke Ye Jean Michel Sarr Melanie Moranski Preston Madeleine Elish Steve Li Aakash Kaku Jigar Gupta Ice Pasupat Da-Cheng Juan Milan Someswar Tejvi M., Xinyun Chen Aida Amini Alex Fabrikant Eric Chu Xuanyi Dong Amrutta Muthal Senaka Buthpitiya Sarthak Jauhari Nan Hua Urvashi Khandelwal Ayal Hitron Jie Ren Larissa Rinaldi Shahar Drath Avigail Dabush + +Nan-Jiang Jiang, Harshal Godhia, Uli Sachs, Anthony Chen, Yicheng Fan, Hagai Taitelbaum, Hila Noga, Zhuyun Dai, James Wang, Chen Liang, Jenny Hamer, Chun-Sung Ferng, Chenel Elkind, Aviel Atias, Paulina Lee, Vít Listík, Mathias Carlen, Jan van de Kerkhof, Marcin Pikus, Krunoslav Zaher, Paul Müller, Sasha Zykova, Richard Stefanec, Vitaly Gatsko, Christoph Hirnschall, Ashwin Sethi, Xingyu Federico Xu, Chetan Ahuja, Beth Tsai, Anca Stefanoiu, Bo Feng, Keshav Dhandhania, Manish Katyal, Akshay Gupta, Atharva Parulekar, Divya Pitta, Jing Zhao, Vivaan Bhatia, Yashodha Bhavnani, Omar Alhadlaq, Xiaolin Li, Peter Danenberg, Dennis Tu, Alex Pine, Vera Filippova, Abhipso Ghosh, Ben Limonchik, Bhargava Urala, Chaitanya Krishna Lanka, Derik Clive, Yi Sun, Edward Li, Hao Wu, Kevin Hongtongsak, Ianna Li, Kalind Thakkar, Kanyush Omarov, Kushal Majmundar, Michael Alverson, Michael Kucharski, Mohak Patel, Mudit Jain, Maksim Zabelin, Paolo Pelagatti, Rohan Kohli, Saurabh Kumar, Joseph Kim, Swetha Sankar, Vineet Shah, Lakshmi Ramachandruni, Xiangkai Zeng, Ben 
Bariach, Laura Weidinger, Amar Subramanya, Sissie Hsiao, Demis Hassabis, Koray Kavukcuoglu, Adam Sadovsky, Quoc Le, Trevor Strohman, Yonghui Wu, Slav Petrov, Jeffrey Dean, and Oriol Vinyals. Gemini: A family of highly capable multimodal models, 2024. +Xavier Glorot, Antoine Bordes, and Yoshua Bengio. Deep sparse rectifier neural networks. In Proceedings of the fourteenth international conference on artificial intelligence and statistics, pp. 315-323, 2011. +Dirk Groeneveld, Iz Beltagy, Pete Walsh, Akshita Bhagia, Rodney Kinney, Oyvind Tafjord, Ananya Harsh Jha, Hamish Ivison, Ian Magnusson, Yizhong Wang, et al. Olmo: Accelerating the science of language models. arXiv preprint arXiv:2402.00838, 2024. +Suriya Gunasekar, Yi Zhang, Jyoti Aneja, Caio Cesar Teodoro Mendes, Allie Del Giorno, Sivakanth Gopi, Mojan Javaheripi, Piero Kauffmann, Gustavo de Rosa, Olli Saarikivi, et al. Textbooks are all you need. arXiv preprint arXiv:2306.11644, 2023. +Srinivasan Iyer, Xi Victoria Lin, Ramakanth Pasunuru, Todor Mihaylov, Daniel Simig, Ping Yu, Kurt Shuster, Tianlu Wang, Qing Liu, Punit Singh Koura, et al. Opt-iml: Scaling language model instruction meta learning through the lens of generalization. arXiv preprint arXiv:2212.12017, 2022. +Mandar Joshi, Eunsol Choi, Daniel Weld, and Luke Zettlemoyer. TriviaQA: A large scale distantly supervised challenge dataset for reading comprehension. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 1601-1611, Vancouver, Canada, July 2017. Association for Computational Linguistics. doi: 10.18653/v1/P17-1147. URL https://aclanthology.org/P17-1147. +Norman P. Jouppi, Cliff Young, Nishant Patil, David A. Patterson, Gaurav Agrawal, Raminder Bajwa, Sarah Bates, Suresh Bhatia, Nan Boden, Al Borchers, Rick Boyle, Pierre-luc Cantin, Clifford Chao, Chris Clark, Jeremy Coriell, Mike Daley, Matt Dau, Jeffrey Dean, Ben Gelb, Tara Vazir Ghaemmaghami, Rajendra Gottipati, William Gulland, Robert Hagmann, C. Richard Ho, Doug Hogberg, John Hu, Robert Hundt, Dan Hurt, Julian Ibarz, Aaron Jaffey, Alek Jaworski, Alexander Kaplan, Harshit Khaitan, Daniel Killebrew, Andy Koch, Naveen Kumar, Steve Lacy, James Laudon, James Law, Diemthu Le, Chris Leary, Zhuyuan Liu, Kyle Lucke, Alan Lundin, Gordon MacKean, Adriana Maggiore, Maire Mahony, Kieran Miller, Rahul Nagarajan, Ravi Narayanaswami, Ray Ni, Kathy Nix, Thomas Norrie, Mark Omernick, Narayana Penukonda, Andy Phelps, Jonathan Ross, Matt Ross, Amir Salek, Emad Samadiani, Chris Severn, Gregory Sizikov, Matthew Snelham, Jed Souter, Dan Steinberg, Andy Swing, Mercedes Tan, Gregory Thorson, Bo Tian, Horia Toma, Erick Tuttle, Vijay Vasudevan, Richard Walter, Walter Wang, Eric Wilcox, and Doe Hyun Yoon. In-Datacenter Performance Analysis of a Tensor Processing Unit. In Proceedings of the 44th Annual International Symposium on Computer Architecture ISCA, 2017. +Najoung Kim, Sebastian Schuster, and Shubham Toshniwal. Code pretraining improves entity tracking abilities of language models. arXiv preprint arXiv:2405.21068, 2024. +Denis Kocetkov, Raymond Li, Loubna Ben Allal, Jia Li, Chenghao Mou, Carlos Muñoz Ferrandis, Yacine Jernite, Margaret Mitchell, Sean Hughes, Thomas Wolf, Dzmitry Bahdanau, Leandro von Werra, and Harm de Vries. The stack: 3 tb of permissively licensed source code. Preprint, 2022. 
+ +Tom Kwiatkowski, Jennimaria Palomaki, Olivia Redfield, Michael Collins, Ankur Parikh, Chris Alberti, Danielle Epstein, Illia Polosukhin, Jacob Devlin, Kenton Lee, Kristina Toutanova, Llion Jones, Matthew Kelcey, Ming-Wei Chang, Andrew M. Dai, Jakob Uszkoreit, Quoc Le, and Slav Petrov. Natural questions: A benchmark for question answering research. Transactions of the Association for Computational Linguistics, 7:453-466, 2019. doi: 10.1162/tacl_a_00276. URL https://doi.org/10.1162/tacl_a_00276. +Kenton Lee, Ming-Wei Chang, and Kristina Toutanova. Latent retrieval for weakly supervised open domain question answering. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pp. 6086-6096, Florence, Italy, July 2019. Association for Computational Linguistics. doi: 10.18653/v1/P19-1612. URL https://www.aclweb.org/anthology/P19-1612. +Raymond Li, Loubna Ben Allal, Yangtian Zi, Niklas Muennighoff, Denis Kocetkov, Chenghao Mou, Marc Marone, Christopher Akiki, Jia Li, Jenny Chim, et al. Starcoder: may the source be with you! arXiv preprint arXiv:2305.06161, 2023a. +Yuanzhi Li, Sebastien Bubeck, Ronen Eldan, Allie Del Giorno, Suriya Gunasekar, and Yin Tat Lee. Textbooks are all you need ii: phi-1.5 technical report. arXiv preprint arXiv:2309.05463, 2023b. +Percy Liang, Rishi Bommasani, Tony Lee, Dimitris Tsipras, Dilara Soylu, Michihiro Yasunaga, Yian Zhang, Deepak Narayanan, Yuhuai Wu, Ananya Kumar, et al. Holistic evaluation of language models. arXiv preprint arXiv:2211.09110, 2022. +Yang Liu, Dan Iter, Yichong Xu, Shuohang Wang, Ruochen Xu, and Chenguang Zhu. G-eval: Nlg evaluation using gpt-4 with better human alignment, 2023. +Shayne Longpre, Gregory Yauney, Emily Reif, Katherine Lee, Adam Roberts, Barret Zoph, Denny Zhou, Jason Wei, Kevin Robinson, David Mimno, and Daphne Ippolito. A pretrainer's guide to training data: Measuring the effects of data age, domain coverage, quality, & toxicity. arXiv, abs/2305.13169, 2023. +Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization, 2019. URL https://arxiv.org/abs/1711.05101. +Anton Lozhkov, Loubna Ben Allal, Leandro von Werra, and Thomas Wolf. Fineweb-edu, May 2024. URL https://huggingface.co/datasets/HuggingFaceFW/fineweb-edu. +Yingwei Ma, Yue Liu, Yue Yu, Yuanliang Zhang, Yu Jiang, Changjian Wang, and Shanshan Li. At which training stage does code data help llms reasoning? arXiv preprint arXiv:2309.16298, 2023. +Aman Madaan, Dheeraj Rajagopal, Niket Tandon, Yiming Yang, and Antoine Bosselut. Conditional set generation using seq2seq models. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pp. 4874-4896, Abu Dhabi, United Arab Emirates, December 2022a. Association for Computational Linguistics. URL https://aclanthology.org/2022.emnlp-main.324. +Aman Madaan, Shuyan Zhou, Uri Alon, Yiming Yang, and Graham Neubig. Language models of code are few-shot commonsense learners. arXiv preprint arXiv:2210.07128, 2022b. +Max Marion, Ahmet Üstün, Luiza Pozzobon, Alex Wang, Marzieh Fadaee, and Sara Hooker. When less is more: Investigating data pruning for pretraining llms at scale, 2023. URL https:// arxiv.org/abs/2309.04564. +Nasrin Mostafazadeh, Nathanael Chambers, Xiaodong He, Devi Parikh, Dhruv Batra, Lucy Vanderwende, Pushmeet Kohli, and James Allen. A corpus and cloze evaluation for deeper understanding of commonsense stories. 
In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pp. 839-849, San Diego, California, June 2016. Association for Computational Linguistics. doi: 10.18653/v1/N16-1098. URL https://aclanthology.org/N16-1098. +Niklas Muennighoff, Alexander M Rush, Boaz Barak, Teven Le Scao, Aleksandra Piktus, Nouamane Tazi, Sampo Pyysalo, Thomas Wolf, and Colin Raffel. Scaling data-constrained language models. arXiv preprint arXiv:2305.16264, 2023a. + +Niklas Muennighoff, Alexander M. Rush, Boaz Barak, Teven Le Scao, Aleksandra Piktus, Nouamane Tazi, Sampo Pyysalo, Thomas Wolf, and Colin Raffel. Scaling data-constrained language models, 2023b. URL https://arxiv.org/abs/2305.16264. +Niklas Muennighoff, Thomas Wang, Lintang Sutawika, Adam Roberts, Stella Biderman, Teven Le Scao, M Saiful Bari, Sheng Shen, Zheng Xin Yong, Hailey Schoelkopf, Xiangru Tang, Dragomir Radev, Alham Fikri Aji, Khalid Almubarak, Samuel Albanie, Zaid Alyafeai, Albert Webson, Edward Raff, and Colin Raffel. Crosslingual generalization through multitask finetuning. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 15991-16111, Toronto, Canada, July 2023c. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-long.891. URL https://aclanthology.org/2023.acl-long.891. +Long Ouyang, Jeffrey Wu, Xu Jiang, Diogo Almeida, Carroll Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, et al. Training language models to follow instructions with human feedback. Advances in Neural Information Processing Systems, 35: 27730-27744, 2022. +Jupinder Parmar, Shrimai Prabhumoye, Joseph Jennings, Mostofa Patwary, Sandeep Subramanian, Dan Su, Chen Zhu, Deepak Narayanan, Aastha Jhunjunwala, Ayush Dattagupta, Vibhu Jawa, Jiwei Liu, Ameya Mahabaleshwarkar, Osvald Nitski, Annika Brundyn, James Maki, Miguel Martinez, Jiaxuan You, John Kamalu, Patrick LeGresley, Denys Fridman, Jared Casper, Ashwath Aithal, Oleksii Kuchaiev, Mohammad Shoeybi, Jonathan Cohen, and Bryan Catanzaro. Nemotron-4 15b technical report, 2024. URL https://arxiv.org/abs/2402.16819. +Guilherme Penedo, Quentin Malartic, Daniel Hesslow, Ruxandra Cojocaru, Alessandro Cappelli, Hamza Alobeidli, Baptiste Pannier, Ebtesam Almazrouei, and Julien Launay. The refined web dataset for falcon llm: Outperforming curated corpora with web data, and web data only, 2023. +Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, Ilya Sutskever, et al. Language models are unsupervised multitask learners. OpenAI blog, 1(8):9, 2019. +Jack W Rae, Sebastian Borgeaud, Trevor Cai, Katie Millican, Jordan Hoffmann, Francis Song, John Aslanides, Sarah Henderson, Roman Ring, Susannah Young, et al. Scaling language models: Methods, analysis & insights from training gopher. arXiv preprint arXiv:2112.11446, 2021. +Jack W. 
Rae, Sebastian Borgeaud, Trevor Cai, Katie Millican, Jordan Hoffmann, Francis Song, John Aslanides, Sarah Henderson, Roman Ring, Susannah Young, et al. Scaling language models: Methods, analysis & insights from training gopher, 2022. URL https://arxiv.org/abs/2112.11446.
+Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J. Liu. Exploring the limits of transfer learning with a unified text-to-text transformer. arXiv e-prints, 2019.
+Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J. Liu. Exploring the limits of transfer learning with a unified text-to-text transformer. arXiv e-prints, abs/1910.10683, 2020.
+
+Yasaman Razeghi, Hamish Ivison, Sameer Singh, and Yanai Elazar. Backtracking mathematical reasoning of language models to the pretraining data. In The Second Tiny Papers Track at ICLR 2024.
+Keisuke Sakaguchi, Ronan Le Bras, Chandra Bhagavatula, and Yejin Choi. Winogrande: An adversarial winograd schema challenge at scale, 2019.
+Maarten Sap, Hannah Rashkin, Derek Chen, Ronan LeBras, and Yejin Choi. SocialIQA: Commonsense reasoning about social interactions. arXiv, abs/1904.09728, 2019.
+Minjoon Seo, Tom Kwiatkowski, Ankur Parikh, Ali Farhadi, and Hannaneh Hajishirzi. Phrase-indexed question answering: A new challenge for scalable document comprehension. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pp. 559-564, Brussels, Belgium, October-November 2018. Association for Computational Linguistics. doi: 10.18653/v1/D18-1052. URL https://aclanthology.org/D18-1052.
+Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Mingchuan Zhang, YK Li, Yu Wu, and Daya Guo. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024.
+Noam Shazeer. GLU variants improve transformer, 2020. URL https://arxiv.org/abs/2002.05202.
+Luísa Shimabucoro, Sebastian Ruder, Julia Kreutzer, Marzieh Fadaee, and Sara Hooker. Llm see, llm do: Guiding data generation to target non-differentiable objectives, 2024. URL https://arxiv.org/abs/2407.01490.
+Shivalika Singh, Freddie Vargus, Daniel Dsouza, Börje F. Karlsson, Abinaya Mahendiran, Wei-Yin Ko, Herumb Shandilya, Jay Patel, Deividas Mataciunas, Laura OMahony, et al. Aya dataset: An open-access collection for multilingual instruction tuning. arXiv preprint arXiv:2402.06619, 2024.
+Daria Soboleva, Faisal Al-Khateeb, Robert Myers, Jacob R Steeves, Joel Hestness, and Nolan Dey. SlimPajama: A 627B token cleaned and deduplicated version of RedPajama, 2023. URL https://huggingface.co/datasets/cerebras/SlimPajama-627B.
+Gemini Team, Rohan Anil, Sebastian Borgeaud, Yonghui Wu, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, et al. Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805, 2023.
+Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. Llama 2: Open foundation and fine-tuned chat models. arXiv, abs/2307.09288, 2023.
+Alex Wang, Yada Pruksachatkun, Nikita Nangia, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R. Bowman. Superglue: A stickier benchmark for general-purpose language understanding systems, 2020. URL https://arxiv.org/abs/1905.00537.
+
+Ben Wang and Aran Komatsuzaki. GPT-J-6B: A 6 Billion Parameter Autoregressive Language Model. https://github.com/kingoflolz/mesh-transformer-jax, May 2021.
+Yizhong Wang, Yeganeh Kordi, Swaroop Mishra, Alisa Liu, Noah A. Smith, Daniel Khashabi, and Hannaneh Hajishirzi. Self-instruct: Aligning language models with self-generated instructions. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 13484-13508, Toronto, Canada, July 2023. Association for Computational Linguistics. URL https://aclanthology.org/2023.acl-long.754.
+Johannes Welbl, Nelson F Liu, and Matt Gardner. Crowdsourcing multiple choice science questions. In Proceedings of the 3rd Workshop on Noisy User-generated Text, pp. 94-106, September 2017. doi: 10.18653/v1/W17-4413. URL https://aclanthology.org/W17-4413.
+Wikipedia. Wikipedia downloads. URL https://dumps.wikipedia.org.
+BigScience Workshop: Teven Le Scao, Angela Fan, Christopher Akiki, Ellie Pavlick, Suzana Ilić, Daniel Hesslow, Roman Castagné, Alexandra Sasha Luccioni, François Yvon, Matthias Gallé, et al. Bloom: A 176b-parameter open-access multilingual language model, 2023. URL https://arxiv.org/abs/2211.05100.
+
+Rowan Zellers, Ari Holtzman, Yonatan Bisk, Ali Farhadi, and Yejin Choi. Hellaswag: Can a machine really finish your sentence? arXiv, abs/1905.07830, 2019.
+Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen, Christopher Dewan, Mona Diab, Xian Li, Xi Victoria Lin, et al. Opt: Open pre-trained transformer language models, 2022.
+Xinlu Zhang, Zhiyu Zoey Chen, Xi Ye, Xianjun Yang, Lichang Chen, William Yang Wang, and Linda Ruth Petzold. Unveiling the impact of coding data instruction fine-tuning on large language models reasoning. arXiv preprint arXiv:2405.20535, 2024.
+Qihao Zhu, Daya Guo, Zhihong Shao, Dejian Yang, Peiyi Wang, Runxin Xu, Y Wu, Yukun Li, Huazuo Gao, Shirong Ma, et al. Deepseek-coder-v2: Breaking the barrier of closed-source models in code intelligence. arXiv preprint arXiv:2406.11931, 2024.
+Ahmet Üstün, Viraat Aryabumi, Zheng-Xin Yong, Wei-Yin Ko, Daniel D'souza, Gbemileke Onilude, Neel Bhandari, Shivalika Singh, Hui-Lee Ooi, Amr Kayid, et al. Aya model: An instruction finetuned open-access multilingual language model, 2024.
+
+# ETHICS STATEMENT AND LIMITATIONS
+
+While we systematically study the impact of code data on downstream natural language tasks, we do not study its impact on safety and bias. Additionally, given the cost of pre-training and the number of ablations we conducted, we could not extend our study to larger model sizes due to prohibitive compute costs.
+
+# REPRODUCIBILITY
+
+We provide details about our data mixture (Section 2.1), data filtering (Appendix C.1, C.2, C.3), evaluation (Section 2.2, Appendix A), and training (Section 2.3) setups. We believe these details provide a clear picture of how to reproduce our data setup, model ablations, and evaluation results.
+
+# A EVALUATION DETAILS
+
+We briefly describe the details of our evaluation benchmarks and the composite datasets used for each category below:
+
+1. World knowledge. These benchmarks aim to measure world knowledge, testing knowledge memorization, retrieval, and question answering capability given context. We include Natural Questions Open (Kwiatkowski et al., 2019) and TriviaQA (Joshi et al., 2017) as the datasets. We report the average exact match scores for both of these benchmarks.
+2.
Natural language reasoning. The natural language (NL) reasoning suite consists of 11 benchmarks that involve natural-language-based reasoning, such as question answering (Clark et al., 2019; Seo et al., 2018; Welbl et al., 2017; Sap et al., 2019; Choi et al., 2018), natural language inference (NLI) (Wang et al., 2020; de Marneffe et al., 2019), sentence completion (Mostafazadeh et al., 2016; Zellers et al., 2019), co-reference resolution (Sakaguchi et al., 2019), and general intelligence (Clark et al., 2018). We include a full list of the constituent benchmarks in Table 1. We report the average accuracy scores across all benchmarks.
+3. Code. While our main focus is general performance, we also want to measure any changes to code generation performance. For code benchmarks, we focus on the function completion task. We evaluate on HumanEval-Python (Chen et al., 2021) and MBPP (Austin et al., 2021). We report the average pass@1 scores of these benchmarks.
+
+# B SUMMARY RESULTS FOR PRE-TRAINING RECIPES
+
+Summary results are shown in Table 2.
+
+# C CODE-DATASETS FILTERING
+
+# C.1 QUALITY FILTERS
+
+In addition to the deduplication and quality filtering applied to the GitHub scrapes by StarCoder for The Stack dataset (Li et al., 2023a), we apply filters to remove documents with more than 1000 floating-point numbers, documents with many instances of the string "0x", documents that are lists of top-level domains, and documents containing 'generated by' in the first 400 characters. A sketch of these filters as document-level predicates follows the C.3 overview below.
+
+# C.2 PROGRAMMING LANGUAGES PRESENT IN WEB-BASED CODE DATASET
+
+Programming languages included in our version of The Stack dataset are listed in Table 3.
+
+# C.3 MARKUP-STYLE PROGRAMMING LANGUAGES PRESENT IN WEB-BASED CODE DATASET
+
+Markup-style languages included in our version of The Stack dataset are listed in Table 4.
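To make the C.1 filters concrete, the following is a minimal sketch of them as document-level predicates. This is our illustration, not the paper's pipeline code: the helper names and the thresholds for the "0x" and top-level-domain checks (which the paper does not quantify) are assumptions.

```python
import re

# Matches standalone float literals such as "3.14" or "1.0e-3".
FLOAT_RE = re.compile(r"^[+-]?\d*\.\d+([eE][+-]?\d+)?$")
# Rough check for lines that are essentially domain names.
TLD_RE = re.compile(r"\.(com|org|net|edu|gov|io|co)\b", re.IGNORECASE)

def keep_document(text: str) -> bool:
    """Return True if a code document passes C.1-style quality filters (sketch)."""
    # Filter 1: drop documents with more than 1000 float literals (numeric dumps).
    n_floats = sum(1 for tok in text.split() if FLOAT_RE.match(tok))
    if n_floats > 1000:
        return False
    # Filter 2: drop hex-dump-like documents; the threshold is illustrative,
    # since the paper does not quantify it.
    if text.count("0x") > 100:
        return False
    # Filter 3: drop documents that are mostly lists of top-level domains
    # (the 90% fraction is likewise our assumption).
    lines = [ln for ln in text.splitlines() if ln.strip()]
    if lines and sum(bool(TLD_RE.search(ln)) for ln in lines) / len(lines) > 0.9:
        return False
    # Filter 4: drop auto-generated files, which announce themselves early.
    if "generated by" in text[:400].lower():
        return False
    return True
```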
| Task | Dataset | Metric |
| --- | --- | --- |
| **WORLD KNOWLEDGE TASKS** | | |
| Question Answering | TriviaQA (Joshi et al., 2017) | 0-shot Acc. |
| Question Answering | NaturalQuestions Open (Lee et al., 2019) | 0-shot Acc. |
| **NATURAL LANGUAGE REASONING** | | |
| Question Answering | BoolQ (Clark et al., 2019) | 0-shot Acc. |
| Question Answering | PiQA (Seo et al., 2018) | 0-shot Acc. |
| Question Answering | SciQ (Welbl et al., 2017) | 0-shot Acc. |
| Question Answering | SocialQA (Sap et al., 2019) | 0-shot Acc. |
| Question Answering | QuAC (Choi et al., 2018) | 0-shot Acc. |
| Natural Language Inference | SuperGLUE-CB (Wang et al., 2020; de Marneffe et al., 2019) | 0-shot Acc. |
| Natural Language Inference | SuperGLUE-COPA (Wang et al., 2020) | 0-shot Acc. |
| Sentence Completion | StoryCloze (Mostafazadeh et al., 2016) | 0-shot Acc. |
| Sentence Completion | HellaSwag (Zellers et al., 2019) | 0-shot Acc. |
| Coreference Resolution | Winogrande (Sakaguchi et al., 2019) | 0-shot Acc. |
| General Intelligence | ARC-Easy (Clark et al., 2018) | 0-shot Acc. |
| **TEXT GENERATION** | | |
| Open-Ended Generation | Dolly-200 (English) (Singh et al., 2024) | 0-shot win-rate |
| **CODE GENERATION** | | |
| Function completion | HumanEval (Chen et al., 2021) | 0-shot pass@1 |
| Function completion | MBPP (Austin et al., 2021) | 0-shot pass@1 |
+
+Table 1: Datasets considered for evaluation: We conduct extensive evaluations across the benchmarks detailed above. These provide valuable proxies for performance in natural language reasoning, world knowledge, open-ended text generation, and code generation tasks.
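Since Table 1 reports 0-shot pass@1 for HumanEval and MBPP, a minimal sketch of the standard unbiased pass@k estimator from Chen et al. (2021) may be useful for reference; with one sample per problem (n = k = 1) it reduces to the fraction of problems solved. The example data below is hypothetical.

```python
import numpy as np

def pass_at_k(n: int, c: int, k: int) -> float:
    """Unbiased pass@k for one problem with n samples, c of them correct
    (estimator from Chen et al., 2021)."""
    if n - c < k:
        return 1.0
    return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

# With one sample per problem (n = k = 1), pass@1 is simply the solve rate.
per_problem_correct = [1, 0, 0, 1]  # hypothetical per-problem outcomes
score = sum(pass_at_k(1, c, 1) for c in per_problem_correct) / len(per_problem_correct)
print(score)  # 0.5
```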
| Model Variant | Recipe | Text Tokens | Code Tokens | Reason. | Know. | NL Avg. | Code | Total Avg. |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| TEXT-ONLY | Pre-training | 400B | - | 49.0 | 9.5 | 29.2 | 0.4 | 19.6 |
| | Cooldown | +32B | +8B | 54.1 | 11.1 | 32.6 | 4.4 | 23.2 |
| BALANCED-ONLY | Pre-training | 200B | 200B | 51.8 | 8.1 | 30.0 | 9.0 | 23.0 |
| | Cooldown | +32B | +8B | 53.2 | 11.1 | 32.1 | 8.4 | 24.2 |
| BALANCED→TEXT | Pre-training Init. | 100B | 100B | 52.0 | 7.4 | 29.6 | 7.8 | 22.4 |
| | Continue Pre-train. | +180B | +20B | 53.0 | 9.9 | 31.5 | 4.8 | 22.6 |
| | Cooldown | +32B | +8B | 54.9 | 10.9 | 32.9 | 5.8 | 23.9 |
| CODE→TEXT | Pre-training Init. | - | 200B | 44.7 | 1.5 | 23.1 | 15.5 | 20.6 |
| | Continue Pre-train. | +180B | +20B | 53.3 | 9.5 | 31.4 | 4.1 | 22.3 |
| | Cooldown | +32B | +8B | 52.1 | 10.3 | 31.2 | 7.5 | 23.3 |
+
+Table 2: Model variants with the corresponding pre-training recipes: Pre-training recipes include initial pre-training, continued pre-training, and cooldown phases. NL Avg. is the mean of the Reason. and Know. columns, and Total Avg. is the mean of Reason., Know., and Code. Balanced $\rightarrow$ Text achieves the best NL performance while Balanced-only performs significantly better in code generation.
+
+# D LLM JUDGE PROMPT AND PREAMBLE FOR WIN-RATES
+
+# Preamble
+
+You are a helpful following assistant whose goal is to select the preferred (least wrong) output for a given instruction.
+
+# Prompt
+
+Which of the following answers is the best one for the given instruction.
+
+A good answer should follow these rules:
+
+1) It should have correct reasoning,
+2) It should answer the request in the instruction,
+3) It should be factually correct and semantically comprehensible,
+4) It should be grammatically correct and fluent.
+
+Instruction: instruction
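Operationally, computing win-rates means instantiating this template (it continues in the `txt` block after Tables 3 and 4 below) for each instruction and pair of completions, then parsing the judge's final 'Preferred:' line. The sketch below is our illustration, assuming a generic `judge(prompt) -> str` text-completion callable; the paper does not specify its judge API, and the helper names are ours.

```python
import re

def build_judge_prompt(instruction: str, completion_a: str, completion_b: str) -> str:
    # Abbreviated version of the Appendix D prompt; the full rules text is elided.
    return (
        "Which of the following answers is the best one for the given instruction.\n"
        f"Instruction: {instruction}\n"
        f"Answer (A): {completion_a}\n"
        f"Answer (B): {completion_b}\n"
        "FIRST provide a concise comparison of the two answers.\n"
        "SECOND, on a new line, state exactly one of 'Preferred: Answer (A)' "
        "or 'Preferred: Answer (B)'.\n"
    )

def parse_preference(judge_output: str) -> str | None:
    """Extract 'A' or 'B' from the judge's final 'Preferred: Answer (X)' line."""
    matches = re.findall(r"Preferred:\s*Answer \((A|B)\)", judge_output)
    return matches[-1] if matches else None  # use the last occurrence

def win_rate(preferences: list, model: str = "A") -> float:
    """Fraction of decided pairwise comparisons won by `model`."""
    decided = [p for p in preferences if p in ("A", "B")]
    return sum(p == model for p in decided) / len(decided) if decided else 0.0
```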
| Language Name | Proportion of total code documents (%) |
| --- | --- |
| java | 15.54 |
| javascript | 15.29 |
| php | 12.46 |
| python | 9.60 |
| c-sharp | 8.30 |
| typescript | 7.92 |
| c | 6.63 |
| cpp | 4.91 |
| go | 3.49 |
| ruby | 2.69 |
| shell | 1.82 |
| kotlin | 1.76 |
| Swift | 1.52 |
| Vue | 1.48 |
| rust | 1.00 |
| scala | 0.94 |
| JSX | 0.83 |
| sql | 0.74 |
| dart | 0.72 |
| makefile | 0.53 |
| lua | 0.47 |
| haskell | 0.45 |
| smalltalk | 0.43 |
| tex | 0.37 |
| clojure | 0.10 |
+
+Table 3: Programming languages included in our version of The Stack dataset.
| Language Name | Proportion of total code documents (%) |
| --- | --- |
| markdown | 54.23 |
| yaml | 10.77 |
| json | 9.97 |
| html | 8.57 |
| css | 6.86 |
| SCSS | 5.84 |
| restructuredtext | 2.26 |
| TOML | 1.25 |
| Sass | 0.22 |
| rmarkdown | 0.02 |
+
+Table 4: Markup-style languages included in our version of The Stack dataset.
+
+```txt
+Answer (A): completion_a
+Answer (B): completion_b
+FIRST provide a concise comparison of the two answers which explains
+which answer you prefer and why.
+SECOND, on a new line, state exactly one of 'Preferred: Answer (A)' or 'Preferred: Answer (B)' to indicate your choice of preferred response.
+Your response should use the format:
+Comparison:
+Preferred: <'Answer (A)' or 'Answer (B)'>
+```
+
+# E GENERATIVE WIN-RATES FOR IMPACT OF INITIALIZATION
+
+![](images/010a96056f4b958d13db52d5e1b4d70ef0db1a2846ea9f5553ec924432c3772b.jpg)
+Figure 8: Impact of initialization on generative quality as judged by LLM-as-a-judge.
+
+![](images/1f4ab37c52750ad830097a2cfd0cc3e97ed46650431d831913e3ea67803dac32.jpg)
+
+# F EVALUATION OF 470M COOLDOWN MODELS ON GSM8K
+
+![](images/937a3c8cec3999d07174f00929dff46a62dc5ae9893d57e4b42b002b5eea7a9d.jpg)
+![](images/2198d10d441a490dce596eb6cef60a5843dc892d8c541979cc1eb9043d2ab53d.jpg)
+![](images/ed8bfb10c083e92fdfdb6076cad5798956f4b34fe94d82ba856ec5a1e41f9886.jpg)
+![](images/60c004598d25c1f7ce4d49f2b2dce9ad16115bea7b6b2420dd89f392de91625b.jpg)
+![](images/bcdd78c6a09e68b0aa568fb31013bf1df36f16e3ac2d7d2ab259b146a97c8ece.jpg)
+Figure 9: Evaluation of 470M cooldown models on GSM8K ("Mathematical Evaluation"; panels: text $\rightarrow$ no-code cooldown, text $\rightarrow$ cooldown w/ code, balanced $\rightarrow$ text $\rightarrow$ no-code cooldown, balanced $\rightarrow$ text $\rightarrow$ cooldown w/ code). Including code in any stage of the pre-training improves performance compared to the model where no code has been seen in any of the training stages: pre-training, continual pre-training, and cooldown. The most performant model in this comparison has seen code in all stages including cooldown, where it leads to a significant improvement (from 2.9 to 4.12, a +42% relative gain).
\ No newline at end of file
diff --git a/2025/To Code or Not To Code_ Exploring Impact of Code in Pre-training/images.zip b/2025/To Code or Not To Code_ Exploring Impact of Code in Pre-training/images.zip
new file mode 100644
index 0000000000000000000000000000000000000000..259f713f829fcb7b97da861dc86f173e736e0737
--- /dev/null
+++ b/2025/To Code or Not To Code_ Exploring Impact of Code in Pre-training/images.zip
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d39af3ce46b37253ee1aee3532681174c4697c1af494a4244235381cff0a5a43
+size 452604
diff --git a/2025/To Code or Not To Code_ Exploring Impact of Code in Pre-training/layout.json b/2025/To Code or Not To Code_ Exploring Impact of Code in Pre-training/layout.json
new file mode 100644
index 0000000000000000000000000000000000000000..3cbe7c0570554148ecf03ea360f6f7b9b079d6ab
--- /dev/null
+++ b/2025/To Code or Not To Code_ Exploring Impact of Code in Pre-training/layout.json
@@ -0,0 +1,13932 @@
+{
+    "pdf_info": [
+        {
+            "para_blocks": [
+                {
+                    "bbox": [
+                        105,
+                        78,
+                        463,
+                        118
+                    ],
+                    "type": "title",
+                    "angle": 0,
+                    "lines": [
+                        {
+                            "bbox": [
+                                105,
+                                78,
+                                463,
+                                118
+                            ],
+                            "spans": [
+                                {
+                                    "bbox": [
+                                        105,
+                                        78,
+                                        463,
+                                        118
+                                    ],
+                                    "type": "text",
+                                    "content": "TO CODE, OR NOT TO CODE? 
EXPLORING IMPACT OF CODE IN PRE-TRAINING" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 110, + 134, + 430, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 134, + 430, + 171 + ], + "spans": [ + { + "bbox": [ + 110, + 134, + 430, + 171 + ], + "type": "text", + "content": "Viraat Aryabumi, Yixuan Su, Raymond Ma, Adrien Morisot, Ivan Zhang, Acyr Locatelli, Marzieh Fadaee, Ahmet Üstün, Sara Hooker {viraat, ahmetustun, sarahooker}@cohere.com" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 276, + 198, + 335, + 210 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 198, + 335, + 210 + ], + "spans": [ + { + "bbox": [ + 276, + 198, + 335, + 210 + ], + "type": "text", + "content": "ABSTRACT" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 140, + 226, + 471, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 226, + 471, + 426 + ], + "spans": [ + { + "bbox": [ + 140, + 226, + 471, + 426 + ], + "type": "text", + "content": "Including code in the pre-training data mixture, even for models not specifically designed for code, has become a common practice in LLMs pre-training. While there has been anecdotal consensus among practitioners that code data plays a vital role in general LLMs' performance, there is only limited work analyzing the precise impact of code on non-code tasks. In this work, we systematically investigate the impact of code data on general performance. We ask \"what is the impact of code data used in pre-training on a large variety of downstream tasks beyond code generation\". We conduct extensive ablations and evaluate across a broad range of natural language reasoning tasks, world knowledge tasks, code benchmarks, and LLM-as-a-judge win-rates for models with sizes ranging from 470M to 2.8B parameters. Across settings, we find a consistent results that code is a critical building block for generalization far beyond coding tasks and improvements to code quality have an outsized impact across all tasks. In particular, compared to text-only pre-training, the addition of code results in up to relative increase of " + }, + { + "bbox": [ + 140, + 226, + 471, + 426 + ], + "type": "inline_equation", + "content": "8.2\\%" + }, + { + "bbox": [ + 140, + 226, + 471, + 426 + ], + "type": "text", + "content": " in natural language (NL) reasoning, " + }, + { + "bbox": [ + 140, + 226, + 471, + 426 + ], + "type": "inline_equation", + "content": "4.2\\%" + }, + { + "bbox": [ + 140, + 226, + 471, + 426 + ], + "type": "text", + "content": " in world knowledge, " + }, + { + "bbox": [ + 140, + 226, + 471, + 426 + ], + "type": "inline_equation", + "content": "6.6\\%" + }, + { + "bbox": [ + 140, + 226, + 471, + 426 + ], + "type": "text", + "content": " improvement in generative win-rates, and a " + }, + { + "bbox": [ + 140, + 226, + 471, + 426 + ], + "type": "inline_equation", + "content": "12x" + }, + { + "bbox": [ + 140, + 226, + 471, + 426 + ], + "type": "text", + "content": " boost in code performance respectively. Our work suggests investments in code quality and preserving code during pre-training have positive impacts." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 451, + 208, + 464 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 451, + 208, + 464 + ], + "spans": [ + { + "bbox": [ + 106, + 451, + 208, + 464 + ], + "type": "text", + "content": "1 INTRODUCTION" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 478, + 506, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 478, + 506, + 536 + ], + "spans": [ + { + "bbox": [ + 104, + 478, + 506, + 536 + ], + "type": "text", + "content": "The role of data has taken on critical significance in recent breakthroughs. State-of-the-art models highlight the importance of the pre-training data mixture, diversity of data sources (Brown et al., 2020; Longpre et al., 2023; Singh et al., 2024) combined with compute availability as key drivers on performance (Dubey et al., 2024; Üstün et al., 2024; Team et al., 2023; Aryabumi et al., 2024). A critical question is what properties of data impart the best general performance?" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 539, + 506, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 539, + 506, + 608 + ], + "spans": [ + { + "bbox": [ + 104, + 539, + 506, + 608 + ], + "type": "text", + "content": "Perhaps surprisingly, code is often included in pre-training even if a model is not explicitly intended to generate high-quality code. Code datasets differ significantly in terms of structure and textural characteristics from high-quality web datasets (Wikimedia; Raffel et al., 2019). Despite this, several previous generations of LLMs like PaLM (Chowdhery et al., 2022), Gopher (Rae et al., 2022) and Bloom (Workshop et al., 2023) that were not explicitly intended to support code generation, included code data together with high-quality natural language data in their pre-training mixture." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 611, + 507, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 611, + 507, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 611, + 507, + 734 + ], + "type": "text", + "content": "In current state-of-the-art models, it is an accepted norm to not only include code data but further increase the proportion – for instance, Llama 3 (Dubey et al., 2024) has four times more code data in proportion (17%), of its pre-training mixture than Llama 2 (4.5%) (Touvron et al., 2023). While there has been consensus anecdotally among practitioners that code data plays a vital role in LLMs' performance, there has been only limited work analyzing the precise impact of code on non-code tasks. Prior work shows particular side benefits of the inclusion of code data, such as impact on scaling in limited data regime (Muennighoff et al., 2023a), entity tracking capabilities (Kim et al., 2024), and mathematical reasoning (Razeghi et al.). However, there has been no exhaustive study to date that systematically investigates the impact of code data on general performance. 
In this work, we ask \"what is the impact of code data used in pre-training on a large variety of downstream tasks beyond code generation?\"" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "text", + "content": "We embark on an exhaustive set of large-scale controlled pre-training experiments. This includes a consideration of where in the training process adding code is beneficial, code proportions, the role of scaling, and the quality and properties of code added. While a costly endeavor to perform these ablations in a rigorous way, we find consistent and valuable results that code provides critical improvements to non-code performance. In particular, compared to text-only pre-training, for our best variant, the addition of code results in relative increase of " + }, + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "inline_equation", + "content": "8.2\\%" + }, + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "text", + "content": " in natural language (NL) reasoning, " + }, + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "inline_equation", + "content": "4.2\\%" + }, + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "text", + "content": " in world knowledge, " + }, + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "inline_equation", + "content": "6.6\\%" + }, + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "text", + "content": " improvement in generative win-rates, and a 12x boost in code performance respectively. Further performing cooldown with code, improves NL reasoning by " + }, + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "inline_equation", + "content": "3.7\\%" + }, + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "text", + "content": ", World knowledge by " + }, + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "inline_equation", + "content": "6.8\\%" + }, + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "text", + "content": ", and code by " + }, + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "inline_equation", + "content": "20\\%" + }, + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "text", + "content": ", relative to cooldown without code and leads to a " + }, + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "inline_equation", + "content": "4.1\\%" + }, + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "text", + "content": " additional win-rate increase." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 198, + 506, + 287 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 198, + 506, + 287 + ], + "spans": [ + { + "bbox": [ + 104, + 198, + 506, + 287 + ], + "type": "text", + "content": "Here, several factors matter including getting the proportion of code correct, improving the quality of code by including synthetic code and code adjacent data such as commits, and leveraging code across multiple stages of training including during cooldown. Our results suggest code is a critical building block for generalization far beyond coding tasks and improvements to code quality have an outsized impact on performance. We conduct an extensive evaluation on a broad range of benchmarks, which cover world knowledge tasks, natural language reasoning, and code generation, as well as LLM-as-a-judge win-rates. Across experiments on models ranging from 470 million to 2.8 billion parameter models, we find the following detailed results:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 294, + 505, + 498 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 105, + 294, + 504, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 294, + 504, + 350 + ], + "spans": [ + { + "bbox": [ + 105, + 294, + 504, + 350 + ], + "type": "text", + "content": "1. Code provides critical improvements to non-code performance. Initialization with code pretrained models results in improved performance for natural language tasks. In particular, compared to text-only pre-training, for our best variant, the addition of code results in a relative increase of " + }, + { + "bbox": [ + 105, + 294, + 504, + 350 + ], + "type": "inline_equation", + "content": "8.2\\%" + }, + { + "bbox": [ + 105, + 294, + 504, + 350 + ], + "type": "text", + "content": " in NL reasoning, " + }, + { + "bbox": [ + 105, + 294, + 504, + 350 + ], + "type": "inline_equation", + "content": "4.2\\%" + }, + { + "bbox": [ + 105, + 294, + 504, + 350 + ], + "type": "text", + "content": " in world knowledge, " + }, + { + "bbox": [ + 105, + 294, + 504, + 350 + ], + "type": "inline_equation", + "content": "6.6\\%" + }, + { + "bbox": [ + 105, + 294, + 504, + 350 + ], + "type": "text", + "content": " improvement in generative win-rates, and a 12x boost in code performance respectively." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 352, + 505, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 352, + 505, + 441 + ], + "spans": [ + { + "bbox": [ + 104, + 352, + 505, + 441 + ], + "type": "text", + "content": "2. Code quality and properties matter. Using markup-style programming languages, code-adjacent datasets such as GitHub commits and synthetically generated code improves the performance in pre-training. In particular, training on a higher quality synthetically generated code dataset results in a " + }, + { + "bbox": [ + 104, + 352, + 505, + 441 + ], + "type": "inline_equation", + "content": "9\\%" + }, + { + "bbox": [ + 104, + 352, + 505, + 441 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 352, + 505, + 441 + ], + "type": "inline_equation", + "content": "44\\%" + }, + { + "bbox": [ + 104, + 352, + 505, + 441 + ], + "type": "text", + "content": " increase in natural language reasoning and code performance, respectively, compared to web-based code data in pre-training. 
Additionally, continual pre-training from a code model that includes synthetic data results in " + }, + { + "bbox": [ + 104, + 352, + 505, + 441 + ], + "type": "inline_equation", + "content": "1.9\\%" + }, + { + "bbox": [ + 104, + 352, + 505, + 441 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 352, + 505, + 441 + ], + "type": "inline_equation", + "content": "41\\%" + }, + { + "bbox": [ + 104, + 352, + 505, + 441 + ], + "type": "text", + "content": " relative increases in natural language reasoning and code performance respectively, compared to initialization from a code model that does not include code data." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 443, + 504, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 443, + 504, + 498 + ], + "spans": [ + { + "bbox": [ + 104, + 443, + 504, + 498 + ], + "type": "text", + "content": "3. Code in cooldown enables further improvement across all tasks. Including code data in pretraining cooldown, where high-quality datasets are up-weighted, leads to an increase of " + }, + { + "bbox": [ + 104, + 443, + 504, + 498 + ], + "type": "inline_equation", + "content": "3.6\\%" + }, + { + "bbox": [ + 104, + 443, + 504, + 498 + ], + "type": "text", + "content": " in NL reasoning, " + }, + { + "bbox": [ + 104, + 443, + 504, + 498 + ], + "type": "inline_equation", + "content": "10.1\\%" + }, + { + "bbox": [ + 104, + 443, + 504, + 498 + ], + "type": "text", + "content": " in world knowledge, and " + }, + { + "bbox": [ + 104, + 443, + 504, + 498 + ], + "type": "inline_equation", + "content": "20\\%" + }, + { + "bbox": [ + 104, + 443, + 504, + 498 + ], + "type": "text", + "content": " in code performance relative to no cooldown. More significantly, cooldown with code beats the baseline (no cooldown) by " + }, + { + "bbox": [ + 104, + 443, + 504, + 498 + ], + "type": "inline_equation", + "content": "52.3\\%" + }, + { + "bbox": [ + 104, + 443, + 504, + 498 + ], + "type": "text", + "content": " win-rates, where win-rates are " + }, + { + "bbox": [ + 104, + 443, + 504, + 498 + ], + "type": "inline_equation", + "content": "4.1\\%" + }, + { + "bbox": [ + 104, + 443, + 504, + 498 + ], + "type": "text", + "content": " higher compared to cooldown without code." + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 514, + 211, + 526 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 514, + 211, + 526 + ], + "spans": [ + { + "bbox": [ + 105, + 514, + 211, + 526 + ], + "type": "text", + "content": "2 METHODOLOGY" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 537, + 504, + 572 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 537, + 504, + 572 + ], + "spans": [ + { + "bbox": [ + 104, + 537, + 504, + 572 + ], + "type": "text", + "content": "We describe the details of our Pre-training Data (§ 2.1), Evaluation (§ 2.2), Training and Model details (§ 2.3). Figure 1 shows the high-level experimental framework. Precise details for each experiment and their results are presented in Section 3." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 584, + 222, + 595 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 584, + 222, + 595 + ], + "spans": [ + { + "bbox": [ + 105, + 584, + 222, + 595 + ], + "type": "text", + "content": "2.1 PRE-TRAINING DATA" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 605, + 506, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 605, + 506, + 694 + ], + "spans": [ + { + "bbox": [ + 104, + 605, + 506, + 694 + ], + "type": "text", + "content": "In this section, we describe the details of our pre-training and cooldown datasets. We aim to evaluate the role of code in pre-training, following current state-of-the-art practices. Hence, we consider pre-training runs that consist of two phases: 1) continued pre-training and 2) cooldown. Continued pre-training refers to training a model that is initialized from a pre-trained model and trained for a fixed token budget. Cooldown (Team et al., 2023; Parmar et al., 2024) involves up-weighting high-quality datasets and annealing the learning rate for a relatively small number of tokens during the final stages of training. This up-weighting of high-quality datasets for a smaller number of steps at the end of training can significantly boost model quality." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "content": "Text dataset. We use the SlimPajama pre-training corpus (Soboleva et al., 2023) as our source of natural language text data. SlimPajama is a de-duplicated, quality-filtered, multi-corpora, open-source dataset based on RedPajama-1.2T (Computer, 2023). SlimPajama consists of documents" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 152, + 86, + 459, + 257 + ], + "blocks": [ + { + "bbox": [ + 152, + 86, + 459, + 257 + ], + "lines": [ + { + "bbox": [ + 152, + 86, + 459, + 257 + ], + "spans": [ + { + "bbox": [ + 152, + 86, + 459, + 257 + ], + "type": "image", + "image_path": "a4396e1682e56163e5de9733298ea2cb77f96523c6831e11de756216bfe5ae6c.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 268, + 506, + 324 + ], + "lines": [ + { + "bbox": [ + 104, + 268, + 506, + 324 + ], + "spans": [ + { + "bbox": [ + 104, + 268, + 506, + 324 + ], + "type": "text", + "content": "Figure 1: Overview of our experimental framework: We exhaustively evaluate the impact of code by varying: 1) the proportion of code in pre-training, 2) code quality and properties, 3) model initialization, 4) model scale, and 5) stage of training at which code is introduced. 
We evaluate the resulting model on a wide-ranging set of tasks, including natural language reasoning, world knowledge, code, and open-ended generations." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 345, + 504, + 390 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 345, + 504, + 390 + ], + "spans": [ + { + "bbox": [ + 104, + 345, + 504, + 390 + ], + "type": "text", + "content": "from CommonCrawl, C4, GitHub, Books, ArXiv, Wikipedia, and StackExchange. We filter out all documents from GitHub and StackExchange to remove code and code-adjacent data sources and ensure this is a text-only source. SlimPajama has a total of 627B tokens. After removing all code sources, this results in our text pre-training corpus with a total of 503B tokens." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 395, + 504, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 395, + 504, + 418 + ], + "spans": [ + { + "bbox": [ + 104, + 395, + 504, + 418 + ], + "type": "text", + "content": "Code datasets. To explore the impact of different properties of code data, we use multiple sources of code in our experiments:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 428, + 504, + 627 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 132, + 428, + 504, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 428, + 504, + 483 + ], + "spans": [ + { + "bbox": [ + 132, + 428, + 504, + 483 + ], + "type": "text", + "content": "- WEB-BASED CODE DATA: For our main source of code data, we start with the Stack dataset (Kocetkov et al., 2022) that was used to train StarCoder (Li et al., 2023a). The Stack consists of permissively licensed code data scraped from GitHub. We apply quality filters1 and restrict to the top 25 programming languages based on document count2. After all filtering steps, the size of the code-only and markup subset is 139B tokens." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 487, + 504, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 487, + 504, + 510 + ], + "spans": [ + { + "bbox": [ + 132, + 487, + 504, + 510 + ], + "type": "text", + "content": "- MARKDOWN DATA We also separately process markup-style languages such as Markdown, CSS, and HTML. After all filtering steps, the size of this markup subset is 180B tokens." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 513, + 504, + 557 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 513, + 504, + 557 + ], + "spans": [ + { + "bbox": [ + 132, + 513, + 504, + 557 + ], + "type": "text", + "content": "- SYNTHETIC CODE DATA: To ablate the quality of the code dataset, we use a proprietary synthetically generated code dataset that consists of Python programming problems that have been formally verified. We treat this as a high-quality source of code data (See the details in § 3.4). The final synthetic dataset consists of 3.2B code tokens." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 560, + 504, + 627 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 560, + 504, + 627 + ], + "spans": [ + { + "bbox": [ + 132, + 560, + 504, + 627 + ], + "type": "text", + "content": "- CODE ADJACENT DATA: Finally, to explore different properties of code data, we include a version of the code data that includes auxiliary data such as GitHub commits, Jupyter notebooks, and StackExchange threads. For GitHub commits and Jupyter notebooks, we use the datasets provided as part of the Stack (Kocetkov et al., 2022). We use the version of StackExchange that is part of SlimPajama (Soboleva et al., 2023). In total, we have 21.4B tokens of code-adjacent data." + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 637, + 504, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 637, + 504, + 682 + ], + "spans": [ + { + "bbox": [ + 104, + 637, + 504, + 682 + ], + "type": "text", + "content": "Pre-training cooldown datasets. Cooldown involves up-weighting higher-quality datasets for the final steps of pre-training and has been found to improve performance on downstream tasks (Parmar et al., 2024; Team et al., 2023), in particular to impart instruction-following capabilities. We choose a cooldown mixture comprising high-quality text, math, code, and instruct-style text datasets." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 119, + 710, + 302, + 720 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 710, + 302, + 720 + ], + "spans": [ + { + "bbox": [ + 119, + 710, + 302, + 720 + ], + "type": "text", + "content": "See Appendix C.1 for details about quality filters" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 119, + 720, + 476, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 720, + 476, + 732 + ], + "spans": [ + { + "bbox": [ + 119, + 720, + 476, + 732 + ], + "type": "text", + "content": "2Refer to Appendix C.2, C.3 for the full list of programming and markup-style languages included" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 83, + 188, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 83, + 188, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 83, + 188, + 94 + ], + "type": "text", + "content": "2.2 EVALUATION" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 102, + 504, + 170 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 102, + 504, + 170 + ], + "spans": [ + { + "bbox": [ + 104, + 102, + 504, + 170 + ], + "type": "text", + "content": "Our goal is to systematically understand the impact of code on general performance, which requires a broad evaluation suite that extends to a large variety of 
downstream tasks beyond code generation. To achieve this, we evaluate models on benchmarks that are reasonable proxies for model ability on 1) world knowledge, 2) natural language reasoning, and 3) code performance. In addition, we report win-rates as evaluated by an LLM-as-a-judge. Table 1 (Appendix A) shows the full evaluation suite and the respective groupings, along with the metrics used." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 175, + 506, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 175, + 506, + 275 + ], + "spans": [ + { + "bbox": [ + 104, + 175, + 506, + 275 + ], + "type": "text", + "content": "For World knowledge, we use benchmarks to measure knowledge memorization, retrieval, and question-answering capability given context. We include Natural Questions Open (Kwiatkowski et al., 2019) and TriviaQA (Joshi et al., 2017) as the datasets. The natural language reasoning suite consists of 11 benchmarks that involve natural-language-based reasoning, such as question answering, natural language inference (NLI), sentence completion, co-reference resolution, and general intelligence. We include the full list of the constituent benchmarks with references in Table 1. Finally, while our main focus is general performance, we also want to measure any changes to code generation performance. For Code benchmarks, we focus on the function completion task, where we use HumanEval-Python (Chen et al., 2022) and MBPP (Austin et al., 2021)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 279, + 504, + 314 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 279, + 504, + 314 + ], + "spans": [ + { + "bbox": [ + 104, + 279, + 504, + 314 + ], + "type": "text", + "content": "We evaluate performance at two scales: 470M and 2.8B parameters. At the 470M scale, model capabilities are limited; thus, to ensure fair comparisons, we only compare benchmarks for which all models achieve scores above random, similar to Muennighoff et al. (2023a); Lozhkov et al. (2024)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 318, + 505, + 429 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 318, + 505, + 429 + ], + "spans": [ + { + "bbox": [ + 104, + 318, + 505, + 429 + ], + "type": "text", + "content": "LLM-as-a-judge win-rates. In addition to task-specific discriminative performance, to allow for a more holistic view across all performance measures, we also evaluate generative performance using LLM-as-a-judge win-rates. This is particularly valuable given recent work that has shown that as performance on open-ended generations improves, there is deterioration in traditional academic tasks (Ustun et al., 2024; Ouyang et al., 2022; Iyer et al., 2022; Muennighoff et al., 2023c). The use of LLM-as-a-judge benchmarks (Fu et al., 2023; Liu et al., 2023; Chiang & yi Lee, 2023; Shimabucoro et al., 2024) has gained traction as an alternative to performing human evaluation, which tends to be laborious and expensive (Wang et al., 2023; Boubdir et al., 2023). LLMs as evaluators compare two completions based upon detailed prompts and are reasonable proxies aligned with human preference (Ustun et al., 2024; Dubois et al., 2024)." 
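To make the win-rate protocol concrete, below is a minimal sketch of how pairwise LLM-as-a-judge win-rates of this kind are typically computed. The `judge_prefers` callable stands in for a prompted call to the judge model (Command-R+ in this setup); it and the half-credit handling of ties are our own illustrative assumptions, not the paper's implementation.

```python
from typing import Callable, List

def win_rate(prompts: List[str],
             completions_a: List[str],
             completions_b: List[str],
             judge_prefers: Callable[[str, str, str], str]) -> float:
    """Fraction of prompts on which the judge prefers model A over model B.

    judge_prefers(prompt, a, b) returns "A", "B", or "tie"; ties are counted
    as half a win for each side (one common convention, assumed here).
    """
    wins = 0.0
    for prompt, a, b in zip(prompts, completions_a, completions_b):
        verdict = judge_prefers(prompt, a, b)
        if verdict == "A":
            wins += 1.0
        elif verdict == "tie":
            wins += 0.5
    return wins / len(prompts)
```

In practice, each pair is usually judged in both presentation orders to reduce position bias; that refinement is omitted here for brevity.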
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 434, + 504, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 434, + 504, + 491 + ], + "spans": [ + { + "bbox": [ + 104, + 434, + 504, + 491 + ], + "type": "text", + "content": "We use the Dolly-200 English dataset (Ustun et al., 2024; Singh et al., 2024), which consists of 200 hand-picked examples from the Dolly-15K dataset (Conover et al., 2023). These prompts are open-ended and capture general-purpose non-code use cases, making them a valuable proxy for how code impacts more fluid and often open-ended tasks. For our win-rate evaluations, we use Command-" + }, + { + "bbox": [ + 104, + 434, + 504, + 491 + ], + "type": "inline_equation", + "content": "R^{+3}" + }, + { + "bbox": [ + 104, + 434, + 504, + 491 + ], + "type": "text", + "content": " as the LLM judge. Details about the prompt are provided in Appendix D." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 502, + 272, + 513 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 502, + 272, + 513 + ], + "spans": [ + { + "bbox": [ + 105, + 502, + 272, + 513 + ], + "type": "text", + "content": "2.3 TRAINING AND MODEL DETAILS" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 523, + 504, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 523, + 504, + 590 + ], + "spans": [ + { + "bbox": [ + 104, + 523, + 504, + 590 + ], + "type": "text", + "content": "We use 470M and 2.8B parameter decoder-only auto-regressive Transformer models (Radford et al., 2019) that are trained with a standard language modeling objective. We use parallel attention layers (Chowdhery et al., 2022; Wang & Komatsuzaki, 2021), SwiGLU activation (Shazeer, 2020), no biases in dense layers, and a byte-pair-encoding tokenizer with a vocabulary size of 256K. All models are pre-trained using AdamW (Loshchilov & Hutter, 2019) with a max sequence length of 8192, batch size of 512, and a cosine LR schedule with a warmup of 1325 steps." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 594, + 504, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 594, + 504, + 651 + ], + "spans": [ + { + "bbox": [ + 104, + 594, + 504, + 651 + ], + "type": "text", + "content": "Infrastructure. We use TPU v5e chips (Jouppi et al., 2017) for training and evaluation. All models are trained using the Jax (Bradbury et al., 2018) framework. We pre-train 64 models in total. This is an enormous endeavour given the scale and computational resources required. Each pre-training run for 200B tokens takes 4736 TPU-chip-hours for 470M and 13824 TPU-chip-hours for 2.8B parameter models. Each cooldown run for 40B tokens takes 1024 TPU-chip-hours for 470M models." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 666, + 265, + 678 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 666, + 265, + 678 + ], + "spans": [ + { + "bbox": [ + 105, + 666, + 265, + 678 + ], + "type": "text", + "content": "3 RESULTS AND DISCUSSION" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 691, + 505, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 691, + 505, + 715 + ], + "spans": [ + { + "bbox": [ + 104, + 691, + 505, + 715 + ], + "type": "text", + "content": "In this section, we report descriptions and results for each experimental variant. 
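As a rough illustration of the optimizer setup in § 2.3 (AdamW with a cosine LR schedule and a 1325-step warmup), the sketch below computes a linear-warmup cosine learning-rate schedule. The peak learning rate, total step count, and decay floor are placeholder assumptions, since they are not specified here.

```python
import math

def cosine_lr_with_warmup(step: int,
                          peak_lr: float = 1e-3,       # placeholder; not given in the text
                          total_steps: int = 100_000,  # placeholder
                          warmup_steps: int = 1325,    # warmup length quoted above
                          floor_frac: float = 0.1      # assumed decay floor
                          ) -> float:
    """Linear warmup to peak_lr, then cosine decay toward floor_frac * peak_lr."""
    if step < warmup_steps:
        return peak_lr * (step + 1) / warmup_steps
    progress = (step - warmup_steps) / max(1, total_steps - warmup_steps)
    cosine = 0.5 * (1.0 + math.cos(math.pi * progress))
    return peak_lr * (floor_frac + (1.0 - floor_frac) * cosine)
```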
We systematically investigate: (1) initializing an LLM with code pre-trained models (§ 3.1), (2) the impact of" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 119, + 720, + 337, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 720, + 337, + 732 + ], + "spans": [ + { + "bbox": [ + 119, + 720, + 337, + 732 + ], + "type": "text", + "content": "3https://huggingface.co/CohereForAI/c4ai-command-r-plus" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 136, + 82, + 473, + 209 + ], + "blocks": [ + { + "bbox": [ + 136, + 82, + 473, + 209 + ], + "lines": [ + { + "bbox": [ + 136, + 82, + 473, + 209 + ], + "spans": [ + { + "bbox": [ + 136, + 82, + 473, + 209 + ], + "type": "image", + "image_path": "2c8f69e3bc02422673769f7929c8ccbc81d18d4666c5b42ed23fd45dafe7fa69.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 218, + 504, + 275 + ], + "lines": [ + { + "bbox": [ + 104, + 218, + 504, + 275 + ], + "spans": [ + { + "bbox": [ + 104, + 218, + 504, + 275 + ], + "type": "text", + "content": "Figure 2: Impact of initialization using code pre-trained models: Initializing model training with code pre-trained models improves reasoning and code generation compared to text-only models, with the largest improvement when continuing pre-training on a high percentage of text (Balanced " + }, + { + "bbox": [ + 104, + 218, + 504, + 275 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 104, + 218, + 504, + 275 + ], + "type": "text", + "content": " Text, Code " + }, + { + "bbox": [ + 104, + 218, + 504, + 275 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 104, + 218, + 504, + 275 + ], + "type": "text", + "content": " Text). Note that these variants are designed to isolate the role of initialization, so they do not include cooldown." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 295, + 504, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 295, + 504, + 331 + ], + "spans": [ + { + "bbox": [ + 104, + 295, + 504, + 331 + ], + "type": "text", + "content": "model scale (§ 3.2), (3) varying the proportion of code in the pre-training data (§ 3.3), (4) the quality and properties of the code data (§ 3.4), and (5) code data in pre-training cooldown (§ 3.5). Finally, we compare the resulting pre-training recipes (§ 3.6). Figure 1 shows the key levers of our experimental design." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 342, + 384, + 354 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 342, + 384, + 354 + ], + "spans": [ + { + "bbox": [ + 105, + 342, + 384, + 354 + ], + "type": "text", + "content": "3.1 INITIALIZING AN LLM WITH CODE PRE-TRAINED MODELS" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 362, + 504, + 397 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 362, + 504, + 397 + ], + "spans": [ + { + "bbox": [ + 104, + 362, + 504, + 397 + ], + "type": "text", + "content": "We explore different initializations of pre-trained models to understand if using an LM with a large portion of code data as initialization improves the performance. These key ablations, along with their token counts, are summarized in Table 2. We briefly describe below:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 406, + 504, + 528 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 132, + 406, + 504, + 428 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 406, + 504, + 428 + ], + "spans": [ + { + "bbox": [ + 132, + 406, + 504, + 428 + ], + "type": "text", + "content": "- Text LM (TEXT-ONLY BASELINE): Pre-trained model from scratch using glorot-normal initialization (Glorot et al., 2011) on the text-only data for 400B tokens." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 431, + 504, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 431, + 504, + 453 + ], + "spans": [ + { + "bbox": [ + 132, + 431, + 504, + 453 + ], + "type": "text", + "content": "- Balanced LM (BALANCED-ONLY): A model is trained with an equal ratio of code and text data (50% text and 50% code) in pre-training for 400B tokens." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 457, + 504, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 457, + 504, + 479 + ], + "spans": [ + { + "bbox": [ + 132, + 457, + 504, + 479 + ], + "type": "text", + "content": "- Balance-initialized Text LM (BALANCED " + }, + { + "bbox": [ + 132, + 457, + 504, + 479 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 132, + 457, + 504, + 479 + ], + "type": "text", + "content": " TEXT): This model is initialized with a balanced LM (50% text and 50% code) and further pre-trained using text data for 200B tokens." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 483, + 504, + 528 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 483, + 504, + 528 + ], + "spans": [ + { + "bbox": [ + 132, + 483, + 504, + 528 + ], + "type": "text", + "content": "- Code-initialized Text LM (CODE " + }, + { + "bbox": [ + 132, + 483, + 504, + 528 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 132, + 483, + 504, + 528 + ], + "type": "text", + "content": " TEXT): Different from other variants, this model is initialized with a code-LM which is pre-trained on a code dataset for 200B tokens. 
The code dataset contains a mixture of " + }, + { + "bbox": [ + 132, + 483, + 504, + 528 + ], + "type": "inline_equation", + "content": "80\\%" + }, + { + "bbox": [ + 132, + 483, + 504, + 528 + ], + "type": "text", + "content": " code data and " + }, + { + "bbox": [ + 132, + 483, + 504, + 528 + ], + "type": "inline_equation", + "content": "20\\%" + }, + { + "bbox": [ + 132, + 483, + 504, + 528 + ], + "type": "text", + "content": " markup-style code data. We then continually pre-train this model on text for another 200B tokens.4" + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 536, + 505, + 625 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 536, + 505, + 625 + ], + "spans": [ + { + "bbox": [ + 104, + 536, + 505, + 625 + ], + "type": "text", + "content": "Natural Language Reasoning As seen in Figure 2, initializing with " + }, + { + "bbox": [ + 104, + 536, + 505, + 625 + ], + "type": "inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 104, + 536, + 505, + 625 + ], + "type": "text", + "content": " code pre-trained model (code " + }, + { + "bbox": [ + 104, + 536, + 505, + 625 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 104, + 536, + 505, + 625 + ], + "type": "text", + "content": " text) has the best performance for NL Reasoning benchmarks, and is closely followed by the balanced " + }, + { + "bbox": [ + 104, + 536, + 505, + 625 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 104, + 536, + 505, + 625 + ], + "type": "text", + "content": " text model. The code " + }, + { + "bbox": [ + 104, + 536, + 505, + 625 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 104, + 536, + 505, + 625 + ], + "type": "text", + "content": " text model and balanced " + }, + { + "bbox": [ + 104, + 536, + 505, + 625 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 104, + 536, + 505, + 625 + ], + "type": "text", + "content": " text model beat the text-only baseline on NL reasoning tasks by " + }, + { + "bbox": [ + 104, + 536, + 505, + 625 + ], + "type": "inline_equation", + "content": "8.8\\%" + }, + { + "bbox": [ + 104, + 536, + 505, + 625 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 536, + 505, + 625 + ], + "type": "inline_equation", + "content": "8.2\\%" + }, + { + "bbox": [ + 104, + 536, + 505, + 625 + ], + "type": "text", + "content": " relative improvement respectively. The balanced-only model improves upon the baseline by " + }, + { + "bbox": [ + 104, + 536, + 505, + 625 + ], + "type": "inline_equation", + "content": "3.2\\%" + }, + { + "bbox": [ + 104, + 536, + 505, + 625 + ], + "type": "text", + "content": ". This shows that initialization from a pre-trained model with a mix of code has a strong positive effect on NL reasoning tasks. Further using a text mix with a small percentage of code for continual pre-training results in the best performance as evidenced by both the code " + }, + { + "bbox": [ + 104, + 536, + 505, + 625 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 104, + 536, + 505, + 625 + ], + "type": "text", + "content": " text and balanced " + }, + { + "bbox": [ + 104, + 536, + 505, + 625 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 104, + 536, + 505, + 625 + ], + "type": "text", + "content": " text models." 
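For readers tracking the arithmetic, the percentages in this section are relative (not absolute) changes. A small, purely hypothetical illustration:

```python
def relative_improvement(variant: float, baseline: float) -> float:
    """Relative change, as used for the percentages reported in this section."""
    return (variant - baseline) / baseline

# Hypothetical scores for illustration only: a baseline averaging 50.0 and a
# variant averaging 54.4 corresponds to an 8.8% relative improvement.
assert abs(relative_improvement(54.4, 50.0) - 0.088) < 1e-12
```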
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 631, + 504, + 698 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 631, + 504, + 698 + ], + "spans": [ + { + "bbox": [ + 104, + 631, + 504, + 698 + ], + "type": "text", + "content": "World Knowledge For World Knowledge tasks, we see that the balanced " + }, + { + "bbox": [ + 104, + 631, + 504, + 698 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 104, + 631, + 504, + 698 + ], + "type": "text", + "content": " text model has the best performance over all other variants, beating the code " + }, + { + "bbox": [ + 104, + 631, + 504, + 698 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 104, + 631, + 504, + 698 + ], + "type": "text", + "content": " text by " + }, + { + "bbox": [ + 104, + 631, + 504, + 698 + ], + "type": "inline_equation", + "content": "21\\%" + }, + { + "bbox": [ + 104, + 631, + 504, + 698 + ], + "type": "text", + "content": " and text-only by " + }, + { + "bbox": [ + 104, + 631, + 504, + 698 + ], + "type": "inline_equation", + "content": "4.1\\%" + }, + { + "bbox": [ + 104, + 631, + 504, + 698 + ], + "type": "text", + "content": " relative improvement. This suggests that performance on world knowledge tasks depends on a more balanced data mixture for initialization and a larger proportion of text in the continual pretraining stage. Overall, code data is still beneficial compared to text-only pre-training for world knowledge tasks." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 711, + 504, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 711, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 711, + 504, + 732 + ], + "type": "text", + "content": "4We use a 10% of code in text mixture data during continual pre-training of code-initialized models (balanced→text, code→text) to avoid a full distribution shift and maintain the benefits of code." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 138, + 82, + 473, + 208 + ], + "blocks": [ + { + "bbox": [ + 138, + 82, + 473, + 208 + ], + "lines": [ + { + "bbox": [ + 138, + 82, + 473, + 208 + ], + "spans": [ + { + "bbox": [ + 138, + 82, + 473, + 208 + ], + "type": "image", + "image_path": "98ab9c458f60bc63afdf09a9f3a7c56fe711892c17265bf1505e931c5ccaaca6.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 217, + 504, + 251 + ], + "lines": [ + { + "bbox": [ + 104, + 217, + 504, + 251 + ], + "spans": [ + { + "bbox": [ + 104, + 217, + 504, + 251 + ], + "type": "text", + "content": "Figure 3: Impact of model scale on different tasks. 
We observe that scale provides pronounced gains across tasks, with up to a " + }, + { + "bbox": [ + 104, + 217, + 504, + 251 + ], + "type": "inline_equation", + "content": "2.7\\mathrm{x}" + }, + { + "bbox": [ + 104, + 217, + 504, + 251 + ], + "type": "text", + "content": " increase; however, the overall trend remains the same across scales, showing consistency of findings across model sizes." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 270, + 504, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 270, + 504, + 350 + ], + "spans": [ + { + "bbox": [ + 104, + 270, + 504, + 350 + ], + "type": "text", + "content": "Trade-offs between NL tasks and code generation For code generation, balanced-only achieves the best performance, where we see " + }, + { + "bbox": [ + 104, + 270, + 504, + 350 + ], + "type": "inline_equation", + "content": "46.7\\%" + }, + { + "bbox": [ + 104, + 270, + 504, + 350 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 270, + 504, + 350 + ], + "type": "inline_equation", + "content": "54.5\\%" + }, + { + "bbox": [ + 104, + 270, + 504, + 350 + ], + "type": "text", + "content": " relative improvements over balanced " + }, + { + "bbox": [ + 104, + 270, + 504, + 350 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 104, + 270, + 504, + 350 + ], + "type": "text", + "content": " text and code " + }, + { + "bbox": [ + 104, + 270, + 504, + 350 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 104, + 270, + 504, + 350 + ], + "type": "text", + "content": " text. This is expected as balanced-only includes " + }, + { + "bbox": [ + 104, + 270, + 504, + 350 + ], + "type": "inline_equation", + "content": "50\\%" + }, + { + "bbox": [ + 104, + 270, + 504, + 350 + ], + "type": "text", + "content": " code throughout pre-training. However, this model trades off better code generation with lower performance in NL tasks. code " + }, + { + "bbox": [ + 104, + 270, + 504, + 350 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 104, + 270, + 504, + 350 + ], + "type": "text", + "content": " text and balanced " + }, + { + "bbox": [ + 104, + 270, + 504, + 350 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 104, + 270, + 504, + 350 + ], + "type": "text", + "content": " text achieve " + }, + { + "bbox": [ + 104, + 270, + 504, + 350 + ], + "type": "inline_equation", + "content": "2.9\\%" + }, + { + "bbox": [ + 104, + 270, + 504, + 350 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 270, + 504, + 350 + ], + "type": "inline_equation", + "content": "2.3\\%" + }, + { + "bbox": [ + 104, + 270, + 504, + 350 + ], + "type": "text", + "content": " relative increases in NL reasoning, and " + }, + { + "bbox": [ + 104, + 270, + 504, + 350 + ], + "type": "inline_equation", + "content": "17.3\\%" + }, + { + "bbox": [ + 104, + 270, + 504, + 350 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 270, + 504, + 350 + ], + "type": "inline_equation", + "content": "22.2\\%" + }, + { + "bbox": [ + 104, + 270, + 504, + 350 + ], + "type": "text", + "content": " relative increases in world knowledge, respectively, compared to balanced-only." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 354, + 506, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 354, + 506, + 443 + ], + "spans": [ + { + "bbox": [ + 104, + 354, + 506, + 443 + ], + "type": "text", + "content": "Generative quality win-rates comparison Additionally, we compare the generative performance of each code variant (code " + }, + { + "bbox": [ + 104, + 354, + 506, + 443 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 104, + 354, + 506, + 443 + ], + "type": "text", + "content": " text and balanced-only) against the text-only model. We report win-rates and observe that the presence of code has a strong positive impact on generation quality. Both code " + }, + { + "bbox": [ + 104, + 354, + 506, + 443 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 104, + 354, + 506, + 443 + ], + "type": "text", + "content": " text and balanced-only models beat the text-only variant by a " + }, + { + "bbox": [ + 104, + 354, + 506, + 443 + ], + "type": "inline_equation", + "content": "6.6\\%" + }, + { + "bbox": [ + 104, + 354, + 506, + 443 + ], + "type": "text", + "content": " difference in win-loss rates. We again note that the Dolly-200-English evaluation set we use for win-rate calculation is curated to reflect open-ended questions and is a non-code evaluation. This confirms that code data in the pre-training mix not only improves reasoning but also helps the model produce better-quality generations." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 455, + 211, + 466 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 455, + 211, + 466 + ], + "spans": [ + { + "bbox": [ + 105, + 455, + 211, + 466 + ], + "type": "text", + "content": "3.2 IMPACT OF SCALE" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 475, + 504, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 475, + 504, + 510 + ], + "spans": [ + { + "bbox": [ + 104, + 475, + 504, + 510 + ], + "type": "text", + "content": "To understand if the findings of Section 3.1 transfer to larger models, we train 2.8B parameter models with the same token budget, following the same model variants as at the 470M scale. Figure 3 shows the results of 2.8B models in comparison with 470M results." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 514, + 504, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 514, + 504, + 571 + ], + "spans": [ + { + "bbox": [ + 104, + 514, + 504, + 571 + ], + "type": "text", + "content": "Comparison between 2.8B and 470M models Scaling model size to 2.8B enables higher performance for all model variants in all task categories, compared to 470M results. In terms of average performance across NL reasoning and world knowledge, the balanced " + }, + { + "bbox": [ + 104, + 514, + 504, + 571 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 104, + 514, + 504, + 571 + ], + "type": "text", + "content": " text model benefits from scaling up by a " + }, + { + "bbox": [ + 104, + 514, + 504, + 571 + ], + "type": "inline_equation", + "content": "33.1\\%" + }, + { + "bbox": [ + 104, + 514, + 504, + 571 + ], + "type": "text", + "content": " increase relative to the same model at 470M size. 
The improvements for code " + }, + { + "bbox": [ + 104, + 514, + 504, + 571 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 104, + 514, + 504, + 571 + ], + "type": "text", + "content": " text and balanced-only are " + }, + { + "bbox": [ + 104, + 514, + 504, + 571 + ], + "type": "inline_equation", + "content": "31.7\\%" + }, + { + "bbox": [ + 104, + 514, + 504, + 571 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 514, + 504, + 571 + ], + "type": "inline_equation", + "content": "30\\%" + }, + { + "bbox": [ + 104, + 514, + 504, + 571 + ], + "type": "text", + "content": " relative increases." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 575, + 504, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 575, + 504, + 632 + ], + "spans": [ + { + "bbox": [ + 104, + 575, + 504, + 632 + ], + "type": "text", + "content": "We find that the improvements in NL reasoning are relatively modest, with " + }, + { + "bbox": [ + 104, + 575, + 504, + 632 + ], + "type": "inline_equation", + "content": "5.3\\%" + }, + { + "bbox": [ + 104, + 575, + 504, + 632 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 575, + 504, + 632 + ], + "type": "inline_equation", + "content": "9.2\\%" + }, + { + "bbox": [ + 104, + 575, + 504, + 632 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 575, + 504, + 632 + ], + "type": "inline_equation", + "content": "5.2\\%" + }, + { + "bbox": [ + 104, + 575, + 504, + 632 + ], + "type": "text", + "content": " relative gains for balanced " + }, + { + "bbox": [ + 104, + 575, + 504, + 632 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 104, + 575, + 504, + 632 + ], + "type": "text", + "content": " text, code " + }, + { + "bbox": [ + 104, + 575, + 504, + 632 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 104, + 575, + 504, + 632 + ], + "type": "text", + "content": " text, and balanced-only, respectively. However, world knowledge and code performance nearly triple for all the model variants. In particular, 2.8B balanced " + }, + { + "bbox": [ + 104, + 575, + 504, + 632 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 104, + 575, + 504, + 632 + ], + "type": "text", + "content": " text results increase by " + }, + { + "bbox": [ + 104, + 575, + 504, + 632 + ], + "type": "inline_equation", + "content": "2.7\\mathrm{x}" + }, + { + "bbox": [ + 104, + 575, + 504, + 632 + ], + "type": "text", + "content": " in world knowledge and " + }, + { + "bbox": [ + 104, + 575, + 504, + 632 + ], + "type": "inline_equation", + "content": "2.5\\mathrm{x}" + }, + { + "bbox": [ + 104, + 575, + 504, + 632 + ], + "type": "text", + "content": " in code evaluation compared to 470M." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 635, + 505, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 635, + 505, + 715 + ], + "spans": [ + { + "bbox": [ + 104, + 635, + 505, + 715 + ], + "type": "text", + "content": "Trends between model variants in 2.8B Notably, in terms of initialization with code pre-trained models, the same trends seen at the 470M parameter scale hold for the 2.8B models. 
code " + }, + { + "bbox": [ + 104, + 635, + 505, + 715 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 104, + 635, + 505, + 715 + ], + "type": "text", + "content": " text and balanced " + }, + { + "bbox": [ + 104, + 635, + 505, + 715 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 104, + 635, + 505, + 715 + ], + "type": "text", + "content": " text models improve over balanced models by " + }, + { + "bbox": [ + 104, + 635, + 505, + 715 + ], + "type": "inline_equation", + "content": "6.9\\%" + }, + { + "bbox": [ + 104, + 635, + 505, + 715 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 635, + 505, + 715 + ], + "type": "inline_equation", + "content": "6.1\\%" + }, + { + "bbox": [ + 104, + 635, + 505, + 715 + ], + "type": "text", + "content": " relative gain, however, fall significantly behind in code generation performance with " + }, + { + "bbox": [ + 104, + 635, + 505, + 715 + ], + "type": "inline_equation", + "content": "43.1\\%" + }, + { + "bbox": [ + 104, + 635, + 505, + 715 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 635, + 505, + 715 + ], + "type": "inline_equation", + "content": "46.3\\%" + }, + { + "bbox": [ + 104, + 635, + 505, + 715 + ], + "type": "text", + "content": " relative drop. These results show that the trade-off between NL tasks and code generation increases with the model size. Overall our experiments scaling to a larger size shows that our results hold and are consistent with the trends we observe at 470M parameter ablations." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 119, + 720, + 387, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 720, + 387, + 732 + ], + "spans": [ + { + "bbox": [ + 119, + 720, + 387, + 732 + ], + "type": "text", + "content": "We include the extended Win-rates for these experiments in Appendix E." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 137, + 81, + 257, + 167 + ], + "blocks": [ + { + "bbox": [ + 137, + 81, + 257, + 167 + ], + "lines": [ + { + "bbox": [ + 137, + 81, + 257, + 167 + ], + "spans": [ + { + "bbox": [ + 137, + 81, + 257, + 167 + ], + "type": "image", + "image_path": "7e24086b373a64314a1087e59d1623c6a838d3d57c9e2acc40df3cf0273d96bd.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 258, + 81, + 377, + 167 + ], + "blocks": [ + { + "bbox": [ + 258, + 81, + 377, + 167 + ], + "lines": [ + { + "bbox": [ + 258, + 81, + 377, + 167 + ], + "spans": [ + { + "bbox": [ + 258, + 81, + 377, + 167 + ], + "type": "image", + "image_path": "45f86072ce3adbc7f66d6a67edf2070b009c704bf42dfa4648e58333587db907.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 266, + 167, + 364, + 175 + ], + "lines": [ + { + "bbox": [ + 266, + 167, + 364, + 175 + ], + "spans": [ + { + "bbox": [ + 266, + 167, + 364, + 175 + ], + "type": "text", + "content": "Increasing Proportion of Code " + }, + { + "bbox": [ + 266, + 167, + 364, + 175 + ], + "type": "inline_equation", + "content": "\\rightarrow" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 185, + 504, + 240 + ], + "lines": [ + { + "bbox": [ + 104, + 185, + 504, + 240 + ], + "spans": [ + { + "bbox": [ + 104, + 185, + 504, + 240 + ], + "type": "text", + "content": "Figure 4: Impact of the proportion of code in pre-training for different tasks: We observe that as the code proportion of pre-training increases, the performance on code tasks increases linearly. In contrast, there is more sensitivity for NL reasoning and World knowledge tasks and an optimal range of code proportions where benefits are most tangible. Model size is 470M parameters and trained for 200B tokens." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 378, + 81, + 473, + 167 + ], + "blocks": [ + { + "bbox": [ + 378, + 81, + 473, + 167 + ], + "lines": [ + { + "bbox": [ + 378, + 81, + 473, + 167 + ], + "spans": [ + { + "bbox": [ + 378, + 81, + 473, + 167 + ], + "type": "image", + "image_path": "0391f65bb231307d946996aa142617c4ed192f040db1142b8318406c3172f3b3.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 268, + 318, + 278 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 268, + 318, + 278 + ], + "spans": [ + { + "bbox": [ + 105, + 268, + 318, + 278 + ], + "type": "text", + "content": "3.3 CODE DATA PROPORTION IN PRE-TRAINING" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 291, + 504, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 291, + 504, + 357 + ], + "spans": [ + { + "bbox": [ + 104, + 291, + 504, + 357 + ], + "type": "text", + "content": "In these experiments, we ablate the proportions of code data in the pre-training mixture to understand what is the optimal amount of code to maximize performance on non-code tasks. Here, we focus on the first phase of pre-training with random initialization. We train six models for 200B tokens with increasing code proportions: " + }, + { + "bbox": [ + 104, + 291, + 504, + 357 + ], + "type": "inline_equation", + "content": "0\\%" + }, + { + "bbox": [ + 104, + 291, + 504, + 357 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 291, + 504, + 357 + ], + "type": "inline_equation", + "content": "25\\%" + }, + { + "bbox": [ + 104, + 291, + 504, + 357 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 291, + 504, + 357 + ], + "type": "inline_equation", + "content": "50\\%" + }, + { + "bbox": [ + 104, + 291, + 504, + 357 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 291, + 504, + 357 + ], + "type": "inline_equation", + "content": "75\\%" + }, + { + "bbox": [ + 104, + 291, + 504, + 357 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 291, + 504, + 357 + ], + "type": "inline_equation", + "content": "90\\%" + }, + { + "bbox": [ + 104, + 291, + 504, + 357 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 291, + 504, + 357 + ], + "type": "inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 104, + 291, + 504, + 357 + ], + "type": "text", + "content": ". The remaining proportion is filled with text data. For each variant, we train a new model independently in order to carefully ablate the impact of varying code proportions." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 362, + 504, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 362, + 504, + 430 + ], + "spans": [ + { + "bbox": [ + 104, + 362, + 504, + 430 + ], + "type": "text", + "content": "Natural Language Reasoning and World Knowledge For NL Reasoning, as the amount of code increases, in Figure 4 we see an increase in performance compared to a text-only (0% code) model. The best performance is from a model with 25% code and 75% text, with a 3.4% relative improvement over a model with 0% code. While performance is maintained up to 75% code, it starts to rapidly erode at higher proportions with a sharp relative drop of 18.3% when the model is trained on 100% code compared to a model with no code." 
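A minimal sketch of how the six § 3.3 ablation mixtures could be specified as per-source token budgets; the accounting below is our own illustration of the stated proportions, not the paper's training code.

```python
TOTAL_TOKENS = 200e9  # each ablation model is trained for 200B tokens
CODE_PROPORTIONS = [0.0, 0.25, 0.50, 0.75, 0.90, 1.0]

# The remaining proportion in each mixture is filled with text data.
mixtures = [{"code": p * TOTAL_TOKENS, "text": (1 - p) * TOTAL_TOKENS}
            for p in CODE_PROPORTIONS]

for p, mix in zip(CODE_PROPORTIONS, mixtures):
    print(f"{p:4.0%} code -> {mix['code'] / 1e9:5.1f}B code / "
          f"{mix['text'] / 1e9:5.1f}B text tokens")
```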
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 434, + 504, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 434, + 504, + 491 + ], + "spans": [ + { + "bbox": [ + 104, + 434, + 504, + 491 + ], + "type": "text", + "content": "For World Knowledge tasks, we see an inverse relationship with increasing the amount of code. As seen in Figure 4 middle inset, there is a slight relative drop of " + }, + { + "bbox": [ + 104, + 434, + 504, + 491 + ], + "type": "inline_equation", + "content": "3.4\\%" + }, + { + "bbox": [ + 104, + 434, + 504, + 491 + ], + "type": "text", + "content": " at " + }, + { + "bbox": [ + 104, + 434, + 504, + 491 + ], + "type": "inline_equation", + "content": "25\\%" + }, + { + "bbox": [ + 104, + 434, + 504, + 491 + ], + "type": "text", + "content": " code, and this relative drop worsens to " + }, + { + "bbox": [ + 104, + 434, + 504, + 491 + ], + "type": "inline_equation", + "content": "31\\%" + }, + { + "bbox": [ + 104, + 434, + 504, + 491 + ], + "type": "text", + "content": " at " + }, + { + "bbox": [ + 104, + 434, + 504, + 491 + ], + "type": "inline_equation", + "content": "75\\%" + }, + { + "bbox": [ + 104, + 434, + 504, + 491 + ], + "type": "text", + "content": " code, compared to the no-code model. The fully-code model (100% code) is unable to perform on world knowledge tasks (an 86% drop relative to text-only), as there are no data sources from which to acquire the required knowledge in the pre-training mix." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 495, + 504, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 495, + 504, + 540 + ], + "spans": [ + { + "bbox": [ + 104, + 495, + 504, + 540 + ], + "type": "text", + "content": "Performance on Code For code evaluation, there is a linear increase in performance as the amount of code increases, with the best model being a code-only model. As shown in the right inset of Figure 4, the " + }, + { + "bbox": [ + 104, + 495, + 504, + 540 + ], + "type": "inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 104, + 495, + 504, + 540 + ], + "type": "text", + "content": " code model leads to a 2.6x increase on the code benchmarks compared to the " + }, + { + "bbox": [ + 104, + 495, + 504, + 540 + ], + "type": "inline_equation", + "content": "25\\%" + }, + { + "bbox": [ + 104, + 495, + 504, + 540 + ], + "type": "text", + "content": " code model. As expected, for the model with " + }, + { + "bbox": [ + 104, + 495, + 504, + 540 + ], + "type": "inline_equation", + "content": "0\\%" + }, + { + "bbox": [ + 104, + 495, + 504, + 540 + ], + "type": "text", + "content": " code, the average pass@1 score drops to 0." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 559, + 467, + 571 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 559, + 467, + 571 + ], + "spans": [ + { + "bbox": [ + 105, + 559, + 467, + 571 + ], + "type": "text", + "content": "3.4 INFLUENCE OF CODE QUALITY AND PROPERTIES ON GENERAL PERFORMANCE" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 582, + 504, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 582, + 504, + 639 + ], + "spans": [ + { + "bbox": [ + 104, + 582, + 504, + 639 + ], + "type": "text", + "content": "In this section, we investigate the properties of code data by varying its quality and composition. We study this first (a) from the perspective of training from scratch, since we want to isolate the exact effects of different properties of code data. 
Second (b), we incorporate the best variant of the code data (high-quality synthetic code) in our continual pre-training experiments to see if the impact of code quality transfers. We report performance on NL reasoning and code tasks." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 643, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 643, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 643, + 504, + 733 + ], + "type": "text", + "content": "We study the effect of the following properties: (1) MARKUP-STYLE DATA: we separate markup-style programming languages (" + }, + { + "bbox": [ + 104, + 643, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\S" + }, + { + "bbox": [ + 104, + 643, + 504, + 733 + ], + "type": "text", + "content": " 2.1) from the rest of web-based code (Appendix C.3). We replace " + }, + { + "bbox": [ + 104, + 643, + 504, + 733 + ], + "type": "inline_equation", + "content": "20\\%" + }, + { + "bbox": [ + 104, + 643, + 504, + 733 + ], + "type": "text", + "content": " of code-only tokens with markup-style tokens. (2) CODE ADJACENT DATA: Instead of using purely web-based code data, we replace " + }, + { + "bbox": [ + 104, + 643, + 504, + 733 + ], + "type": "inline_equation", + "content": "15\\%" + }, + { + "bbox": [ + 104, + 643, + 504, + 733 + ], + "type": "text", + "content": " of code tokens with code-adjacent datasets - GitHub issues (" + }, + { + "bbox": [ + 104, + 643, + 504, + 733 + ], + "type": "inline_equation", + "content": "5\\%" + }, + { + "bbox": [ + 104, + 643, + 504, + 733 + ], + "type": "text", + "content": "), StackExchange (" + }, + { + "bbox": [ + 104, + 643, + 504, + 733 + ], + "type": "inline_equation", + "content": "5\\%" + }, + { + "bbox": [ + 104, + 643, + 504, + 733 + ], + "type": "text", + "content": "), and Jupyter notebooks (" + }, + { + "bbox": [ + 104, + 643, + 504, + 733 + ], + "type": "inline_equation", + "content": "5\\%" + }, + { + "bbox": [ + 104, + 643, + 504, + 733 + ], + "type": "text", + "content": "), resulting in a code-adjacent model. (3) CODE QUALITY: To control the quality of the code, we replace " + }, + { + "bbox": [ + 104, + 643, + 504, + 733 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 104, + 643, + 504, + 733 + ], + "type": "text", + "content": " of existing code tokens with a synthetically generated high-quality code dataset. The remaining proportions of web-based code data are kept the same, resulting in a code-synth model." 
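To summarize the four § 3.4 variants just described, here is a sketch of their code-token shares; the flat-dictionary encoding and the per-source split of the code-adjacent share follow the percentages stated above, but are our own illustration.

```python
VARIANTS = {
    "code":          {"web_code": 1.00},
    "code+markup":   {"web_code": 0.80, "markup": 0.20},
    "code+adjacent": {"web_code": 0.85, "github_issues": 0.05,
                      "stackexchange": 0.05, "jupyter_notebooks": 0.05},
    "code+synth":    {"web_code": 0.90, "synthetic": 0.10},
}
# Each variant's code-token shares should sum to 100%.
assert all(abs(sum(shares.values()) - 1.0) < 1e-9 for shares in VARIANTS.values())
```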
+ } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 113, + 82, + 324, + 177 + ], + "blocks": [ + { + "bbox": [ + 113, + 82, + 324, + 177 + ], + "lines": [ + { + "bbox": [ + 113, + 82, + 324, + 177 + ], + "spans": [ + { + "bbox": [ + 113, + 82, + 324, + 177 + ], + "type": "image", + "image_path": "bfe1105936533411e23b23642a672fa16cffe114e17aca190b7e14798646b3d1.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 171, + 182, + 269, + 194 + ], + "lines": [ + { + "bbox": [ + 171, + 182, + 269, + 194 + ], + "spans": [ + { + "bbox": [ + 171, + 182, + 269, + 194 + ], + "type": "text", + "content": "(a) Code-only Pre-training" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 329, + 81, + 498, + 177 + ], + "blocks": [ + { + "bbox": [ + 329, + 81, + 498, + 177 + ], + "lines": [ + { + "bbox": [ + 329, + 81, + 498, + 177 + ], + "spans": [ + { + "bbox": [ + 329, + 81, + 498, + 177 + ], + "type": "image", + "image_path": "697f623b23d26d3d291cb46d4ef3c5bdb5b3f1be7f8936b1aeba94d76a428934.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 365, + 182, + 461, + 194 + ], + "lines": [ + { + "bbox": [ + 365, + 182, + 461, + 194 + ], + "spans": [ + { + "bbox": [ + 365, + 182, + 461, + 194 + ], + "type": "text", + "content": "(b) Continual Pre-training" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 202, + 504, + 247 + ], + "lines": [ + { + "bbox": [ + 104, + 202, + 504, + 247 + ], + "spans": [ + { + "bbox": [ + 104, + 202, + 504, + 247 + ], + "type": "text", + "content": "Figure 5: Impact of using different properties of code data: (a) As the most impactful code data source, synthetically generated high-quality code improves NL reasoning and code performance for code pre-training. (b) These improvements with synthetically generated high-quality code data also transfer to the continual pre-training setting. All models are of size 470M parameters." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 270, + 504, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 270, + 504, + 304 + ], + "spans": [ + { + "bbox": [ + 104, + 270, + 504, + 304 + ], + "type": "text", + "content": "Code-only pre-training We compare the above variants to a model that is trained only on web-based code data (code) from the Stack dataset (Kocetkov et al., 2022), which forms our baseline model. All the variants are pre-trained using the same number of tokens (200B) for a fair comparison." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 308, + 504, + 387 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 308, + 504, + 387 + ], + "spans": [ + { + "bbox": [ + 104, + 308, + 504, + 387 + ], + "type": "text", + "content": "In Figure 5a, we evaluate the impact of code quality and code composition. We observe that across all variants, including diverse code sources as well as synthetic code leads to gains in natural language performance relative to code; however, only synthetically generated code improves the code benchmarks. We relate this to our code evaluation, where we measure performance in Python; thus, different programming languages or code-adjacent data slightly decrease the results. Here, code+markup and code+adjacent lead to " + }, + { + "bbox": [ + 104, + 308, + 504, + 387 + ], + "type": "inline_equation", + "content": "2.8\\%" + }, + { + "bbox": [ + 104, + 308, + 504, + 387 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 308, + 504, + 387 + ], + "type": "inline_equation", + "content": "6.3\\%" + }, + { + "bbox": [ + 104, + 308, + 504, + 387 + ], + "type": "text", + "content": " relative improvement in NL reasoning compared to code (web-code-only), but cause " + }, + { + "bbox": [ + 104, + 308, + 504, + 387 + ], + "type": "inline_equation", + "content": "15.7\\%" + }, + { + "bbox": [ + 104, + 308, + 504, + 387 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 308, + 504, + 387 + ], + "type": "inline_equation", + "content": "9.4\\%" + }, + { + "bbox": [ + 104, + 308, + 504, + 387 + ], + "type": "text", + "content": " drops in code evaluation." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 392, + 504, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 392, + 504, + 491 + ], + "spans": [ + { + "bbox": [ + 104, + 392, + 504, + 491 + ], + "type": "text", + "content": "Our synthetic code data (code+synth) is the most impactful ablation. It is particularly impressive given its relatively small share of the overall dataset. Despite a small weighting of " + }, + { + "bbox": [ + 104, + 392, + 504, + 491 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 104, + 392, + 504, + 491 + ], + "type": "text", + "content": ", the inclusion of synthetic data leads to relative improvements of " + }, + { + "bbox": [ + 104, + 392, + 504, + 491 + ], + "type": "inline_equation", + "content": "9\\%" + }, + { + "bbox": [ + 104, + 392, + 504, + 491 + ], + "type": "text", + "content": " on NL reasoning and " + }, + { + "bbox": [ + 104, + 392, + 504, + 491 + ], + "type": "inline_equation", + "content": "44.9\\%" + }, + { + "bbox": [ + 104, + 392, + 504, + 491 + ], + "type": "text", + "content": " on code benchmarks compared to the baseline of web-code-only. 
We note that the lifts observed for synthetic data are even more impressive given the limited amount of synthetic data available compared to code-adjacent data (3.2B tokens vs 21.4B tokens) or code+markup data (3.2B tokens vs 40B tokens), and the weighting during pre-training allocation (" + }, + { + "bbox": [ + 104, + 392, + 504, + 491 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 104, + 392, + 504, + 491 + ], + "type": "text", + "content": " vs " + }, + { + "bbox": [ + 104, + 392, + 504, + 491 + ], + "type": "inline_equation", + "content": "15\\%" + }, + { + "bbox": [ + 104, + 392, + 504, + 491 + ], + "type": "text", + "content": " vs " + }, + { + "bbox": [ + 104, + 392, + 504, + 491 + ], + "type": "inline_equation", + "content": "20\\%" + }, + { + "bbox": [ + 104, + 392, + 504, + 491 + ], + "type": "text", + "content": " for synthetic, code-adjacent, and markup data, respectively). This suggests that increasing the proportion of such high-quality code data sources is a key lever for future improvement." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 496, + 504, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 496, + 504, + 540 + ], + "spans": [ + { + "bbox": [ + 104, + 496, + 504, + 540 + ], + "type": "text", + "content": "Continual pre-training Here, based on the findings from code-only pre-training, we incorporated code + synth into our best continual pre-training variant (balanced + synth " + }, + { + "bbox": [ + 104, + 496, + 504, + 540 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 104, + 496, + 504, + 540 + ], + "type": "text", + "content": " text). We compare this against the same variant without synthetic code data (balanced " + }, + { + "bbox": [ + 104, + 496, + 504, + 540 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 104, + 496, + 504, + 540 + ], + "type": "text", + "content": " text) to evaluate the benefits of synthetic data. We use the same amount of code and text tokens in these experiments." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 545, + 504, + 591 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 545, + 504, + 591 + ], + "spans": [ + { + "bbox": [ + 104, + 545, + 504, + 591 + ], + "type": "text", + "content": "As shown in Figure 5b, balanced+synth→text achieves " + }, + { + "bbox": [ + 104, + 545, + 504, + 591 + ], + "type": "inline_equation", + "content": "2\\%" + }, + { + "bbox": [ + 104, + 545, + 504, + 591 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 545, + 504, + 591 + ], + "type": "inline_equation", + "content": "35\\%" + }, + { + "bbox": [ + 104, + 545, + 504, + 591 + ], + "type": "text", + "content": " relative improvement over balanced→text in NL reasoning and code, respectively. This further confirms that even a small percentage of high-quality code data not only improves performance in code pre-training but also increases code and non-code performance after continual pre-training with text data."
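A back-of-envelope check of why the synthetic-data lift is striking (our arithmetic, not the paper's; it assumes the 200B-token code-only budget applies to each variant): the stated weights force far more repetition of the small synthetic set than of the larger sources.

```python
# Tokens drawn from each swapped-in source under the stated mixture weights,
# assuming a 200B-token pre-training run; "epochs" = how often the source
# must be repeated to fill its share.
total = 200e9
sources = {                     # (mixture weight, available tokens)
    "synthetic":     (0.10, 3.2e9),
    "code-adjacent": (0.15, 21.4e9),
    "markup":        (0.20, 40.0e9),
}
for name, (w, avail) in sources.items():
    drawn = w * total
    print(f"{name}: {drawn / 1e9:.0f}B drawn -> {drawn / avail:.2f} epochs")
# synthetic: 20B drawn -> 6.25 epochs
# code-adjacent: 30B drawn -> 1.40 epochs
# markup: 40B drawn -> 1.00 epochs
```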
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 605, + 289, + 616 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 605, + 289, + 616 + ], + "spans": [ + { + "bbox": [ + 105, + 605, + 289, + 616 + ], + "type": "text", + "content": "3.5 CODE IN PRE-TRAINING COOLDOWN" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 627, + 504, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 627, + 504, + 704 + ], + "spans": [ + { + "bbox": [ + 104, + 627, + 504, + 704 + ], + "type": "text", + "content": "In this section, we evaluate the impact of code at the final stage of pre-training. Here, we consider cooldown, where we up-weight high-quality text, math, code, and instruct-style datasets. We change the learning rate schedule from cosine-based to linear annealing with a final learning rate of " + }, + { + "bbox": [ + 104, + 627, + 504, + 704 + ], + "type": "inline_equation", + "content": "1e-6" + }, + { + "bbox": [ + 104, + 627, + 504, + 704 + ], + "type": "text", + "content": ". We evaluate the impact of code in cooldown by comparing three models: a pre-trained model before cooldown, cooldown without code data, and cooldown with " + }, + { + "bbox": [ + 104, + 627, + 504, + 704 + ], + "type": "inline_equation", + "content": "20\\%" + }, + { + "bbox": [ + 104, + 627, + 504, + 704 + ], + "type": "text", + "content": " code data. For our pre-trained model, we use balanced→text as it is our best pre-trained variant. We preserve the same token budget across variants: 40B tokens, which is " + }, + { + "bbox": [ + 104, + 627, + 504, + 704 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 104, + 627, + 504, + 704 + ], + "type": "text", + "content": " of the token budget of the pre-trained model." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 709, + 504, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 504, + 732 + ], + "type": "text", + "content": "Impact of code used during cooldown in different tasks In Figure 6a, we evaluate the impact of code in cooldown on model performance in NL Reasoning, world knowledge, and code benchmarks."
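For concreteness, the cooldown's linear annealing schedule described above can be sketched as below (our illustration, not the paper's code; only the final learning rate of 1e-6 and the 40B-token budget come from the text, while the peak learning rate and tokens-per-step are made-up values):

```python
def cooldown_lr(step: int, total_steps: int,
                start_lr: float = 3e-4, final_lr: float = 1e-6) -> float:
    """Linearly anneal from start_lr at step 0 to final_lr at total_steps."""
    frac = min(step / total_steps, 1.0)
    return start_lr + frac * (final_lr - start_lr)

# e.g. a 40B-token cooldown at an assumed 4M tokens per step -> 10,000 steps
for s in (0, 5_000, 10_000):
    print(s, f"{cooldown_lr(s, 10_000):.3e}")  # 3.000e-04, 1.505e-04, 1.000e-06
```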
+ } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 108, + 81, + 321, + 162 + ], + "blocks": [ + { + "bbox": [ + 108, + 81, + 321, + 162 + ], + "lines": [ + { + "bbox": [ + 108, + 81, + 321, + 162 + ], + "spans": [ + { + "bbox": [ + 108, + 81, + 321, + 162 + ], + "type": "image", + "image_path": "da623e2c7e25504e3b4dff71e5b5195640f97b335a0d0bcf47f61048a5f7f540.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 179, + 167, + 259, + 178 + ], + "lines": [ + { + "bbox": [ + 179, + 167, + 259, + 178 + ], + "spans": [ + { + "bbox": [ + 179, + 167, + 259, + 178 + ], + "type": "text", + "content": "(a) Downstream tasks" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 331, + 84, + 496, + 161 + ], + "blocks": [ + { + "bbox": [ + 331, + 84, + 496, + 161 + ], + "lines": [ + { + "bbox": [ + 331, + 84, + 496, + 161 + ], + "spans": [ + { + "bbox": [ + 331, + 84, + 496, + 161 + ], + "type": "image", + "image_path": "bea05cec28c33b6ca910fd2883b14f385de8c7ef47fcf40a189caea3262e1abb.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 372, + 167, + 462, + 178 + ], + "lines": [ + { + "bbox": [ + 372, + 167, + 462, + 178 + ], + "spans": [ + { + "bbox": [ + 372, + 167, + 462, + 178 + ], + "type": "text", + "content": "(b) Generative win-rates" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 187, + 504, + 232 + ], + "lines": [ + { + "bbox": [ + 104, + 187, + 504, + 232 + ], + "spans": [ + { + "bbox": [ + 104, + 187, + 504, + 232 + ], + "type": "text", + "content": "Figure 6: Impact of code data in pre-training cooldown: Including code data in the cooldown phase improves downstream performance relative to cooldown with no code. All cooldown variants benefit downstream tasks and especially generative quality. We find the largest gains from cooldown with code, with the highest win-rates of " + }, + { + "bbox": [ + 104, + 187, + 504, + 232 + ], + "type": "inline_equation", + "content": "52.3\\%" + }, + { + "bbox": [ + 104, + 187, + 504, + 232 + ], + "type": "text", + "content": " over a model with no cooldown."
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 253, + 504, + 276 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 253, + 504, + 276 + ], + "spans": [ + { + "bbox": [ + 104, + 253, + 504, + 276 + ], + "type": "text", + "content": "Across tasks, we find that a cooldown with code data is most beneficial, with relative gains of " + }, + { + "bbox": [ + 104, + 253, + 504, + 276 + ], + "type": "inline_equation", + "content": "3.6\\%" + }, + { + "bbox": [ + 104, + 253, + 504, + 276 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 253, + 504, + 276 + ], + "type": "inline_equation", + "content": "10.1\\%" + }, + { + "bbox": [ + 104, + 253, + 504, + 276 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 253, + 504, + 276 + ], + "type": "inline_equation", + "content": "20\\%" + }, + { + "bbox": [ + 104, + 253, + 504, + 276 + ], + "type": "text", + "content": " in NL reasoning, world knowledge, and code over the model without cooldown." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 281, + 504, + 315 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 281, + 504, + 315 + ], + "spans": [ + { + "bbox": [ + 104, + 281, + 504, + 315 + ], + "type": "text", + "content": "In contrast, we find that cooldown without code does not provide any increases for either NL reasoning or Code, while providing a relative improvement of " + }, + { + "bbox": [ + 104, + 281, + 504, + 315 + ], + "type": "inline_equation", + "content": "3.1\\%" + }, + { + "bbox": [ + 104, + 281, + 504, + 315 + ], + "type": "text", + "content": " in World Knowledge tasks compared to no cooldown, showing the critical role of code data also in the cooldown phase of pre-training." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 319, + 504, + 397 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 319, + 504, + 397 + ], + "spans": [ + { + "bbox": [ + 104, + 319, + 504, + 397 + ], + "type": "text", + "content": "Generative win-rates after cooldown As expected, cooldown has a significant impact on generative performance as measured by win-rates (Figure 6b). This is because we up-weight high-quality data sources in the pre-training mix, including instruction-style datasets such as Dolly v2 (Conover et al., 2023). Both cooldown variants (cooldown w/o code, cooldown w/ code) beat the no-cooldown model by large win-rates (48.2% and 52.3%). Comparing the cooldown variants, including code leads to an additional 4.1% in generative win-rates against no-cooldown compared to cooldown without code." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 411, + 291, + 421 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 411, + 291, + 421 + ], + "spans": [ + { + "bbox": [ + 105, + 411, + 291, + 421 + ], + "type": "text", + "content": "3.6 COMPARING PRE-TRAINING RECIPES" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 430, + 504, + 454 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 430, + 504, + 454 + ], + "spans": [ + { + "bbox": [ + 104, + 430, + 504, + 454 + ], + "type": "text", + "content": "Considering all our experiments, we summarize our findings and recommend recipes for pre-training with code data. Table 2 (Appendix B) shows the different variants along with pre-training phases."
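Before the recipe comparison, a note on the arithmetic used throughout this section (our reading, not the paper's code; the scores below are invented for illustration): the "relative" numbers are improvements normalized by the baseline score.

```python
def relative_gain(variant: float, baseline: float) -> float:
    """Relative improvement of `variant` over `baseline`, in percent."""
    return (variant - baseline) / baseline * 100.0

# Hypothetical scores: a code benchmark moving from 25.0 to 30.0 is the
# kind of change reported above as a 20% relative gain.
print(f"{relative_gain(30.0, 25.0):.1f}%")  # -> 20.0%
```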
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 459, + 504, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 459, + 504, + 525 + ], + "spans": [ + { + "bbox": [ + 104, + 459, + 504, + 525 + ], + "type": "text", + "content": "Best recipe for natural language tasks As seen in Sections 3.1, 3.3, and 3.5, including code in all phases of pre-training provides improvements across all task categories. When looking at the final recipes, we find that the balanced " + }, + { + "bbox": [ + 104, + 459, + 504, + 525 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 104, + 459, + 504, + 525 + ], + "type": "text", + "content": " text model followed by a cooldown that includes code data yields the best overall performance in natural language tasks considering NL reasoning, world knowledge, and generative performance. Notably, this model achieves the highest generative win-rates with " + }, + { + "bbox": [ + 104, + 459, + 504, + 525 + ], + "type": "inline_equation", + "content": "37.7\\%" + }, + { + "bbox": [ + 104, + 459, + 504, + 525 + ], + "type": "text", + "content": " vs 33.7% against text-only as shown in Figure 7." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 531, + 504, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 531, + 504, + 586 + ], + "spans": [ + { + "bbox": [ + 104, + 531, + 504, + 586 + ], + "type": "text", + "content": "Best recipe for code performance Among complete recipes shown in Table 2, balanced-only provides the best performance in code benchmarks. This model achieves " + }, + { + "bbox": [ + 104, + 531, + 504, + 586 + ], + "type": "inline_equation", + "content": "20\\%" + }, + { + "bbox": [ + 104, + 531, + 504, + 586 + ], + "type": "text", + "content": " relative gain compared to the second-best code " + }, + { + "bbox": [ + 104, + 531, + 504, + 586 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 104, + 531, + 504, + 586 + ], + "type": "text", + "content": " text and " + }, + { + "bbox": [ + 104, + 531, + 504, + 586 + ], + "type": "inline_equation", + "content": "55\\%" + }, + { + "bbox": [ + 104, + 531, + 504, + 586 + ], + "type": "text", + "content": " relative gain compared to balanced " + }, + { + "bbox": [ + 104, + 531, + 504, + 586 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 104, + 531, + 504, + 586 + ], + "type": "text", + "content": " text. However, balanced-only falls behind in natural language performance by a " + }, + { + "bbox": [ + 104, + 531, + 504, + 586 + ], + "type": "inline_equation", + "content": "2.5\\%" + }, + { + "bbox": [ + 104, + 531, + 504, + 586 + ], + "type": "text", + "content": " relative difference and a " + }, + { + "bbox": [ + 104, + 531, + 504, + 586 + ], + "type": "inline_equation", + "content": "5.0\\%" + }, + { + "bbox": [ + 104, + 531, + 504, + 586 + ], + "type": "text", + "content": " win-rate difference (vs text-only), both compared to balanced " + }, + { + "bbox": [ + 104, + 531, + 504, + 586 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 104, + 531, + 504, + 586 + ], + "type": "text", + "content": " text."
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 591, + 504, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 591, + 504, + 647 + ], + "spans": [ + { + "bbox": [ + 104, + 591, + 504, + 647 + ], + "type": "text", + "content": "Including code in all phases of pre-training is beneficial across our three task categories and generative performance. Our recommendation for the best overall performance is to include a balanced mixture of code and text data during pre-training from scratch (§ 3.3), use a relatively lower code percentage during continual pre-training (§ 3.1), and include code data in the cooldown mixture. Further, we recommend including high-quality code data during all phases of pre-training (§ 3.4)." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 663, + 209, + 675 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 663, + 209, + 675 + ], + "spans": [ + { + "bbox": [ + 105, + 663, + 209, + 675 + ], + "type": "text", + "content": "4 RELATED WORK" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "content": "Understanding the impact of pre-training mixes Several works have studied the effects of data age, quality, toxicity, and domain of pre-training data (Longpre et al., 2023; Üstün et al., 2024). Others have looked at the impact of filtering (Raffel et al., 2020; Rae et al., 2021; Penedo et al., 2023), de-duplication (Zhang et al., 2022), and data pruning (Lozhkov et al., 2024; Marion et al., 2023;" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 137, + 81, + 250, + 188 + ], + "blocks": [ + { + "bbox": [ + 137, + 81, + 250, + 188 + ], + "lines": [ + { + "bbox": [ + 137, + 81, + 250, + 188 + ], + "spans": [ + { + "bbox": [ + 137, + 81, + 250, + 188 + ], + "type": "image", + "image_path": "0acead19bcc93dc191f7b4b05f59a204988bbe81cefebf3380086b79102e5615.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 118, + 198, + 490, + 210 + ], + "lines": [ + { + "bbox": [ + 118, + 198, + 490, + 210 + ], + "spans": [ + { + "bbox": [ + 118, + 198, + 490, + 210 + ], + "type": "text", + "content": "Figure 7: Generative performance as measured by win-rates for variants with full cooldown."
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 261, + 82, + 366, + 187 + ], + "blocks": [ + { + "bbox": [ + 261, + 82, + 366, + 187 + ], + "lines": [ + { + "bbox": [ + 261, + 82, + 366, + 187 + ], + "spans": [ + { + "bbox": [ + 261, + 82, + 366, + 187 + ], + "type": "image", + "image_path": "717a60ebef812534c77f398e4b5a40fba0b77fa12b96fb613eb94aff0608eedb.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 378, + 82, + 473, + 187 + ], + "blocks": [ + { + "bbox": [ + 378, + 82, + 473, + 187 + ], + "lines": [ + { + "bbox": [ + 378, + 82, + 473, + 187 + ], + "spans": [ + { + "bbox": [ + 378, + 82, + 473, + 187 + ], + "type": "image", + "image_path": "57d292212ebabb7a14ce6e8158b991527af35051c5e192939ade8e652c7d273e.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 231, + 506, + 287 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 231, + 506, + 287 + ], + "spans": [ + { + "bbox": [ + 104, + 231, + 506, + 287 + ], + "type": "text", + "content": "Chimoto et al., 2024; Boubdir et al., 2023). Furthermore, several works have considered the role of synthetic data in improving performance (Shimabucoro et al., 2024; Dang et al., 2024; Aakanksha et al., 2024) and helping bridge the gap in performance between open weights and proprietary models (Gunasekar et al., 2023; Li et al., 2023b). In contrast to our work, which focuses explicitly on understanding the role of code, these studies focus on characteristics of training data as a whole." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 292, + 506, + 469 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 292, + 506, + 469 + ], + "spans": [ + { + "bbox": [ + 104, + 292, + 506, + 469 + ], + "type": "text", + "content": "Understanding the role of code Including code in the pre-training data mixture, even for models not specifically designed for code, has been a common practice in LLM pre-training (Dubey et al., 2024; Gemini-Team et al., 2024; Groeneveld et al., 2024). In addition to serving the popular use case of code completion and generation (Chen et al., 2021), previous studies suggest that the addition of code improves the performance of LLMs on various NLP tasks, such as entity linking (Kim et al., 2024), commonsense reasoning (Madaan et al., 2022b), mathematical reasoning tasks (Liang et al., 2022; Madaan et al., 2022a; Gao et al., 2023; Shao et al., 2024), and general reasoning capabilities (Muennighoff et al., 2023a; Fu & Khot, 2022; Ma et al., 2023). Muennighoff et al. (2023b) demonstrated that Python code data can be used to improve pre-training performance. They focused on a low-resource pre-training regime with limited data and an evaluation set-up limited to perplexity evaluations. Zhang et al. (2024) investigated the impact of code on LLMs' internal reasoning capability across various tasks and model families. They only focus on the effect of code in the supervised fine-tuning stage (SFT), primarily measuring the impact on reasoning. Zhu et al. (2024) report the performance of their DeepSeek-Coder-V2 models on general natural language benchmarks. They compare chat and instruct models, and do not investigate different phases of pre-training and properties of code."
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 473, + 506, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 473, + 506, + 518 + ], + "spans": [ + { + "bbox": [ + 104, + 473, + 506, + 518 + ], + "type": "text", + "content": "To the best of our knowledge, this work is the first study that presents a thorough investigation of the impact of code in pre-training on non-code tasks. Our experiments span several axes and an exhaustive evaluation suite, with costly ablations at scale, including model initialization strategies, different proportions and properties of code data, and model scales." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 534, + 196, + 547 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 534, + 196, + 547 + ], + "spans": [ + { + "bbox": [ + 105, + 534, + 196, + 547 + ], + "type": "text", + "content": "5 CONCLUSION" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 559, + 506, + 703 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 559, + 506, + 703 + ], + "spans": [ + { + "bbox": [ + 104, + 559, + 506, + 703 + ], + "type": "text", + "content": "We perform a first-of-its-kind systematic study to answer \"what is the impact of code data used in pre-training on a large variety of downstream tasks beyond code generation\". We focus not just on code performance but also on downstream natural language performance, as well as generative quality using LLM-as-a-judge win-rates. We conduct ablations that look at initialization, proportions of code, quality and properties of code, and the role of code in pre-training cooldown. We find across all scales of experiments that code provides critical improvements to performance on non-code tasks. Compared to text-only pre-training, for our best variant, the addition of code results in relative increases of " + }, + { + "bbox": [ + 104, + 559, + 506, + 703 + ], + "type": "inline_equation", + "content": "8.2\\%" + }, + { + "bbox": [ + 104, + 559, + 506, + 703 + ], + "type": "text", + "content": " in natural language (NL) reasoning, " + }, + { + "bbox": [ + 104, + 559, + 506, + 703 + ], + "type": "inline_equation", + "content": "4.2\\%" + }, + { + "bbox": [ + 104, + 559, + 506, + 703 + ], + "type": "text", + "content": " in world knowledge, " + }, + { + "bbox": [ + 104, + 559, + 506, + 703 + ], + "type": "inline_equation", + "content": "6.6\\%" + }, + { + "bbox": [ + 104, + 559, + 506, + 703 + ], + "type": "text", + "content": " in generative win-rates, and a 12x boost in code performance. Further, performing cooldown with code improves performance by " + }, + { + "bbox": [ + 104, + 559, + 506, + 703 + ], + "type": "inline_equation", + "content": "3.6\\%" + }, + { + "bbox": [ + 104, + 559, + 506, + 703 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 559, + 506, + 703 + ], + "type": "inline_equation", + "content": "10.1\\%" + }, + { + "bbox": [ + 104, + 559, + 506, + 703 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 559, + 506, + 703 + ], + "type": "inline_equation", + "content": "20\\%" + }, + { + "bbox": [ + 104, + 559, + 506, + 703 + ], + "type": "text", + "content": " in NL reasoning, world knowledge, and code relative to the model before cooldown and leads to " + }, + { + "bbox": [ + 104, + 559, + 506, + 703 + ], + "type": "inline_equation", + "content": "52.3\\%" + }, + { + "bbox": [ + 104, + 559, + 506, + 703 + ], + "type": "text", + "content": " generative win-rates. 
Finally, we find that adding a small amount of high-quality synthetic data can have an outsized impact on both NL reasoning (" + }, + { + "bbox": [ + 104, + 559, + 506, + 703 + ], + "type": "inline_equation", + "content": "9\\%" + }, + { + "bbox": [ + 104, + 559, + 506, + 703 + ], + "type": "text", + "content": " relative increase) and code performance (" + }, + { + "bbox": [ + 104, + 559, + 506, + 703 + ], + "type": "inline_equation", + "content": "44.9\\%" + }, + { + "bbox": [ + 104, + 559, + 506, + 703 + ], + "type": "text", + "content": " relative increase)." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 81, + 176, + 93 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 81, + 176, + 93 + ], + "spans": [ + { + "bbox": [ + 106, + 81, + 176, + 93 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 100, + 505, + 732 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 105, + 100, + 505, + 134 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 100, + 505, + 134 + ], + "spans": [ + { + "bbox": [ + 105, + 100, + 505, + 134 + ], + "type": "text", + "content": "Aakanksha, Arash Ahmadian, Beyza Ermis, Seraphina Goldfarb-Tarrant, Julia Kreutzer, Marzieh Fadaee, and Sara Hooker. The multilingual alignment prism: Aligning global and local preferences to reduce harm, 2024. URL https://arxiv.org/abs/2406.18682." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 140, + 505, + 198 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 140, + 505, + 198 + ], + "spans": [ + { + "bbox": [ + 105, + 140, + 505, + 198 + ], + "type": "text", + "content": "Viraat Aryabumi, John Dang, Dwarak Talupuru, Saurabh Dash, David Cairuz, Hangyu Lin, Bharat Venkitesh, Madeline Smith, Jon Ander Campos, Yi Chern Tan, Kelly Marchisio, Max Bartolo, Sebastian Ruder, Acyr Locatelli, Julia Kreutzer, Nick Frosst, Aidan Gomez, Phil Blunsom, Marzieh Fadaee, Ahmet Üstün, and Sara Hooker. Aya 23: Open weight releases to further multilingual progress, 2024. URL https://arxiv.org/abs/2405.15032." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 204, + 505, + 238 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 204, + 505, + 238 + ], + "spans": [ + { + "bbox": [ + 105, + 204, + 505, + 238 + ], + "type": "text", + "content": "Jacob Austin, Augustus Odena, Maxwell Nye, Maarten Bosma, Henryk Michalewski, David Dohan, Ellen Jiang, Carrie Cai, Michael Terry, Quoc Le, and Charles Sutton. Program synthesis with large language models, 2021. URL https://arxiv.org/abs/2108.07732." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 245, + 505, + 279 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 245, + 505, + 279 + ], + "spans": [ + { + "bbox": [ + 105, + 245, + 505, + 279 + ], + "type": "text", + "content": "Meriem Boubdir, Edward Kim, Beyza Ermis, Marzieh Fadaee, and Sara Hooker. Which prompts make the difference? data prioritization for efficient human llm evaluation, 2023. URL https://arxiv.org/abs/2310.14424." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 286, + 505, + 332 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 286, + 505, + 332 + ], + "spans": [ + { + "bbox": [ + 105, + 286, + 505, + 332 + ], + "type": "text", + "content": "James Bradbury, Roy Frostig, Peter Hawkins, Matthew James Johnson, Chris Leary, Dougal Maclaurin, George Necula, Adam Paszke, Jake VanderPlas, Skye Wanderman-Milne, and Qiao Zhang. JAX: composable transformations of Python+NumPy programs, 2018. URL http://github.com/google/jax." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 338, + 505, + 416 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 338, + 505, + 416 + ], + "spans": [ + { + "bbox": [ + 105, + 338, + 505, + 416 + ], + "type": "text", + "content": "Tom B. Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel M. Ziegler, Jeffrey Wu, Clemens Winter, Christopher Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. Language models are few-shot learners. arXiv, abs/2005.14165, 2020." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 423, + 505, + 545 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 423, + 505, + 545 + ], + "spans": [ + { + "bbox": [ + 105, + 423, + 505, + 545 + ], + "type": "text", + "content": "Mark Chen, Jerry Tworek, Heewoo Jun, Qiming Yuan, Henrique Ponde de Oliveira Pinto, Jared Kaplan, Harri Edwards, Yuri Burda, Nicholas Joseph, Greg Brockman, Alex Ray, Raul Puri, Gretchen Krueger, Michael Petrov, Heidy Khlaaf, Girish Sastry, Pamela Mishkin, Brooke Chan, Scott Gray, Nick Ryder, Mikhail Pavlov, Alethea Power, Lukasz Kaiser, Mohammad Bavarian, Clemens Winter, Philippe Tillet, Felipe Petroski Such, Dave Cummings, Matthias Plappert, Fiotios Chantzis, Elizabeth Barnes, Ariel Herbert-Voss, William Hebgen Guss, Alex Nichol, Alex Paino, Nikolas Tezak, Jie Tang, Igor Babuschkin, Suchir Balaji, Shantanu Jain, William Saunders, Christopher Hesse, Andrew N. Carr, Jan Leike, Josh Achiam, Vedant Misra, Evan Morikawa, Alec Radford, Matthew Knight, Miles Brundage, Mira Murati, Katie Mayer, Peter Welinder, Bob McGrew, Dario Amodei, Sam McCandlish, Ilya Sutskever, and Wojciech Zaremba. Evaluating large language models trained on code, 2021. URL https://arxiv.org/abs/2107.03374." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 552, + 505, + 597 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 552, + 505, + 597 + ], + "spans": [ + { + "bbox": [ + 105, + 552, + 505, + 597 + ], + "type": "text", + "content": "Zixiang Chen, Yihe Deng, Yue Wu, Quanquan Gu, and Yuanzhi Li. Towards understanding the mixture-of-experts layer in deep learning. In Alice H. 
Oh, Alekh Agarwal, Danielle Belgrave, and Kyunghyun Cho (eds.), Advances in Neural Information Processing Systems, 2022. URL https://openreview.net/forum?id=MaYzugDmQV." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 604, + 505, + 628 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 604, + 505, + 628 + ], + "spans": [ + { + "bbox": [ + 105, + 604, + 505, + 628 + ], + "type": "text", + "content": "Cheng-Han Chiang and Hung yi Lee. Can large language models be an alternative to human evaluations?, 2023." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 635, + 505, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 635, + 505, + 669 + ], + "spans": [ + { + "bbox": [ + 105, + 635, + 505, + 669 + ], + "type": "text", + "content": "Everlyn Asiko Chimoto, Jay Gala, Orevaoghene Ahia, Julia Kreutzer, Bruce A. Bassett, and Sara Hooker. Critical learning periods: Leveraging early training dynamics for efficient data pruning, 2024. URL https://arxiv.org/abs/2405.19462." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 676, + 505, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 676, + 505, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 676, + 505, + 732 + ], + "type": "text", + "content": "Eunsol Choi, He He, Mohit Iyyer, Mark Yatskar, Wen-tau Yih, Yejin Choi, Percy Liang, and Luke Zettlemoyer. QuAC: Question answering in context. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pp. 2174-2184, Brussels, Belgium, October-November 2018. Association for Computational Linguistics. doi: 10.18653/v1/D18-1241. URL https://aclanthology.org/D18-1241." + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 506, + 732 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 107, + 82, + 506, + 225 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 82, + 506, + 225 + ], + "spans": [ + { + "bbox": [ + 107, + 82, + 506, + 225 + ], + "type": "text", + "content": "Aakanksha Chowdhery, Sharan Narang, Jacob Devlin, Maarten Bosma, Gaurav Mishra, Adam Roberts, Paul Barham, Hyung Won Chung, Charles Sutton, Sebastian Gehrmann, Parker Schuh, Kensen Shi, Sasha Tsvyashchenko, Joshua Maynez, Abhishek Rao, Parker Barnes, Yi Tay, Noam Shazeer, Vinodkumar Prabhakaran, Emily Reif, Nan Du, Ben Hutchinson, Reiner Pope, James Bradbury, Jacob Austin, Michael Isard, Guy Gur-Ari, Pengcheng Yin, Toju Duke, Anselm Levskaya, Sanjay Ghemawat, Sunipa Dev, Henryk Michalewski, Xavier Garcia, Vedant Misra, Kevin Robinson, Liam Fedus, Denny Zhou, Daphne Ippolito, David Luan, Hyeontaek Lim, Barret Zoph, Alexander Spiridonov, Ryan Sepassi, David Dohan, Shivani Agrawal, Mark 
Omernick, Andrew M. Dai, Thanumalayan Sankaranarayana Pillai, Marie Pellat, Aitor Lewkowycz, Erica Moreira, Rewon Child, Oleksandr Polozov, Katherine Lee, Zongwei Zhou, Xuezhi Wang, Brennan Saeta, Mark Diaz, Orhan Firat, Michele Catasta, Jason Wei, Kathy Meier-Hellstern, Douglas Eck, Jeff Dean, Slav Petrov, and Noah Fiedel. Palm: Scaling language modeling with pathways, 2022. URL https://arxiv.org/abs/2204.02311." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 232, + 506, + 300 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 232, + 506, + 300 + ], + "spans": [ + { + "bbox": [ + 105, + 232, + 506, + 300 + ], + "type": "text", + "content": "Christopher Clark, Kenton Lee, Ming-Wei Chang, Tom Kwiatkowski, Michael Collins, and Kristina Toutanova. BoolQ: Exploring the surprising difficulty of natural yes/no questions. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pp. 2924–2936, Minneapolis, Minnesota, June 2019. Association for Computational Linguistics. doi: 10.18653/v1/N19-1300. URL https://aclanthology.org/N19-1300." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 306, + 504, + 340 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 306, + 504, + 340 + ], + "spans": [ + { + "bbox": [ + 105, + 306, + 504, + 340 + ], + "type": "text", + "content": "Peter Clark, Isaac Cowhey, Oren Etzioni, Tushar Khot, Ashish Sabharwal, Carissa Schoenick, and Oyvind Tafjord. Think you have solved question answering? try arc, the ai2 reasoning challenge. arXiv:1803.05457v1, 2018." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 347, + 504, + 371 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 347, + 504, + 371 + ], + "spans": [ + { + "bbox": [ + 105, + 347, + 504, + 371 + ], + "type": "text", + "content": "Together Computer. Redpajama: an open dataset for training large language models, 2023. URL https://github.com/togethercomputer/RedPajama-Data." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 377, + 504, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 377, + 504, + 422 + ], + "spans": [ + { + "bbox": [ + 105, + 377, + 504, + 422 + ], + "type": "text", + "content": "Mike Conover, Matt Hayes, Ankit Mathur, Jianwei Xie, Jun Wan, Sam Shah, Ali Ghodsi, Patrick Wendell, Matei Zaharia, and Reynold Xin. Free dolly: Introducing the world's first truly open instruction-tuned llm, 2023. URL https://www.databricks.com/blog/2023/04/12/dolly-first-open-commercially-viable-instruction-tuned-llm." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 430, + 504, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 430, + 504, + 464 + ], + "spans": [ + { + "bbox": [ + 105, + 430, + 504, + 464 + ], + "type": "text", + "content": "John Dang, Arash Ahmadian, Kelly Marchisio, Julia Kreutzer, Ahmet Üstün, and Sara Hooker. Rlhf can speak many languages: Unlocking multilingual preference optimization for llms, 2024. URL https://arxiv.org/abs/2407.02552." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 471, + 504, + 494 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 471, + 504, + 494 + ], + "spans": [ + { + "bbox": [ + 105, + 471, + 504, + 494 + ], + "type": "text", + "content": "Marie-Catherine de Marneffe, Mandy Simons, and Judith Tonhauser. 
The commitmentbank: Investigating projection in naturally occurring discourse, 2019." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 501, + 506, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 501, + 506, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 501, + 506, + 732 + ], + "type": "text", + "content": "Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, et al. The llama 3 herd of models, 2024. URL https://arxiv.org/abs/2407.21783." + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 292, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 292, + 37 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 292, + 37 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 301, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 301, + 751, + 310, + 760 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 505, + 732 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 105, + 201, + 504, + 245 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 201, + 504, + 245 + ], + "spans": [ + { + "bbox": [ + 105, + 201, + 504, + 245 + ], + "type": "text", + "content": "Yann Dubois, Xuechen Li, Rohan Taori, Tianyi Zhang, Ishaan Gulrajani, Jimmy Ba, Carlos Guestrin, Percy Liang, and Tatsunori B. Hashimoto. Alpacafarm: A simulation framework for methods that learn from human feedback, 2024. 
URL https://arxiv.org/abs/2305.14387." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 253, + 504, + 288 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 253, + 504, + 288 + ], + "spans": [ + { + "bbox": [ + 105, + 253, + 504, + 288 + ], + "type": "text", + "content": "Yao Fu, Hao Peng, and Tushar Khot. How does gpt obtain its ability? tracing emergent abilities of language models to their sources. Yao Fu's Notion, Dec 2022. URL https://yaofu.notion.site/b9a57ac0acf74f30a1ab9e3e36fa1dc1?pvs=25." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 295, + 504, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 295, + 504, + 308 + ], + "spans": [ + { + "bbox": [ + 105, + 295, + 504, + 308 + ], + "type": "text", + "content": "Jinlan Fu, See-Kiong Ng, Zhengbao Jiang, and Pengfei Liu. Gptscore: Evaluate as you desire, 2023." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 316, + 504, + 372 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 316, + 504, + 372 + ], + "spans": [ + { + "bbox": [ + 105, + 316, + 504, + 372 + ], + "type": "text", + "content": "Yifu Gao, Yongquan He, Zhigang Kan, Yi Han, Linbo Qiao, and Dongsheng Li. Learning joint structural and temporal contextualized knowledge embeddings for temporal knowledge graph completion. In Findings of the Association for Computational Linguistics: ACL 2023, pp. 417-430, Toronto, Canada, July 2023. Association for Computational Linguistics. URL https://aclanthology.org/2023-findings-acl.28." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 379, + 505, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 379, + 505, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 379, + 505, + 732 + ], + "type": "text", + "content": "Gemini-Team, Rohan Anil, Sebastian Borgeaud, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M. Dai, Anja Hauth, Katie Millican, David Silver, Melvin Johnson, Ioannis Antonoglou, Julian Schrittwieser, Amelia Glaese, Jilin Chen, Emily Pitler, Timothy Lillicrap, Angeliki Lazaridou, Orhan Firat, James Molloy, Michael Isard, Paul R. Barham, Tom Hennigan, Benjamin Lee, Fabio Viola, Malcolm Reynolds, Yuanzhong Xu, Ryan Doherty, Eli Collins, Clemens Meyer, Eliza Rutherford, Erica Moreira, Kareem Ayoub, Megha Goel, Jack Krawczyk, Cosmo Du, Ed Chi, Heng-Tze Cheng, Eric Ni, Purvi Shah, Patrick Kane, Betty Chan, Manaal Faruqui, Aliaksei Severyn, Hanzhao Lin, YaGuang Li, Yong Cheng, Abe Ittycheriah, Mahdis Mahdieh, Mia Chen, Pei Sun, Dustin Tran, Sumit Bagri, Balaji Lakshminarayanan, Jeremiah Liu, Andras Orban, Fabian Gura, Hao Zhou, Xinying Song, Aurelien Boffy, Harish Ganapathy, Steven Zheng, HyunJeong Choe, Agoston Weisz, Tao Zhu, Yifeng Lu, Siddharth Gopal, Jarrod Kahn, Maciej Kula, Jeff Pitman, Rushin Shah, Emanuel Taropa, Majd Al Merey, Martin Baeuml, Zhifeng Chen, Laurent El Shafey, Yujing Zhang, Olcan Sercinoglu, George Tucker, Enrique Piqueras, Maxim Krikun, Iain Barr, Nikolay Savinov, Ivo Danihelka, Becca Roelofs, Anaïs White, Anders Andreassen, Tamara von Glehn, Lakshman Yagati, Mehran Kazemi, Lucas Gonzalez, Misha Khalman, Jakub Sygnowski, Alexandre Frechette, Charlotte Smith, Laura Culp, Lev Proleev, Yi Luan, Xi Chen, James Lottes, Nathan Schucher, Federico Lebron, Alban Rrustemi, Natalie Clay, Phil Crone, Tomas Kocisky, Jeffrey Zhao, Bartek Perz, Dian Yu, Heidi Howard, Adam Bloniarz, Jack W. 
Rae, Han Lu, Laurent Sifre, Marcello Maggioni, Fred Alcober, Dan Garrette, Megan Barnes, Shantanu Thakoor, Jacob Austin, Gabriel Barth-Maron, William Wong,Rishabh Joshi,Rahma Chaabouni Deeni Fatiha Arun Ahuja,Gaurav Singh Tomar Evan Senter,Martin Chadwick,Ilya Kornakov,Nithya Attaluri Inaki Iturrate,Ruibo Liu,Yunxuan Li Sarah Cogan Jeremy Chen Chao Jia Chenjie Gu Qiao Zhang Jordan Grimstad Ale Jakse Hartman Xavier Garcia Thanumalayan Sankaranarayana PillaiJacob Devlin Michael LaskinDiego de Las Casas,Dasha ValterConnie TaoLorenzo BlancoAdria Puigdomenech BadiaDavid Reitter Mianna Chen Jenny Brennan Clara Rivera,Sergey BrinShariq Iqbal,Gabriela Surita Jane Labanowski Abhi Rao Stephanie Winkler Emilio Parisotto Yiming Gu Kate Olszewka Ravi Addanki Antoine Miech Annie Louis Denis Teplyashin Geoff Brown Elliot Catt Jan BalaguerJackie Xiang,Pidong Wang,Zoe Ashwood,Anton BriukhovAlbert WebsonSanjay GanapathySmit Sanghavi Ajay Kannan Ming-Wei ChangAxel StjerngrenJosip DjolongaYuting SunAnkur Bapna Matthew Aitchison Pedram Pejman Henryk Michalewski Tianhe Yu Cindy Wang,Juliette Love Junwhan Ahn Dawn Bloxwich,Kehang Han Peter Humphreys Thibault" + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 116, + 82, + 504, + 729 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 82, + 504, + 729 + ], + "spans": [ + { + "bbox": [ + 116, + 82, + 504, + 729 + ], + "type": "text", + "content": "Sellam, James Bradbury, Varun Godbole, Sina Samangooei, Bogdan Damoc, Alex Kaskasoli, Sebastien M. R. 
Arnold, Vijay Vasudevan, Shubham Agrawal, Jason Riesa, Dmitry Lepikhin, Richard Tanburn, Srivatsan Srinivasan, Hyeontaek Lim, Sarah Hodgkinson, Pranav Shyam, Johan Ferret, Steven Hand, Ankush Garg, Tom Le Paine, Jian Li, Yujia Li, Minh Giang, Alexander Neitz, Zaheer Abbas, Sarah York, Machel Reid, Elizabeth Cole, Aakanksha Chowdhery, Dipanian Das, Dominika Rogozinska, Vitaliy Nikolaev, Pablo Spechmann, Zachary Nado, Lukas Zilka, Flavien Prost, Luheng He, Marianne Monteiro, Gaurav Mishra, Chris Welty, Josh Newlan, Dawei Jia, Miltiadis Allamanis, Clara Huiyi Hu, Raoul de Liedekerke, Justin Gilmer, Carl Saroufim, Shruti Rijhwani, Shaobo Hou, Disha Shrivastava, Anirudh Baddepudi, Alex Goldin, Adnan Ozturel, Albin Cassirer, Yunhan Xu, Daniel Sohn, Devendra Sachan, Reinald Kim Amplayo, Craig Swanson, Dessie Petrova, Shashi Narayan, Arthur Guez, Siddhartha Brahma, Jessica Landon, Miteyan Patel, Ruizhe Zhao, Kevin Villela, Luyu Wang, Wenhao Jia, Matthew Rahtz, Mai Giménez, Legg Yeung, James Keeling, Petko Georgiev, Diana Mincu, Boxi Wu, Salem Haykal, Rachel Saputro, Kiran Vodrahalli, James Qin, Zeynep Cankara, Abhanshu Sharma, Nick Fernando, Will Hawkins, Behnam Neyshabur, Solomon Kim, Adrian Hutter, Priyanka Agrawal, Alex Castro-Ros, George van den Driessche, Tao Wang, Fan Yang, Shuo yiin Chang, Paul Komarek, Ross McIlroy, Mario Lučić Guodong Zhang Wael Farhan Michael Sharman Paul Natsev Paul Michel, Yamini Bansal Siyuan Qiao Kris Cao Siamak Shakeri Christina Butterfield Justin ChungPaul Kishan Rubenstein Shivani Agrawal Arthur MenschKedar Soparkar Karel Lenc Timothy ChungAedan PopeLoren MaggioreJackie KayPriya JhakraShibo WangJoshua MaynezMary Phuong Taylor Tobin Andrea Tacchetti Maja TrebaczKevin RobinsonYash Katariya Sebastian Riedel Paige Bailey Kefan Xiao Nimesh Ghelani Lora Aroyo Ambrose Slone Neil Houlsby Xuehan Xiong Zhen Yang Elena Gribovskaya Jonas Adler Mateo Wirth Lisa Lee Music Li Thais Kagohara Jay Pavagadhi Sophie Bridgers Anna Bortsova Sanjay Ghemawat,Zafarali Ahmed Tianqi LiuRichard PowellVijay BolinaMariko InumaPolina ZablotskaiaJames Besley,Da-Woon ChungTimothy Dozat Ramona ComanescuXiance Si Jeremy Greer Guolong Su Martin Polacek Raphael Lopez Kaufman Simon Tokumine Hexiang HuElena Buchatskaya Yingjie Miao Mohamed Elhawaty Aditya SiddhantNenad Tomasev Jinwei XingChristina Greer Helen Miller Shereen Ashraf Aurko RoyZizhao ZhangAda Ma Angelos Filos Milos Besta Rory Blevins Ted Klimenko Chih-Kuan Yeh Soravit Changpinyo Jiaq MuOscar ChangMantas PajarskasCarrie Muir Vered Cohen Charline Le Lan Krishna Haridasan Amit Marathe Steven Hansen Sholto Douglas Rajkumar Samuel Mingqiu WangSophia AustinChang LanJiepu JiangJustin Chiu Jaime Alonso Lorenzo Lars Lowe Sjösund Sebastien Cevey,Zach Gleicher Thi Avrahami Anudhyan BoralHansa Srinivasan Vittorio Selo Rhys May Konstantinos Aisos ,Leonard HussenotLivio Baldini Soares Kate Baumli Michael B.ChangAdria RecasensBen CaineAlexander PritzelFilip PaveticFabio Pardo Anita Gergely Justin FryeVinay RamaseshDan Horgan Kartikeya Badola Nora Kassner Subhrajit Roy,Ethan DyerVictor Campos Campos Alex Tomala Yunhao TangDalia El Badawy Elspeth White Basil Mustafa Oran LangAbhishek JindalSharad Vikram Zhitao GongSergi Caelles Ross Hemsley Gregory Thornton Fangxiaoyu Feng Wojciech Stokowiec Ce ZhengPhoebe Thacker,Cagliar Unlu Zhishuai Zhang Mohammad SalehJames Svensson Max BileschiPiyush PatilAnkesh Anand Roman RingKaterina TsihlasArpi Vezer Marco Selvi Toby Shevlane,Mikel RodriguezTom KwiatkowskiSamira Daruki Keran RongAllan Dafoe Nicholas FitzGerald 
Keren Gu-Lemberg Mina Khan Lisa Anne Hendricks Marie Pelllat,Vladimir FeinbergJames Cobon-KerrTara Sainath Maribeth Rauh Sayed Hadi Hashemi Richard Ives Yana Hasson Eric Noland Yuan Cao Nathan Byrd Le Hou Qingze Wang Thibault Sottiaux Michela Paganini Jean-Baptiste Lespiau Alexandre Moufarek Samer Hassan Kaushik Shivakumar Joost van AmersfoortAmol Mandhane Pratik Joshi Anirudh Goyal Matthew TungAndrew BrockHannah SheahanVedant MisraCheng LiNemanja Rakicevic Mostafa Deghhani Fangyu Liu Sid Mittal Junhyuk Oh Seb Noury Eren Sezener,Fantine Huot Matthew Lamm Nicola De Cao Charlie Chen Sidharth Mudgal Romina Stella Kevin Brooks Gautam Vasudevan Chenxi Liu Mainak Chain Nivedita Melinkeri Aaron Cohen Venus Wang Kristie Seymour,Sergey Zubkov,Rahul Goel Summer Yue,Sai Krishnakumaran,Brian Albert Nate Hurley Motoki Sano Anhad MohananeyJonah Joughin Egor Filonov Tomasz Kepa Yomna Eldawy Jiawern Lim Rahul Rishi Shirin Badiezadegan Taylor Bos Jerry ChangSanil Jain Sri Gayatri Sundara Padmanabhan Subha Puttagunta Kalpesh Krishna Leslie Baker Norbert Kalb,Vamsi Bedapudi Adam Kurzrok Shuntong Lei Anthony Yu Oren Litvin Xiang Zhou Zhichun WuSam SobellAndrea SicilianoAlan Papir Robby NealeJonas Bragagnolo Tej Toor Tina ChenValentin AnklinFeiran WangRichie FengMilad Gholami Kevin LingLijuan" + } + ] + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 292, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 292, + 37 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 292, + 37 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 301, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 301, + 751, + 310, + 760 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 115, + 82, + 504, + 731 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 82, + 504, + 731 + ], + "spans": [ + { + "bbox": [ + 115, + 82, + 504, + 731 + ], + "type": "text", + "content": "Liu, Jules Walter, Hamid Moghaddam, Arun Kishore, Jakub Adamek, Tyler Mercado, Jonathan Mallinson, Siddhinita Wandekar, Stephen Cagle, Eran Ofek, Guillermo Garrido, Clemens Lombriser, Maksim Mukha, Botu Sun, Hafeezul Rahman Mohammad, Josip Matak, Yadi Qian, Vikas Peswani, Pawe Janus, Quan Yuan, Leif Schelin, Oana David, Ankur Garg, Yifan He, Oleksii Duzhyi, Anton Algmyr, Timothee Lottaz, Qi Li, Vikas Yadav, Luyao Xu, Alex Chinien, Rakesh Shivanna, Aleksandr Chuklin, Josie Li, Carrie Spadine, Travis Wolfe, Kareem Mohamed, Subhabrata Das, Zihang Dai, Kyle He, Daniel von Dincklage, Shyam Upadhyay, Akanksha Maurya, Luyan Chi, Sebastian Krause, Khalid Salama, Pam G Rabinovitch, Pavan Kumar Reddy M, Aarush Selvan, Mikhail Dektiarev, Golnaz Ghiasi, Erdem Guven, Himanshu Gupta, Boyi Liu, Deepak Sharma, Idan Heimlich Shtacher, Shachi Paul, Oscar Akerlund, François-Xavier Aubet, Terry Huang, Chen Zhu, Eric Zhu, Elico Teixeira, Matthew Fritze, Francesco Bertolini, LianaEleonora Marinescu, Martin Bolle, Dominik Paulus, Khyatti Gupta, Tejasi Latkar, Max Chang Jason Sanders, Roopa Wilson, Xuewei Wu, Yi-Xuan Tan, Lam Nguyen Thiet, Tulsee Doshi Sid Lall, Swaroop Mishra, Wanming Chen, Thang Luong, Seth Benjamin Jasmine Lee, Ewa Andrejczuk, Dominik Rabiej, Vipul Ranjan, Krzysztof 
Styrc,Pengcheng Yin, Jon Simon, Malcolm Rose Harriott, Mudit Bansal, Alexei Robsky, Geoff Bacon, David Greene, Daniil Mirylenka Chen Zhou, Obaid Sarvana, Abhimanyu Goyal, Samuel Andermatt, Patrick Siegler, Ben Horn Assaf Israel, Francesco Pongetti Chih-Wei \"Louis\" Chen Marco Selvatici Pedro Silva Kathie Wang Jackson Tolins Kelvin Guu Roey YogevXiaochen Cai Alessandro Agostini,Maulik Shah,Hung Nguyen Noah O Donnaile,Sebastien Pereira Linda Friso Adam Stambler Adam KurzrokChenkai Kuang Yan Romanikhin Mark Geller ZJ Yan Kane Jang Cheng-Chun Lee Wojciech FicaEric Malmi Qijun Tan Dan Banica,Daniel Balle Ryan Pham,Yanping Huang Diana Avram Hongzhi Shi Jasot Singh Chris Hidey Niharika Ahuja Pranab SaxenaDan Dooley Srividya Pranavi PotharajuEileen ONeill AnandGokulchandranRyan FoleyKai Zhao Mike DusenberryYuan Liu Pulkit Mehta Raga Kotikalapudi Chalance Safranek-Shrader Andrew Goodman Joshua Kessinger,Eran Globen Prateek Kolhar Chris Gorgolewski Ali Ibrahim Yang SongAli Eichenbaum Thomas Brovelli Sahitya Potluri Preethi LahotiCip Baetu Ali Ghorbani Charles ChenAndy CrawfordShalini PalMukund Sridhar Petru GuritaAsier Mujika Igor Petrovski Pierre-Louis CedozChenmei Li Shiyuan Chen Niccolo Dal Santo Siddharth Goyal Jitesh Punjabi Karthik Kappaganthu Chester KwakPallavi LV Sarmishta Velury Himadri Choudhury Jamie Hall Premal Shah Ricardo Figueira Matt Thomas Minjie Lu Ting Zhou Chintu Kumar Thomas Jurdi Sharat Chikkerur Yenai Ma Adams Yu Soo KwakVictor AhdelSujeevan RajayogamTravis ChomaFei Liu Aditya Barua Colin Ji,Ji Ho Park Vincent HellendoornAlex Bailey Taylan Bilal Huanjie Zhou Mehrdad Khatir Charles Sutton Wojciech Rzadkowski Fiona Macintosh Konstantin Shagin Paul Medina Chen Liang Jinjing Zhou Pararth Shah Yingying Bi Attila Dankovics Shipra Banga Sabine Lehmann Marissa Bredesen Zifan Lin John Eric Hoffmann Jonathan Lai Raynald Chung Kai Yang Nihal Balani Arthur Brazinskas Andrei Sozanschi Matthew Hayes Hector Fernandez Alcalde Peter Makarov Will Chen Antonio Stella Liselotte Snijders Michael Mandl Ante Karrman Pawel Nowak Xinyi Wu Alex Dyck Krishnan Vaidyanathan Raghavender R Jessica Mallet Mitch Rudominer Eric JohnstonSushil Mittal Akhil Udathu Janara Christensen,Vishal Verma,Zach Irving Andreas Santucci Gamaleldin Elsayed Elnaz Davoodi Marin Georgiev Ian Tenney Nan Hua Geoffrey Cideron Edouard Leurent Mahmoud Alnahlawi Ionut Georgescu Nan Wei Ivy ZhengDylan Scandinaro Heinrich Jiang Jasper Snoke Mukund Sundararajan Xuezhi WangZack Ontiveros Itay Karo Jeremy Cole Vinu Rajashekhar Lara Tumeh Eyal Ben-David Rishub Jain Jonathan Uesato Romina Datta Oskar Bunyan Shimu Wu John Zhang Piotr Stanczyk Ye Zhang David Steiner Subhajit Naskar Michael Azzam Matthew Johnson Adam Paszke Chung-Cheng Chiu Jaume Sanchez Elias Afroz Mohiuddin Faizan Muhammad Jin Miao Andrew Lee Nino Vieillard Jane Park Jiageng Zhang Jeff Stanway Drew Garmon Abhijit Karmarkar Zhe Dong Jong LeeAviral Kumar Luowei Zhou Jonathan Evens William Isaac Geoffrey Irving Edward Loper Michael Fink Isha Arkatkar Nanxin Chen Izhak Shafran Ivan Petrychenko Zhe Chen Johnson Jia Anselm Levskaya Zhenkai Zhu Peter Grabowski Yu Mao Alberto Magni Kaisheng Yao Javier Snader,Norman Casagrande Evan PalmerPaul SuganthanAlfonso Castano Irene Giannoumis Wooyeol Kim Mikolaj Rybinski Ashwin Sreevatsa Jennifer Prendki David Soergel Adrian Goedeckemeyer Willi Gierke Mohsen Jafari Meenu Gaba Jeremy Wiesner Diana Gage Wright Yawen Wei Harsha Vashisht Yana Kulizhskaya Jay Hoover Maigo Le Lu Li Chimezie Iwuanyanwu Lu Liu Kevin Ramirez Andrey Khorlin Albert Cui 
Tian LIN Marcus Wu Ricardo Aguilar Keith Pallo Abhishek Chakladar Ginger Perng Elena Allica Abellan Mingyang Zhang Ishita Dasgupta Nate Kushman Ivo Penchev Alena Repina Xihui Wu Tom" + } + ] + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 37 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 37 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 301, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 301, + 751, + 310, + 760 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 116, + 82, + 504, + 729 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 82, + 504, + 729 + ], + "spans": [ + { + "bbox": [ + 116, + 82, + 504, + 729 + ], + "type": "text", + "content": "van der Weide, Priya Ponnapalli, Caroline Kaplan, Jiri Simsa, Shuangfeng Li, Olivier Doussé, Fan Yang, Jeff Piper, Nathan Ie, Rama Pasumarthi, Nathan Lintz, Anitha Vijayakumar, Daniel Andor, Pedro Valenzuela, Minnie Lui, Cosmin Paduraru, Daiyi Peng, Katherine Lee, Shuyuan Zhang, Somer Greene, Duc Dung Nguyen, Paula Kurylowicz, Cassidy Hardin, Lucas Dixon, Lili Janzer, Kiam Choo, Ziqiang Feng, Biao Zhang, Achintya Singhal, Dayou Du, Dan McKinnon, Natasha Antropova, Tolga Bolukbasi, Orgad Keller, David Reid, Daniel Finchelstein, Maria Abi Raad, Remi Crocker, Peter Hawkins, Robert Dadashi, Colin Gaffney, Ken Franko, Anna Bulanova, Rémi Leblond, Shirley Chung, Harry Askham, Luis C. Cobo, Kelvin Xu, Felix Fischer, Jun Xu, Christina Sorokin, Chris Alberti, Chu-Cheng Lin, Colin Evans, Alek Dimitriev, Hannah Forbes, Dylan Banarse, Zora Tung, Mark Omernick, Colton Bishop, Rachel Sterneck, Rohan Jain, Jiawei Xia, Ehsan Amid, Francesco Piccinno, Xingyu Wang, Praseem Banzal, Daniel J. 
Mankowitz, Alex Polozov, Victoria Krakovna, Sasha Brown, Mohammad Hossein Bateni, Dennis Duan, Vlad Firoiu, Meghana Thotakuri, Tom Natan, Matthieu Geist, Ser tan Girgin, Hui Li, Jiayu Ye, Ofir Roval, Reiko Tojo, Michael Kwong, James Lee-Thorp, Christopher Yew, Danila Sinopalnikov, Sabela Ramos, John Mellor, Abhishek Sharma, Kathy Wu, David Miller, Nicolas Sonnerat, Denis Vnukov, Rory Greig, Jennifer Beattie, Emily Caveness, Libin Bai, Julian Eisenschlos, Alex Korchemniy, Tomy Tsai, Mimi Jasarevic, Weize Kong Phuong Dao Zeyu Zheng Frederick Liu Fan Yang Rui Zhu Tian Huey Teh Jason Sanmiya Evgeny Gladchenko Nejc Trdin Daniel Toyama Evan Rosen,Sasan Tavakkol Linting Xue Chen Elkind Oliver Woodman John Carpenter George Papamakarios Rupert Kemp Sushant Kafle Tanya Grunina Rishika Sinha Alice Talbert Diane Wu,Denese Owusu-Afriyie Cosmo DuChloe Thornton Jordi Pont-Tuset Pradyumna Narayana Jing Li Saaber Fatehi John WietingOmar Ajmeri Benigno Uria Yeongil Ko Laura Knight Amelie Heliou Ning Niu Shane Gu Chenxi Pang Yeqing Li,Nir Levine,Ariel StolovichRebeca Santamaria-FernandezSonam Goenk,Wenny Yustalim Robin Strudel Ali Elqursh Charlie Deck Hyo LeeZonglin Li Kyle Levin Raphael Hoffmann Dan Holtmann-Rice Olivier Bachem Sho Arora Christy Koh Soheil Hassas Yeganeh Siim Poder Mukarram Tariq Yanhua Sun Lucian Ionita Mojtaba Seyedhosseini Pouya Tafti Zhiyu Liu Anmol Gulati Jasmine Liu Xinyu Ye Bart Chrzaszcz Lily Wang Nikhil Sethi Tianrun Li Ben Brown Shreya Singh Wei Fan Aaron Parisi Joe Stanton Vinod Koverkathu Christopher A. Choquette-Choo Yunjie Li TJ Lu Abe Ittycheriah Prakash Shroff Mani Varadarajan Sanaz Bahargam Rob Willoughby David Gaddy Guillaume Desjardins Marco Cornero Brona Robenegek,Bhavishya Mittal Ben Albrecht Ashish Shenoy Fedor Moiseev Henrik Jacobsson Alireza Ghaffarkhah,Morgane Riviere Alanna Walton Clément Crepy Alicia Parrish Zongwei ZhouClement Farabet Carey Radebaugh Praveen Srinivasan Claudia van der Salm Andreas Fidjeland Salvatore Scellato Eri Latorre-Chimoto Hanna Klimczak-Plucinska David Bridson Dario de Cesare Tom Hudson Piermaria Mendolicchio Lexi Walker Alex Morris Matthew Mauger Alexey Guseynov Alison Reid Seth Odoom Lucia Loher Victor Cotruta Madhavi Yenugula Dominik Grewe Anastasia Petrushkina Tom Duerig Antonio Sanchez Steve YadlowskyAmy Shen Amir Globerson,Lynette Webb Sahil Dua Dong Li Surya BhupatirajuDan Hurt Haroon Qureshi Ananth Agarwal Tomer Shani Matan Eyal Anuj Khare Shreyas Rammohan Belle Lei Wang Chetan Tekur Mihir Sanjay Kale Jinliang Wei Ruoxin Sang Brennan Saeta Tyler Liechty Yi Sun Yao Zhao Stephan Lee Pandu Nayak Doug Fritz Manish Reddy Vuyyyuru John Aslanides Nidhi Vyas Martin Wicke Xiao Ma Evgenii Eltsychev Nina Martin Hardie Cate James Manyika Keyvan Amiri Yelin Kim Xi Xiong Kai Kang Florian Luisier Nilesh Tripuraneni David Madras Mandy Guo Austin Waters Oliver Wang Joshua Ainslie Jason Baldridge Han Zhang Garima Pruthi Jakob Bauer Feng Yang Riham Mansour Jason Gelman Yang XuGeorge Polovets Ji Liu Honglong CaiWarren ChenXiangHai Sheng Emily Xue Sherjil Ozair Christof Angermueller Xiaowei Li Anoop Sinha Weiren Wang Julia Wiesinger Emmanueloul Koukoumidis Yuan Tian Anand Iyer Madhu Gurumurthy Mark Goldenson Parashar Shah MK Blake Hongkun Yu Anthony Urbanowicz Jennimaria Palomaki Chrisantha Fernando Ken Durden Harsh Mehta Nikola Momchev Elahe Rahimtoroghi Maria Georgaki Amit Raul Sebastian Ruder Morgan Redshaw Jinhyuk Lee Denny Zhou Komal Jalan Dinghua Li Blake Hechtman Parker Schuh Milad Nasr Kieran Milan Vladimir Mikulik Juliana Franco Tim Green Nam 
Nguyen Joe Kelley Aroma Mahendru Andrea Hu Joshua Howland Ben Vargas Jeffrey Hui Kshitij Bansal,Vikram Rao Rakesh Ghiya Emma Wang Ke Ye Jean Michel Sarr Melanie Moranski Preston Madeleine Elish Steve Li Aakash Kaku Jigar Gupta Ice Pasupat Da-Cheng Juan Milan Someswar Tejvi M., Xinyun Chen Aida Amini Alex Fabrikant Eric Chu Xuanyi Dong Amrutta Muthal Senaka Buthpitiya Sarthak Jauhari Nan Hua Urvashi Khandelwal Ayal Hitron Jie Ren Larissa Rinaldi Shahar Drath Avigail Dabush" + } + ] + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 37 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 37 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 301, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 301, + 751, + 310, + 760 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 504, + 732 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 115, + 82, + 504, + 247 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 82, + 504, + 247 + ], + "spans": [ + { + "bbox": [ + 115, + 82, + 504, + 247 + ], + "type": "text", + "content": "Nan-Jiang Jiang, Harshal Godhia, Uli Sachs, Anthony Chen, Yicheng Fan, Hagai Taitelbaum, Hila Noga, Zhuyun Dai, James Wang, Chen Liang, Jenny Hamer, Chun-Sung Ferng, Chenel Elkind, Aviel Atias, Paulina Lee, Vít Listík, Mathias Carlen, Jan van de Kerkhof, Marcin Pikus, Krunoslav Zaher, Paul Müller, Sasha Zykova, Richard Stefanec, Vitaly Gatsko, Christoph Hirnschall, Ashwin Sethi, Xingyu Federico Xu, Chetan Ahuja, Beth Tsai, Anca Stefanoiu, Bo Feng, Keshav Dhandhania, Manish Katyal, Akshay Gupta, Atharva Parulekar, Divya Pitta, Jing Zhao, Vivaan Bhatia, Yashodha Bhavnani, Omar Alhadlaq, Xiaolin Li, Peter Danenberg, Dennis Tu, Alex Pine, Vera Filippova, Abhipso Ghosh, Ben Limonchik, Bhargava Urala, Chaitanya Krishna Lanka, Derik Clive, Yi Sun, Edward Li, Hao Wu, Kevin Hongtongsak, Ianna Li, Kalind Thakkar, Kanyush Omarov, Kushal Majmundar, Michael Alverson, Michael Kucharski, Mohak Patel, Mudit Jain, Maksim Zabelin, Paolo Pelagatti, Rohan Kohli, Saurabh Kumar, Joseph Kim, Swetha Sankar, Vineet Shah, Lakshmi Ramachandruni, Xiangkai Zeng, Ben Bariach, Laura Weidinger, Amar Subramanya, Sissie Hsiao, Demis Hassabis, Koray Kavukcuoglu, Adam Sadovsky, Quoc Le, Trevor Strohman, Yonghui Wu, Slav Petrov, Jeffrey Dean, and Oriol Vinyals. Gemini: A family of highly capable multimodal models, 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 255, + 504, + 289 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 255, + 504, + 289 + ], + "spans": [ + { + "bbox": [ + 105, + 255, + 504, + 289 + ], + "type": "text", + "content": "Xavier Glorot, Antoine Bordes, and Yoshua Bengio. Deep sparse rectifier neural networks. In Proceedings of the fourteenth international conference on artificial intelligence and statistics, pp. 315-323, 2011." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 296, + 504, + 331 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 296, + 504, + 331 + ], + "spans": [ + { + "bbox": [ + 105, + 296, + 504, + 331 + ], + "type": "text", + "content": "Dirk Groeneveld, Iz Beltagy, Pete Walsh, Akshita Bhagia, Rodney Kinney, Oyvind Tafjord, Ananya Harsh Jha, Hamish Ivison, Ian Magnusson, Yizhong Wang, et al. Olmo: Accelerating the science of language models. arXiv preprint arXiv:2402.00838, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 337, + 504, + 372 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 337, + 504, + 372 + ], + "spans": [ + { + "bbox": [ + 105, + 337, + 504, + 372 + ], + "type": "text", + "content": "Suriya Gunasekar, Yi Zhang, Jyoti Aneja, Caio Cesar Teodoro Mendes, Allie Del Giorno, Sivakanth Gopi, Mojan Javaheripi, Piero Kauffmann, Gustavo de Rosa, Olli Saarikivi, et al. Textbooks are all you need. arXiv preprint arXiv:2306.11644, 2023." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 378, + 504, + 423 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 378, + 504, + 423 + ], + "spans": [ + { + "bbox": [ + 105, + 378, + 504, + 423 + ], + "type": "text", + "content": "Srinivasan Iyer, Xi Victoria Lin, Ramakanth Pasunuru, Todor Mihaylov, Daniel Simig, Ping Yu, Kurt Shuster, Tianlu Wang, Qing Liu, Punit Singh Koura, et al. Opt-iml: Scaling language model instruction meta learning through the lens of generalization. arXiv preprint arXiv:2212.12017, 2022." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 431, + 504, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 431, + 504, + 487 + ], + "spans": [ + { + "bbox": [ + 105, + 431, + 504, + 487 + ], + "type": "text", + "content": "Mandar Joshi, Eunsol Choi, Daniel Weld, and Luke Zettlemoyer. TriviaQA: A large scale distantly supervised challenge dataset for reading comprehension. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 1601-1611, Vancouver, Canada, July 2017. Association for Computational Linguistics. doi: 10.18653/v1/P17-1147. URL https://aclanthology.org/P17-1147." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 495, + 504, + 660 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 495, + 504, + 660 + ], + "spans": [ + { + "bbox": [ + 105, + 495, + 504, + 660 + ], + "type": "text", + "content": "Norman P. Jouppi, Cliff Young, Nishant Patil, David A. Patterson, Gaurav Agrawal, Raminder Bajwa, Sarah Bates, Suresh Bhatia, Nan Boden, Al Borchers, Rick Boyle, Pierre-luc Cantin, Clifford Chao, Chris Clark, Jeremy Coriell, Mike Daley, Matt Dau, Jeffrey Dean, Ben Gelb, Tara Vazir Ghaemmaghami, Rajendra Gottipati, William Gulland, Robert Hagmann, C. 
Richard Ho, Doug Hogberg, John Hu, Robert Hundt, Dan Hurt, Julian Ibarz, Aaron Jaffey, Alek Jaworski, Alexander Kaplan, Harshit Khaitan, Daniel Killebrew, Andy Koch, Naveen Kumar, Steve Lacy, James Laudon, James Law, Diemthu Le, Chris Leary, Zhuyuan Liu, Kyle Lucke, Alan Lundin, Gordon MacKean, Adriana Maggiore, Maire Mahony, Kieran Miller, Rahul Nagarajan, Ravi Narayanaswami, Ray Ni, Kathy Nix, Thomas Norrie, Mark Omernick, Narayana Penukonda, Andy Phelps, Jonathan Ross, Matt Ross, Amir Salek, Emad Samadiani, Chris Severn, Gregory Sizikov, Matthew Snelham, Jed Souter, Dan Steinberg, Andy Swing, Mercedes Tan, Gregory Thorson, Bo Tian, Horia Toma, Erick Tuttle, Vijay Vasudevan, Richard Walter, Walter Wang, Eric Wilcox, and Doe Hyun Yoon. In-Datacenter Performance Analysis of a Tensor Processing Unit. In Proceedings of the 44th Annual International Symposium on Computer Architecture (ISCA), 2017." } ] } ], "index": 7 }, { "bbox": [ 105, 667, 504, 691 ], "type": "ref_text", "angle": 0, "lines": [ { "bbox": [ 105, 667, 504, 691 ], "spans": [ { "bbox": [ 105, 667, 504, 691 ], "type": "text", "content": "Najoung Kim, Sebastian Schuster, and Shubham Toshniwal. Code pretraining improves entity tracking abilities of language models. arXiv preprint arXiv:2405.21068, 2024." } ] } ], "index": 8 }, { "bbox": [ 105, 698, 504, 732 ], "type": "ref_text", "angle": 0, "lines": [ { "bbox": [ 105, 698, 504, 732 ], "spans": [ { "bbox": [ 105, 698, 504, 732 ], "type": "text", "content": "Denis Kocetkov, Raymond Li, Loubna Ben Allal, Jia Li, Chenghao Mou, Carlos Muñoz Ferrandis, Yacine Jernite, Margaret Mitchell, Sean Hughes, Thomas Wolf, Dzmitry Bahdanau, Leandro von Werra, and Harm de Vries. The stack: 3 tb of permissively licensed source code. Preprint, 2022." } ] } ], "index": 9 } ], "sub_type": "ref_text" } ], "discarded_blocks": [ { "bbox": [ 105, 26, 293, 38 ], "type": "header", "angle": 0, "lines": [ { "bbox": [ 105, 26, 293, 38 ], "spans": [ { "bbox": [ 105, 26, 293, 38 ], "type": "text", "content": "Published as a conference paper at ICLR 2025" } ] } ], "index": 0 }, { "bbox": [ 300, 751, 311, 760 ], "type": "page_number", "angle": 0, "lines": [ { "bbox": [ 300, 751, 311, 760 ], "spans": [ { "bbox": [ 300, 751, 311, 760 ], "type": "text", "content": "18" } ] } ], "index": 11 } ], "page_size": [ 612, 792 ], "page_idx": 17 }, { "para_blocks": [ { "bbox": [ 105, 81, 506, 732 ], "type": "list", "angle": 0, "index": 16, "blocks": [ { "bbox": [ 107, 81, 506, 150 ], "type": "ref_text", "angle": 0, "lines": [ { "bbox": [ 107, 81, 506, 150 ], "spans": [ { "bbox": [ 107, 81, 506, 150 ], "type": "text", "content": "Tom Kwiatkowski, Jennimaria Palomaki, Olivia Redfield, Michael Collins, Ankur Parikh, Chris Alberti, Danielle Epstein, Illia Polosukhin, Jacob Devlin, Kenton Lee, Kristina Toutanova, Llion Jones, Matthew Kelcey, Ming-Wei Chang, Andrew M. Dai, Jakob Uszkoreit, Quoc Le, and Slav Petrov. Natural questions: A benchmark for question answering research. Transactions of the Association for Computational Linguistics, 7:453-466, 2019. doi: 10.1162/tacl_a_00276. URL https://doi.org/10.1162/tacl_a_00276."
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 155, + 506, + 212 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 155, + 506, + 212 + ], + "spans": [ + { + "bbox": [ + 105, + 155, + 506, + 212 + ], + "type": "text", + "content": "Kenton Lee, Ming-Wei Chang, and Kristina Toutanova. Latent retrieval for weakly supervised open domain question answering. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pp. 6086-6096, Florence, Italy, July 2019. Association for Computational Linguistics. doi: 10.18653/v1/P19-1612. URL https://www.aclweb.org/anthology/P19-1612." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 217, + 504, + 252 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 217, + 504, + 252 + ], + "spans": [ + { + "bbox": [ + 105, + 217, + 504, + 252 + ], + "type": "text", + "content": "Raymond Li, Loubna Ben Allal, Yangtian Zi, Niklas Muennighoff, Denis Kocetkov, Chenghao Mou, Marc Marone, Christopher Akiki, Jia Li, Jenny Chim, et al. Starcoder: may the source be with you! arXiv preprint arXiv:2305.06161, 2023a." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 256, + 504, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 256, + 504, + 281 + ], + "spans": [ + { + "bbox": [ + 106, + 256, + 504, + 281 + ], + "type": "text", + "content": "Yuanzhi Li, Sebastien Bubeck, Ronen Eldan, Allie Del Giorno, Suriya Gunasekar, and Yin Tat Lee. Textbooks are all you need ii: phi-1.5 technical report. arXiv preprint arXiv:2309.05463, 2023b." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 286, + 504, + 320 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 286, + 504, + 320 + ], + "spans": [ + { + "bbox": [ + 105, + 286, + 504, + 320 + ], + "type": "text", + "content": "Percy Liang, Rishi Bommasani, Tony Lee, Dimitris Tsipras, Dilara Soylu, Michihiro Yasunaga, Yian Zhang, Deepak Narayanan, Yuhuai Wu, Ananya Kumar, et al. Holistic evaluation of language models. arXiv preprint arXiv:2211.09110, 2022." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 326, + 504, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 326, + 504, + 350 + ], + "spans": [ + { + "bbox": [ + 105, + 326, + 504, + 350 + ], + "type": "text", + "content": "Yang Liu, Dan Iter, Yichong Xu, Shuohang Wang, Ruochen Xu, and Chenguang Zhu. G-eval: Nlg evaluation using gpt-4 with better human alignment, 2023." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 355, + 504, + 400 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 355, + 504, + 400 + ], + "spans": [ + { + "bbox": [ + 105, + 355, + 504, + 400 + ], + "type": "text", + "content": "Shayne Longpre, Gregory Yauney, Emily Reif, Katherine Lee, Adam Roberts, Barret Zoph, Denny Zhou, Jason Wei, Kevin Robinson, David Mimno, and Daphne Ippolito. A pretrainer's guide to training data: Measuring the effects of data age, domain coverage, quality, & toxicity. arXiv, abs/2305.13169, 2023." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 406, + 503, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 406, + 503, + 430 + ], + "spans": [ + { + "bbox": [ + 105, + 406, + 503, + 430 + ], + "type": "text", + "content": "Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization, 2019. URL https://arxiv.org/abs/1711.05101." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 435, + 504, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 435, + 504, + 460 + ], + "spans": [ + { + "bbox": [ + 106, + 435, + 504, + 460 + ], + "type": "text", + "content": "Anton Lozhkov, Loubna Ben Allal, Leandro von Werra, and Thomas Wolf. Fineweb-edu, May 2024. URL https://huggingface.co/datasets/HuggingFaceFW/fineweb-edu." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 464, + 504, + 488 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 464, + 504, + 488 + ], + "spans": [ + { + "bbox": [ + 105, + 464, + 504, + 488 + ], + "type": "text", + "content": "Yingwei Ma, Yue Liu, Yue Yu, Yuanliang Zhang, Yu Jiang, Changjian Wang, and Shanshan Li. At which training stage does code data help llms reasoning? arXiv preprint arXiv:2309.16298, 2023." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 493, + 504, + 550 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 493, + 504, + 550 + ], + "spans": [ + { + "bbox": [ + 105, + 493, + 504, + 550 + ], + "type": "text", + "content": "Aman Madaan, Dheeraj Rajagopal, Niket Tandon, Yiming Yang, and Antoine Bosselut. Conditional set generation using seq2seq models. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pp. 4874-4896, Abu Dhabi, United Arab Emirates, December 2022a. Association for Computational Linguistics. URL https://aclanthology.org/2022.emnlp-main.324." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 555, + 504, + 579 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 555, + 504, + 579 + ], + "spans": [ + { + "bbox": [ + 105, + 555, + 504, + 579 + ], + "type": "text", + "content": "Aman Madaan, Shuyan Zhou, Uri Alon, Yiming Yang, and Graham Neubig. Language models of code are few-shot commonsense learners. arXiv preprint arXiv:2210.07128, 2022b." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 584, + 504, + 619 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 584, + 504, + 619 + ], + "spans": [ + { + "bbox": [ + 105, + 584, + 504, + 619 + ], + "type": "text", + "content": "Max Marion, Ahmet Üstün, Luiza Pozzobon, Alex Wang, Marzieh Fadaee, and Sara Hooker. When less is more: Investigating data pruning for pretraining llms at scale, 2023. URL https:// arxiv.org/abs/2309.04564." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 625, + 504, + 692 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 625, + 504, + 692 + ], + "spans": [ + { + "bbox": [ + 105, + 625, + 504, + 692 + ], + "type": "text", + "content": "Nasrin Mostafazadeh, Nathanael Chambers, Xiaodong He, Devi Parikh, Dhruv Batra, Lucy Vanderwende, Pushmeet Kohli, and James Allen. A corpus and cloze evaluation for deeper understanding of commonsense stories. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pp. 839-849, San Diego, California, June 2016. Association for Computational Linguistics. doi: 10.18653/v1/N16-1098. URL https://aclanthology.org/N16-1098." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 698, + 504, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 698, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 504, + 732 + ], + "type": "text", + "content": "Niklas Muennighoff, Alexander M Rush, Boaz Barak, Teven Le Scao, Aleksandra Piktus, Nouamane Tazi, Sampo Pyysalo, Thomas Wolf, and Colin Raffel. Scaling data-constrained language models. arXiv preprint arXiv:2305.16264, 2023a." + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 732 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 107, + 81, + 505, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 505, + 117 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 505, + 117 + ], + "type": "text", + "content": "Niklas Muennighoff, Alexander M. Rush, Boaz Barak, Teven Le Scao, Aleksandra Piktus, Nouamane Tazi, Sampo Pyysalo, Thomas Wolf, and Colin Raffel. Scaling data-constrained language models, 2023b. URL https://arxiv.org/abs/2305.16264." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 123, + 506, + 224 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 123, + 506, + 224 + ], + "spans": [ + { + "bbox": [ + 105, + 123, + 506, + 224 + ], + "type": "text", + "content": "Niklas Muennighoff, Thomas Wang, Lintang Sutawika, Adam Roberts, Stella Biderman, Teven Le Scao, M Saiful Bari, Sheng Shen, Zheng Xin Yong, Hailey Schoelkopf, Xiangru Tang, Dragomir Radev, Alham Fikri Aji, Khalid Almubarak, Samuel Albanie, Zaid Alyafeai, Albert Webson, Edward Raff, and Colin Raffel. Crosslingual generalization through multitask finetuning. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 15991-16111, Toronto, Canada, July 2023c. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-long.891. URL https://aclanthology.org/2023.acl-long.891." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 231, + 504, + 275 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 231, + 504, + 275 + ], + "spans": [ + { + "bbox": [ + 105, + 231, + 504, + 275 + ], + "type": "text", + "content": "Long Ouyang, Jeffrey Wu, Xu Jiang, Diogo Almeida, Carroll Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, et al. Training language models to follow instructions with human feedback. Advances in Neural Information Processing Systems, 35: 27730-27744, 2022." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 284, + 504, + 351 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 284, + 504, + 351 + ], + "spans": [ + { + "bbox": [ + 105, + 284, + 504, + 351 + ], + "type": "text", + "content": "Jupinder Parmar, Shrimai Prabhumoye, Joseph Jennings, Mostofa Patwary, Sandeep Subramanian, Dan Su, Chen Zhu, Deepak Narayanan, Aastha Jhunjunwala, Ayush Dattagupta, Vibhu Jawa, Jiwei Liu, Ameya Mahabaleshwarkar, Osvald Nitski, Annika Brundyn, James Maki, Miguel Martinez, Jiaxuan You, John Kamalu, Patrick LeGresley, Denys Fridman, Jared Casper, Ashwath Aithal, Oleksii Kuchaiev, Mohammad Shoeybi, Jonathan Cohen, and Bryan Catanzaro. Nemotron-4 15b technical report, 2024. URL https://arxiv.org/abs/2402.16819." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 358, + 504, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 358, + 504, + 392 + ], + "spans": [ + { + "bbox": [ + 105, + 358, + 504, + 392 + ], + "type": "text", + "content": "Guilherme Penedo, Quentin Malartic, Daniel Hesslow, Ruxandra Cojocaru, Alessandro Cappelli, Hamza Alobeidli, Baptiste Pannier, Ebtesam Almazrouei, and Julien Launay. The refined web dataset for falcon llm: Outperforming curated corpora with web data, and web data only, 2023." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 399, + 504, + 424 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 399, + 504, + 424 + ], + "spans": [ + { + "bbox": [ + 105, + 399, + 504, + 424 + ], + "type": "text", + "content": "Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, Ilya Sutskever, et al. Language models are unsupervised multitask learners. OpenAI blog, 1(8):9, 2019." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 430, + 504, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 430, + 504, + 464 + ], + "spans": [ + { + "bbox": [ + 105, + 430, + 504, + 464 + ], + "type": "text", + "content": "Jack W Rae, Sebastian Borgeaud, Trevor Cai, Katie Millican, Jordan Hoffmann, Francis Song, John Aslanides, Sarah Henderson, Roman Ring, Susannah Young, et al. Scaling language models: Methods, analysis & insights from training gopher. arXiv preprint arXiv:2112.11446, 2021." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 472, + 505, + 650 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 472, + 505, + 650 + ], + "spans": [ + { + "bbox": [ + 105, + 472, + 505, + 650 + ], + "type": "text", + "content": "Jack W. 
Rae, Sebastian Borgeaud, Trevor Cai, Katie Millican, Jordan Hoffmann, Francis Song, John Aslanides, Sarah Henderson, Roman Ring, Susannah Young, Eliza Rutherford, Tom Hennigan, Jacob Menick, Albin Cassirer, Richard Powell, George van den Driessche, Lisa Anne Hendricks, Maribeth Rauh, Po-Sen Huang, Amelia Glaese, Johannes Welbl, Sumanth Dathathri, Saffron Huang, Jonathan Uesato, John Mellor, Irina Higgins, Antonia Creswell, Nat McAleese, Amy Wu, Erich Elsen, Siddhant Jayakumar, Elena Buchatskaya, David Budden, Esme Sutherland, Karen Simonyan, Michela Paganini, Laurent Sifre, Lena Martens, Xiang Lorraine Li, Adhiguna Kuncoro, Aida Nematzadeh, Elena Gribovskaya, Domenic Donato, Angeliki Lazaridou, Arthur Mensch, Jean-Baptiste Lespiau, Maria Tsimpoukelli, Nikolai Grigorev, Doug Fritz, Thibault Sottiaux, Mantas Pajarskas, Toby Pohlen, Zhitao Gong, Daniel Toyama, Cyprien de Masson d'Autume, Yujia Li, Tayfun Terzi, Vladimir Mikulik, Igor Babuschkin, Aidan Clark, Diego de Las Casas, Aurelia Guy, Chris Jones, James Bradbury, Matthew Johnson, Blake Hechtman, Laura Weidinger, Iason Gabriel, William Isaac, Ed Lockhart, Simon Osindero, Laura Rimell, Chris Dyer, Oriol Vinyals, Kareem Ayoub, Jeff Stanway, Lorrayne Bennett, Demis Hassabis, Koray Kavukcuoglu, and Geoffrey Irving. Scaling language models: Methods, analysis & insights from training gopher, 2022. URL https://arxiv.org/abs/2112.11446." } ] } ], "index": 8 }, { "bbox": [ 105, 656, 504, 690 ], "type": "ref_text", "angle": 0, "lines": [ { "bbox": [ 105, 656, 504, 690 ], "spans": [ { "bbox": [ 105, 656, 504, 690 ], "type": "text", "content": "Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J. Liu. Exploring the limits of transfer learning with a unified text-to-text transformer. arXiv e-prints, 2019." } ] } ], "index": 9 }, { "bbox": [ 105, 698, 504, 732 ], "type": "ref_text", "angle": 0, "lines": [ { "bbox": [ 105, 698, 504, 732 ], "spans": [ { "bbox": [ 105, 698, 504, 732 ], "type": "text", "content": "Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J. Liu. Exploring the limits of transfer learning with a unified text-to-text transformer. arXiv e-prints, abs/1910.10683, 2020."
+ } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 732 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 107, + 81, + 506, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 506, + 116 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 506, + 116 + ], + "type": "text", + "content": "Yasaman Razeghi, Hamish Ivison, Sameer Singh, and Yanai Elazar. Backtracking mathematical reasoning of language models to the pretraining data. In The Second Tiny Papers Track at ICLR 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 124, + 505, + 148 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 124, + 505, + 148 + ], + "spans": [ + { + "bbox": [ + 105, + 124, + 505, + 148 + ], + "type": "text", + "content": "Keisuke Sakaguchi, Ronan Le Bras, Chandra Bhagavatula, and Yejin Choi. Winogrande: An adversarial winograd schema challenge at scale, 2019." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 156, + 504, + 178 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 156, + 504, + 178 + ], + "spans": [ + { + "bbox": [ + 106, + 156, + 504, + 178 + ], + "type": "text", + "content": "Maarten Sap, Hannah Rashkin, Derek Chen, Ronan LeBras, and Yejin Choi. Socialiaq: Common-sense reasoning about social interactions. arXiv, abs/1904.09728, 2019." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 186, + 505, + 243 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 186, + 505, + 243 + ], + "spans": [ + { + "bbox": [ + 106, + 186, + 505, + 243 + ], + "type": "text", + "content": "Minjoon Seo, Tom Kwiatkowski, Ankur Parikh, Ali Farhadi, and Hannaneh Hajishirzi. Phrase-indexed question answering: A new challenge for scalable document comprehension. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pp. 559-564, Brussels, Belgium, October-November 2018. Association for Computational Linguistics. doi: 10.18653/v1/D18-1052. URL https://aclanthology.org/D18-1052." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 250, + 504, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 250, + 504, + 285 + ], + "spans": [ + { + "bbox": [ + 105, + 250, + 504, + 285 + ], + "type": "text", + "content": "Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Mingchuan Zhang, YK Li, Yu Wu, and Daya Guo. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 293, + 504, + 315 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 293, + 504, + 315 + ], + "spans": [ + { + "bbox": [ + 105, + 293, + 504, + 315 + ], + "type": "text", + "content": "Noam Shazeer. Glu variants improve transformer, 2020. URL https://arxiv.org/abs/2002.05202." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 323, + 504, + 357 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 323, + 504, + 357 + ], + "spans": [ + { + "bbox": [ + 106, + 323, + 504, + 357 + ], + "type": "text", + "content": "Luísa Shimabucoro, Sebastian Ruder, Julia Kreutzer, Marzieh Fadaee, and Sara Hooker. Llm see, llm do: Guiding data generation to target non-differentiable objectives, 2024. URL https://arxiv.org/abs/2407.01490." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 365, + 505, + 454 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 365, + 505, + 454 + ], + "spans": [ + { + "bbox": [ + 106, + 365, + 505, + 454 + ], + "type": "text", + "content": "Shivalika Singh, Freddie Vargus, Daniel Dsouza, Borje F. Karlsson, Abinaya Mahendiran, Wei-Yin Ko, Herumb Shandilya, Jay Patel, Deividas Mataciunas, Laura OMahony, Mike Zhang, Ramith Hettiarachchi, Joseph Wilson, Marina Machado, Luisa Souza Moura, Dominik Krzeminski, Hakimeh Fadaei, Irem Ergun, Ifeoma Okoh, Aisha Alaagib, Oshan Mudannayake, Zaid Alyafeai, Vu Minh Chien, Sebastian Ruder, Surya Guthikonda, Emad A. Alghamdi, Sebastian Gehrmann, Niklas Muennighoff, Max Bartolo, Julia Kreutzer, Ahmet Üstün, Marzieh Fadaee, and Sara Hooker. Aya dataset: An open-access collection for multilingual instruction tuning. arXiv preprint arXiv:2402.06619, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 463, + 504, + 497 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 463, + 504, + 497 + ], + "spans": [ + { + "bbox": [ + 106, + 463, + 504, + 497 + ], + "type": "text", + "content": "Daria Soboleva, Faisal Al-Khateeb, Robert Myers, Jacob R Steeves, Joel Hestness, and Nolan Dey. SlimPajama: A 627B token cleaned and deduplicated version of RedPajama, 2023. URL https://huggingface.co/datasets/cerebras/SlimPajama-627B." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 504, + 504, + 539 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 504, + 504, + 539 + ], + "spans": [ + { + "bbox": [ + 105, + 504, + 504, + 539 + ], + "type": "text", + "content": "Gemini Team, Rohan Anil, Sebastian Borgeaud, Yonghui Wu, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, et al. Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805, 2023." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 546, + 505, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 546, + 505, + 690 + ], + "spans": [ + { + "bbox": [ + 106, + 546, + 505, + 690 + ], + "type": "text", + "content": "Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, Dan Bikel, Lukas Blecher, Cristian Canton Ferrer, Moya Chen, Guillem Cucurull, David Esiobu, Jude Fernandes, Jeremy Fu, Wenyin Fu, Brian Fuller, Cynthia Gao, Vedanuj Goswami, Naman Goyal, Anthony Hartshorn, Saghar Hosseini, Rui Hou, Hakan Inan, Marcin Kardas, Viktor Kerkez, Madian Khabsa, Isabel Kloumann, Artem Korenev, Punit Singh Koura, Marie-Anne Lachaux, Thibaut Lavril, Jenya Lee, Diana Liskovich, Yinghai Lu, Yuning Mao, Xavier Martinet, Todor Mihaylov, Pushkar Mishra, Igor Molybog, Yixin Nie, Andrew Poulton, Jeremy Reizenstein, Rashi Rungta, Kalyan Saladi, Alan Schelten, Ruan Silva, Eric Michael Smith, Ranjan Subramanian, Xiaqing Ellen Tan, Binh Tang, Ross Taylor, Adina Williams, Jian Xiang Kuan, Puxin Xu, Zheng Yan, Iliyan Zarov, Yuchen Zhang, Angela Fan, Melanie Kambadur, Sharan Narang, Aurelien Rodriguez, Robert Stojnic, Sergey Edunov, and Thomas Scialom. Llama 2: Open foundation and fine-tuned chat models. arXiv, abs/2307.09288, 2023." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 698, + 505, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 698, + 505, + 732 + ], + "spans": [ + { + "bbox": [ + 106, + 698, + 505, + 732 + ], + "type": "text", + "content": "Alex Wang, Yada Pruksachatkun, Nikita Nangia, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R. Bowman. Superglue: A stickier benchmark for general-purpose language understanding systems, 2020. URL https://arxiv.org/abs/1905.00537." + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 505, + 732 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 105, + 82, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 505, + 106 + ], + "type": "text", + "content": "Ben Wang and Aran Komatsuzaki. GPT-J-6B: A 6 Billion Parameter Autoregressive Language Model. https://github.com/kingofflolz/mesh-transformer-jax, May 2021." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 112, + 505, + 168 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 112, + 505, + 168 + ], + "spans": [ + { + "bbox": [ + 105, + 112, + 505, + 168 + ], + "type": "text", + "content": "Yizhong Wang, Yeganeh Kordi, Swaroop Mishra, Alisa Liu, Noah A. 
Smith, Daniel Khashabi, and Hannaneh Hajishirzi. Self-instruct: Aligning language models with self-generated instructions. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 13484-13508, Toronto, Canada, July 2023. Association for Computational Linguistics. URL https://aclanthology.org/2023.acl-long.754." } ] } ], "index": 2 }, { "bbox": [ 105, 176, 504, 209 ], "type": "ref_text", "angle": 0, "lines": [ { "bbox": [ 105, 176, 504, 209 ], "spans": [ { "bbox": [ 105, 176, 504, 209 ], "type": "text", "content": "Johannes Welbl, Nelson F Liu, and Matt Gardner. Crowdsourcing multiple choice science questions. In Proceedings of the 3rd Workshop on Noisy User-generated Text, pp. 94-106, September 2017. doi: 10.18653/v1/W17-4413. URL https://aclanthology.org/W17-4413." } ] } ], "index": 3 }, { "bbox": [ 105, 217, 440, 229 ], "type": "ref_text", "angle": 0, "lines": [ { "bbox": [ 105, 217, 440, 229 ], "spans": [ { "bbox": [ 105, 217, 440, 229 ], "type": "text", "content": "Wikipedia. Wikipedia downloads. URL https://dumps.wikipedia.org." } ] } ], "index": 4 }, { "bbox": [ 105, 236, 505, 732 ], "type": "ref_text", "angle": 0, "lines": [ { "bbox": [ 105, 236, 505, 732 ], "spans": [ { "bbox": [ 105, 236, 505, 732 ], "type": "text", "content": "BigScience Workshop: Teven Le Scao, Angela Fan, Christopher Akiki, Ellie Pavlick, Suzana Ilic, Daniel Hesslow, Roman Castagne, Alexandra Sasha Luccioni, François Yvon, Matthias Galle, et al. Bloom: A 176b-parameter open-access multilingual language model, 2023. URL https://arxiv.org/abs/2211.05100." } ] } ], "index": 5 } ], "sub_type": "ref_text" } ], "discarded_blocks": [ { "bbox": [ 105, 26, 293, 38 ], "type": "header", "angle": 0, "lines": [ { "bbox": [ 105, 26, 293, 38 ], "spans": [ { "bbox": [ 105, 26, 293, 38 ], "type": "text", "content": "Published as a conference paper at ICLR 2025" } ] } ], "index": 0 }, { "bbox": [ 299, 750, 311, 760 ], "type": "page_number", "angle": 0, "lines": [ { "bbox": [ 299, 750, 311, 760 ], "spans": [ { "bbox": [ 299, 750, 311, 760 ], "type": "text", "content": "22" } ] } ], "index": 7 } ], "page_size": [ 612, 792 ], "page_idx": 21 }, { "para_blocks": [ { "bbox": [ 105, 386, 504, 594 ], "type": "list", "angle": 0, "index": 7, "blocks": [ { "bbox": [ 105, 386, 504, 409 ], "type": "ref_text", "angle": 0, "lines": [ { "bbox": [ 105, 386, 504, 409 ], "spans": [ { "bbox": [ 105, 386, 504, 409 ], "type": "text", "content": "Rowan Zellers, Ari Holtzman, Yonatan Bisk, Ali Farhadi, and Yejin Choi. Hellaswag: Can a machine really finish your sentence? arXiv, abs/1905.07830, 2019." } ] } ], "index": 2 }, { "bbox": [ 105, 416, 504, 460 ], "type": "ref_text", "angle": 0, "lines": [ { "bbox": [ 105, 416, 504, 460 ], "spans": [ { "bbox": [ 105, 416, 504, 460 ], "type": "text", "content": "Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen, Christopher Dewan, Mona Diab, Xian Li, Xi Victoria Lin, Todor Mihaylov, Myle Ott, Sam Shleifer, Kurt Shuster, Daniel Simig, Punit Singh Koura, Anjali Sridhar, Tianlu Wang, and Luke Zettlemoyer. Opt: Open pre-trained transformer language models, 2022."
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 468, + 504, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 468, + 504, + 502 + ], + "spans": [ + { + "bbox": [ + 105, + 468, + 504, + 502 + ], + "type": "text", + "content": "Xinlu Zhang, Zhiyu Zoey Chen, Xi Ye, Xianjun Yang, Lichang Chen, William Yang Wang, and Linda Ruth Petzold. Unveiling the impact of coding data instruction fine-tuning on large language models reasoning. arXiv preprint arXiv:2405.20535, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 509, + 504, + 542 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 509, + 504, + 542 + ], + "spans": [ + { + "bbox": [ + 105, + 509, + 504, + 542 + ], + "type": "text", + "content": "Qihao Zhu, Daya Guo, Zhihong Shao, Dejian Yang, Peiyi Wang, Runxin Xu, Y Wu, Yukun Li, Huazuo Gao, Shirong Ma, et al. Deepseek-coder-v2: Breaking the barrier of closed-source models in code intelligence. arXiv preprint arXiv:2406.11931, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 548, + 504, + 594 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 548, + 504, + 594 + ], + "spans": [ + { + "bbox": [ + 105, + 548, + 504, + 594 + ], + "type": "text", + "content": "Ahmet Üstün, Viraat Aryabumi, Zheng-Xin Yong, Wei-Yin Ko, Daniel D'souza, Gbemileke Onilude, Neel Bhandari, Shivalika Singh, Hui-Lee Ooi, Amr Kayid, Freddie Vargus, Phil Blunsom, Shayne Longpre, Niklas Muennighoff, Marzieh Fadaee, Julia Kreutzer, and Sara Hooker. Aya model: An instruction finetuned open-access multilingual language model, 2024." + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 308, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 308, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 308, + 94 + ], + "type": "text", + "content": "ETHICS STATEMENT AND LIMITATIONS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 106, + 506, + 152 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 106, + 506, + 152 + ], + "spans": [ + { + "bbox": [ + 104, + 106, + 506, + 152 + ], + "type": "text", + "content": "While we systematically study the impact of code data on downstream natural language tasks, we do not study its impact on safety and bias. Additionally, given the nature of pre-training and the number of ablations we have conducted we were limited by the scale of larger model sizes due to prohibitive compute costs." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 166, + 204, + 178 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 166, + 204, + 178 + ], + "spans": [ + { + "bbox": [ + 105, + 166, + 204, + 178 + ], + "type": "text", + "content": "REPRODUCIBILITY" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 191, + 504, + 225 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 191, + 504, + 225 + ], + "spans": [ + { + "bbox": [ + 104, + 191, + 504, + 225 + ], + "type": "text", + "content": "We provide details about our data mixture (Section 2.1), data filtering (Appendix C.1, C.2, C.3), evaluation (Section 2.2, Appendix A) and training (Section 2.3) setups. We believe these details provide a clear picture of how to obtain our data setup, model ablations and evaluation results." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 240, + 244, + 253 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 240, + 244, + 253 + ], + "spans": [ + { + "bbox": [ + 105, + 240, + 244, + 253 + ], + "type": "text", + "content": "A EVALUATION DETAILS" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 265, + 504, + 288 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 265, + 504, + 288 + ], + "spans": [ + { + "bbox": [ + 104, + 265, + 504, + 288 + ], + "type": "text", + "content": "We briefly describe the details of our evaluation benchmarks and the composite datasets used for each category below:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 129, + 297, + 506, + 481 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 129, + 297, + 504, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 297, + 504, + 342 + ], + "spans": [ + { + "bbox": [ + 129, + 297, + 504, + 342 + ], + "type": "text", + "content": "1. World knowledge. These benchmarks aim to measure world knowledge, testing knowledge memorization, retrieval, and question answering capability given context. We include Natural Questions Open (Kwiatkowski et al., 2019), and TriviaQA (Joshi et al., 2017) as the datasets. We report the average exact match scores for both these benchmarks." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 129, + 344, + 506, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 344, + 506, + 433 + ], + "spans": [ + { + "bbox": [ + 129, + 344, + 506, + 433 + ], + "type": "text", + "content": "2. Natural language reasoning. The Natural language (NL) reasoning suite consists of 11 benchmarks that involve natural language based reasoning such as Question Answering (Clark et al., 2019; Seo et al., 2018; Welbl et al., 2017; Sap et al., 2019; Choi et al., 2018), natural language inference (NLI) (Wang et al., 2020; de Marneffe et al., 2019; Wang et al., 2020), sentence completion (Mostafazadeh et al., 2016; Zellers et al., 2019), co-reference resolution (Sakaguchi et al., 2019) and general intelligence (Clark et al., 2018). We include a full list of the constituent benchmarks in Table 1. We report the average accuracy scores across all benchmarks." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 129, + 435, + 506, + 481 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 435, + 506, + 481 + ], + "spans": [ + { + "bbox": [ + 129, + 435, + 506, + 481 + ], + "type": "text", + "content": "3. Code. 
While our main focus is general performance, we also want to measure any changes to code generation performance. For code benchmarks, we focus on the function completion task. We evaluate on HumanEval-Python (Chen et al., 2021) and MBPP (Austin et al., 2021). We report the average pass@1 scores of these benchmarks." + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 499, + 383, + 512 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 499, + 383, + 512 + ], + "spans": [ + { + "bbox": [ + 105, + 499, + 383, + 512 + ], + "type": "text", + "content": "B SUMMARY RESULTS FOR PRE-TRAINING RECIPES" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 524, + 263, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 524, + 263, + 536 + ], + "spans": [ + { + "bbox": [ + 105, + 524, + 263, + 536 + ], + "type": "text", + "content": "Summary results are shown in Table 2." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 552, + 271, + 564 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 552, + 271, + 564 + ], + "spans": [ + { + "bbox": [ + 105, + 552, + 271, + 564 + ], + "type": "text", + "content": "C CODE-DATASETS FILTERING" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 576, + 211, + 588 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 576, + 211, + 588 + ], + "spans": [ + { + "bbox": [ + 105, + 576, + 211, + 588 + ], + "type": "text", + "content": "C.1 QUALITY FILTERS" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 597, + 506, + 642 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 597, + 506, + 642 + ], + "spans": [ + { + "bbox": [ + 104, + 597, + 506, + 642 + ], + "type": "text", + "content": "In addition to the deduplication and quality filtering applied on the GitHub scrapes by StarCoder for The Stack dataset (Li et al., 2023a), we apply filters to remove documents with greater than 1000 float numbers, with instances of the string '0x' that are lists of top-level domains, and with 'generated by' in the first 400 characters." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 654, + 432, + 666 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 654, + 432, + 666 + ], + "spans": [ + { + "bbox": [ + 105, + 654, + 432, + 666 + ], + "type": "text", + "content": "C.2 PROGRAMMING LANGUAGES PRESENT IN WEB-BASED CODE DATASET" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 675, + 471, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 675, + 471, + 687 + ], + "spans": [ + { + "bbox": [ + 105, + 675, + 471, + 687 + ], + "type": "text", + "content": "Programming languages included in our version of The Stack dataset are present in Table 3." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 700, + 503, + 712 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 700, + 503, + 712 + ], + "spans": [ + { + "bbox": [ + 105, + 700, + 503, + 712 + ], + "type": "text", + "content": "C.3 MARKUP-STYLE PROGRAMMING LANGUAGES PRESENT IN WEB-BASED CODE DATASET" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 105, +
720, + 440, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 720, + 440, + 732 + ], + "type": "text", + "content": "Markup-style languages included in our version of The Stack dataset are in Table 4" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 127, + 79, + 486, + 277 + ], + "blocks": [ + { + "bbox": [ + 127, + 79, + 486, + 277 + ], + "lines": [ + { + "bbox": [ + 127, + 79, + 486, + 277 + ], + "spans": [ + { + "bbox": [ + 127, + 79, + 486, + 277 + ], + "type": "table", + "html": "
Task | Dataset | Metric
WORLD KNOWLEDGE TASKS
Question Answering | TriviaQA (Joshi et al., 2017) | 0-shot Acc.
| Natural Questions Open (Lee et al., 2019) | 0-shot Acc.
NATURAL LANGUAGE REASONING
Question Answering | BoolQ (Clark et al., 2019) | 0-shot Acc.
| PiQA (Seo et al., 2018) | 0-shot Acc.
| SciQ (Welbl et al., 2017) | 0-shot Acc.
| SocialQA (Sap et al., 2019) | 0-shot Acc.
| QuAC (Choi et al., 2018) | 0-shot Acc.
Natural Language Inference | SuperGLUE-CB (Wang et al., 2020; de Marneffe et al., 2019) | 0-shot Acc.
| SuperGLUE-COPA (Wang et al., 2020) | 0-shot Acc.
Sentence Completion | StoryCloze (Mostafazadeh et al., 2016) | 0-shot Acc.
| HellaSwag (Zellers et al., 2019) | 0-shot Acc.
Coreference Resolution | Winogrande (Sakaguchi et al., 2019) | 0-shot Acc.
General Intelligence | ARC-Easy (Clark et al., 2018) | 0-shot Acc.
TEXT GENERATION
Open-Ended Generation | Dolly-200 (English) (Singh et al., 2024) | 0-shot win-rate
CODE GENERATION
Function completion | HumanEval (Chen et al., 2021) | 0-shot pass@1
| MBPP (Austin et al., 2021) | 0-shot pass@1
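Every benchmark above is scored 0-shot. As a rough illustration of how the accuracy rows are typically computed via rank classification (a minimal sketch, not the paper's released evaluation harness; `score_choice` is a hypothetical stand-in for a model's log-likelihood scorer):

```python
from typing import Callable, Sequence

def zero_shot_accuracy(
    examples: Sequence[dict],  # each: {"prompt": str, "choices": list[str], "label": int}
    score_choice: Callable[[str, str], float],  # e.g. log-likelihood of a choice given the prompt
) -> float:
    """Rank classification: pick the highest-scoring answer choice, report mean accuracy."""
    hits = 0
    for ex in examples:
        scores = [score_choice(ex["prompt"], c) for c in ex["choices"]]
        hits += int(scores.index(max(scores)) == ex["label"])
    return hits / len(examples)
```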
", + "image_path": "370a2828b37def0568aff41c3df22bc6420744c2a2dc57342146cd3cfb410a08.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 127, + 329, + 485, + 462 + ], + "blocks": [ + { + "bbox": [ + 104, + 284, + 504, + 319 + ], + "lines": [ + { + "bbox": [ + 104, + 284, + 504, + 319 + ], + "spans": [ + { + "bbox": [ + 104, + 284, + 504, + 319 + ], + "type": "text", + "content": "Table 1: Datasets considered for evaluation: We conduct extensive evaluations across benchmarks detailed above. These provide valuable proxies for performance in natural language reasoning, world knowledge, open ended text generation, and code generation tasks." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 127, + 329, + 485, + 462 + ], + "lines": [ + { + "bbox": [ + 127, + 329, + 485, + 462 + ], + "spans": [ + { + "bbox": [ + 127, + 329, + 485, + 462 + ], + "type": "table", + "html": "
Model Variant | Recipe | Token Count: Text | Token Count: Code | NL: Reason. | NL: Know. | NL: Avg. | Code | Total Avg.
TEXT-ONLY | Pre-training | 400B | - | 49.0 | 9.5 | 29.2 | 0.4 | 19.6
| Cooldown | +32B | +8B | 54.1 | 11.1 | 32.6 | 4.4 | 23.2
BALANCED-ONLY | Pre-training | 200B | 200B | 51.8 | 8.1 | 30.0 | 9.0 | 23.0
| Cooldown | +32B | +8B | 53.2 | 11.1 | 32.1 | 8.4 | 24.2
BALANCED→TEXT | Pre-training Init. | 100B | 100B | 52.0 | 7.4 | 29.6 | 7.8 | 22.4
| Continue Pre-train. | +180B | +20B | 53.0 | 9.9 | 31.5 | 4.8 | 22.6
| Cooldown | +32B | +8B | 54.9 | 10.9 | 32.9 | 5.8 | 23.9
CODE→TEXT | Pre-training Init. | - | 200B | 44.7 | 1.5 | 23.1 | 15.5 | 20.6
| Continue Pre-train. | +180B | +20B | 53.3 | 9.5 | 31.4 | 4.1 | 22.3
| Cooldown | +32B | +8B | 52.1 | 10.3 | 31.2 | 7.5 | 23.3
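The averaging convention assumed in the reconstruction above (and consistent with every row) is that "NL: Avg." is the mean of the reasoning and knowledge scores, and "Total Avg." additionally folds in the code score. A quick check on the TEXT-ONLY cooldown row:

```python
# Sanity check of the (assumed) averaging convention in Table 2,
# using the TEXT-ONLY cooldown row: Reason. 54.1, Know. 11.1, Code 4.4.
reason, know, code = 54.1, 11.1, 4.4
nl_avg = round((reason + know) / 2, 1)            # 32.6, matches "NL: Avg."
total_avg = round((reason + know + code) / 3, 1)  # 23.2, matches "Total Avg."
print(nl_avg, total_avg)
```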
", + "image_path": "e436392147d7301cae44171b952a680b8907968533a98587efe33a7df44cfcdc.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 469, + 504, + 504 + ], + "lines": [ + { + "bbox": [ + 104, + 469, + 504, + 504 + ], + "spans": [ + { + "bbox": [ + 104, + 469, + 504, + 504 + ], + "type": "text", + "content": "Table 2: Model variants with the corresponding pre-training recipes: Pre-training recipes include initial pre-training, continued pre-training, and cooldown phases. Balanced " + }, + { + "bbox": [ + 104, + 469, + 504, + 504 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 104, + 469, + 504, + 504 + ], + "type": "text", + "content": " Text achieves the best NL performance while Balanced-only performs significantly better in code generation." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 105, + 523, + 408, + 536 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 523, + 408, + 536 + ], + "spans": [ + { + "bbox": [ + 105, + 523, + 408, + 536 + ], + "type": "text", + "content": "D LLM JUDGE PROMPT AND PREAMBLE FOR WIN-RATES" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 548, + 149, + 559 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 548, + 149, + 559 + ], + "spans": [ + { + "bbox": [ + 105, + 548, + 149, + 559 + ], + "type": "text", + "content": "Preamble" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 566, + 497, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 566, + 497, + 588 + ], + "spans": [ + { + "bbox": [ + 104, + 566, + 497, + 588 + ], + "type": "text", + "content": "You are a helpful following assistant whose goal is to select the preferred" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 588, + 375, + 600 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 588, + 375, + 600 + ], + "spans": [ + { + "bbox": [ + 107, + 588, + 375, + 600 + ], + "type": "text", + "content": "(least wrong) output for a given instruction." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 605, + 142, + 616 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 605, + 142, + 616 + ], + "spans": [ + { + "bbox": [ + 105, + 605, + 142, + 616 + ], + "type": "text", + "content": "Prompt" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 621, + 466, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 621, + 466, + 643 + ], + "spans": [ + { + "bbox": [ + 104, + 621, + 466, + 643 + ], + "type": "text", + "content": "Which of the following answers is the best one for the given instruction." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 644, + 345, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 644, + 345, + 654 + ], + "spans": [ + { + "bbox": [ + 105, + 644, + 345, + 654 + ], + "type": "text", + "content": "A good answer should follow these rules:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 655, + 500, + 699 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 105, + 655, + 323, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 655, + 323, + 666 + ], + "spans": [ + { + "bbox": [ + 105, + 655, + 323, + 666 + ], + "type": "text", + "content": "1) It should have correct reasoning," + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 666, + 411, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 666, + 411, + 677 + ], + "spans": [ + { + "bbox": [ + 105, + 666, + 411, + 677 + ], + "type": "text", + "content": "2) It should answer the request in the instruction," + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 677, + 500, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 677, + 500, + 688 + ], + "spans": [ + { + "bbox": [ + 105, + 677, + 500, + 688 + ], + "type": "text", + "content": "3) It should be factually correct and semantically comprehensible," + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 689, + 399, + 699 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 689, + 399, + 699 + ], + "spans": [ + { + "bbox": [ + 105, + 689, + 399, + 699 + ], + "type": "text", + "content": "4) It should be grammatically correct and fluent." + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 710, + 257, + 721 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 710, + 257, + 721 + ], + "spans": [ + { + "bbox": [ + 105, + 710, + 257, + 721 + ], + "type": "text", + "content": "Instruction: instruction" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 198, + 80, + 413, + 348 + ], + "blocks": [ + { + "bbox": [ + 198, + 80, + 413, + 348 + ], + "lines": [ + { + "bbox": [ + 198, + 80, + 413, + 348 + ], + "spans": [ + { + "bbox": [ + 198, + 80, + 413, + 348 + ], + "type": "table", + "html": "
Language Name | Proportion of total code documents (%)
java | 15.54
javascript | 15.29
php | 12.46
python | 9.60
c-sharp | 8.30
typescript | 7.92
c | 6.63
cpp | 4.91
go | 3.49
ruby | 2.69
shell | 1.82
kotlin | 1.76
Swift | 1.52
Vue | 1.48
rust | 1.00
scala | 0.94
JSX | 0.83
sql | 0.74
dart | 0.72
makefile | 0.53
lua | 0.47
haskell | 0.45
smalltalk | 0.43
tex | 0.37
clojure | 0.10
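For context, the Appendix C.1 quality filters that shape this language mix can be approximated in a few lines of Python. This is a hypothetical sketch, not the released pipeline; in particular, the exact form of the "0x" top-level-domain rule is not fully specified in the text, so it is approximated here by a simple token count:

```python
import re

FLOAT_RE = re.compile(r"\b\d+\.\d+\b")

def keep_document(text: str) -> bool:
    """Approximate the Appendix C.1 heuristics for dropping low-quality code docs."""
    if len(FLOAT_RE.findall(text)) > 1000:    # more than 1000 float literals
        return False
    if "generated by" in text[:400].lower():  # auto-generated file boilerplate
        return False
    if text.count("0x") > 100:                # rough proxy for "0x" TLD lists
        return False
    return True
```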
", + "image_path": "f4a3e903a014576d8d0554a3c729ae6487310106d13cba7cbf9076c780ed64d3.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 186, + 392, + 425, + 511 + ], + "blocks": [ + { + "bbox": [ + 148, + 355, + 463, + 369 + ], + "lines": [ + { + "bbox": [ + 148, + 355, + 463, + 369 + ], + "spans": [ + { + "bbox": [ + 148, + 355, + 463, + 369 + ], + "type": "text", + "content": "Table 3: Programming languages included in our version of The Stack dataset" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 186, + 392, + 425, + 511 + ], + "lines": [ + { + "bbox": [ + 186, + 392, + 425, + 511 + ], + "spans": [ + { + "bbox": [ + 186, + 392, + 425, + 511 + ], + "type": "table", + "html": "
Language Name | Proportion of total code documents (%)
markdown | 54.23
yaml | 10.77
json | 9.97
html | 8.57
css | 6.86
SCSS | 5.84
restructuredtext | 2.26
TOML | 1.25
rmarkdown | 0.02
Sass | 0.22
", + "image_path": "64918e1134cae412b8c538546ecc7dffba51409b6391f52ca845a844ccf1fdce.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 148, + 519, + 463, + 533 + ], + "lines": [ + { + "bbox": [ + 148, + 519, + 463, + 533 + ], + "spans": [ + { + "bbox": [ + 148, + 519, + 463, + 533 + ], + "type": "text", + "content": "Table 4: Markup-style languages included in our version of The Stack dataset" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "text" + }, + { + "type": "code", + "bbox": [ + 104, + 567, + 492, + 734 + ], + "blocks": [ + { + "bbox": [ + 104, + 567, + 492, + 734 + ], + "lines": [ + { + "bbox": [ + 104, + 567, + 492, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 567, + 492, + 734 + ], + "type": "text", + "content": "Answer (A): completion_a \nAnswer (B): completion_b \nFIRST provide a concise comparison of the two answers which explains \nwhich answer you prefer and why. \nSECOND, on a new line, state exactly one of 'Preferred: Answer (A)' or 'Preferred: Answer (B)' to indicate your choice of preferred response. \nYour response should use the format: \nComparison: \nPreferred: <'Answer (A)' or 'Answer (B)'>" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "code_body" + } + ], + "index": 5, + "sub_type": "code", + "guess_lang": "txt" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "text", + "content": "26" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 25 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 422, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 422, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 422, + 94 + ], + "type": "text", + "content": "E GENERATIVE WIN-RATES FOR IMPACT OF INITIALIZATION" + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 147, + 110, + 318, + 250 + ], + "blocks": [ + { + "bbox": [ + 147, + 110, + 318, + 250 + ], + "lines": [ + { + "bbox": [ + 147, + 110, + 318, + 250 + ], + "spans": [ + { + "bbox": [ + 147, + 110, + 318, + 250 + ], + "type": "image", + "image_path": "010a96056f4b958d13db52d5e1b4d70ef0db1a2846ea9f5553ec924432c3772b.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 123, + 260, + 487, + 274 + ], + "lines": [ + { + "bbox": [ + 123, + 260, + 487, + 274 + ], + "spans": [ + { + "bbox": [ + 123, + 260, + 487, + 274 + ], + "type": "text", + "content": "Figure 8: Impact of initialization on generative quality as judged by LLM-as-a-judge." 
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 320, + 111, + 462, + 249 + ], + "blocks": [ + { + "bbox": [ + 320, + 111, + 462, + 249 + ], + "lines": [ + { + "bbox": [ + 320, + 111, + 462, + 249 + ], + "spans": [ + { + "bbox": [ + 320, + 111, + 462, + 249 + ], + "type": "image", + "image_path": "1f4ab37c52750ad830097a2cfd0cc3e97ed46650431d831913e3ea67803dac32.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 297, + 418, + 309 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 297, + 418, + 309 + ], + "spans": [ + { + "bbox": [ + 105, + 297, + 418, + 309 + ], + "type": "text", + "content": "F EVALUATION OF 470M COOLDOWN MODELS ON GSM8K" + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 222, + 342, + 389, + 489 + ], + "blocks": [ + { + "bbox": [ + 242, + 328, + 391, + 341 + ], + "lines": [ + { + "bbox": [ + 242, + 328, + 391, + 341 + ], + "spans": [ + { + "bbox": [ + 242, + 328, + 391, + 341 + ], + "type": "text", + "content": "Mathematical Evaluation" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 222, + 342, + 389, + 489 + ], + "lines": [ + { + "bbox": [ + 222, + 342, + 389, + 489 + ], + "spans": [ + { + "bbox": [ + 222, + 342, + 389, + 489 + ], + "type": "image", + "image_path": "937a3c8cec3999d07174f00929dff46a62dc5ae9893d57e4b42b002b5eea7a9d.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 192, + 495, + 217, + 507 + ], + "blocks": [ + { + "bbox": [ + 192, + 495, + 217, + 507 + ], + "lines": [ + { + "bbox": [ + 192, + 495, + 217, + 507 + ], + "spans": [ + { + "bbox": [ + 192, + 495, + 217, + 507 + ], + "type": "image", + "image_path": "2198d10d441a490dce596eb6cef60a5843dc892d8c541979cc1eb9043d2ab53d.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 222, + 496, + 354, + 507 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 222, + 496, + 354, + 507 + ], + "spans": [ + { + "bbox": [ + 222, + 496, + 354, + 507 + ], + "type": "text", + "content": "text " + }, + { + "bbox": [ + 222, + 496, + 354, + 507 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 222, + 496, + 354, + 507 + ], + "type": "text", + "content": " no-code cooldown" + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 193, + 510, + 217, + 522 + ], + "blocks": [ + { + "bbox": [ + 193, + 510, + 217, + 522 + ], + "lines": [ + { + "bbox": [ + 193, + 510, + 217, + 522 + ], + "spans": [ + { + "bbox": [ + 193, + 510, + 217, + 522 + ], + "type": "image", + "image_path": "ed8bfb10c083e92fdfdb6076cad5798956f4b34fe94d82ba856ec5a1e41f9886.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 222, + 511, + 353, + 522 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 222, + 511, + 353, + 522 + ], + "spans": [ + { + "bbox": [ + 222, + 511, + 353, + 522 + ], + "type": "text", + "content": "text " + }, + { + "bbox": [ + 222, + 511, + 353, + 522 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 222, + 511, + 353, + 522 + ], + "type": "text", + "content": " cooldown w/ code" + } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 193, + 
526, + 217, + 537 + ], + "blocks": [ + { + "bbox": [ + 193, + 526, + 217, + 537 + ], + "lines": [ + { + "bbox": [ + 193, + 526, + 217, + 537 + ], + "spans": [ + { + "bbox": [ + 193, + 526, + 217, + 537 + ], + "type": "image", + "image_path": "60c004598d25c1f7ce4d49f2b2dce9ad16115bea7b6b2420dd89f392de91625b.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "bbox": [ + 222, + 526, + 416, + 537 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 222, + 526, + 416, + 537 + ], + "spans": [ + { + "bbox": [ + 222, + 526, + 416, + 537 + ], + "type": "text", + "content": "balanced " + }, + { + "bbox": [ + 222, + 526, + 416, + 537 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 222, + 526, + 416, + 537 + ], + "type": "text", + "content": " text " + }, + { + "bbox": [ + 222, + 526, + 416, + 537 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 222, + 526, + 416, + 537 + ], + "type": "text", + "content": " no-code cooldown" + } + ] + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 193, + 541, + 217, + 553 + ], + "blocks": [ + { + "bbox": [ + 193, + 541, + 217, + 553 + ], + "lines": [ + { + "bbox": [ + 193, + 541, + 217, + 553 + ], + "spans": [ + { + "bbox": [ + 193, + 541, + 217, + 553 + ], + "type": "image", + "image_path": "bcdd78c6a09e68b0aa568fb31013bf1df36f16e3ac2d7d2ab259b146a97c8ece.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 570, + 506, + 625 + ], + "lines": [ + { + "bbox": [ + 104, + 570, + 506, + 625 + ], + "spans": [ + { + "bbox": [ + 104, + 570, + 506, + 625 + ], + "type": "text", + "content": "Figure 9: Evaluation of 470M cooldown models on GSM8K. Including code in any stage of the pre-training improves performance compared to the model where no code has been seen in any of the training stages: pre-training, continual pre-training and cooldown. The most performant model in this comparison has seen code in all stages including cooldown, where it leads to a significant improvement (from 2.9 to 4.12, +42% relative gain)."
+ } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "bbox": [ + 222, + 541, + 416, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 222, + 541, + 416, + 553 + ], + "spans": [ + { + "bbox": [ + 222, + 541, + 416, + 553 + ], + "type": "text", + "content": "balanced " + }, + { + "bbox": [ + 222, + 541, + 416, + 553 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 222, + 541, + 416, + 553 + ], + "type": "text", + "content": " text " + }, + { + "bbox": [ + 222, + 541, + 416, + 553 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 222, + 541, + 416, + 553 + ], + "type": "text", + "content": " cooldown w/ code" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "27" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 26 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2025/To Tackle Adversarial Transferability_ A Novel Ensemble Training Method with Fourier Transformation/11b8de53-d193-4b48-bf31-fc86f1bab485_content_list.json b/2025/To Tackle Adversarial Transferability_ A Novel Ensemble Training Method with Fourier Transformation/11b8de53-d193-4b48-bf31-fc86f1bab485_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..f62f28f526244720f6b8483ba09aa8b7cd4d7a35 --- /dev/null +++ b/2025/To Tackle Adversarial Transferability_ A Novel Ensemble Training Method with Fourier Transformation/11b8de53-d193-4b48-bf31-fc86f1bab485_content_list.json @@ -0,0 +1,2685 @@ +[ + { + "type": "text", + "text": "TO TACKLE ADVERSARIAL TRANSFERABILITY: A NOVEL ENSEMBLE TRAINING METHOD WITH FOURIER TRANSFORMATION", + "text_level": 1, + "bbox": [ + 171, + 99, + 826, + 170 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Wanlin Zhang $^{1,3}$ , Weichen Lin $^{2}$ , Ruomin Huang $^{4}$ , Shihong Song $^{1}$ , Hu Ding $^{1*}$", + "bbox": [ + 179, + 193, + 720, + 209 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ School of Computer Science and Technology, University of Science and Technology of China \n $^{2}$ School of Artificial Intelligence and Data Science, University of Science and Technology of China \n $^{3}$ Shanghai Innovation Institute $^{4}$ Department of Computer Science, Duke University \n{ideven, linweichen, shihongsong}@mail.ustc.edu.cn \nruomin.huang@duke.edu,HUDING@ustc.edu.cn", + "bbox": [ + 179, + 209, + 834, + 281 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ABSTRACT", + "text_level": 1, + "bbox": [ + 450, + 316, + 545, + 330 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Ensemble methods are commonly used for enhancing robustness in machine learning. 
However, due to the \"transferability\" of adversarial examples, the performance of an ensemble model can be seriously affected even it contains a set of independently trained sub-models. To address this issue, we propose an efficient data transformation method based on a cute \"weakness allocation\" strategy, to diversify non-robust features. Our approach relies on a fine-grained analysis on the relation between non-robust features and adversarial attack directions. Moreover, our approach enjoys several other advantages, e.g., it does not require any communication between sub-models and the construction complexity is also quite low. We conduct a set of experiments to evaluate the performance of our proposed method and compare it with several popular baselines. The results suggest that our approach can achieve significantly improved robust accuracy over most existing ensemble methods, and meanwhile preserve high clean accuracy.", + "bbox": [ + 228, + 347, + 767, + 527 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 INTRODUCTION", + "text_level": 1, + "bbox": [ + 171, + 553, + 336, + 566 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In the past decade, Deep neural networks (DNNs) have achieved prominent performance on a broad range of real-world tasks (Goodfellow et al., 2016). However, a number of previous works show that DNNs are susceptible to carefully-crafted manipulations, where the manipulated data are called \"adversarial examples\" (Szegedy et al., 2014; Zhou et al., 2018; Heaven, 2019). The existence of adversarial examples severely impedes the application of DNNs in security-conscious scenarios, such as self-driving car (Rossolini et al., 2023; Zhu et al., 2021) and heath care (Newaz et al., 2020).", + "bbox": [ + 169, + 574, + 826, + 659 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The adversarial training approach (Wang et al., 2023a; Madry et al., 2018) has gained significant attention due to its great effectiveness for defending against adversarial examples. However, the adversarial training approach often necessitates considerably high training time and large training dataset (Gowal et al., 2021; Carmon et al., 2019). Moreover, it has been observed that adversarial training is likely to incur certain decline in the accuracy on clean data, which also hinders the trained model to be applied for many practical tasks (Tsipras et al., 2018; Zhang et al., 2019).", + "bbox": [ + 169, + 665, + 823, + 750 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Another important approach to enhance adversarial robustness is ensemble training (Tramér et al., 2018). But recent studies (Yang et al., 2025; Gao et al., 2022; Waseda et al., 2023) demonstrated that an adversarial example can attack different models even they are trained independently, and this phenomenon is the so-called \"transferability\" of adversarial examples. Hence, the strategy that simply integrates different models trained on the same original dataset is not sufficient to guarantee the overall robustness. 
To resolve this issue, different approaches have been proposed for maximizing the \"diversity\" among sub-models; in general, these approaches can be categorized into two classes: \"simultaneous training\" and \"individual training\" (Pang et al., 2019).", + "bbox": [ + 169, + 756, + 826, + 867 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "To reduce the similarity among sub-models, most existing \"simultaneous training\" methods attempt to incorporate some penalty during each epoch of parameter updates. Kariyappa & Qureshi (2019)", + "bbox": [ + 169, + 875, + 823, + 902 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Corresponding author.", + "bbox": [ + 189, + 910, + 333, + 924 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "proposed the \"Gradient Alignment Loss (GAL)\" method to minimize the gradient similarity between sub-models directly. Further, Yang et al. (2021) proposed the \"Transferability Reduced Smooth (TRS)\" method to improve GAL by adding a regularization term to increase the smoothness, as the models with a smoother loss function can reduce the \"transferability\" of attacks. Yang et al. (2020) aimed to isolate the adversarial vulnerability in each sub-model by distilling non-robust features, where the sub-models can then generate diverse outputs being resilient against transfer attacks. Despite their effectiveness for defending adversarial attacks, the simultaneous training methods often require a substantial amount of memory since all the sub-models need to be stored in the GPUs in the training stage, which could be prohibitive if the number of sub-models is not small (say, more than 10) and/or their sizes are large. Additionally, the information interaction in parallel training can also cause extra large communication cost.", + "bbox": [ + 169, + 103, + 826, + 256 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Different from simultaneous training, most \"individual training\" methods train each sub-model independently on a randomly transformed version of the given training dataset (Pang et al., 2019; AprilPyone & Kiya, 2021). This \"random transformation\" strategy yields diverse datasets, and thus different sub-models trained on these datasets can present diverse performances when confronting an adversarial attack. The individual training approach has higher flexibility and also requires less GPU memory, because the sub-models do not need to be stored simultaneously. Since there is no communication between sub-models, individual training methods are more suitable for parallel training with multiple GPUs. But unfortunately, recent studies showed that the commonly used random transformations (e.g. image cropping and rescaling) are not that effective under adversarial attacks (Athalye et al., 2018). The major cause of suppressing the performance of individual training is that the \"transferability\" problem is still not well addressed.", + "bbox": [ + 169, + 263, + 826, + 417 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our contributions. To tackle the transferability obstacle, we consider developing a new data transformation method for ensemble training. 
Our main contributions are summarized as follows:", + "bbox": [ + 169, + 422, + 823, + 450 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "- First, we propose a fine-grained analysis on the relation between non-robust features and adversarial attack directions (Section 3). Being different from the previous analysis on non-robust features, our new analysis provides us the hints that are particularly useful to allocate the potential vulnerability directions to a set of sub-models, and therefore paves the way for designing our ensemble training strategy.", + "bbox": [ + 168, + 458, + 823, + 529 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "- Second, we propose a data transform framework that can effectively promote the diversity of training data for robust ensemble training. The framework consists of two steps: \"frequency selection\" and \"frequency transformation\", where the frequency is based on the Fourier transformation on the images. We propose two efficient frequency transformations with low complexities on the identified non-robust features. The first one is based on simple random noise, and the second one is a cute \"targeted attack transformation\" that can modify the non-robust features more effectively (Section 4.2).", + "bbox": [ + 168, + 535, + 826, + 619 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "- Finally, we conduct a set of experiments to evaluate the adversarial robustness of our approach on several benchmark datasets under the widely used attack algorithms. We also compare our approach with several open-source ensemble methods, such as ADP (Pang et al., 2019), GAL (Kariyappa & Qureshi, 2019), DVERGE (Yang et al., 2020), and TRS (Yang et al., 2021). Compared with those baselines, the experimental results suggest that our proposed approach can significantly outperform most of them in robust accuracy and also preserve comparable high clean accuracy.", + "bbox": [ + 168, + 626, + 826, + 710 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "1.1 OTHER RELATED WORKS", + "text_level": 1, + "bbox": [ + 171, + 724, + 387, + 739 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Data transformation for ensemble training. Guo et al. (2018) and Raff et al. (2019) proposed the transformations that preserve semantic information to reduce the impact of adversarial perturbation. AprilPyone & Kiya (2021) developed a training method that employs block-wise data transformations, where the input image is partitioned into blocks based on some private key. LINAC (Rusu et al., 2022) uses a predetermined random seed (private key) to initialize and train a DNN to encode the input data, serving as an encrypted input transformation.", + "bbox": [ + 169, + 750, + 826, + 834 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Adversarial attack from frequency perspective. Wang et al. (2020) explained that the model's vulnerability to small distortions may be due to its dependence on high-frequency features. Yucel et al. (2023) proposed a data augmentation method that reduces the reliance on high-frequency components, so as to improve model's robustness while maintaining clean accuracy. Maiya et al. (2021) and Bernhard et al. 
(2021) respectively showed that to fully understand the vulnerability, we should consider the distribution of the entire dataset with high and low frequencies.", + "bbox": [ + 169, + 840, + 826, + 925 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 PRELIMINARIES", + "text_level": 1, + "bbox": [ + 171, + 102, + 341, + 118 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Some notations. We consider the $k$ -classification task: $\\mathcal{X} \\to \\mathcal{Y}$ where $\\mathcal{X}$ is the input data space and $\\mathcal{Y} = \\{1,2,\\dots,k\\}$ is the set of labels. A soft-classification model $f(\\cdot;\\beta)$ maps each $x \\in \\mathcal{X}$ to a vector $f(x;\\beta) \\in \\mathbb{R}^k$ , where $\\beta$ is the parameter vector that needs to be trained. Its associated hard-classification model is $F(x;\\beta) = \\arg \\max_i [f(x;\\beta)]_i$ where $[\\cdot]_i$ stands for the $i$ -th coordinate. The model $f$ is usually equipped with a loss function $\\ell(f(x;\\beta), y)$ , $x \\in \\mathcal{X}$ and $y \\in \\mathcal{Y}$ , which is differentiable on $\\beta$ (e.g., cross-entropy loss). We refer to the accuracy on the original dataset as \"clean accuracy\" and the accuracy on adversarial examples as \"robust accuracy\". We denote the one-hot $k$ -dimensional vector that corresponds to the target label $y$ as $h(y)$ .", + "bbox": [ + 169, + 125, + 826, + 238 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Definition 2.1 (Ensemble Model) Let $\\mathcal{M} = \\{f_1, \\dots, f_M\\}$ be a set of sub-models for a $k$ -classification task. We build the ensemble model with the following function:", + "bbox": [ + 169, + 243, + 826, + 273 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nf _ {\\mathrm {E}} (x; \\beta_ {[ 1: M ]}) = \\frac {1}{M} \\sum_ {m \\in [ M ]} \\widehat {F} _ {m} (x; \\beta_ {m}), \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 366, + 275, + 825, + 314 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\beta_{[1:M]} = \\{\\beta_m \\mid 1 \\leq m \\leq M\\}$ , and $\\widehat{F}_m(x; \\beta_m)$ is the one-hot $k$ -dimensional vector of the hard-classification model $F_m(x; \\beta_m)$ of $f_m$ .", + "bbox": [ + 169, + 321, + 823, + 356 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Definition 2.2 (Adversarial Attack and Targeted Attack) Given a model $f(\\cdot; \\beta)$ and an input $(x, y) \\in \\mathcal{X} \\times \\mathcal{Y}$ , the adversarial attack algorithm $\\mathcal{A}$ returns a perturbed data $x'$ inside the $l_p$ ball of radius $\\epsilon > 0$ , which maximizes the loss function $\\ell(f(\\cdot; \\beta), \\cdot)$ , or minimizes the loss function $\\ell(f(\\cdot; \\beta), y_t)$ if given a target label $y_t \\neq y$ . For the latter one, we say it is a \"targeted attack from $y$ to $y_t$ \". Usually we set $p = 2$ or $p = \\infty$ .", + "bbox": [ + 169, + 367, + 825, + 438 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "As mentioned in Section 1, because our proposed approach is based on Fourier transform, we introduce several necessary notations below. 
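To make Definition 2.1 concrete, here is a minimal NumPy sketch of the hard-label ensemble in Equation (1): each sub-model contributes the one-hot vector of its predicted class, and the ensemble averages these votes (the names are illustrative, not taken from the paper's code):

```python
import numpy as np

def ensemble_predict(outputs_per_model: list[np.ndarray]) -> np.ndarray:
    """f_E(x): average of the one-hot hard predictions of M sub-models (Eq. 1)."""
    k = outputs_per_model[0].shape[0]
    votes = np.zeros(k)
    for f_m in outputs_per_model:          # f_m(x; beta_m) is a length-k score vector
        votes[int(np.argmax(f_m))] += 1.0  # one-hot hard prediction of sub-model m
    return votes / len(outputs_per_model)
```

The ensemble's hard label is then simply the argmax of this averaged vote vector.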
Given an image $x$ of size $L \times N$ , the corresponding two-dimensional discrete Fourier transform can be written as: for any $0 \leq u \leq L - 1$ and $0 \leq v \leq N - 1$ ,", + "bbox": [ + 169, + 441, + 826, + 484 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {x} [ u, v ] = \\sum_ {s = 0} ^ {L - 1} \\sum_ {t = 0} ^ {N - 1} x [ s, t ] \\cdot e ^ {- 2 \\mathrm {j} \\pi \\left(\\frac {u s}{L} + \\frac {v t}{N}\\right)}, \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 357, + 493, + 825, + 534 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where “ $j$ ” denotes the imaginary unit, and “ $\tilde{x}[u,v]$ ” is the entry in the $u$ -th column and $v$ -th row of the Fourier matrix $\tilde{x}$ (“ $x[s,t]$ ” is defined similarly for the original image $x$ ). The pixels of the image $x$ form the time domain, and the entries of $\tilde{x}$ form the frequency domain. For a frequency $(u,v)$ , the amplitude is the absolute value $|\tilde{x}[u,v]|$ . We call a frequency $(u,v)$ a frequency feature.", + "bbox": [ + 169, + 541, + 826, + 602 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 FINE-GRAINED ANALYSIS ON ENSEMBLE MODEL VULNERABILITY", + "text_level": 1, + "bbox": [ + 169, + 609, + 754, + 626 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The previous work (Ilyas et al., 2019) categorizes the features learned by a model into robust and non-robust features. It shows that adversarial vulnerability is a natural consequence of the presence of highly predictive but non-robust features. Moreover, different models trained on the same dataset often have similar non-robust features, and therefore an adversarial example usually exhibits the \"transferability\" property among them. Several other works also presented detailed discussions on the impact of non-robust features (Benz et al., 2021; Springer et al., 2021). Following those studies, a natural idea for tackling the transferability issue is to ensure that the sub-models should have diverse non-robust features. In this section, we provide a fine-grained analysis on the vulnerability of ensemble models and then conclude two important hints for achieving this \"diversity\" goal.", + "bbox": [ + 169, + 636, + 826, + 763 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The following definitions are inspired by (Ilyas et al., 2019). Note that different from the term \"feature\" used in their article, we use \"feature extractor\" instead in our paper, since \"feature\" will be particularly used for referring to image features in the time or frequency domain. Specifically, we define a \"feature extractor\" as a function that maps the input $x \in \mathcal{X}$ to a vector in $\mathbb{R}^k$ . A model $f$ is composed of a set of different feature extractors, with each feature extractor focusing on a distinct feature. The combination of outputs from these feature extractors forms the model's final output. 
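Equation (2) is the standard 2D discrete Fourier transform, so the frequency features and amplitudes used throughout the paper can be obtained directly with NumPy's FFT. A sketch on a toy single-channel image:

```python
import numpy as np

x = np.random.rand(32, 32)    # toy L x N image in the time (pixel) domain
x_tilde = np.fft.fft2(x)      # complex spectrum: the matrix of entries from Eq. (2)
amplitude = np.abs(x_tilde)   # amplitude for each frequency feature (u, v)
assert np.allclose(np.fft.ifft2(x_tilde).real, x)  # the transform is invertible
```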
We then further define the \"useful feature extractors\".", + "bbox": [ + 169, + 768, + 825, + 867 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Definition 3.1 (Useful feature extractor) For a given data distribution $\\mathcal{D} = \\mathcal{X}\\times \\mathcal{Y}$ , a feature extractor $\\theta :\\mathcal{X}\\to \\mathbb{R}^k$ is useful, if we have", + "bbox": [ + 169, + 873, + 823, + 901 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {E} _ {(x, y) \\sim \\mathcal {D}} [ h (y) ^ {\\top} \\theta (x) ] > \\frac {1}{k}. \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 400, + 900, + 825, + 929 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Recall that $h(y)$ is the one-hot $k$ -dimensional vector of the label $y$ . Roughly speaking, the inequality (3) implies that the expected contribution of a useful feature extractor to the model's correct prediction is higher than the average contribution over all the $k$ classes.", + "bbox": [ + 169, + 103, + 823, + 148 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Definition 3.2 (robust and non-robust feature extractor) We use $\\mathcal{A}(x)$ to denote the adversarial example of a data item $x$ as described in Definition 2.2. Let $\\theta$ be a useful feature extractor. (1) We say $\\theta$ is robust if the following condition holds for any $i$ ( $1 \\leq i \\leq k$ ):", + "bbox": [ + 169, + 157, + 825, + 200 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {E} _ {(x, y) \\sim \\mathcal {D} _ {i}} \\left[ \\theta (\\mathcal {A} (x)) \\right] _ {i} > \\frac {1}{k}\n$$\n", + "text_format": "latex", + "bbox": [ + 406, + 200, + 588, + 229 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathcal{D}_i$ represents the $i$ -th class data. We denote the set of these robust feature extractors as $\\Theta_{R}$ .", + "bbox": [ + 169, + 233, + 823, + 250 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "(2) The remaining useful feature extractors are non-robust. We assign these non-robust extractors to $k(k - 1)$ sets: $\\{\\Theta_{i,j} \\mid 1 \\leq i \\neq j \\leq k\\}$ as follows. Initially, all these $k(k - 1)$ sets are empty. Then we go through all the non-robust feature extractors. For each non-robust $\\theta$ , there must exist at least an index \"i\" such that", + "bbox": [ + 169, + 255, + 826, + 310 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {E} _ {(x, y) \\sim \\mathcal {D} _ {i}} [ \\theta (\\mathcal {A} (x)) ] _ {i} \\leq 1 / k;\n$$\n", + "text_format": "latex", + "bbox": [ + 383, + 311, + 578, + 329 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "we let $j = \\arg \\max_{s} \\mathbb{E}_{(x,y) \\sim \\mathcal{D}_i}[\\theta(\\mathcal{A}(x))]_s$ and assign $\\theta$ to $\\Theta_{i,j}$ (note that $j$ should be not equal to $i$ , or there are multiple indices achieving the maximum expectation and at least one is not equal to $i$ , since otherwise $\\sum_{s=1}^{k} \\mathbb{E}_{(x,y) \\sim \\mathcal{D}_i}[\\theta(\\mathcal{A}(x))]_s$ will less than 1). 
Eventually, these $k(k-1)$ sets are constructed, where each $\\Theta_{i,j}$ contains the feature extractors that are not robust to the attack from $i$ to $j$ .", + "bbox": [ + 169, + 334, + 823, + 407 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Remark 3.3 Intuitively, if a feature extractor is robust, it should have the capability to preserve its contribution to the correct prediction even under perturbation. It is also worth noting that a non-robust feature extractor $\\theta$ could be assigned to multiple $\\Theta_{i,j}s$ .", + "bbox": [ + 169, + 419, + 825, + 464 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Assume we have a standardly trained model $f$ consisting of a set of useful feature extractors, and we denote it as $\\Theta_f$ . Each of them can be classified as robust or non-robust as Definition 3.2. Similar with the formulation proposed in (Ilyas et al., 2019), we can represent the model as", + "bbox": [ + 169, + 474, + 825, + 517 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nf (x) = \\sum_ {\\theta \\in \\Theta_ {R} \\cap \\Theta_ {f}} w _ {\\theta} \\theta (x) + \\sum_ {i, j = 1, i \\neq j} ^ {k} \\sum_ {\\theta \\in \\Theta_ {i, j} \\cap \\Theta_ {f}} w _ {\\theta} \\theta (x), \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 302, + 523, + 825, + 566 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where each $\\theta$ has a coefficient $w_{\\theta} \\in \\mathbb{R}$ . We then conduct our analysis based on Equation (4). Some recent works reveal that adversarial training method can obtain robust model through reducing the dependence on non-robust feature extractors (Allen-Zhu & Li, 2022; Tsipras et al., 2018). However, this strategy may cause certain downgrade performance on clean accuracy (because the non-robust feature extractors also contribute to obtaining correct prediction). Fortunately, we are able to avoid this dilemma in the context of ensemble training. Namely, we just need to keep the non-robust features as diverse as possible, instead of entirely eliminating the dependence on those non-robust feature extractors. To pave the way for realizing this goal, we introduce the definition of vulnerability of ensemble model below.", + "bbox": [ + 169, + 571, + 826, + 699 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Definition 3.4 (Vulnerability of ensemble model) Suppose $f_{\\mathrm{E}}$ is an ensemble model as described in Definition 2.1, and its associated hard-classification model is denoted by $F_{\\mathrm{E}}$ : $\\forall x$ , $F_{\\mathrm{E}}(x) = \\arg \\max_{i}[f_{\\mathrm{E}}(x)]_{i}$ . Given the data distribution $\\mathcal{D} = \\mathcal{X} \\times \\mathcal{Y}$ , the vulnerability of $F_{\\mathrm{E}}$ is defined as:", + "bbox": [ + 169, + 709, + 825, + 753 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {V r} \\left(F _ {\\mathrm {E}}\\right) = \\mathbb {E} _ {(x, y) \\sim \\mathcal {D}} \\left[ \\mathbb {I} \\left\\{F _ {\\mathrm {E}} (x) = y \\wedge F _ {\\mathrm {E}} (\\mathcal {A} (x)) \\neq y \\right\\} \\right], \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 310, + 758, + 825, + 785 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathbb{I}(\\cdot)$ represents the indicator function. 
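Definition 3.4 has a direct empirical analogue on a finite test set. The sketch below (with `predict` and `attack` as assumed stand-ins for the hard classifier and the attack algorithm, not functions from the paper) estimates both the overall vulnerability and its targeted variant:

```python
import numpy as np

def vulnerability(predict, attack, X, y, y_t=None) -> float:
    """Empirical Vr(F_E): fraction of correctly classified points the attack flips.

    If y_t is given, estimates the targeted variant Vr(F_E, y_t) instead
    (assuming the true labels differ from y_t)."""
    clean_correct = predict(X) == y
    adv_pred = predict(attack(X, y))
    fooled = (adv_pred != y) if y_t is None else (adv_pred == y_t)
    return float(np.mean(clean_correct & fooled))
```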
Furthermore, for any target class $y_{t}$ , we can define the vulnerability towards $y_{t}$ as $\\mathrm{Vr}(F_{\\mathrm{E}}, y_{t}) = \\mathbb{E}_{(x,y) \\sim \\mathcal{D}}\\left[\\mathbb{I}\\{F_{\\mathrm{E}}(x) = y \\wedge F_{\\mathrm{E}}(\\mathcal{A}(x)) = y_{t}\\}\\right]$ .", + "bbox": [ + 171, + 789, + 823, + 829 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The vulnerability of Definition 3.4 describes the success probability of an attack $\\mathcal{A}$ to the ensemble model $F_{\\mathrm{E}}$ . We have the following key inequality, which indicates that $\\operatorname{Vr}(F_{\\mathrm{E}})$ is bounded by considering all the attack directions, i.e.,", + "bbox": [ + 169, + 839, + 825, + 882 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {V r} \\left(F _ {\\mathrm {E}}\\right) \\leq \\sum_ {y _ {t} \\in \\mathcal {Y}} \\operatorname {V r} \\left(F _ {\\mathrm {E}}, y _ {t}\\right). \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 403, + 887, + 825, + 921 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 491, + 948, + 504, + 959 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The proof of Inequality (6) is placed in Appendix A.1. Moreover, if $F_{\\mathrm{E}}(\\mathcal{A}(x)) = y_t$ , there are at least $M / k$ sub-models returning the wrong label $y_t$ due to the pigeonhole principle. Namely, \" $\\sum_{m=1}^{M} \\mathbb{I}\\left([f_m(\\mathcal{A}(x))]_y < [f_m(\\mathcal{A}(x))]_{y_t}\\right) > \\frac{M}{k}$ \" should be a necessary condition for successfully attacking from $y$ to $y_t$ . So it implies", + "bbox": [ + 168, + 103, + 826, + 164 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {V r} \\left(F _ {\\mathrm {E}}, y _ {t}\\right) \\leq \\mathbb {E} _ {(x, y) \\sim \\mathcal {D}} \\left[ \\mathbb {I} \\left(\\sum_ {m = 1} ^ {M} \\mathbb {I} \\left([ f _ {m} (\\mathcal {A} (x)) ] _ {y} < [ f _ {m} (\\mathcal {A} (x)) ] _ {y _ {t}}\\right) > \\frac {M}{k}\\right) \\right]. \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 246, + 170, + 825, + 210 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "From the upper bound (6), we can decrease the total vulnerability by reducing $\\mathrm{Vr}(F_{\\mathrm{E}},y_t)$ for each $y_{t}$ . Also, from (7) we know that $\\mathrm{Vr}(F_{\\mathrm{E}},y_{t})$ can be reduced by decreasing the chance of “ $[f_m(\\mathcal{A}(x))]_y < [f_m(\\mathcal{A}(x))]_{y_t}$ ” over $m\\in \\{1,2,\\dots ,M\\}$ . 
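Both quantities in (5) and Definition 3.4 are straightforward to estimate empirically; a minimal sketch (the helper names are ours, and we assume the hard ensemble predictions on clean and attacked inputs are available as integer arrays):

```python
import numpy as np

def vr_total(F_clean, F_adv, y):
    """Monte-Carlo estimate of Vr(F_E) in Equation (5)."""
    return np.mean((F_clean == y) & (F_adv != y))

def vr_towards(F_clean, F_adv, y, y_t):
    """Monte-Carlo estimate of Vr(F_E, y_t) in Definition 3.4."""
    return np.mean((F_clean == y) & (F_adv == y_t))

# Inequality (6) can then be sanity-checked on any prediction arrays:
# vr_total(Fc, Fa, y) <= sum(vr_towards(Fc, Fa, y, t) for t in range(k))
```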
According to the Equation (4), the inequality $\\left[f_m(\\mathcal{A}(x))\\right]_y < \\left[f_m(\\mathcal{A}(x))\\right]_{y_t}$ ” can be rewritten as", + "bbox": [ + 168, + 217, + 823, + 280 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\left[ \\sum_ {\\theta \\in \\Theta_ {R} ^ {m}} w _ {\\theta} \\theta (\\mathcal {A} (x)) + \\sum_ {i, j = 1, i \\neq j} ^ {k} \\sum_ {\\theta \\in \\Theta_ {i, j} ^ {m}} w _ {\\theta} \\theta (\\mathcal {A} (x)) \\right] _ {y} < \\left[ \\sum_ {\\theta \\in \\Theta_ {R} ^ {m}} w _ {\\theta} \\theta (\\mathcal {A} (x)) + \\sum_ {i, j = 1, i \\neq j} ^ {k} \\sum_ {\\theta \\in \\Theta_ {i, j} ^ {m}} w _ {\\theta} \\theta (\\mathcal {A} (x)) \\right] _ {y _ {t}}, \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 184, + 286, + 825, + 330 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\Theta_R^m$ and $\\Theta_{i,j}^{m}$ respectively denote the sets of robust and non-robust feature extractors for the sub-model $f_{m}$ . Moreover, the set $\\Theta_{y,y_t}^m$ should have relatively larger influence to the right-hand side of (8) than other feature extractor set $\\Theta_{y,j}^{m}$ with $j\\neq y_{t}$ , due to the outer operator “ $[\\cdot ]_{y_t}$ . Therefore, we conclude our first hint as an intuition for reducing $\\mathrm{Vr}(F_{\\mathrm{E}})$ .", + "bbox": [ + 169, + 337, + 826, + 398 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Hint (i): To decrease the vulnerability in the attack direction $y_{t}$ (i.e., each term $\\mathrm{Vr}(F_{\\mathrm{E}},y_{t})$ in the upper bound of (6)), it is reasonable to decrease the influence from the non-robust feature extractors of $\\Theta_{y,y_t}^m$ .", + "bbox": [ + 169, + 404, + 823, + 449 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In Hint (i), a major difference from the previous analysis (Ilyas et al., 2019; Allen-Zhu & Li, 2022) is that, we in particular relate each attack direction $y_{t}$ to some specific non-robust feature extractors, where the benefit is that these correspondences can effectively help us to build the diverse ensemble model. Moreover, According to the principle of ensemble methods, as long as at least $M / 2 + 1$ sub-models are not successfully attacked, the ensemble model will successfully defend against the attack. 
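As a toy numerical illustration of this majority argument (the class scores below are fabricated, and the averaging follows the ensemble definition), an averaged ensemble of M = 5 sub-models on k = 3 classes withstands an attack that flips only two sub-models:

```python
import numpy as np

def ensemble_predict(scores):
    """Average sub-model scores (shape [M, k]) and take the arg-max class."""
    return int(np.argmax(scores.mean(axis=0)))

clean = np.tile([2.0, 0.1, 0.1], (5, 1))   # all 5 sub-models favor y = 0
attacked = clean.copy()
attacked[:2] = [0.1, 2.0, 0.1]             # the attack flips 2 of 5 sub-models to y_t = 1
assert ensemble_predict(clean) == 0
assert ensemble_predict(attacked) == 0     # a majority survives, so the ensemble does too
```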
So we conclude the second hint that is also important for designing our approach.", + "bbox": [ + 169, + 453, + 825, + 537 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Hint (ii): For each attack direction $y_{t}$ , we only need to consider manipulating the training data of $M / 2 + 1$ sub-models instead of all the $M$ sub-models.", + "bbox": [ + 169, + 542, + 826, + 571 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Overall, the above Hint (i) & (ii) play the key roles for inspiring our data transformation method in Section 4.", + "bbox": [ + 169, + 579, + 823, + 606 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4 OUR ENSEMBLE TRAINING METHOD", + "text_level": 1, + "bbox": [ + 169, + 618, + 514, + 633 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We first introduce our model and high-level idea in Section 4.1, and then elaborate on the technical details for the data transformations in Section 4.2.", + "bbox": [ + 169, + 640, + 823, + 669 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1 OVERVIEW OF OUR FRAMEWORK", + "text_level": 1, + "bbox": [ + 169, + 686, + 447, + 699 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Note that the feature extractors of a model depend on the given training data. Namely, any modification on the features of the training data can implicitly influence the model. Thus, in this section we follow the Hint (i) & (ii) of Section 3 to design an effective data transformation method. The transformation is expected to modify the features of the training data, so as to enhance the robustness of the trained ensemble model. We train a set of distinct sub-models on the transformed training data; these sub-models can be integrated into an ensemble model being robust against adversarial attacks, while preserving the clean accuracy of each sub-model as much as possible. We use “ $\\pi_{m}$ ” to denote the transformation for the $m$ -th sub-model, $1 \\leq m \\leq M$ , and formulate the following problem by slightly modifying Definition 2.1 (replace $x$ by the adversarial example $\\mathcal{A}(x)$ for each sub-model):", + "bbox": [ + 169, + 710, + 823, + 838 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\min \\mathbb {E} _ {(x, y) \\sim \\mathcal {X} \\times \\mathcal {Y}} \\ell \\left(\\frac {1}{M} \\sum_ {m \\in [ M ]} \\widehat {F} _ {m} (\\mathcal {A} (x); \\beta_ {m}), y\\right) \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 333, + 843, + 825, + 882 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\beta_{m}$ is obtained by training on the transformed data, i.e., $\\beta_{m} = \\operatorname*{argmin}_{\\beta}\\mathbb{E}_{(x,y)\\sim \\mathcal{X}\\times \\mathcal{Y}}\\ell (f_{m}(\\pi_{m}(x);\\beta),y)$ for each $m\\in \\{1,2,\\dots ,M\\}$ .", + "bbox": [ + 169, + 887, + 823, + 926 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The major challenge for solving the above problem (9) is how to design a set of appropriate transformations $\\{\\pi_m\\mid 1\\leq m\\leq M\\}$ , so that the obtained parameters $\\beta_{[1:M]}$ can yield sufficiently diverse sub-models. 
To address this issue, we leverage the transformation from frequency domain to guide the non-robust features of each sub-model to be as diverse as possible. Specifically, we introduce a method called \"Frequency Domain Transformation (FDT)\" for constructing the set of diverse training datasets $\\{\\pi_1(\\mathcal{X}),\\pi_2(\\mathcal{X}),\\dots ,\\pi_M(\\mathcal{X})\\}$ . FDT relies on a key \"weakness allocation\" strategy. Roughly speaking, the strategy aims to promote the diversity of the constructed datasets, and meanwhile preserve that the overall clean accuracy should not be sacrificed in the ensemble. The details are presented in the next section.", + "bbox": [ + 169, + 103, + 826, + 229 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2 FREQUENCY DOMAIN TRANSFORMATION", + "text_level": 1, + "bbox": [ + 171, + 244, + 500, + 258 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Before performing the transformation, we need to select a set of non-robust features. In time domain, a simple observation is that an image feature is usually invariant under spatial translation, e.g., it can appear at different positions in images. This property causes the challenge for directly identifying and representing non-robust features in time domain. Thus we turn our attention to the frequency domain. Moreover, some previous studies on robust learning already revealed that robust and non-robust features are often deeply related to frequency domain (Wang et al., 2020; Bernhard et al., 2021; Maiya et al., 2021).", + "bbox": [ + 169, + 263, + 826, + 362 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Amplitude based selection. To identify the non-robust frequencies, a straightforward idea is to test the robustness of each individual frequency and select the non-robust ones. Nevertheless, it may take a large computational cost since the number of frequencies is high (e.g., if the input image is $64 \\times 64$ , the number of frequencies is also $64 \\times 64 \\approx 4 \\times 10^{3}$ ). We propose an easy-to-implement selection idea based on the amplitudes, since the amplitudes can be directly obtained via the Fourier transformation with low complexity. According to the previous research (Ilyas et al., 2019; Benz et al., 2021; Springer et al., 2021), a feature can be regarded as \"robust\" if it cannot be easily manipulated by small perturbations. We observe that high-amplitude frequency features usually dominate the ground truth of an image. Figure 4 in our Appendix illustrates an example to show that, if we keep high-amplitude frequencies and remove low-amplitude ones, the image is changed slightly even with adding certain noise (i.e., we can still recognize the ground truth from the modified image). This observation suggests that high-amplitude frequency features are more strongly related to the semantic information of image. So in our following approach, we maintain high-amplitude frequency features as \"robust features\", and select the frequencies with low amplitudes (by setting a threshold \" $\\tau$ \" to transform. Moreover, we can conveniently observe the performance changing through varying the threshold $\\tau$ in our experiment. 
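A minimal sketch of this amplitude-based split (we read the threshold τ as a quantile of the amplitude spectrum, which is one concrete instantiation; the input is assumed to be a 2-D grayscale image):

```python
import numpy as np

def amplitude_masks(x, tau):
    """Split the frequency grid of image x into high- and low-amplitude sets.

    tau in (0, 1): the tau lowest-amplitude frequencies are treated as
    non-robust and selected for transformation; the rest are kept.
    """
    amp = np.abs(np.fft.fft2(x))
    low = amp < np.quantile(amp, tau)   # non-robust: to be replaced
    return ~low, low                    # (robust/high mask, non-robust/low mask)

# Example: select the 20% lowest-amplitude frequencies of a random 32x32 image.
high, low = amplitude_masks(np.random.rand(32, 32), tau=0.2)
```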
Figure1 illustrates the amplitude-based selection.", + "bbox": [ + 169, + 368, + 826, + 592 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/0d7e14b81dd688209bf8bee950ed47a465fdf9705d30a2b85cd269279036a2fb.jpg", + "image_caption": [ + "Figure 1: We use a $5 \\times 5$ image as a toy example, where the intensity of the color indicates the magnitude of the amplitude. In our amplitude-based selection, we retain the high-amplitude frequencies (i.e., the darker regions) and perform data transformations on the low-amplitude frequencies (i.e., the white regions)." + ], + "image_footnote": [], + "bbox": [ + 173, + 612, + 496, + 694 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Following our frequency selection, we propose two transformation methods for promoting the diversity by using the identified non-robust features. Our first approach is from a straightforward idea, which is just to replace the non-robust features by random noise (due to the space limit, we leave the details to Appendix C). This method is very easy to implement in practice. Though it can achieve certain degree of improvement upon previous ensemble training methods, the performance is still not very promising (as shown in our experiments). To further improve the effectiveness, we propose a more sophisticated approach called \"targetedattack transformation\", which constructs a set of different \"substitute\" features through attack", + "bbox": [ + 509, + 597, + 826, + 819 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "ing the images to different targeted classes, and then use them to replace the selected non-robust frequencies.", + "bbox": [ + 169, + 818, + 825, + 848 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Targeted-attack transformation: We briefly explain our intuition first. It was shown that adversarial attacks have the capability to manipulate non-robust features (Ilyas et al., 2019; Yang et al., 2020). In particular, a targeted attack as introduced in Definition 2.2, aims at modifying non-robust features that are associated with a specific target label. For instance, let us consider a data point $(x,y)$ in the original dataset $\\mathcal{X} \\times \\mathcal{Y}$ ; we set the target label as $y_{t}$ and obtain the corresponding adversarial example", + "bbox": [ + 169, + 854, + 825, + 925 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "$x^{\\prime}$ ( $x^{\\prime}$ contains the modified non-robust features that are associated with $y_{t}$ ). When training a model using $(x^{\\prime}, y)$ , intuitively it can be viewed as an \"immunization\" for defending the attack from $y$ to $y_{t}$ ; and consequently, the chance that obtaining the wrong label $y_{t}$ for the data with label $y$ decreases. In other words, it becomes more difficult to attack the images with label $y$ to $y_{t}$ than to the other classes. 
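In frequency terms, the resulting transformation splices the adversarial example's low-amplitude spectrum into the clean image; a minimal sketch (same quantile reading of τ as above; `x_adv` stands for the targeted adversarial example, e.g. from targeted PGD):

```python
import numpy as np

def substitute_low_amplitude(x, x_adv, tau):
    """Keep the high-amplitude frequencies of x and splice in the
    low-amplitude frequencies of x_adv (the two masks play the roles
    of M1 and M2 described in the appendix)."""
    fx, fa = np.fft.fft2(x), np.fft.fft2(x_adv)
    low = np.abs(fx) < np.quantile(np.abs(fx), tau)  # selected non-robust set
    return np.real(np.fft.ifft2(np.where(low, fa, fx)))
```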
We call the modified non-robust feature as a \"substitute\" feature derived by the targeted attack.", + "bbox": [ + 169, + 103, + 826, + 174 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Motivated by this observation, we can construct different transformations by using $k \\times (k - 1)$ targeted attacks (since each label can be attacked to be the other $k - 1$ labels); these attacks can yield different substitute features, and then we use these features to replace the corresponding non-robust features in the original dataset (based on Hint (i) in Section 3); finally, the $M$ transformed datasets are obtained via an allocation algorithm, where each substitute feature is captured by at least $M / 2 + 1$ datasets (based on Hint (ii) in Section 3). Overall, due to the completeness of the $k \\times (k - 1)$ targeted attacks, the $M$ sub-models trained on those datasets can guarantee the robustness of the final ensemble solution. We introduce some definitions for our transformation first.", + "bbox": [ + 169, + 180, + 826, + 292 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Definition 4.1 (Strengthen a dataset) Let $y_{1} \\neq y_{2} \\in \\mathcal{Y}$ . If a given training dataset $P$ contains at least one adversarial example who has the original label $y_{1}$ but is misclassified as $y_{2}$ , we say that $P$ has been strengthened by the attack direction from $y_{1}$ to $y_{2}$ (\" $\\overrightarrow{y_{1}y_{2}}$ -direction\" for short).", + "bbox": [ + 169, + 304, + 825, + 348 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In other words, if $P$ is not strengthened in $\\overrightarrow{y_1y_2}$ -direction, the model trained on $P$ is more likely to be fragile to the targeted attacks from $y_1$ to $y_2$ . Also, the dataset $P$ may have not been strengthened in multiple different directions. So we define its \"weakness set\" $\\mathcal{W} = \\{\\overrightarrow{y_1y_2} \\mid 1 \\leq y_1, y_2 \\leq k, y_1 \\neq y_2, \\text{ and } P \\text{ has not been strengthened in } \\overrightarrow{y_1y_2} \\text{-direction}\\}$ .", + "bbox": [ + 169, + 359, + 823, + 417 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Definition 4.2 (Diversity of weakness sets) Given $M$ datasets $\\{P_1, P_2, \\dots, P_M\\}$ with their corresponding weakness sets $\\{\\mathcal{W}_1, \\mathcal{W}_2, \\dots, \\mathcal{W}_M\\}$ , we define their diversity:", + "bbox": [ + 169, + 429, + 826, + 458 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {D i v} (P _ {1}, P _ {2}, \\dots , P _ {M}) = 1 - \\frac {| \\mathcal {W} _ {1} \\cap \\mathcal {W} _ {2} \\cap \\cdots \\cap \\mathcal {W} _ {M} |}{\\max \\{| \\mathcal {W} _ {1} | , | \\mathcal {W} _ {2} | , \\cdots , | \\mathcal {W} _ {M} | \\}}.\n$$\n", + "text_format": "latex", + "bbox": [ + 300, + 464, + 696, + 500 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "It is easy to see that the higher the value $\\mathbf{Div}(P_1, P_2, \\dots, P_M)$ , the more diverse the corresponding weakness sets. A higher diversity suggests that the vulnerabilities of the $M$ sub-models trained on those datasets are more likely to be different. To achieve a nice performance in terms of both accuracy and robustness, we need to take account of the diversity function \"Div\" for designing the transformations. 
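For concreteness, Div can be computed directly from the weakness sets; a minimal sketch (directions encoded as ordered pairs, and the weakness sets assumed non-empty):

```python
def diversity(weakness_sets):
    """Div of Definition 4.2 over a list of sets of directions (y1, y2)."""
    common = set.intersection(*weakness_sets)
    return 1.0 - len(common) / max(len(w) for w in weakness_sets)

# Toy example, k = 3: the two weakness sets share one direction, so Div = 0.5.
print(diversity([{(1, 2), (2, 3)}, {(2, 3), (3, 1)}]))  # 0.5
```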
The basic principle is:", + "bbox": [ + 169, + 512, + 823, + 583 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "On the one hand, our transformed datasets should have a sufficiently large number of diverse substitute features, so that one adversarial attack cannot easily capture more than half of the $M$ sub-models. On the other hand, the datasets should also maintain the major information of the original input as much as possible, since otherwise the clean accuracy may decline due to the added substitute features.", + "bbox": [ + 169, + 589, + 823, + 660 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "To provide an appropriate trade-off, we propose the following constrained optimization objective: let $\\mathbb{C}$ be the set of all the $\\binom{M}{\\lceil M/2 \\rceil}$ combinations of $\\lceil M/2 \\rceil$ -size subsets from $\\{1, 2, \\dots, M\\}$ , and then", + "bbox": [ + 169, + 665, + 823, + 698 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\max _ {P _ {1}, P _ {2}, \\dots , P _ {M}} \\quad \\min \\left\\{\\left| \\mathcal {W} _ {1} \\right|, \\left| \\mathcal {W} _ {2} \\right|, \\dots , \\left| \\mathcal {W} _ {M} \\right| \\right\\} \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 397, + 698, + 823, + 719 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {s . t .} \\forall \\left\\{i _ {1}, i _ {2}, \\dots , i _ {\\lceil M / 2 \\rceil} \\right\\} \\in \\mathbb {C}, \\quad \\boldsymbol {D i v} \\left(P _ {i _ {1}}, P _ {i _ {2}}, \\dots , P _ {i _ {\\lceil M / 2 \\rceil}}\\right) = 1. \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 269, + 723, + 823, + 741 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We maximize the objective function of (10) because we want to minimize the modification degree for each transformed dataset. Intuitively, a large weakness set indicates that the corresponding dataset is not changed significantly by the transformation, and thus the clean accuracy is likely to be well preserved. The constraint (11) guarantees that any $\\lceil M/2\\rceil$ datasets have the intersection $\\mathcal{W}_{i_1} \\cap \\mathcal{W}_{i_2} \\cap \\dots \\cap \\mathcal{W}_{i_{\\lceil M/2 \\rceil}} = \\emptyset$ , that is, they do not share any common direction. Consequently, the ensemble solution should be robust to any attack direction. To achieve this twofold goal, we design an efficient allocation strategy together with an attack-guided transformation on the training data. Specifically, the procedure consists of the following two stages.", + "bbox": [ + 169, + 744, + 826, + 857 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Stage (1): allocating the weakness sets to the sub-models. For each $\\overrightarrow{y_1y_2}$ -direction, $1 \\leq y_1, y_2 \\leq k$ , there are at most $\\lceil \\frac{M}{2} \\rceil - 1$ sets that contain this direction (due to the constraint (11)), so the sum $\\sum_{1 \\leq i \\leq M} |\\mathcal{W}_i|$ is no larger than $k(k - 1) * (\\lceil \\frac{M}{2} \\rceil - 1)$ . 
Therefore, the maximum value of Eq (10) is no larger than", + "bbox": [ + 169, + 863, + 825, + 925 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\nk (k - 1) * \\left(\\left\\lceil \\frac {M}{2} \\right\\rceil - 1\\right) / M \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 406, + 99, + 825, + 128 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "based on the pigeonhole principle. We assign the total $k(k - 1)*\\left(\\lceil \\frac{M}{2}\\rceil -1\\right)$ directions (each direction is duplicated to be $\\lceil \\frac{M}{2}\\rceil -1$ copies) to $M$ sets in a round-robin way, where the number of directions assigned to each set is no larger than the upper bound (12). Please refer to Figure 2 for an example.", + "bbox": [ + 169, + 131, + 826, + 176 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/81da266492f525e16623c8d9c4838cae2ed0700f7989ac6d5225781028064ca7.jpg", + "image_caption": [ + "Figure 2: Assign the attack directions to five sub-models for a three-class classification task." + ], + "image_footnote": [], + "bbox": [ + 173, + 188, + 823, + 354 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Stage (2): constructing the new datasets. Following the allocation, we transform the original dataset, denoted by $P_{\\mathrm{ori}}$ , to align with the assigned weakness sets for the $M$ sub-models correspondingly. Using the same notations of Definition 4.2, we denote the waiting-to-construct dataset for the $m$ -th sub-model as $P_{m}$ (which is initialized to be $\\emptyset$ ), $1 \\leq m \\leq M$ . First, we divide $P_{\\mathrm{ori}}$ into $k$ subsets $C_1, C_2, \\dots, C_k$ , where each $C_j$ corresponds to the label $j$ , for $1 \\leq j \\leq k$ ; further, each $C_j$ is equally partitioned to $k - 1$ disjoint parts $\\{C_{j,1}, C_{j,2}, \\dots, C_{j,k - 1}\\}$ at random. For each data $(x,j)$ in $C_{j,i}$ , we attack it from $j$ to $h$ (let $h = i + j \\mod k$ ) to obtain the adversarial perturbation; then we only substitute the low-amplitude frequencies of $x$ with the perturbation, and other frequencies (which have their amplitudes higher than the aforementioned threshold $\\tau$ ) remain unchanged. We denote the new dataset as $C_{j,i}'$ . Finally, we add $C_{j,i}'$ to $P_{m}$ if the $i \\overrightarrow{h}$ -direction is not in the weakness set $\\mathcal{W}_m$ . From the construction method of the weakness sets, we know that the $i \\overrightarrow{h}$ -direction can appear in at most $\\lceil \\frac{M}{2} \\rceil - 1$ weakness sets. So, the set $C_{i,j}'$ can be added to at least $\\lceil \\frac{M}{2} \\rceil$ different $P_m s$ . Consequently, the completeness for defending the $k(k - 1)$ attack directions can be guaranteed, i.e., the constraint (11) is satisfied. Figure 3 shows the schematic diagram of the construction process, and the full details are shown in Appendix D.", + "bbox": [ + 169, + 412, + 826, + 632 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/59d02788a1cae134e8012c9728787df7de52ced148d3576dd7f45ebf2927d6cd.jpg", + "image_caption": [ + "Figure 3: A schematic diagram of the construction process. 
In the allocation stage, each $C_{j,i}^{\\prime}$ is added to $P_{m}$ if the $i \\vec{h}$ -direction is not in the weakness set $\\mathcal{W}_{m}$ , $h = i + j \\mod k$ ." + ], + "image_footnote": [], + "bbox": [ + 235, + 642, + 761, + 830 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Remark 4.3 We are aware of some previous robust learning approaches that also depend on data modification (Allen-Zhu & Li, 2022; Tsipras et al., 2018). But their approaches usually tend to", + "bbox": [ + 169, + 895, + 825, + 925 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "completely eliminate non-robust features. Our method is quite different, where the goal is to leverage the carefully selected non-robust features to weaken the transferability among sub-models. For each sub-model, we only modify the non-robust features corresponding to certain directions, rather than all non-robust features, and therefore the modification yields relatively lower impact on clean accuracy. Moreover, we partition each class $C_j$ into $k - 1$ subsets $\\{C_{j,1}, C_{j,2}, \\dots, C_{j,k - 1}\\}$ , with each subset being attacked to a specified class. This step eliminates the need to attack each data point across all classes, thereby reducing the computational complexity of constructing the new datasets.", + "bbox": [ + 169, + 103, + 826, + 203 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5 EXPERIMENTS", + "text_level": 1, + "bbox": [ + 171, + 220, + 328, + 237 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We conduct our experiments on the widely used image datasets CIFAR-10, CIFAR-100 (Krizhevsky & Hinton, 2009), and Tiny-ImageNet-200 (Deng et al., 2009). As for the baselines, we reproduce the existing ensemble models including ADP (Pang et al., 2019), GAL (Kariyappa & Qureshi, 2019), DVERGE (Yang et al., 2020), and TRS (Yang et al., 2021), with their released codes and recommended hyperparameter settings. As for our approach, \"FDT-random\" and \"FDT-target\" respectively denote the methods utilizing random noise based transformation and target-attack transformation; \"FDT-hybrid\" represents the method that combines both, that is, we set two frequency selection thresholds $\\tau_{1}$ and $\\tau_{2}$ ( $\\tau_{1} < \\tau_{2}$ ), and perform random and target-attack transformations on the frequencies less than $\\tau_{1}$ and the frequencies between $\\tau_{1}$ and $\\tau_{2}$ , respectively (due to the space limit, more details are shown in Appendix E). Our code will be available at https://github.com/ideven123/FDT.", + "bbox": [ + 169, + 242, + 826, + 383 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We train each sub-model based on ResNet-20 (He et al., 2016) and use Adam optimizer (Kingma & Ba, 2015) with an initial learning rate of 0.001 for 200 epochs. To further test their performance on neural network with larger scale, we also use WideResNet28-10 (Zagoruyko & Komodakis, 2016) to train the sub-models and the results are placed in our supplement. All the experiments are implemented with PyTorch (Paszke et al., 2017) on a single NVIDIA GeForce RTX 3090 with 24GB of memory and 1TB of storage. We assess the performance of our models through 5 repeated runs and compute error bars. 
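Concretely, the error bars are derived as follows (a minimal sketch; using the sample standard deviation, i.e. ddof=1, is our reading):

```python
import numpy as np

def mean_and_sem(runs):
    """Mean and standard error of the mean over repeated runs."""
    runs = np.asarray(runs, dtype=float)
    return runs.mean(), runs.std(ddof=1) / np.sqrt(len(runs))

# Example with five hypothetical accuracy values from repeated runs.
mean, sem = mean_and_sem([90.1, 90.3, 90.2, 90.25, 90.15])
print(f"{mean:.2f} ± {sem:.2f}")
```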
Utilizing the numpy library, we calculate the standard deviation and subsequently derive the standard error of the mean (SEM).", + "bbox": [ + 169, + 388, + 826, + 501 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Varying the number of sub-models. We take the ResNet-20 model trained on CIFAR-10 as an example and test the performance of FDT with different numbers of sub-models in the ensemble. In this experiment, we set the frequency selection threshold $\\tau_{1}$ to be 0.2 and $\\tau_{2}$ to be 0.8. Then we evaluate the performance of FDT-hybrid under FGSM (Madry et al., 2018), PGD (Carlini et al., 2019), and AutoAttack (AA) (Croce & Hein, 2020) attack methods with $l_{\\infty}$ perturbations of size $\\epsilon = 0.02$ . The results in Table 1 indicate that our clean accuracy has relatively smaller change as the number increases, while the robust accuracy can be substantially improved from 3 to 20 sub-models.", + "bbox": [ + 169, + 507, + 826, + 606 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/719405a5d2d8cbb9f19328bc5857d634c80b85130360abd317e4feadec2be521.jpg", + "table_caption": [ + "Table 1: Performance of FDT-hybrid with different sub-model numbers on CIFAR-10." + ], + "table_footnote": [], + "table_body": "
Sub-model numbers | 3 | 5 | 8 | 12 | 20
Clean accuracy | 90.20 ± 0.03 | 90.75 ± 0.03 | 91.35 ± 0.05 | 91.51 ± 0.06 | 91.86 ± 0.07
FGSM (ε=0.02) | 58.04 ± 0.13 | 61.66 ± 0.15 | 62.41 ± 0.11 | 63.96 ± 0.12 | 64.27 ± 0.14
PGD (ε=0.02) | 20.01 ± 0.04 | 26.10 ± 0.07 | 29.20 ± 0.05 | 29.78 ± 0.08 | 29.71 ± 0.07
AutoAttack (ε=0.02) | 19.42 ± 0.04 | 25.37 ± 0.05 | 27.33 ± 0.04 | 28.12 ± 0.07 | 28.92 ± 0.07
", + "bbox": [ + 176, + 633, + 818, + 712 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Results for white-box attack. To maintain consistency with the baseline ensemble methods from the literature, we ensemble three ResNet-20 sub-models here and evaluate the robust accuracy using $\\epsilon = 0.01$ and $\\epsilon = 0.02$ . In this experiment, we set the frequency selection threshold $\\tau_{1}$ to be 0.2 and $\\tau_{2}$ to be 0.8. In the white-box attack setting, the attacker has full knowledge of the models, including model parameters, architecture, and ensemble training strategy. To evaluate the adversarial robustness of the ensemble, we conduct the following white-box attacks: PGD, FGSM, BIM (Goodfellow et al., 2015), MIM (Dong et al., 2018), C&W (Carlini & Wagner, 2017) and AutoAttack (AA). The attacks are implemented using AdverTorch (Ding et al., 2019). We take the robust and clean accuracies, and average training time per epoch as the evaluation metrics.", + "bbox": [ + 169, + 720, + 826, + 848 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Table 2 presents the obtained robust accuracies of the baseline ensemble methods on CIFAR-10 and CIFAR-100. In addition, we show the average training time per epoch of different ensemble methods. The experimental results suggest that our FDT-random method can achieve higher adversarial robustness over other baselines on both CIFAR-10 and CIFAR-100, with the training time only higher than ADP (and much lower than other baselines). Furthermore, the FDT-hybrid ensemble method", + "bbox": [ + 169, + 854, + 826, + 925 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/55e1a9fcdd1e91daee42c95bed2f183be402b94549e910b3159a1ea61cc33fe0.jpg", + "table_caption": [ + "Table 2: Robust and Clean Accuracy (\\%) and average training time of different ensemble methods against white-box attacks on CIFAR-10 and CIFAR-100. “ $\\epsilon$ ” and “ $\\lambda$ ” stand for the $l_{\\infty}$ norm of the adversarial perturbation and the coefficient of C&W attack respectively. The TRS results are reported in the original paper Yang et al. (2021), with “-” indicating results not provided." + ], + "table_footnote": [], + "table_body": "
CIFAR-10 | ADP | GAL | DVERGE | TRS | FDT-random | FDT-target | FDT-hybrid
Clean accuracy | 91.84 | 91.81 | 91.37 | - | 89.88 ± 0.02 | 90.16 ± 0.04 | 90.20 ± 0.03
FGSM (ε=0.01) | 59.48 | 44.97 | 70.05 | - | 66.96 ± 0.12 | 72.88 ± 0.12 | 72.24 ± 0.12
FGSM (ε=0.02) | 53.38 | 30.58 | 56.33 | 44.2 | 46.28 ± 0.10 | 55.54 ± 0.09 | 58.04 ± 0.13
PGD (ε=0.01) | 14.45 | 1.35 | 40.55 | 50.5 | 45.42 ± 0.09 | 46.58 ± 0.07 | 48.48 ± 0.09
PGD (ε=0.02) | 2.95 | 0.34 | 11.49 | 15.1 | 12.24 ± 0.03 | 15.08 ± 0.05 | 20.01 ± 0.04
BIM (ε=0.01) | 14.15 | 1.37 | 40.51 | 50.6 | 45.24 ± 0.03 | 46.86 ± 0.04 | 48.57 ± 0.05
BIM (ε=0.02) | 3.01 | 0.27 | 10.65 | 15.8 | 11.68 ± 0.03 | 14.86 ± 0.03 | 16.63 ± 0.02
MIM (ε=0.01) | 20.38 | 2.05 | 44.74 | 51.5 | 47.73 ± 0.05 | 49.97 ± 0.06 | 51.50 ± 0.07
MIM (ε=0.02) | 5.11 | 0.69 | 14.76 | 17.2 | 15.14 ± 0.04 | 18.27 ± 0.02 | 20.09 ± 0.03
AA (ε=0.01) | 1.80 | 0.00 | 43.34 | - | 46.09 ± 0.09 | 48.83 ± 0.08 | 51.56 ± 0.08
AA (ε=0.02) | 0.00 | 0.00 | 13.72 | - | 9.38 ± 0.05 | 15.70 ± 0.05 | 19.42 ± 0.04
C&W (λ=0.1) | 20.96 | 31.57 | 52.35 | 58.1 | 45.01 ± 0.10 | 55.48 ± 0.10 | 56.08 ± 0.11
CIFAR-100 | ADP | GAL | DVERGE | TRS | FDT-random | FDT-target | FDT-hybrid
Clean accuracy | 67.04 | 67.70 | 66.16 | - | 66.29 ± 0.11 | 67.64 ± 0.08 | 66.70 ± 0.09
FGSM (ε=0.01) | 17.82 | 16.89 | 33.94 | - | 35.42 ± 0.12 | 40.46 ± 0.14 | 39.85 ± 0.14
FGSM (ε=0.02) | 10.53 | 7.80 | 26.61 | 19.3 | 22.40 ± 0.05 | 32.30 ± 0.06 | 30.27 ± 0.08
PGD (ε=0.01) | 0.80 | 0.11 | 14.62 | 23.0 | 21.54 ± 0.06 | 22.19 ± 0.05 | 24.93 ± 0.05
PGD (ε=0.02) | 0.01 | 0.02 | 4.25 | 5.3 | 4.84 ± 0.03 | 7.27 ± 0.02 | 8.63 ± 0.03
BIM (ε=0.01) | 0.68 | 0.23 | 14.85 | 22.9 | 21.10 ± 0.09 | 22.39 ± 0.07 | 24.35 ± 0.06
BIM (ε=0.02) | 0.02 | 0.00 | 4.07 | 5.4 | 4.80 ± 0.04 | 6.80 ± 0.05 | 8.40 ± 0.05
MIM (ε=0.01) | 0.78 | 0.12 | 16.82 | 23.4 | 23.14 ± 0.06 | 24.68 ± 0.10 | 27.09 ± 0.09
MIM (ε=0.02) | 0.01 | 0.02 | 5.31 | 6.2 | 6.47 ± 0.03 | 8.87 ± 0.04 | 10.19 ± 0.05
AA (ε=0.01) | 0.01 | 0.00 | 11.23 | - | 16.02 ± 0.09 | 16.03 ± 0.09 | 16.41 ± 0.12
AA (ε=0.02) | 0.00 | 0.00 | 2.72 | - | 3.12 ± 0.04 | 4.54 ± 0.05 | 5.47 ± 0.07
C&W (λ=0.1) | 0.74 | 3.70 | 10.68 | 26.9 | 25.07 ± 0.10 | 29.43 ± 0.09 | 30.66 ± 0.13
", + "bbox": [ + 184, + 169, + 810, + 553 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/33367032556425bfd27a7ff1d5b581121cbba297437e9fd41abecae725ac9758.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Time (s) | ADP | GAL | DVERGE | TRS | FDT-random | FDT-target | FDT-hybrid
CIFAR-10 | 30.15 | 69.92 | 134.33 | 350.42 | 37.04 | 108.22 | 114.23
CIFAR-100 | 30.34 | 69.71 | 129.25 | 344.92 | 37.12 | 108.43 | 113.87
", + "bbox": [ + 205, + 556, + 785, + 607 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "achieves an even better robustness than FDT-random, though its running time is higher since it needs to perform the target-attack transformation.", + "bbox": [ + 169, + 633, + 823, + 662 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Summary on other experimental results placed in Appendix F. We also conduct the experiments to examine the performance of FDT under black-box attack, and assess the transferability of our method across various sub-models. The results indicate the competitive robustness of our method in defending against black-box attacks. Then, we evaluate the trade-off between clean accuracy and robust accuracy by varying the frequency selection threshold $\\tau$ . The result shows that the ensemble model has lower clean accuracy and higher robust accuracy with the increasing of $\\tau$ . Moreover, we included some ablation studies on datasets and model architectures. These experiments demonstrate that our method performs the best among ensemble-based baseline methods.", + "bbox": [ + 169, + 669, + 826, + 781 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "6 CONCLUSION AND FUTURE WORK", + "text_level": 1, + "bbox": [ + 171, + 799, + 495, + 814 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "In this paper, we present a novel data transformation approach to improve the robustness of ensemble models against adversarial attacks. By leveraging the frequency based features and strategically allocating adversarial examples, we demonstrate the effectiveness of our method in enhancing adversarial robustness while maintaining high accuracy on clean data. As for the future work, we can consider other types of transformation methods (e.g., beyond using frequency) to improve the ensemble robustness. Also, it is interesting to consider more complicated scenarios for ensemble training, such as federated learning with concerning the privacy issue.", + "bbox": [ + 169, + 825, + 823, + 925 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "ACKNOWLEDGMENTS", + "text_level": 1, + "bbox": [ + 171, + 104, + 328, + 118 + ], + "page_idx": 10 + }, + { + "type": "ref_text", + "text": "The authors would like to thank the reviewers for their constructive comments and suggestions. This work was partially supported by the National Natural Science Foundation of China (No. 62272432 and No. 62432016), the National Key Research and Development Program of China (No. 2021YFA1000900), and the Natural Science Foundation of Anhui Province (No. 2208085MF163).", + "bbox": [ + 171, + 128, + 828, + 186 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 173, + 205, + 287, + 220 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Zeyuan Allen-Zhu and Yanzhi Li. Feature purification: How adversarial training performs robust deep learning. In 2021 IEEE 62nd Annual Symposium on Foundations of Computer Science (FOCS), pp. 977-988. IEEE, 2022.", + "MaungMaung AprilPyone and Hitoshi Kiya. Block-wise image transformation with secret key for adversarially robust defense. 
IEEE Transactions on Information Forensics and Security, 16: 2709-2723, 2021.", + "Anish Athalye, Nicholas Carlini, and David Wagner. Obfuscated gradients give a false sense of security: Circumventing defenses to adversarial examples. In International conference on machine learning, pp. 274-283. PMLR, 2018.", + "Philipp Benz, Chaoning Zhang, and In So Kweon. Batch normalization increases adversarial vulnerability and decreases adversarial transferability: A non-robust feature perspective. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 7818-7827, 2021.", + "Rémi Bernhard, Pierre-Alain Moëllic, Martial Mermillod, Yannick Bourrier, Romain Cohendet, Miguel Solinas, and Marina Reyboz. Impact of spatial frequency based constraints on adversarial robustness. In 2021 International Joint Conference on Neural Networks (IJCNN), pp. 1-8. IEEE, 2021.", + "Nicholas Carlini and David Wagner. Towards evaluating the robustness of neural networks. In 2017, IEEE symposium on security and privacy (sp), pp. 39-57. IEEE, 2017.", + "Nicholas Carlini, Anish Athalye, Nicolas Papernot, Wieland Brendel, Jonas Rauber, Dimitris Tsipras, Ian Goodfellow, Aleksander Madry, and Alexey Kurakin. On evaluating adversarial robustness. arXiv preprint arXiv:1902.06705, 2019.", + "Yair Carmon, Aditi Raghunathan, Ludwig Schmidt, John C Duchi, and Percy S Liang. Unlabeled data improves adversarial robustness. Advances in Neural Information Processing Systems, 32, 2019.", + "Francesco Croce and Matthias Hein. Reliable evaluation of adversarial robustness with an ensemble of diverse parameter-free attacks. In International conference on machine learning, pp. 2206-2216. PMLR, 2020.", + "Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pp. 248-255. IEEE, 2009.", + "Gavin Weiguang Ding, Luyu Wang, and Xiaomeng Jin. Advertorch v0.1: An adversarial robustness toolbox based on pytorch. arXiv preprint arXiv:1902.07623, 2019.", + "Yinpeng Dong, Fangzhou Liao, Tianyu Pang, Hang Su, Jun Zhu, Xiaolin Hu, and Jianguo Li. Boosting adversarial attacks with momentum. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 9185-9193, 2018.", + "Xitong Gao, Cheng-Zhong Xu, et al. Mora: Improving ensemble robustness evaluation with model reweighing attack. Advances in Neural Information Processing Systems, 35:26955-26965, 2022.", + "Ian J Goodfellow, Jonathon Shlens, and Christian Szegedy. Explaining and harnessing adversarial examples. stat, 1050:20, 2015." + ], + "bbox": [ + 171, + 227, + 826, + 925 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 488, + 946, + 506, + 960 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Ian J. Goodfellow, Yoshua Bengio, and Aaron C. Courville. Deep Learning. Adaptive computation and machine learning. MIT Press, 2016. ISBN 978-0-262-03561-3.", + "Sven Gowal, Sylvestre-Alvise Rebuffi, Olivia Wiles, Florian Stimberg, Dan Andrei Calian, and Timothy A Mann. Improving robustness using generated data. Advances in Neural Information Processing Systems, 34:4218-4233, 2021.", + "Chuan Guo, Mayank Rana, Moustapha Cissé, and Laurens van der Maaten. 
Countering adversarial images using input transformations. In 6th International Conference on Learning Representations, ICLR 2018, Vancouver, BC, Canada, April 30 - May 3, 2018, Conference Track Proceedings. OpenReview.net, 2018.", + "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 770-778, 2016.", + "Douglas Heaven. Why deep-learning ais are so easy to fool. Nature, 574(7777):163-166, 2019.", + "Andrew Ilyas, Shibani Santurkar, Dimitris Tsipras, Logan Engstrom, Brandon Tran, and Aleksander Madry. Adversarial examples are not bugs, they are features. Advances in neural information processing systems, 32, 2019.", + "Sanjay Kariyappa and Moinuddin K Qureshi. Improving adversarial robustness of ensembles with diversity training. arXiv e-prints, pp. arXiv-1901, 2019.", + "Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In Yoshua Bengio and Yann LeCun (eds.), 3rd International Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings, 2015.", + "Alex Krizhevsky and Geoffrey Hinton. Learning multiple layers of features from tiny images. 2009.", + "Aleksander Madry, Aleksandar Makelov, Ludwig Schmidt, Dimitris Tsipras, and Adrian Vladu. Towards deep learning models resistant to adversarial attacks. In 6th International Conference on Learning Representations, ICLR 2018, Vancouver, BC, Canada, April 30 - May 3, 2018, Conference Track Proceedings. OpenReview.net, 2018.", + "Shishira R. Maiya, Max Ehrlich, Vatsal Agarwal, Ser-Nam Lim, Tom Goldstein, and Abhinav Shrivastava. A frequency perspective of adversarial robustness. CoRR, abs/2111.00861, 2021.", + "AKM Iqtidar Newaz, Nur Imtiazul Haque, Amit Kumar Sikder, Mohammad Ashiqur Rahman, and A Selcuk Uluagac. Adversarial attacks to machine learning-based smart healthcare systems. In GLOBECOM 2020-2020 IEEE Global Communications Conference, pp. 1-6. IEEE, 2020.", + "Tianyu Pang, Kun Xu, Chao Du, Ning Chen, and Jun Zhu. Improving adversarial robustness via promoting ensemble diversity. In International Conference on Machine Learning, pp. 4970-4979. PMLR, 2019.", + "Adam Paszke, Sam Gross, Soumith Chintala, Gregory Chanan, Edward Yang, Zachary DeVito, Zeming Lin, Alban Desmaison, Luca Antiga, and Adam Lerer. Automatic differentiation in pytorch. 2017.", + "Rahul Rade and Seyed-Mohsen Moosavi-Dezfooli. *Helper-based adversarial training: Reducing excessive margin to achieve a better accuracy vs. robustness trade-off*. In ICML 2021 Workshop on Adversarial Machine Learning, 2021.", + "Edward Raff, Jared Sylvester, Steven Forsyth, and Mark McLean. Barrage of random transforms for adversarially robust defense. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 6528-6537, 2019.", + "Giulio Rossolini, Federico Nesti, Gianluca D'Amico, Saasha Nair, Alessandro Biondi, and Giorgio Buttazzo. On the real-world adversarial robustness of real-time semantic segmentation models for autonomous driving. IEEE Transactions on Neural Networks and Learning Systems, pp. 1-15, 2023. doi: 10.1109/TNNLS.2023.3314512." 
+ ], + "bbox": [ + 171, + 102, + 825, + 922 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 488, + 946, + 508, + 960 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Andrei A. Rusu, Dan Andrei Calian, Sven Gowal, and Raia Hadsell. Hinding adversarial attacks with implicit neural representations. In Kamalika Chaudhuri, Stefanie Jegelka, Le Song, Csaba Szepesvári, Gang Niu, and Sivan Sabato (eds.), International Conference on Machine Learning, ICML 2022, 17-23 July 2022, Baltimore, Maryland, USA, volume 162 of Proceedings of Machine Learning Research, pp. 18910-18934. PMLR, 2022.", + "Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. In Yoshua Bengio and Yann LeCun (eds.), 3rd International Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings, 2015.", + "James C Spall. Multivariate stochastic approximation using a simultaneous perturbation gradient approximation. IEEE transactions on automatic control, 37(3):332-341, 1992.", + "Jacob Springer, Melanie Mitchell, and Garrett Kenyon. A little robustness goes a long way: Leveraging robust features for targeted transfer attacks. Advances in Neural Information Processing Systems, 34:9759-9773, 2021.", + "Christian Szegedy, Wojciech Zaremba, Ilya Sutskever, Joan Bruna, Dumitru Erhan, Ian Goodfellow, and Rob Fergus. Intriguing properties of neural networks. In 2nd International Conference on Learning Representations, ICLR 2014, 2014.", + "Florian Tramér, Alexey Kurakin, Nicolas Papernot, Ian Goodfellow, Dan Boneh, and Patrick McDaniel. Ensemble adversarial training: Attacks and defenses. In International Conference on Learning Representations, 2018.", + "Dimitris Tsipras, Shibani Santurkar, Logan Engstrom, Alexander Turner, and Aleksander Madry. Robustness may be at odds with accuracy. In International Conference on Learning Representations, 2018.", + "Zekai Wang, Tianyu Pang, Chao Du, Min Lin, Weiwei Liu, and Shuicheng Yan. Better diffusion models further improve adversarial training. In Andreas Krause, Emma Brunskill, Kyunghyun Cho, Barbara Engelhardt, Sivan Sabato, and Jonathan Scarlett (eds.), International Conference on Machine Learning, ICML 2023, 23-29 July 2023, Honolulu, Hawaii, USA, volume 202 of Proceedings of Machine Learning Research, pp. 36246-36263. PMLR, 2023a.", + "Zekai Wang, Tianyu Pang, Chao Du, Min Lin, Weiwei Liu, and Shuicheng Yan. Better diffusion models further improve adversarial training. In International Conference on Machine Learning, pp. 36246-36263. PMLR, 2023b.", + "Zifan Wang, Yilin Yang, Ankit Shrivastava, Varun Rawal, and Zihao Ding. Towards frequency-based explanation for robust cnn. arXiv preprint arXiv:2005.03141, 2020.", + "Futa Waseda, Sosuke Nishikawa, Trung-Nghia Le, Huy H Nguyen, and Isao Echizen. Closer look at the transferability of adversarial examples: How they fool different models differently. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pp. 1360-1368, 2023.", + "Sven-Ake Wegner. Lecture notes on high-dimensional data. arXiv preprint arXiv:2101.05841, 2021.", + "Yuancheng Xu, Yanchao Sun, Micah Goldblum, Tom Goldstein, and Furong Huang. Exploring and exploiting decision boundary dynamics for adversarial robustness. 
In The Eleventh International Conference on Learning Representations, ICLR 2023, Kigali, Rwanda, May 1-5, 2023, 2023.", + "Huanrui Yang, Jingyang Zhang, Hongliang Dong, Nathan Inkawich, Andrew Gardner, Andrew Touchet, Wesley Wilkes, Heath Berry, and Hai Li. Diverse: diversifying vulnerabilities for enhanced robust generation of ensembles. Advances in Neural Information Processing Systems, 33:5505-5515, 2020.", + "Ruijie Yang, Yuanfang Guo, Junfu Wang, Jiantao Zhou, and Yunhong Wang. Common knowledge learning for generating transferable adversarial examples. Frontiers Comput. Sci., 19(10):1910359, 2025. doi: 10.1007/S11704-024-40533-4." + ], + "bbox": [ + 171, + 102, + 826, + 922 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Zhuolin Yang, Linyi Li, Xiaojun Xu, Shiliang Zuo, Qian Chen, Pan Zhou, Benjamin Rubinstein, Ce Zhang, and Bo Li. Trs: Transferability reduced ensemble via promoting gradient diversity and model smoothness. Advances in Neural Information Processing Systems, 34:17642-17655, 2021.", + "Mehmet Kerim Yucel, Ramazan Gokberk Cinbis, and Pinar Duygulu. Hybridaugment++: Unified frequency spectra perturbations for model robustness. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 5718-5728, 2023.", + "Sergey Zagoruyko and Nikos Komodakis. Wide residual networks. CoRR, abs/1605.07146, 2016.", + "Hongyang Zhang, Yaodong Yu, Jiantao Jiao, Eric P. Xing, Laurent El Ghaoui, and Michael I. Jordan. Theoretically principled trade-off between robustness and accuracy. In Kamalika Chaudhuri and Ruslan Salakhutdinov (eds.), Proceedings of the 36th International Conference on Machine Learning, ICML 2019, 9-15 June 2019, Long Beach, California, USA, volume 97 of Proceedings of Machine Learning Research, pp. 7472-7482. PMLR, 2019.", + "Wen Zhou, Xin Hou, Yongjun Chen, Mengyun Tang, Xiangqi Huang, Xiang Gan, and Yong Yang. Transferable adversarial perturbations. In Proceedings of the European Conference on Computer Vision (ECCV), pp. 452-467, 2018.", + "Yi Zhu, Chenglin Miao, Tianhang Zheng, Foad Hajiaghajani, Lu Su, and Chunming Qiao. Can we use arbitrary objects to attack lidar perception in autonomous driving? In Proceedings of the 2021 ACM SIGSAC Conference on Computer and Communications Security, pp. 1945-1960, 2021." 
+ ], + "bbox": [ + 171, + 102, + 828, + 404 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 490, + 946, + 506, + 959 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A OMITTED PROOFS", + "text_level": 1, + "bbox": [ + 171, + 102, + 362, + 118 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "A.1 PROOF OF INEQUALITY (6):", + "bbox": [ + 171, + 137, + 413, + 152 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\sum_ {y _ {t} \\in \\mathcal {Y}} \\operatorname {V r} \\left(F _ {\\mathrm {E}}, y _ {t}\\right) \\\\ = \\sum_ {y _ {t} \\in \\mathcal {Y}} \\mathbb {E} _ {(x, y) \\sim \\mathcal {D}} \\left[ \\mathbb {I} \\left\\{F _ {\\mathrm {E}} (x) = y \\wedge F _ {\\mathrm {E}} (\\mathcal {A} (x)) = y _ {t} \\right\\} \\right] \\\\ = \\sum_ {y _ {t} \\in \\mathcal {Y}} \\sum_ {(x, y) \\in \\mathcal {D}} p _ {(x, y)} \\left[ \\mathbb {I} \\left\\{F _ {\\mathrm {E}} (x) = y \\wedge F _ {\\mathrm {E}} (\\mathcal {A} (x)) = y _ {t} \\right\\} \\right]. \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 174, + 165, + 588, + 231 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Then, we interchange the order of summation, and so the above equation is equal to", + "bbox": [ + 169, + 236, + 723, + 251 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\left. \\sum_ {(x, y) \\in \\mathcal {D}} p _ {(x, y)} \\sum_ {y _ {t} \\in \\mathcal {Y}} \\left[ \\mathbb {I} \\left\\{F _ {\\mathrm {E}} (x) = y \\wedge F _ {\\mathrm {E}} (\\mathcal {A} (x)) = y _ {t} \\right\\} \\right] \\right. \\\\ = \\mathbb {E} _ {(x, y) \\sim \\mathcal {D}} \\Big [ \\sum_ {y _ {t} \\in \\mathcal {Y}} \\mathbb {I} \\big \\{F _ {\\mathrm {E}} (x) = y \\wedge F _ {\\mathrm {E}} (\\mathcal {A} (x)) = y _ {t} \\big \\} \\Big ]. \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 174, + 258, + 568, + 314 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "For each $(x,y)$ , without loss of generality, let $F_{\\mathrm{E}}(\\mathcal{A}(x)) = y_0$ . For $y_t \\neq y_0$ , $\\mathbb{I}\\big\\{F_{\\mathrm{E}}(x) = y \\wedge F_{\\mathrm{E}}(\\mathcal{A}(x)) = y_t\\big\\} = 0$ . For $y_t = y_0$ , $\\mathbb{I}\\big\\{F_{\\mathrm{E}}(x) = y \\wedge F_{\\mathrm{E}}(\\mathcal{A}(x)) = y_t\\big\\} = \\mathbb{I}\\big\\{F_{\\mathrm{E}}(x) = y\\big\\}$ . So the above equation is equal to", + "bbox": [ + 169, + 320, + 823, + 367 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathbb {E} _ {(x, y) \\sim \\mathcal {D}} \\left[ \\mathbb {I} \\left\\{F _ {\\mathrm {E}} (x) = y \\right\\} \\right] \\\\ = \\mathbb {E} _ {(x, y) \\sim \\mathcal {D}} \\left[ \\mathbb {I} \\left\\{F _ {\\mathrm {E}} (x) = y \\wedge \\left(F _ {\\mathrm {E}} (\\mathcal {A} (x)) \\neq y \\vee F _ {\\mathrm {E}} (\\mathcal {A} (x)) = y\\right) \\right\\} \\right] \\\\ = \\mathbb {E} _ {(x, y) \\sim \\mathcal {D}} \\left[ \\mathbb {I} \\left\\{F _ {\\mathrm {E}} (x) = y \\wedge F _ {\\mathrm {E}} (\\mathcal {A} (x)) \\neq y \\right\\} + \\mathbb {I} \\left\\{F _ {\\mathrm {E}} (x) = y \\wedge F _ {\\mathrm {E}} (\\mathcal {A} (x)) = y \\right\\} \\right]. 
\\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 174, + 373, + 718, + 460 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "We split $\\mathbb{I}(.)$ because $F_{\\mathrm{E}}(\\mathcal{A}(x)) \\neq y$ and $F_{\\mathrm{E}}(\\mathcal{A}(x)) = y$ are mutually exclusive. Then, the above equation is equal to", + "bbox": [ + 169, + 465, + 823, + 494 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\operatorname {V r} \\left(F _ {\\mathrm {E}}\\right) + \\mathbb {E} _ {(x, y) \\sim \\mathcal {D}} \\left[ \\mathbb {I} \\left\\{F _ {\\mathrm {E}} (x) = y \\wedge F _ {\\mathrm {E}} (\\mathcal {A} (x)) = y \\right\\} \\right] \\\\ \\geq \\operatorname {V r} \\left(F _ {\\mathrm {E}}\\right) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 174, + 502, + 532, + 547 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Overall, we obtain the inequality (6): $\\sum_{y_t\\in \\mathcal{Y}}\\mathrm{Vr}(F_{\\mathrm{E}},y_t)\\geq \\mathrm{Vr}(F_{\\mathrm{E}})$", + "bbox": [ + 169, + 553, + 643, + 571 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "B FREQUENCY SELECTION", + "text_level": 1, + "bbox": [ + 171, + 595, + 413, + 611 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Figure 4 illustrates an example to show that, if we keep high-amplitude frequencies and remove low-amplitude ones, the image is changed slightly even with adding certain noise (i.e., we can still recognize the ground truth from the modified image). On the other hand, if we keep the low-amplitude frequencies only, the semantic information is almost missing. This observation suggests that high-amplitude frequency features are more strongly related to the semantic information of image.", + "bbox": [ + 169, + 625, + 826, + 709 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "C RANDOM NOISE BASED TRANSFORMATION", + "text_level": 1, + "bbox": [ + 171, + 736, + 565, + 750 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Random noise based transformation: This approach substitutes the identified non-robust frequencies with Gaussian noise. For an $N \\times N$ image, we take the non-robust frequencies based on the pre-specified threshold $\\tau$ , and replace them with random vector for each sub-model in our experiment. In particular, to further increase the randomness, we perform this transformation for each epoch in the training stage. If we select the top $s$ non-robust frequencies, the overall dimensionality of the edited random feature should be $s \\times E$ (we concatenate those $s$ -dimensional features together), where $E$ is the number of epochs. For example, if $N = 32$ , $s = N^2 / 2$ , and $E = 200$ , the overall dimensionality can be as large as $10^5$ . Because these $M$ features are random and have high dimensions, they are very likely to be nearly orthogonal with each other (this phenomenon in high-dimensional geometry can be proved by the central limit theorem (Wegner, 2021)). 
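This near-orthogonality is easy to verify numerically; a minimal sketch at the dimensionality quoted above:

```python
import numpy as np

rng = np.random.default_rng(0)
d, M = 100_000, 5                              # dimension ~ s * E, five sub-models
v = rng.standard_normal((M, d))
v /= np.linalg.norm(v, axis=1, keepdims=True)
cos = v @ v.T                                  # pairwise cosine similarities
print(np.abs(cos[~np.eye(M, dtype=bool)]).max())  # on the order of 1/sqrt(d) ~ 3e-3
```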
As a consequence, they tend to yield diverse training results for the sub-models.", + "bbox": [ + 169, + 771, + 826, + 924 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/4f4ad8a9ae15fd8ff6ef622e7b947bd3b49e9657db4fb8f2ef987138577adde8.jpg", + "image_caption": [ + "Figure 4: The first and second rows are the figures by adding random noise to high-amplitude and low-amplitude frequencies, respectively. \"20% changed\" for the first row means we remove the 20% lowest-amplitudes frequencies, and add small noise to the remaining high-amplitude frequencies. \"20% changed\" for the second row means we remove the 20% highest-amplitude frequencies, and add small noise to the remaining low-amplitude frequencies. \"50% changed\" and \"80% changed\" follow the same procedure as \"20% changed\"." + ], + "image_footnote": [], + "bbox": [ + 305, + 104, + 689, + 272 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "The implementation details are as follows. Given an image $x$ , we perform Fourier Transform on $x$ and also on a generated Gaussian noise $n_0$ . Then, we can obtain the low-amplitude frequencies and high-amplitude frequencies of $x$ by setting an amplitude threshold. Next, we generate two masks $(M_1$ and $M_2$ ) to select high-amplitude frequencies and low-amplitude frequencies. We add the low-amplitude frequencies of $n_0$ (i.e., $M_2(n_0)$ ) to high-amplitude frequencies of $x$ (i.e., $M_1(x)$ ), and obtain the transformation of $x$ (denoted as $\\pi(x)$ ). Finally, we transform $\\pi(x)$ to time domain by inverse Fourier transform and train the model with $\\pi(x)$ .", + "bbox": [ + 169, + 412, + 826, + 512 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "D ALGORITHM OF FDT", + "text_level": 1, + "bbox": [ + 171, + 545, + 387, + 560 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Algorithm 1 shows the overall framework of training an ensemble model with FDT. It illustrates that our data transformation is performed at each iteration.", + "bbox": [ + 169, + 584, + 823, + 613 + ], + "page_idx": 15 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [ + "Algorithm 1 Training ensemble model with FDT" + ], + "code_body": "Input: dataset $\\mathcal{X}\\times \\mathcal{Y}$ , the number of sub-models $M$ , and the epoch number $E$ Output: sub-model $\\beta_{1},\\beta_{2},\\dots ,\\beta_{M}$ \nfor $i = 1$ to $E$ do Run Targeted-attack Transformation and obtain $P_{1},P_{2},\\dots ,P_{M}$ . for $j = 1$ to $M$ do train $\\beta_{j}$ on $P_{j}$ \nend for \nend for", + "guess_lang": "txt", + "bbox": [ + 173, + 657, + 712, + 770 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Algorithm 2 shows the details of targeted-attack transformation method on the whole dataset. For each specific image $x$ , we obtain the targeted class according to the allocation scheme mentioned in \"Stage (1)\". Then, we use targeted PGD attack to obtain the adversarial sample $x'$ . After that, we perform Fourier Transform on $x$ and $x'$ , and we can obtain the low-amplitude frequencies and high-amplitude frequencies of $x$ by setting an amplitude threshold. Next, we generate two masks $(M_1$ and $M_2$ ) to select high-amplitude frequencies and low-amplitude frequencies. 
We add the low-amplitude frequencies of $x'$ (i.e., $M_2(x')$ ) to high-amplitude frequencies of $x$ (i.e., $M_1(x)$ ), and obtain the transformation of $x$ (denoted as $\\pi(x)$ ). Finally, we transform $\\pi(x)$ to time domain by inverse Fourier transform.", + "bbox": [ + 169, + 797, + 826, + 924 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 488, + 948, + 508, + 959 + ], + "page_idx": 15 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [ + "Algorithm 2 Targeted-attack Transformation" + ], + "code_body": "Input: dataset $P_{ori}$ , number M, steps s, class number k \nOutput: Transformed data $P_{1},P_{2},\\dots ,P_{M}$ \nDivide the dataset $P_{ori}$ into k parts $\\{C_1,C_2,\\dots ,C_k\\}$ according to labels \nRandomly partition the dataset $C_j$ equally into disjoint $k - 1$ parts $\\{C_{j,1},C_{j,2},\\dots ,C_{j,k - 1}\\}$ \nInitialize $P_{1},P_{2},\\dots ,P_{M}$ with empty set; \n $m\\gets 0$ \nfor $j = 1$ to k do for $i = 1$ to $k - 1$ do $C_{j,i}^{\\prime}\\gets$ calculate targeted attack example in $C_{j,i}$ with label $i + j$ mod $k$ and perform data transformation on each image; for $s = 1$ to $\\lceil \\frac{M}{2}\\rceil +1$ do $m\\gets m + 1$ mod M; Append $C_{j,i}^{\\prime}$ to $P_{m}$ end for end for end for", + "guess_lang": "txt", + "bbox": [ + 173, + 119, + 823, + 344 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "E IMPLEMENT", + "text_level": 1, + "bbox": [ + 171, + 375, + 315, + 390 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "In this section, we provide more experimental details. In our work, we utilize the CIFAR-10 (Krizhevsky & Hinton, 2009), CIFAR-100 (Krizhevsky & Hinton, 2009), and Tiny-ImageNet-200 (Deng et al., 2009). In the testing process, the primary reason for selecting FGSM (Madry et al., 2018), PGD (Carlini et al., 2019), BIM (Goodfellow et al., 2015), MIM (Dong et al., 2018), CW(Carlini & Wagner, 2017) as attack methods is to keep consistent with the baseline methods from the literature. Further, we select AA (Croce & Hein, 2020) because it is also a popular attack method and more powerful than those base methods. To reduce the computational complexity of targeted attacks, we leverage the transferability of adversarial examples and utilize a pre-trained simple network (VGG11(Simonyan & Zisserman, 2015)) structure for targeted attacks.", + "bbox": [ + 169, + 410, + 826, + 536 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Further, we introduce the implement of \"FDT-random\", \"FDT-target\" and \"FDT-hybrid\" here. For \"FDT-random\", we perform Fourier Transform on $x$ and also on a randomly sampled standard Gaussian noise $n_0$ . Then, we can obtain the low-amplitude frequencies and high-amplitude frequencies of $x$ by setting an amplitude threshold. Next, we generate two masks $(M_1$ and $M_2$ ) to select high-amplitude frequencies and low-amplitude frequencies. We add the low-amplitude frequencies of $n_0$ (i.e., $M_2(n_0)$ ) to high-amplitude frequencies of $x$ (i.e., $M_1(x)$ ), and obtain the transformation of $x$ (denoted as $\\pi(x)$ ). Finally, we transform $\\pi(x)$ to time domain by inverse Fourier transform and train the model with $\\pi(x)$ . For \"FDT-target\", we obtain the targeted class according to the allocation scheme mentioned in \"Stage (1)\". 
Then, we use targeted PGD attack to obtain the adversarial sample $x'$ . After that, perform the same steps as with FDT-random (we substitute $n_0$ with $x'$ ). For FDT-hybrid, we set two frequency selection thresholds $\\tau_1$ and $\\tau_2$ ( $\\tau_1 < \\tau_2$ ), and generate three masks to select the frequencies: $M_1$ for the high-amplitude frequencies (amplitude $> \\tau_2$ ), $M_2$ for the middle part ( $\\tau_1 <$ amplitude $< \\tau_2$ ), and $M_3$ for the small part (amplitude $< \\tau_1$ ). Next, we combine $M_1(x), M_2(x')$ and $M_3(n_0)$ to obtain the transformation $\\pi(x)$ .", + "bbox": [ + 169, + 541, + 826, + 739 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "F ADDITIONAL EXPERIMENTAL RESULTS", + "text_level": 1, + "bbox": [ + 171, + 763, + 529, + 779 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "In this section, we provide more experimental results. Firstly, we extend our experiments to SVHN, Tiny-ImageNet-200, and WideResNet-28-10 in Appendix F.1. We also conduct the ablation studies on weakness set allocation method, amplitude-based selection threshold and model architecture in Appendix F.1. Then, we evaluate the performance of FDT under black-box attacks on the CIFAR-10 and CIFAR-100 in Appendix F.2. Then we present the trade-off between clean accuracy and robust accuracy on the CIFAR-100 using FDT method in Appendix F.3. This trade-off sheds light on the effectiveness of FDT with changing the trade-off parameter. Additionally, in Appendix F.4, we compare the transferability across various sub-models with the baseline methods. Furthermore, we compare our method with more related methods in Appendix F.5.", + "bbox": [ + 169, + 797, + 826, + 925 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "F.1 ABLATION STUDIES", + "text_level": 1, + "bbox": [ + 171, + 104, + 352, + 118 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "In this section, we extend our experiments to additional datasets (SVHN, Tiny-ImageNet-200) and architecture (WideResNet-28-10). We also explore the ablation studies on weakness set allocation method, amplitude-based selection threshold and model architecture.", + "bbox": [ + 169, + 130, + 823, + 172 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Table 3 presents the performance of ensemble methods trained with ResNet-20 on SVHN against several widely used white-box attacks. The experimental results demonstrate that all ensemble models achieve comparable levels of clean accuracy. Specifically, the FDT approach exhibits better robust accuracy than the other methods. These observations highlight the effectiveness of FDT in achieving favorable clean accuracy and robustness of ensemble models.", + "bbox": [ + 169, + 179, + 826, + 250 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/ea730086b6dc727654d80fe2f9d4344644e34be0554cfdd816145d2404b70897.jpg", + "table_caption": [ + "Table 3: Robust Accuracy (%) of different ensemble methods against white-box attacks on SVHN. The $\\epsilon$ and $\\lambda$ stand for the $l_{\\infty}$ norm of the adversarial perturbation and the coefficient of C&W attack respectively. The last column is the ensemble model trained with FDT-hybrid." + ], + "table_footnote": [], + "table_body": "
SVHN | ADP | GAL | DVERGE | TRS | FDT-hybrid
clean accuracy | 96.83 | 94.66 | 96.28 | 94.52 | 96.73 ± 0.12
FGSM (ε = 0.01) | 84.38 | 80.2 | 85.6 | 72.87 | 90.13 ± 0.09
FGSM (ε = 0.02) | 78.08 | 41.5 | 81.4 | 53.9 | 86.78 ± 0.07
PGD (ε = 0.01) | 51.01 | 50.1 | 53.31 | 54.43 | 59.42 ± 0.07
PGD (ε = 0.02) | 17.74 | 8.24 | 17.42 | 18.86 | 22.74 ± 0.04
BIM (ε = 0.01) | 54.38 | 47.73 | 52.08 | 53.71 | 57.91 ± 0.08
BIM (ε = 0.02) | 21.26 | 8.1 | 14.58 | 18.05 | 20.23 ± 0.05
MIM (ε = 0.01) | 61.24 | 51.96 | 58.51 | 56.32 | 62.14 ± 0.08
MIM (ε = 0.02) | 24.84 | 5.14 | 23.22 | 21.95 | 25.37 ± 0.04
AA (ε = 0.01) | 49.92 | 48.39 | 52.02 | 52.83 | 57.54 ± 0.09
AA (ε = 0.02) | 16.13 | 6.90 | 16.95 | 17.48 | 20.12 ± 0.05
C&W (λ = 0.1) | 55.81 | 49.94 | 66.82 | 52.74 | 72.14 ± 0.11
", + "bbox": [ + 254, + 329, + 743, + 522 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "We also extend our experiment to the sub-models trained with WideResNet-28-10 on CIFAR-10. Table 4 shows the performance of the models facing various whitebox attacks. The results indicate that FDT maintains good performance even on more complex network structures. We also evaluated the robustness of an ensemble of eight sub-models, with the results presented in Table 5.", + "bbox": [ + 169, + 539, + 826, + 597 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/14d0117ad636af5d0c656f9cd150fadeed1db5e0a79e3e327248db4c1840eb39.jpg", + "table_caption": [ + "Table 4: Robust Accuracy $(\\%)$ of different ensemble methods against white-box attacks on CIFAR-10. The $\\epsilon$ and $\\lambda$ stand for the $l_{\\infty}$ norm of the adversarial perturbation and the coefficient of C&W attack respectively. The architecture of sub-model is WRN-28-10." + ], + "table_footnote": [], + "table_body": "
CIFAR-10 | ADP | GAL | DVERGE | FDT-hybrid
clean accuracy | 92.99 | 82.14 | 94.32 | 94.18 ± 0.06
FGSM (ε = 0.01) | 60.04 | 44.94 | 71.01 | 80.64 ± 0.05
FGSM (ε = 0.02) | 51.69 | 36.83 | 50.43 | 60.09 ± 0.05
PGD (ε = 0.01) | 11.09 | 22.10 | 44.25 | 64.64 ± 0.07
PGD (ε = 0.02) | 2.54 | 5.06 | 13.27 | 26.0 ± 0.03
BIM (ε = 0.01) | 15.81 | 22.62 | 46.53 | 67.36 ± 0.10
BIM (ε = 0.02) | 4.50 | 5.43 | 17.38 | 32.36 ± 0.06
MIM (ε = 0.01) | 18.18 | 25.97 | 44.21 | 64.36 ± 0.08
MIM (ε = 0.02) | 4.72 | 7.81 | 12.83 | 25.64 ± 0.05
AA (ε = 0.01) | 9.38 | 19.34 | 43.23 | 63.45 ± 0.08
AA (ε = 0.02) | 1.17 | 3.93 | 12.49 | 25.23 ± 0.04
C&W (λ = 0.1) | 37.81 | 19.05 | 46.32 | 47.23 ± 0.10
", + "bbox": [ + 277, + 676, + 718, + 869 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Table 6 is the result of ensemble methods trained with WideResNet-28-10 on Tiny-ImageNet-200. We test the robustness of different methods under widely used white-box attacks. Due to the high", + "bbox": [ + 169, + 895, + 828, + 926 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/229c236fc1f0db8d2b1319a5fcb120c20f1c96eba1c0f330a07fa65fe8a8d57e.jpg", + "table_caption": [ + "Table 5: Robust Accuracy (\\%) of an ensemble of eight sub-models against white-box attacks on CIFAR-10. The $\\epsilon$ and $\\lambda$ stand for the $l_{\\infty}$ norm of the adversarial perturbation and the coefficient of C&W attack respectively. The architecture of sub-model is WRN-28-10." + ], + "table_footnote": [], + "table_body": "
CIFAR-10 | FDT-hybrid
clean accuracy | 93.72 ± 0.11
FGSM (ε = 0.01) | 86.31 ± 0.07
FGSM (ε = 0.02) | 67.29 ± 0.06
PGD (ε = 0.01) | 72.02 ± 0.07
PGD (ε = 0.02) | 45.42 ± 0.05
BIM (ε = 0.01) | 73.68 ± 0.10
BIM (ε = 0.02) | 44.53 ± 0.06
MIM (ε = 0.01) | 71.36 ± 0.06
MIM (ε = 0.02) | 45.24 ± 0.06
AA (ε = 0.01) | 70.45 ± 0.08
AA (ε = 0.02) | 44.23 ± 0.07
C&W (λ = 0.1) | 72.37 ± 0.11
", + "bbox": [ + 379, + 166, + 614, + 359 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "time complexity of the TRS, we do not compare with it here. The experimental results show that all ensemble models achieve comparable levels of clean accuracy while FDT-hybrid achieves better robust accuracy than other methods.", + "bbox": [ + 169, + 395, + 823, + 439 + ], + "page_idx": 18 + }, + { + "type": "table", + "img_path": "images/f6d2c34f416926c50bd779fa548c7f14db9a4d7374cd62334fcf7aab8309d2c6.jpg", + "table_caption": [ + "Table 6: Robust Accuracy (%) of different ensemble methods against white-box attacks on TinyImageNet-200. The $\\epsilon$ and $\\lambda$ stand for the $l_{\\infty}$ norm of the adversarial perturbation and the coefficient of C&W attack respectively. The last column is the ensemble model trained with FDT-hybrid." + ], + "table_footnote": [], + "table_body": "
Tiny-ImageNet-200 | ADP | GAL | DVERGE | FDT-hybrid
clean accuracy | 49.88 | 45.7 | 51.46 | 64.21 ± 0.06
FGSM (ε = 0.01) | 10.46 | 1.24 | 22.82 | 21.73 ± 0.04
FGSM (ε = 0.02) | 4.38 | 0.59 | 18.42 | 19.28 ± 0.04
PGD (ε = 0.01) | 0.02 | 0.02 | 3.6 | 4.76 ± 0.02
PGD (ε = 0.02) | 0.02 | 0.01 | 0.34 | 0.45 ± 0.01
BIM (ε = 0.01) | 0.07 | 0.02 | 3.35 | 4.81 ± 0.03
BIM (ε = 0.02) | 0.03 | 0.01 | 0.28 | 0.32 ± 0.00
MIM (ε = 0.01) | 0.11 | 0.02 | 4.36 | 6.13 ± 0.03
MIM (ε = 0.02) | 0.03 | 0.01 | 0.41 | 0.48 ± 0.00
AA (ε = 0.01) | 0 | 0 | 0 | 2.66 ± 0.02
AA (ε = 0.02) | 0 | 0 | 0 | 0.02 ± 0.00
C&W (λ = 0.01) | 2.36 | 0.13 | 9.54 | 19.47 ± 0.06
", + "bbox": [ + 267, + 513, + 725, + 704 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Ablation study on model architectures. Table 7 presents the results across different model architectures, including ResNet20, ResNet50, WRN28-10, and WRN34-10. While larger models generally achieve higher clean and robust accuracy, the results suggest that our method consistently enhances robustness under various attack scenarios, demonstrating its applicability across diverse architectures.", + "bbox": [ + 169, + 728, + 826, + 787 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Ablation study on allocation methods. Table 8 compares the performance of FDT-hybrid with different weakness set allocation methods on CIFAR-10. The results indicate that our proposed allocation method achieves better clean accuracy and robustness under various attack scenarios than randomly uniform allocation.", + "bbox": [ + 169, + 790, + 823, + 848 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Ablation study on $\\tau_{1}$ and $\\tau_{2}$ . Table 9 presents the results of FDT-hybrid with various combinations of selection thresholds $\\tau_{1}$ and $\\tau_{2}$ on CIFAR-10. The experiments reveal the impact of different thresholds on both clean accuracy and robustness under adversarial attacks. As $\\tau_{2}$ increases, robustness improves across all metrics, but clean accuracy decreases. For a fixed $\\tau_{2}$ , increasing $\\tau_{1}$ generally leads to a trade-off between clean accuracy and robustness. Setting $\\tau_{1} = 0.2$ and $\\tau_{2} = 0.8$ achieves a relatively", + "bbox": [ + 169, + 854, + 825, + 926 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "balanced performance, maintaining both competitive clean accuracy and robust accuracy under various attacks.", + "bbox": [ + 169, + 103, + 823, + 132 + ], + "page_idx": 19 + }, + { + "type": "table", + "img_path": "images/7e18fe84f6195a634b498bafd1f5b8551dba52b6460020cc8b879d3c9ab43d44.jpg", + "table_caption": [ + "Table 7: Robust Accuracy $(\\%)$ of different model architectures against white-box attacks on Cifar10. The $\\epsilon$ and $\\lambda$ stand for the $l_{\\infty}$ norm of the adversarial perturbation and the coefficient of C&W attack respectively." + ], + "table_footnote": [], + "table_body": "
CIFAR-10 | ResNet20 | ResNet50 | WRN28-10 | WRN34-10
clean accuracy | 90.02 | 93.23 | 94.18 | 94.63
FGSM (ε = 0.01) | 72.24 | 76.65 | 80.64 | 81.04
FGSM (ε = 0.02) | 58.04 | 58.59 | 60.09 | 60.92
PGD (ε = 0.01) | 48.48 | 60.23 | 64.64 | 65.38
PGD (ε = 0.02) | 20.01 | 24.35 | 26.00 | 27.42
BIM (ε = 0.01) | 48.57 | 60.43 | 67.36 | 68.29
BIM (ε = 0.02) | 16.63 | 23.57 | 32.36 | 33.86
MIM (ε = 0.01) | 51.48 | 60.81 | 64.36 | 64.71
MIM (ε = 0.02) | 20.09 | 24.54 | 25.64 | 26.42
AA (ε = 0.01) | 51.56 | 60.48 | 63.45 | 64.01
AA (ε = 0.02) | 19.42 | 24.21 | 25.23 | 26.39
C&W (λ = 0.01) | 56.08 | 56.55 | 57.23 | 57.52
", + "bbox": [ + 248, + 202, + 750, + 393 + ], + "page_idx": 19 + }, + { + "type": "table", + "img_path": "images/7a88a1aa78ff51829267b334b6794a941fd03d34e267f681384df3b676da0d0b.jpg", + "table_caption": [ + "Table 8: Performance of FDT-hybrid with different weakness set allocation method on CIFAR-10. The other settings are consistent with those in Table 1." + ], + "table_footnote": [], + "table_body": "
Allocation method | Clean accuracy | FGSM (ε = 0.02) | PGD (ε = 0.02) | AutoAttack (ε = 0.02)
Uniform Random | 89.32 | 56.20 | 18.24 | 17.89
Ours | 90.20 | 58.04 | 20.01 | 19.42
", + "bbox": [ + 184, + 468, + 808, + 518 + ], + "page_idx": 19 + }, + { + "type": "table", + "img_path": "images/322f7bd9a01798dfc54a38e98d6770546d8cdddde92530762c95146418592f2d.jpg", + "table_caption": [ + "Table 9: Performance of FDT-hybrid with different selection thresholds $\\tau_{1}$ and $\\tau_{2}$ on CIFAR-10. The other settings are consistent with those in Table 1." + ], + "table_footnote": [], + "table_body": "
Thresholds | Clean accuracy | FGSM (ε = 0.02) | PGD (ε = 0.02) | AutoAttack (ε = 0.02)
τ1 = 0.2, τ2 = 0.7 | 91.03 | 56.62 | 17.74 | 17.60
τ1 = 0.2, τ2 = 0.8 | 90.20 | 58.04 | 20.01 | 19.42
τ1 = 0.2, τ2 = 0.9 | 89.46 | 58.48 | 20.12 | 19.57
τ1 = 0.4, τ2 = 0.7 | 89.75 | 56.89 | 17.93 | 17.82
τ1 = 0.4, τ2 = 0.8 | 89.08 | 58.21 | 20.01 | 19.47
τ1 = 0.4, τ2 = 0.9 | 88.44 | 58.53 | 20.09 | 19.61
τ1 = 0.6, τ2 = 0.7 | 89.62 | 53.35 | 15.27 | 14.63
τ1 = 0.6, τ2 = 0.8 | 88.84 | 55.33 | 15.42 | 15.24
τ1 = 0.6, τ2 = 0.9 | 88.12 | 55.46 | 15.83 | 15.47
", + "bbox": [ + 189, + 593, + 805, + 741 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "F.2 RESULTS FOR BLACK-BOX ATTACK", + "text_level": 1, + "bbox": [ + 171, + 784, + 455, + 797 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "In the black-box setting, the attacker's knowledge usually is limited to the original training dataset and has no information about the model. This setting represents a more practical attack scenario. The attacker can train a surrogate model to generate transferable adversarial examples and transfer them to the target ensemble model. We utilize a single ResNet-20 model as the surrogate model. Adversarial examples are generated on the surrogate model using the SPSA algorithm (Spall, 1992). Figure 5 shows the robust accuracy of ensemble models against black-box attacks under different degrees of perturbation. As we can see, FDT-hybrid ensemble training strategies outperform the other ensemble training strategy against black-box attacks both on CIFAR10 and CIFAR100.", + "bbox": [ + 169, + 811, + 826, + 924 + ], + "page_idx": 19 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 946, + 509, + 959 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/3a9bbed4c976150c6e83147e047a6e57ab5122f2f57653ad1f7a35b88ed612c5.jpg", + "image_caption": [ + "(a)" + ], + "image_footnote": [], + "bbox": [ + 181, + 128, + 491, + 265 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/41ef6eb8c4ed9db8cc8dfb81e794e29dc4f266be742cbaf143ed3a489bfd37bb.jpg", + "image_caption": [ + "(b)", + "Figure 5: Robust Accuracy for different ensemble models against black-box attack with different perturbation scale $\\epsilon$ ." + ], + "image_footnote": [], + "bbox": [ + 500, + 127, + 812, + 265 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "F.3 TRADE-OFF BETWEEN CLEAN AND ROBUST ACCURACY", + "text_level": 1, + "bbox": [ + 171, + 351, + 596, + 364 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "In this section, we explore the trade-off between clean accuracy and robust accuracy by varying the frequency selection threshold $\\tau_{2}$ (as mentioned in Section 4.2). And we set $\\tau_{1}$ to be 0.1. To assess the adversarial robustness, we utilize the PGD attack under $l_{\\infty}$ perturbations of size $\\epsilon = 0.01$ as a benchmark. We train a set of ResNet-20 FDT-hybrid models on CIFAR-10 and CIFAR-100 with various frequency selection threshold $\\tau_{2} \\in \\{0.4, 0.6, 0.8, 1.0, 1.2, 1.6\\}$ . Figure 6 shows that the ensemble model has lower clean accuracy and higher robust accuracy with the increasing of $\\tau_{2}$ .", + "bbox": [ + 169, + 380, + 823, + 465 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/96556ece08cecab0da72a8399ad072088c7c47234f3802bd3d1de1d7d1b1dad8.jpg", + "image_caption": [ + "(a)", + "Figure 6: (a) shows the trade-off on CIFAR-10 while (b) on CIFAR-100. From left to right, we decrease the trade-off parameter $\\tau_{2}$ for FDT." 
+ ], + "image_footnote": [], + "bbox": [ + 179, + 513, + 488, + 667 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/1840eb354f1c0d1bf3c15cbd742aca09b7c5a17ae284709ea962b17eb2e3a2b4.jpg", + "image_caption": [ + "(b)" + ], + "image_footnote": [], + "bbox": [ + 500, + 512, + 810, + 667 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "F.4 TRANSFERABILITY ACROSS VARIOUS SUB-MODELS", + "text_level": 1, + "bbox": [ + 171, + 768, + 570, + 782 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "To further investigate the diversity between sub-models, we conduct an analysis by generating adversarial examples using one sub-model and evaluating their accuracy on other target sub-models. The transferability of these adversarial examples among sub-models is visualized in Figure 7, considering different ensemble training methods on the CIFAR10 dataset. We generate adversarial examples from \"base model\" and test the accuracy of \"target model\". The experimental results indicate that FDT exhibits comparable performance to DVERGE and TRS in reducing the transferability of adversarial samples across different sub-models. This demonstrates that FDT not only enhances the diversity of weaknesses within the dataset but also weakens the transferability of adversarial examples between sub-models.", + "bbox": [ + 169, + 797, + 826, + 922 + ], + "page_idx": 20 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/487827d0491099c648319eac4b498797d48312a2c70fa6583e223a6fb92e0575.jpg", + "image_caption": [ + "(a)" + ], + "image_footnote": [], + "bbox": [ + 241, + 108, + 411, + 244 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/57ddf9ef19e594195508f3ae1c10e54a8dbe8b1e70cc0fa19353407b75b3f5fb.jpg", + "image_caption": [ + "(b)" + ], + "image_footnote": [], + "bbox": [ + 415, + 108, + 584, + 244 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/e2d764b96904022fe7482aa65ff6f3445097fd2fb937afc6e30e43965fbcf8b6.jpg", + "image_caption": [ + "(c)" + ], + "image_footnote": [], + "bbox": [ + 586, + 108, + 756, + 244 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/2ad9216a92efe39f3176dc41ec4a12ae77535776bdeea00c191a7bbbed2cc1c2.jpg", + "image_caption": [ + "(d)" + ], + "image_footnote": [], + "bbox": [ + 238, + 276, + 408, + 414 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/509f5d101d695c95d266c4e7aed497ab6ccf09a4c35a5562d07b8530693c888f.jpg", + "image_caption": [ + "(e)" + ], + "image_footnote": [], + "bbox": [ + 413, + 277, + 581, + 412 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/9a7aa25d9254e905c5f8bc033b282eaab1dd2a4c750012ce267b3c828efa9408.jpg", + "image_caption": [ + "(f)", + "Figure 7: Pair-wise adversarial transferability between sub-models against PGD attack with $\\epsilon = 0.02$ on CIFAR-10. The value represents the success rate of adversarial examples generated by the base model in attacking the target model." 
+ ], + "image_footnote": [], + "bbox": [ + 586, + 277, + 753, + 414 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "F.5 COMPARE WITH ADVERSARIAL TRAINING", + "text_level": 1, + "bbox": [ + 171, + 505, + 503, + 518 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "We use target attacks in our data transformation, which differs significantly from adversarial training. First, we employ a simple pre-trained network (VGG11 in our experiment) to compute adversarial examples, thereby accelerating the training process. Second, we only utilize the low amplitude part of the adversarial examples for data transformation, which helps maintain the model's clean accuracy. We compare our method with several popular approaches (Wang et al., 2023b; Rade & Moosavi-Dezfooli, 2021; Xu et al., 2023) on CIFAR-10 using AutoAttack under $l_{\\infty}$ perturbations ( $\\epsilon = 8 / 255$ ). Wang et al. (2023b) generated training datasets using a diffusion model, followed by adversarial training on these datasets. For fairness, we compare our method with the version proposed by Wang et al. (2023b) that uses 50k generated images. Rade & Moosavi-Dezfooli (2021) used \"helper example\" to help the adversarial training. Xu et al. (2023) proposed Dynamics-Aware Robust Training, which encourages the decision boundary to adjust in a way that prioritizes increasing smaller margins. We use WideResNet-28-10 as the sub-model and ensemble eight sub-models without using generated data. The results in Table 10 indicate that, although the robustness of our method is not the highest, it maintains clean accuracy with almost no decline. Moreover, our method does not require additional generated data or adversarial training, and even with the need for ensembling, the training efficiency remains relatively high. This suggests a potential way to enhance robustness while minimizing the decrease in clean accuracy.", + "bbox": [ + 169, + 531, + 826, + 768 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "To further illustrate our method's advantage, we conduct additional experiments to compare the \"robustness-clean accuracy\" trade-off curves of our method and AT under different settings. Fig. 8 compares the trade-off curves obtained by HAT Rade & Moosavi-Dezfooli (2021) with that of FDT-hybrid. For HAT, we fix $\\gamma = 0.25$ and vary $\\beta \\in \\{0.1, 0.5, 2.5, 3.0, 4.0, 5.0\\}$ ( $\\beta$ is the coefficient of the robustness loss, and higher $\\beta$ indicates higher robust accuracy); for FDT-hybrid, we fix $\\tau_{1} = 0.2$ and vary $\\tau_{2} \\in \\{0.5, 0.7, 0.9, 1.1, 1.3, 1.5\\}$ . We observe that HAT's robustness declines rapidly when the $\\beta$ parameter is small (as increasing the clean accuracy). This result shows the significant advantage of our method when a clean accuracy above $90\\%$ is required.", + "bbox": [ + 169, + 773, + 826, + 886 + ], + "page_idx": 21 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/6373b177569abf83ef1d4ee46c795b8921d8725a1ce03bdf96e33c8e603fc9a8.jpg", + "image_caption": [ + "Trade-off between clean accuracy and robust accuracy", + "Figure 8: It shows the trade-off curves on CIFAR-10. 
From left to right, we decrease the trade-off parameter $\\tau_{2}$ for FDT, and decrease the trade-off parameter $\\beta$ for HAT." + ], + "image_footnote": [], + "bbox": [ + 344, + 252, + 625, + 398 + ], + "page_idx": 22 + }, + { + "type": "table", + "img_path": "images/2c93b3a471c98b16baae6a344fd0b7bf0b48024ec8c1e2dca6182ec63da46113.jpg", + "table_caption": [ + "Table 10: Clean accuracy and robust accuracy (\\%) of different methods against AutoAttack under $l_{\\infty}$ perturbations ( $\\epsilon = 8/255$ ) on CIFAR-10." + ], + "table_footnote": [], + "table_body": "
CIFAR-10 | clean accuracy | robust accuracy
(Wang et al., 2023b) | 86.15 | 55.71
(Rade & Moosavi-Dezfooli, 2021) | 84.90 | 49.08
(Xu et al., 2023) | 85.55 | 54.69
OURS (FDT-hybrid) | 93.72 | 34.61
", + "bbox": [ + 254, + 729, + 743, + 809 + ], + "page_idx": 22 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 22 + } +] \ No newline at end of file diff --git a/2025/To Tackle Adversarial Transferability_ A Novel Ensemble Training Method with Fourier Transformation/11b8de53-d193-4b48-bf31-fc86f1bab485_model.json b/2025/To Tackle Adversarial Transferability_ A Novel Ensemble Training Method with Fourier Transformation/11b8de53-d193-4b48-bf31-fc86f1bab485_model.json new file mode 100644 index 0000000000000000000000000000000000000000..f67fbdfbcb41a03e6bd7f47b74ed4b84ac4d3a70 --- /dev/null +++ b/2025/To Tackle Adversarial Transferability_ A Novel Ensemble Training Method with Fourier Transformation/11b8de53-d193-4b48-bf31-fc86f1bab485_model.json @@ -0,0 +1,3414 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.1, + 0.827, + 0.171 + ], + "angle": 0, + "content": "TO TACKLE ADVERSARIAL TRANSFERABILITY: A NOVEL ENSEMBLE TRAINING METHOD WITH FOURIER TRANSFORMATION" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.194, + 0.722, + 0.21 + ], + "angle": 0, + "content": "Wanlin Zhang\\(^{1,3}\\), Weichen Lin\\(^{2}\\), Ruomin Huang\\(^{4}\\), Shihong Song\\(^{1}\\), Hu Ding\\(^{1*}\\)" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.21, + 0.836, + 0.282 + ], + "angle": 0, + "content": "\\(^{1}\\)School of Computer Science and Technology, University of Science and Technology of China \n\\(^{2}\\)School of Artificial Intelligence and Data Science, University of Science and Technology of China \n\\(^{3}\\)Shanghai Innovation Institute \\(^{4}\\)Department of Computer Science, Duke University \n{ideven, linweichen, shihongsong}@mail.ustc.edu.cn \nruomin.huang@duke.edu,HUDING@ustc.edu.cn" + }, + { + "type": "title", + "bbox": [ + 0.451, + 0.318, + 0.547, + 0.332 + ], + "angle": 0, + "content": "ABSTRACT" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.348, + 0.768, + 0.528 + ], + "angle": 0, + "content": "Ensemble methods are commonly used for enhancing robustness in machine learning. However, due to the \"transferability\" of adversarial examples, the performance of an ensemble model can be seriously affected even it contains a set of independently trained sub-models. To address this issue, we propose an efficient data transformation method based on a cute \"weakness allocation\" strategy, to diversify non-robust features. Our approach relies on a fine-grained analysis on the relation between non-robust features and adversarial attack directions. Moreover, our approach enjoys several other advantages, e.g., it does not require any communication between sub-models and the construction complexity is also quite low. We conduct a set of experiments to evaluate the performance of our proposed method and compare it with several popular baselines. The results suggest that our approach can achieve significantly improved robust accuracy over most existing ensemble methods, and meanwhile preserve high clean accuracy." 
+ }, + { + "type": "title", + "bbox": [ + 0.173, + 0.554, + 0.338, + 0.568 + ], + "angle": 0, + "content": "1 INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.575, + 0.827, + 0.66 + ], + "angle": 0, + "content": "In the past decade, Deep neural networks (DNNs) have achieved prominent performance on a broad range of real-world tasks (Goodfellow et al., 2016). However, a number of previous works show that DNNs are susceptible to carefully-crafted manipulations, where the manipulated data are called \"adversarial examples\" (Szegedy et al., 2014; Zhou et al., 2018; Heaven, 2019). The existence of adversarial examples severely impedes the application of DNNs in security-conscious scenarios, such as self-driving car (Rossolini et al., 2023; Zhu et al., 2021) and heath care (Newaz et al., 2020)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.666, + 0.825, + 0.75 + ], + "angle": 0, + "content": "The adversarial training approach (Wang et al., 2023a; Madry et al., 2018) has gained significant attention due to its great effectiveness for defending against adversarial examples. However, the adversarial training approach often necessitates considerably high training time and large training dataset (Gowal et al., 2021; Carmon et al., 2019). Moreover, it has been observed that adversarial training is likely to incur certain decline in the accuracy on clean data, which also hinders the trained model to be applied for many practical tasks (Tsipras et al., 2018; Zhang et al., 2019)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.757, + 0.827, + 0.868 + ], + "angle": 0, + "content": "Another important approach to enhance adversarial robustness is ensemble training (Tramér et al., 2018). But recent studies (Yang et al., 2025; Gao et al., 2022; Waseda et al., 2023) demonstrated that an adversarial example can attack different models even they are trained independently, and this phenomenon is the so-called \"transferability\" of adversarial examples. Hence, the strategy that simply integrates different models trained on the same original dataset is not sufficient to guarantee the overall robustness. To resolve this issue, different approaches have been proposed for maximizing the \"diversity\" among sub-models; in general, these approaches can be categorized into two classes: \"simultaneous training\" and \"individual training\" (Pang et al., 2019)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.875, + 0.825, + 0.904 + ], + "angle": 0, + "content": "To reduce the similarity among sub-models, most existing \"simultaneous training\" methods attempt to incorporate some penalty during each epoch of parameter updates. Kariyappa & Qureshi (2019)" + }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.911, + 0.334, + 0.925 + ], + "angle": 0, + "content": "*Corresponding author." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.257 + ], + "angle": 0, + "content": "proposed the \"Gradient Alignment Loss (GAL)\" method to minimize the gradient similarity between sub-models directly. Further, Yang et al. 
(2021) proposed the \"Transferability Reduced Smooth (TRS)\" method to improve GAL by adding a regularization term to increase the smoothness, as the models with a smoother loss function can reduce the \"transferability\" of attacks. Yang et al. (2020) aimed to isolate the adversarial vulnerability in each sub-model by distilling non-robust features, where the sub-models can then generate diverse outputs being resilient against transfer attacks. Despite their effectiveness for defending adversarial attacks, the simultaneous training methods often require a substantial amount of memory since all the sub-models need to be stored in the GPUs in the training stage, which could be prohibitive if the number of sub-models is not small (say, more than 10) and/or their sizes are large. Additionally, the information interaction in parallel training can also cause extra large communication cost." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.264, + 0.828, + 0.419 + ], + "angle": 0, + "content": "Different from simultaneous training, most \"individual training\" methods train each sub-model independently on a randomly transformed version of the given training dataset (Pang et al., 2019; AprilPyone & Kiya, 2021). This \"random transformation\" strategy yields diverse datasets, and thus different sub-models trained on these datasets can present diverse performances when confronting an adversarial attack. The individual training approach has higher flexibility and also requires less GPU memory, because the sub-models do not need to be stored simultaneously. Since there is no communication between sub-models, individual training methods are more suitable for parallel training with multiple GPUs. But unfortunately, recent studies showed that the commonly used random transformations (e.g. image cropping and rescaling) are not that effective under adversarial attacks (Athalye et al., 2018). The major cause of suppressing the performance of individual training is that the \"transferability\" problem is still not well addressed." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.424, + 0.825, + 0.452 + ], + "angle": 0, + "content": "Our contributions. To tackle the transferability obstacle, we consider developing a new data transformation method for ensemble training. Our main contributions are summarized as follows:" + }, + { + "type": "text", + "bbox": [ + 0.169, + 0.459, + 0.825, + 0.53 + ], + "angle": 0, + "content": "- First, we propose a fine-grained analysis on the relation between non-robust features and adversarial attack directions (Section 3). Being different from the previous analysis on non-robust features, our new analysis provides us the hints that are particularly useful to allocate the potential vulnerability directions to a set of sub-models, and therefore paves the way for designing our ensemble training strategy." + }, + { + "type": "text", + "bbox": [ + 0.169, + 0.536, + 0.827, + 0.621 + ], + "angle": 0, + "content": "- Second, we propose a data transform framework that can effectively promote the diversity of training data for robust ensemble training. The framework consists of two steps: \"frequency selection\" and \"frequency transformation\", where the frequency is based on the Fourier transformation on the images. We propose two efficient frequency transformations with low complexities on the identified non-robust features. 
The first one is based on simple random noise, and the second one is a cute \"targeted attack transformation\" that can modify the non-robust features more effectively (Section 4.2)." + }, + { + "type": "text", + "bbox": [ + 0.169, + 0.627, + 0.827, + 0.711 + ], + "angle": 0, + "content": "- Finally, we conduct a set of experiments to evaluate the adversarial robustness of our approach on several benchmark datasets under the widely used attack algorithms. We also compare our approach with several open-source ensemble methods, such as ADP (Pang et al., 2019), GAL (Kariyappa & Qureshi, 2019), DVERGE (Yang et al., 2020), and TRS (Yang et al., 2021). Compared with those baselines, the experimental results suggest that our proposed approach can significantly outperform most of them in robust accuracy and also preserve comparable high clean accuracy." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.726, + 0.388, + 0.74 + ], + "angle": 0, + "content": "1.1 OTHER RELATED WORKS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.75, + 0.828, + 0.835 + ], + "angle": 0, + "content": "Data transformation for ensemble training. Guo et al. (2018) and Raff et al. (2019) proposed the transformations that preserve semantic information to reduce the impact of adversarial perturbation. AprilPyone & Kiya (2021) developed a training method that employs block-wise data transformations, where the input image is partitioned into blocks based on some private key. LINAC (Rusu et al., 2022) uses a predetermined random seed (private key) to initialize and train a DNN to encode the input data, serving as an encrypted input transformation." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.841, + 0.828, + 0.926 + ], + "angle": 0, + "content": "Adversarial attack from frequency perspective. Wang et al. (2020) explained that the model's vulnerability to small distortions may be due to its dependence on high-frequency features. Yucel et al. (2023) proposed a data augmentation method that reduces the reliance on high-frequency components, so as to improve model's robustness while maintaining clean accuracy. Maiya et al. (2021) and Bernhard et al. (2021) respectively showed that to fully understand the vulnerability, we should consider the distribution of the entire dataset with high and low frequencies." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.103, + 0.342, + 0.119 + ], + "angle": 0, + "content": "2 PRELIMINARIES" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.125, + 0.827, + 0.239 + ], + "angle": 0, + "content": "Some notations. We consider the \\(k\\)-classification task: \\(\\mathcal{X} \\to \\mathcal{Y}\\) where \\(\\mathcal{X}\\) is the input data space and \\(\\mathcal{Y} = \\{1,2,\\dots,k\\}\\) is the set of labels. A soft-classification model \\(f(\\cdot;\\beta)\\) maps each \\(x \\in \\mathcal{X}\\) to a vector \\(f(x;\\beta) \\in \\mathbb{R}^k\\), where \\(\\beta\\) is the parameter vector that needs to be trained. Its associated hard-classification model is \\(F(x;\\beta) = \\arg \\max_i [f(x;\\beta)]_i\\) where \\([\\cdot]_i\\) stands for the \\(i\\)-th coordinate. 
The model \\(f\\) is usually equipped with a loss function \\(\\ell(f(x;\\beta), y)\\), \\(x \\in \\mathcal{X}\\) and \\(y \\in \\mathcal{Y}\\), which is differentiable on \\(\\beta\\) (e.g., cross-entropy loss). We refer to the accuracy on the original dataset as \"clean accuracy\" and the accuracy on adversarial examples as \"robust accuracy\". We denote the one-hot \\(k\\)-dimensional vector that corresponds to the target label \\(y\\) as \\(h(y)\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.244, + 0.828, + 0.274 + ], + "angle": 0, + "content": "Definition 2.1 (Ensemble Model) Let \\(\\mathcal{M} = \\{f_1, \\dots, f_M\\}\\) be a set of sub-models for a \\(k\\)-classification task. We build the ensemble model with the following function:" + }, + { + "type": "equation", + "bbox": [ + 0.367, + 0.276, + 0.826, + 0.315 + ], + "angle": 0, + "content": "\\[\nf _ {\\mathrm {E}} (x; \\beta_ {[ 1: M ]}) = \\frac {1}{M} \\sum_ {m \\in [ M ]} \\widehat {F} _ {m} (x; \\beta_ {m}), \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.323, + 0.825, + 0.357 + ], + "angle": 0, + "content": "where \\(\\beta_{[1:M]} = \\{\\beta_m \\mid 1 \\leq m \\leq M\\}\\), and \\(\\widehat{F}_m(x; \\beta_m)\\) is the one-hot \\(k\\)-dimensional vector of the hard-classification model \\(F_m(x; \\beta_m)\\) of \\(f_m\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.368, + 0.826, + 0.439 + ], + "angle": 0, + "content": "Definition 2.2 (Adversarial Attack and Targeted Attack) Given a model \\( f(\\cdot; \\beta) \\) and an input \\( (x, y) \\in \\mathcal{X} \\times \\mathcal{Y} \\), the adversarial attack algorithm \\( \\mathcal{A} \\) returns a perturbed data \\( x' \\) inside the \\( l_p \\) ball of radius \\( \\epsilon > 0 \\), which maximizes the loss function \\( \\ell(f(\\cdot; \\beta), \\cdot) \\), or minimizes the loss function \\( \\ell(f(\\cdot; \\beta), y_t) \\) if given a target label \\( y_t \\neq y \\). For the latter one, we say it is a \"targeted attack from \\( y \\) to \\( y_t \\)\". Usually we set \\( p = 2 \\) or \\( p = \\infty \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.442, + 0.827, + 0.485 + ], + "angle": 0, + "content": "As mentioned in Section 1, because our proposed approach is based on Fourier transform, we introduce several necessary notations below. Given an image \\( x \\) of size \\( L \\times N \\), the corresponding two-dimensional discrete Fourier transform can be written as: for any \\( 0 \\leq u \\leq L - 1 \\) and \\( 0 \\leq v \\leq N - 1 \\)," + }, + { + "type": "equation", + "bbox": [ + 0.359, + 0.494, + 0.826, + 0.535 + ], + "angle": 0, + "content": "\\[\n\\tilde {x} [ u, v ] = \\sum_ {s = 0} ^ {L - 1} \\sum_ {t = 0} ^ {N - 1} x [ s, t ] \\cdot e ^ {- 2 \\mathrm {j} \\pi \\left(\\frac {u _ {s}}{L} + \\frac {v t}{N}\\right)}, \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.542, + 0.828, + 0.603 + ], + "angle": 0, + "content": "where “\\(j\\)” denotes the imaginary unit, and “\\(\\tilde{x}[u,v]\\)” is the entry in the \\(u\\)-th column and \\(v\\)-th row of the Fourier matrix \\(\\tilde{x}\\) (“\\(x[s,t]\\)” is defined similarly for the original image \\(x\\)). The pixels of the image \\(x\\) form the time domain, and the entries of \\(\\tilde{x}\\) form the frequency domain. For a frequency \\((u,v)\\), the amplitude is the absolute value \\(|\\tilde{x}[u,v]|\\). We call a frequency \\((u,v)\\) as a frequency feature." 
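+ },
+ {
+ "type": "text",
+ "angle": 0,
+ "content": "To make the notation above concrete, a minimal NumPy sketch of the discrete Fourier transform and the amplitude-based frequency masking used later in this paper is given below. This is an illustrative sketch only, assuming a single-channel image: the function name frequency_transform, the quantile-style threshold tau, and the use of np.quantile are our assumptions rather than the paper's released code; the paper itself specifies only the masks M1 (high-amplitude) and M2 (low-amplitude) and the combination of M1(x) with M2(n0) described in Appendix C."
+ },
+ {
+ "type": "code",
+ "angle": 0,
+ "content": "import numpy as np\n\n# Illustrative sketch (not the authors' released code): keep the\n# high-amplitude frequencies of an image and substitute the remaining\n# low-amplitude frequencies with those of Gaussian noise n0.\ndef frequency_transform(x, tau=0.5, rng=None):\n    rng = np.random.default_rng() if rng is None else rng\n    x_f = np.fft.fft2(x)                    # 2-D DFT of the image, Eq. (2)\n    n_f = np.fft.fft2(rng.standard_normal(x.shape))  # DFT of noise n0\n    amp = np.abs(x_f)                       # amplitude of each frequency\n    thr = np.quantile(amp, tau)             # assumed amplitude threshold\n    m1 = amp >= thr                         # M1: high-amplitude frequencies\n    m2 = ~m1                                # M2: low-amplitude frequencies\n    pi_f = m1 * x_f + m2 * n_f              # keep M1(x), add M2(n0)\n    return np.real(np.fft.ifft2(pi_f))      # pi(x) back in the time domain"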
+ }, + { + "type": "title", + "bbox": [ + 0.171, + 0.611, + 0.755, + 0.627 + ], + "angle": 0, + "content": "3 FINE-GRAINED ANALYSIS ON ENSEMBLE MODEL VULNERABILITY" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.637, + 0.827, + 0.764 + ], + "angle": 0, + "content": "The previous work (Ilyas et al., 2019) categorizes the features learned by a model into robust and non-robust features. It shows that adversarial vulnerability is a natural consequence of the presence of highly predictive but non-robust features. Moreover, different models trained on the same dataset often have similar non-robust features, and therefore an adversarial example usually exhibits the \"transferability\" property among them. Several other works also presented detailed discussions on the impact of non-robust features (Benz et al., 2021; Springer et al., 2021). Following those studies, a natural idea for tackling the transferability issue is to ensure that the sub-models should have diverse non-robust features. In this section, we provide a fine-grained analysis on the vulnerability of ensemble models and then conclude two important hints for achieving this \"diversity\" goal." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.769, + 0.826, + 0.868 + ], + "angle": 0, + "content": "The following definitions are inspired by (Ilyas et al., 2019). Note that different from the term \"feature\" used in their article, we use \"feature extractor\" instead in our paper, since \"feature\" will be particularly used for referring to image feature in time or frequency domain. Specifically, we define a \"feature extractor\" as a function that maps the input \\( x \\in \\mathcal{X} \\) to a vector in \\( \\mathbb{R}^k \\). A model \\( f \\) is composed of a set of different feature extractors, with each feature extractor focusing on distinct feature. The combination of outputs from these feature extractors forms the model's final output. We then further define the \"useful feature extractors\"." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.874, + 0.825, + 0.902 + ], + "angle": 0, + "content": "Definition 3.1 (Useful feature extractor) For a given data distribution \\(\\mathcal{D} = \\mathcal{X}\\times \\mathcal{Y}\\), a feature extractor \\(\\theta :\\mathcal{X}\\to \\mathbb{R}^k\\) is useful, if we have" + }, + { + "type": "equation", + "bbox": [ + 0.401, + 0.901, + 0.826, + 0.93 + ], + "angle": 0, + "content": "\\[\n\\mathbb {E} _ {(x, y) \\sim \\mathcal {D}} [ h (y) ^ {\\top} \\theta (x) ] > \\frac {1}{k}. \\tag {3}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.825, + 0.149 + ], + "angle": 0, + "content": "Recall that \\( h(y) \\) is the one-hot \\( k \\)-dimensional vector of the label \\( y \\). Roughly speaking, the inequality (3) implies that the expected contribution of a useful feature extractor to the model's correct prediction is higher than the average contribution over all the \\( k \\) classes." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.158, + 0.826, + 0.201 + ], + "angle": 0, + "content": "Definition 3.2 (robust and non-robust feature extractor) We use \\(\\mathcal{A}(x)\\) to denote the adversarial example of a data item \\(x\\) as described in Definition 2.2. 
Let \\(\\theta\\) be a useful feature extractor. (1) We say \\(\\theta\\) is robust if the following condition holds for any \\(i\\) (\\(1 \\leq i \\leq k\\)):" + }, + { + "type": "equation", + "bbox": [ + 0.408, + 0.202, + 0.589, + 0.23 + ], + "angle": 0, + "content": "\\[\n\\mathbb {E} _ {(x, y) \\sim \\mathcal {D} _ {i}} \\left[ \\theta (\\mathcal {A} (x)) \\right] _ {i} > \\frac {1}{k}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.234, + 0.825, + 0.251 + ], + "angle": 0, + "content": "where \\(\\mathcal{D}_i\\) represents the \\(i\\)-th class data. We denote the set of these robust feature extractors as \\(\\Theta_{R}\\)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.256, + 0.827, + 0.311 + ], + "angle": 0, + "content": "(2) The remaining useful feature extractors are non-robust. We assign these non-robust extractors to \\( k(k - 1) \\) sets: \\( \\{\\Theta_{i,j} \\mid 1 \\leq i \\neq j \\leq k\\} \\) as follows. Initially, all these \\( k(k - 1) \\) sets are empty. Then we go through all the non-robust feature extractors. For each non-robust \\( \\theta \\), there must exist at least an index \"i\" such that" + }, + { + "type": "equation", + "bbox": [ + 0.384, + 0.313, + 0.579, + 0.33 + ], + "angle": 0, + "content": "\\[\n\\mathbb {E} _ {(x, y) \\sim \\mathcal {D} _ {i}} [ \\theta (\\mathcal {A} (x)) ] _ {i} \\leq 1 / k;\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.335, + 0.825, + 0.409 + ], + "angle": 0, + "content": "we let \\( j = \\arg \\max_{s} \\mathbb{E}_{(x,y) \\sim \\mathcal{D}_i}[\\theta(\\mathcal{A}(x))]_s \\) and assign \\( \\theta \\) to \\( \\Theta_{i,j} \\) (note that \\( j \\) should be not equal to \\( i \\), or there are multiple indices achieving the maximum expectation and at least one is not equal to \\( i \\), since otherwise \\( \\sum_{s=1}^{k} \\mathbb{E}_{(x,y) \\sim \\mathcal{D}_i}[\\theta(\\mathcal{A}(x))]_s \\) will less than 1). Eventually, these \\( k(k-1) \\) sets are constructed, where each \\( \\Theta_{i,j} \\) contains the feature extractors that are not robust to the attack from \\( i \\) to \\( j \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.42, + 0.826, + 0.465 + ], + "angle": 0, + "content": "Remark 3.3 Intuitively, if a feature extractor is robust, it should have the capability to preserve its contribution to the correct prediction even under perturbation. It is also worth noting that a non-robust feature extractor \\(\\theta\\) could be assigned to multiple \\(\\Theta_{i,j}s\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.475, + 0.826, + 0.518 + ], + "angle": 0, + "content": "Assume we have a standardly trained model \\( f \\) consisting of a set of useful feature extractors, and we denote it as \\( \\Theta_f \\). Each of them can be classified as robust or non-robust as Definition 3.2. Similar with the formulation proposed in (Ilyas et al., 2019), we can represent the model as" + }, + { + "type": "equation", + "bbox": [ + 0.303, + 0.524, + 0.826, + 0.568 + ], + "angle": 0, + "content": "\\[\nf (x) = \\sum_ {\\theta \\in \\Theta_ {R} \\cap \\Theta_ {f}} w _ {\\theta} \\theta (x) + \\sum_ {i, j = 1, i \\neq j} ^ {k} \\sum_ {\\theta \\in \\Theta_ {i, j} \\cap \\Theta_ {f}} w _ {\\theta} \\theta (x), \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.573, + 0.827, + 0.7 + ], + "angle": 0, + "content": "where each \\(\\theta\\) has a coefficient \\(w_{\\theta} \\in \\mathbb{R}\\). We then conduct our analysis based on Equation (4). 
Some recent works reveal that adversarial training method can obtain robust model through reducing the dependence on non-robust feature extractors (Allen-Zhu & Li, 2022; Tsipras et al., 2018). However, this strategy may cause certain downgrade performance on clean accuracy (because the non-robust feature extractors also contribute to obtaining correct prediction). Fortunately, we are able to avoid this dilemma in the context of ensemble training. Namely, we just need to keep the non-robust features as diverse as possible, instead of entirely eliminating the dependence on those non-robust feature extractors. To pave the way for realizing this goal, we introduce the definition of vulnerability of ensemble model below." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.71, + 0.826, + 0.755 + ], + "angle": 0, + "content": "Definition 3.4 (Vulnerability of ensemble model) Suppose \\( f_{\\mathrm{E}} \\) is an ensemble model as described in Definition 2.1, and its associated hard-classification model is denoted by \\( F_{\\mathrm{E}} \\): \\( \\forall x \\), \\( F_{\\mathrm{E}}(x) = \\arg \\max_{i}[f_{\\mathrm{E}}(x)]_{i} \\). Given the data distribution \\( \\mathcal{D} = \\mathcal{X} \\times \\mathcal{Y} \\), the vulnerability of \\( F_{\\mathrm{E}} \\) is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.312, + 0.759, + 0.826, + 0.786 + ], + "angle": 0, + "content": "\\[\n\\operatorname {V r} \\left(F _ {\\mathrm {E}}\\right) = \\mathbb {E} _ {(x, y) \\sim \\mathcal {D}} \\left[ \\mathbb {I} \\left\\{F _ {\\mathrm {E}} (x) = y \\wedge F _ {\\mathrm {E}} (\\mathcal {A} (x)) \\neq y \\right\\} \\right], \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.79, + 0.825, + 0.83 + ], + "angle": 0, + "content": "where \\(\\mathbb{I}(\\cdot)\\) represents the indicator function. Furthermore, for any target class \\(y_{t}\\), we can define the vulnerability towards \\(y_{t}\\) as \\(\\mathrm{Vr}(F_{\\mathrm{E}}, y_{t}) = \\mathbb{E}_{(x,y) \\sim \\mathcal{D}}\\left[\\mathbb{I}\\{F_{\\mathrm{E}}(x) = y \\wedge F_{\\mathrm{E}}(\\mathcal{A}(x)) = y_{t}\\}\\right]\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.84, + 0.826, + 0.883 + ], + "angle": 0, + "content": "The vulnerability of Definition 3.4 describes the success probability of an attack \\(\\mathcal{A}\\) to the ensemble model \\(F_{\\mathrm{E}}\\). We have the following key inequality, which indicates that \\(\\operatorname{Vr}(F_{\\mathrm{E}})\\) is bounded by considering all the attack directions, i.e.," + }, + { + "type": "equation", + "bbox": [ + 0.405, + 0.888, + 0.826, + 0.922 + ], + "angle": 0, + "content": "\\[\n\\operatorname {V r} \\left(F _ {\\mathrm {E}}\\right) \\leq \\sum_ {y _ {t} \\in \\mathcal {Y}} \\operatorname {V r} \\left(F _ {\\mathrm {E}}, y _ {t}\\right). \\tag {6}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.493, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "text", + "bbox": [ + 0.169, + 0.104, + 0.827, + 0.165 + ], + "angle": 0, + "content": "The proof of Inequality (6) is placed in Appendix A.1. Moreover, if \\( F_{\\mathrm{E}}(\\mathcal{A}(x)) = y_t \\), there are at least \\( M / k \\) sub-models returning the wrong label \\( y_t \\) due to the pigeonhole principle. 
Namely, \" \\( \\sum_{m=1}^{M} \\mathbb{I}\\left([f_m(\\mathcal{A}(x))]_y < [f_m(\\mathcal{A}(x))]_{y_t}\\right) > \\frac{M}{k} \\)\" should be a necessary condition for successfully attacking from \\( y \\) to \\( y_t \\). So it implies" + }, + { + "type": "equation", + "bbox": [ + 0.248, + 0.171, + 0.826, + 0.211 + ], + "angle": 0, + "content": "\\[\n\\operatorname {V r} \\left(F _ {\\mathrm {E}}, y _ {t}\\right) \\leq \\mathbb {E} _ {(x, y) \\sim \\mathcal {D}} \\left[ \\mathbb {I} \\left(\\sum_ {m = 1} ^ {M} \\mathbb {I} \\left([ f _ {m} (\\mathcal {A} (x)) ] _ {y} < [ f _ {m} (\\mathcal {A} (x)) ] _ {y _ {t}}\\right) > \\frac {M}{k}\\right) \\right]. \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.169, + 0.218, + 0.825, + 0.281 + ], + "angle": 0, + "content": "From the upper bound (6), we can decrease the total vulnerability by reducing \\(\\mathrm{Vr}(F_{\\mathrm{E}},y_t)\\) for each \\(y_{t}\\). Also, from (7) we know that \\(\\mathrm{Vr}(F_{\\mathrm{E}},y_{t})\\) can be reduced by decreasing the chance of “\\([f_m(\\mathcal{A}(x))]_y < [f_m(\\mathcal{A}(x))]_{y_t}\\)” over \\(m\\in \\{1,2,\\dots ,M\\}\\). According to the Equation (4), the inequality \\(\\left[f_m(\\mathcal{A}(x))\\right]_y < \\left[f_m(\\mathcal{A}(x))\\right]_{y_t}\\)” can be rewritten as" + }, + { + "type": "equation", + "bbox": [ + 0.185, + 0.287, + 0.826, + 0.332 + ], + "angle": 0, + "content": "\\[\n\\left[ \\sum_ {\\theta \\in \\Theta_ {R} ^ {m}} w _ {\\theta} \\theta (\\mathcal {A} (x)) + \\sum_ {i, j = 1, i \\neq j} ^ {k} \\sum_ {\\theta \\in \\Theta_ {i, j} ^ {m}} w _ {\\theta} \\theta (\\mathcal {A} (x)) \\right] _ {y} < \\left[ \\sum_ {\\theta \\in \\Theta_ {R} ^ {m}} w _ {\\theta} \\theta (\\mathcal {A} (x)) + \\sum_ {i, j = 1, i \\neq j} ^ {k} \\sum_ {\\theta \\in \\Theta_ {i, j} ^ {m}} w _ {\\theta} \\theta (\\mathcal {A} (x)) \\right] _ {y _ {t}}, \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.338, + 0.827, + 0.399 + ], + "angle": 0, + "content": "where \\(\\Theta_R^m\\) and \\(\\Theta_{i,j}^{m}\\) respectively denote the sets of robust and non-robust feature extractors for the sub-model \\(f_{m}\\). Moreover, the set \\(\\Theta_{y,y_t}^m\\) should have relatively larger influence to the right-hand side of (8) than other feature extractor set \\(\\Theta_{y,j}^{m}\\) with \\(j\\neq y_{t}\\), due to the outer operator “\\([\\cdot ]_{y_t}\\). Therefore, we conclude our first hint as an intuition for reducing \\(\\mathrm{Vr}(F_{\\mathrm{E}})\\)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.405, + 0.825, + 0.45 + ], + "angle": 0, + "content": "Hint (i): To decrease the vulnerability in the attack direction \\( y_{t} \\) (i.e., each term \\( \\mathrm{Vr}(F_{\\mathrm{E}},y_{t}) \\) in the upper bound of (6)), it is reasonable to decrease the influence from the non-robust feature extractors of \\( \\Theta_{y,y_t}^m \\)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.454, + 0.826, + 0.538 + ], + "angle": 0, + "content": "In Hint (i), a major difference from the previous analysis (Ilyas et al., 2019; Allen-Zhu & Li, 2022) is that, we in particular relate each attack direction \\( y_{t} \\) to some specific non-robust feature extractors, where the benefit is that these correspondences can effectively help us to build the diverse ensemble model. Moreover, According to the principle of ensemble methods, as long as at least \\( M / 2 + 1 \\) sub-models are not successfully attacked, the ensemble model will successfully defend against the attack. 
So we conclude the second hint that is also important for designing our approach." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.544, + 0.827, + 0.573 + ], + "angle": 0, + "content": "Hint (ii): For each attack direction \\( y_{t} \\), we only need to consider manipulating the training data of \\( M / 2 + 1 \\) sub-models instead of all the \\( M \\) sub-models." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.58, + 0.825, + 0.607 + ], + "angle": 0, + "content": "Overall, the above Hint (i) & (ii) play the key roles for inspiring our data transformation method in Section 4." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.619, + 0.515, + 0.634 + ], + "angle": 0, + "content": "4 OUR ENSEMBLE TRAINING METHOD" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.641, + 0.825, + 0.67 + ], + "angle": 0, + "content": "We first introduce our model and high-level idea in Section 4.1, and then elaborate on the technical details for the data transformations in Section 4.2." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.687, + 0.449, + 0.7 + ], + "angle": 0, + "content": "4.1 OVERVIEW OF OUR FRAMEWORK" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.712, + 0.825, + 0.839 + ], + "angle": 0, + "content": "Note that the feature extractors of a model depend on the given training data. Namely, any modification on the features of the training data can implicitly influence the model. Thus, in this section we follow the Hint (i) & (ii) of Section 3 to design an effective data transformation method. The transformation is expected to modify the features of the training data, so as to enhance the robustness of the trained ensemble model. We train a set of distinct sub-models on the transformed training data; these sub-models can be integrated into an ensemble model being robust against adversarial attacks, while preserving the clean accuracy of each sub-model as much as possible. We use “\\(\\pi_{m}\\)” to denote the transformation for the \\(m\\)-th sub-model, \\(1 \\leq m \\leq M\\), and formulate the following problem by slightly modifying Definition 2.1 (replace \\(x\\) by the adversarial example \\(\\mathcal{A}(x)\\) for each sub-model):" + }, + { + "type": "equation", + "bbox": [ + 0.334, + 0.844, + 0.826, + 0.883 + ], + "angle": 0, + "content": "\\[\n\\min \\mathbb {E} _ {(x, y) \\sim \\mathcal {X} \\times \\mathcal {Y}} \\ell \\left(\\frac {1}{M} \\sum_ {m \\in [ M ]} \\widehat {F} _ {m} (\\mathcal {A} (x); \\beta_ {m}), y\\right) \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.888, + 0.825, + 0.927 + ], + "angle": 0, + "content": "where \\(\\beta_{m}\\) is obtained by training on the transformed data, i.e., \\(\\beta_{m} = \\operatorname*{argmin}_{\\beta}\\mathbb{E}_{(x,y)\\sim \\mathcal{X}\\times \\mathcal{Y}}\\ell (f_{m}(\\pi_{m}(x);\\beta),y)\\) for each \\(m\\in \\{1,2,\\dots ,M\\}\\)." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.828, + 0.23 + ], + "angle": 0, + "content": "The major challenge for solving the above problem (9) is how to design a set of appropriate transformations \\(\\{\\pi_m\\mid 1\\leq m\\leq M\\}\\), so that the obtained parameters \\(\\beta_{[1:M]}\\) can yield sufficiently diverse sub-models. 
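" + }, + { + "type": "text", + "content": "Before turning to the design of the transformations, a minimal PyTorch-style sketch of the two-level scheme around (9): each parameter set beta_m is fit on its own transformed data pi_m(x), and only the averaged ensemble is used at prediction time. The names make_submodel, transforms, and loader are placeholders for illustration; this is a sketch under those assumptions, not the paper's implementation." + }, + { + "type": "code", + "content": "
import torch
import torch.nn.functional as F

def train_ensemble(make_submodel, transforms, loader, epochs=1, lr=1e-3):
    # One sub-model per transformation pi_m; f_m is trained only on pi_m(x).
    models = [make_submodel() for _ in transforms]
    for pi_m, f_m in zip(transforms, models):
        opt = torch.optim.Adam(f_m.parameters(), lr=lr)
        for _ in range(epochs):
            for x, y in loader:
                loss = F.cross_entropy(f_m(pi_m(x)), y)  # inner objective for beta_m
                opt.zero_grad()
                loss.backward()
                opt.step()
    return models

def ensemble_logits(models, x):
    # The ensemble in (9) averages the sub-models' outputs.
    return torch.stack([f_m(x) for f_m in models]).mean(dim=0)
" + }, + { + "type": "text", + "content": "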
To address this issue, we leverage transformations in the frequency domain to guide the non-robust features of each sub-model to be as diverse as possible. Specifically, we introduce a method called "Frequency Domain Transformation (FDT)" for constructing the set of diverse training datasets \(\{\pi_1(\mathcal{X}),\pi_2(\mathcal{X}),\dots ,\pi_M(\mathcal{X})\}\). FDT relies on a key "weakness allocation" strategy. Roughly speaking, the strategy aims to promote the diversity of the constructed datasets, and meanwhile ensure that the overall clean accuracy is not sacrificed in the ensemble. The details are presented in the next section." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.245, + 0.5, + 0.259 + ], + "angle": 0, + "content": "4.2 FREQUENCY DOMAIN TRANSFORMATION" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.265, + 0.828, + 0.363 + ], + "angle": 0, + "content": "Before performing the transformation, we need to select a set of non-robust features. In the time domain, a simple observation is that an image feature is usually invariant under spatial translation, e.g., it can appear at different positions in images. This property makes it challenging to directly identify and represent non-robust features in the time domain. Thus we turn our attention to the frequency domain. Moreover, some previous studies on robust learning already revealed that robust and non-robust features are often deeply related to the frequency domain (Wang et al., 2020; Bernhard et al., 2021; Maiya et al., 2021)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.369, + 0.828, + 0.593 + ], + "angle": 0, + "content": "Amplitude based selection. To identify the non-robust frequencies, a straightforward idea is to test the robustness of each individual frequency and select the non-robust ones. Nevertheless, it may incur a large computational cost since the number of frequencies is high (e.g., if the input image is \(64 \times 64\), the number of frequencies is also \(64 \times 64 \approx 4 \times 10^{3}\)). We propose an easy-to-implement selection idea based on the amplitudes, since the amplitudes can be directly obtained via the Fourier transformation with low complexity. According to the previous research (Ilyas et al., 2019; Benz et al., 2021; Springer et al., 2021), a feature can be regarded as "robust" if it cannot be easily manipulated by small perturbations. We observe that high-amplitude frequency features usually dominate the ground truth of an image. Figure 4 in our Appendix illustrates an example to show that, if we keep high-amplitude frequencies and remove low-amplitude ones, the image changes only slightly even after adding certain noise (i.e., we can still recognize the ground truth from the modified image). This observation suggests that high-amplitude frequency features are more strongly related to the semantic information of an image. So in our following approach, we maintain high-amplitude frequency features as "robust features", and select the frequencies with low amplitudes (by setting a threshold "\(\tau\)") to transform. Moreover, we can conveniently observe how the performance changes by varying the threshold \(\tau\) in our experiment. Figure 1 illustrates the amplitude-based selection."
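 + }, + { + "type": "text", + "content": "A minimal numpy sketch of the amplitude-based selection follows. Interpreting the threshold tau as a quantile of the amplitude spectrum (consistent with the values 0.2 and 0.8 used in the experiments) is our assumption; the paper only specifies that low-amplitude frequencies are selected for transformation." + }, + { + "type": "code", + "content": "
import numpy as np

def low_amplitude_mask(x, tau):
    # Amplitude-based selection sketch: mark the fraction tau of frequencies
    # with the lowest amplitudes as non-robust. x is a 2-D image array.
    spectrum = np.fft.fft2(x)
    amplitude = np.abs(spectrum)
    cutoff = np.quantile(amplitude, tau)   # tau = 0.2 keeps the top 80% amplitudes
    return amplitude <= cutoff             # True where the frequency may be transformed
" + }, + { + "type": "text", + "content": ""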
+ }, + { + "type": "image", + "bbox": [ + 0.174, + 0.613, + 0.498, + 0.695 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.709, + 0.504, + 0.807 + ], + "angle": 0, + "content": "Figure 1: We use a \(5 \times 5\) image as a toy example, where the intensity of the color indicates the magnitude of the amplitude. In our amplitude-based selection, we retain the high-amplitude frequencies (i.e., the darker regions) and perform data transformations on the low-amplitude frequencies (i.e., the white regions)." + }, + { + "type": "text", + "bbox": [ + 0.51, + 0.598, + 0.828, + 0.82 + ], + "angle": 0, + "content": "Following our frequency selection, we propose two transformation methods for promoting the diversity by using the identified non-robust features. Our first approach follows a straightforward idea, which is just to replace the non-robust features by random noise (due to the space limit, we leave the details to Appendix C). This method is very easy to implement in practice. Though it can achieve a certain degree of improvement upon previous ensemble training methods, the performance is still not very promising (as shown in our experiments). To further improve the effectiveness, we propose a more sophisticated approach called "targeted-attack transformation", which constructs a set of different "substitute" features through attacking" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.819, + 0.826, + 0.849 + ], + "angle": 0, + "content": "the images to different targeted classes, and then uses them to replace the selected non-robust frequencies." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.855, + 0.826, + 0.926 + ], + "angle": 0, + "content": "Targeted-attack transformation: We briefly explain our intuition first. It was shown that adversarial attacks have the capability to manipulate non-robust features (Ilyas et al., 2019; Yang et al., 2020). In particular, a targeted attack, as introduced in Definition 2.2, aims at modifying non-robust features that are associated with a specific target label. For instance, let us consider a data point \((x,y)\) in the original dataset \(\mathcal{X} \times \mathcal{Y}\); we set the target label as \(y_{t}\) and obtain the corresponding adversarial example" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "6" + } ], [ { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.175 + ], + "angle": 0, + "content": "\(x^{\prime}\) (\(x^{\prime}\) contains the modified non-robust features that are associated with \(y_{t}\)). When training a model using \((x^{\prime}, y)\), intuitively it can be viewed as an "immunization" against the attack from \(y\) to \(y_{t}\); and consequently, the chance of obtaining the wrong label \(y_{t}\) for the data with label \(y\) decreases. In other words, it becomes more difficult to attack the images with label \(y\) to \(y_{t}\) than to the other classes. We call the modified non-robust feature a "substitute" feature derived by the targeted attack."
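 + }, + { + "type": "text", + "content": "For concreteness, a one-step targeted-attack sketch in PyTorch that produces a perturbation carrying such substitute features. Using a single targeted-FGSM step is our assumption for illustration; the paper only requires some targeted attack in the sense of its Definition 2.2, and model is a placeholder classifier." + }, + { + "type": "code", + "content": "
import torch
import torch.nn.functional as F

def targeted_perturbation(model, x, y_target, eps=0.02):
    # One-step targeted attack sketch: descend the loss w.r.t. the target
    # class, pushing the prediction toward y_target (note the minus sign).
    x = x.clone().requires_grad_(True)
    loss = F.cross_entropy(model(x), y_target)
    loss.backward()
    x_adv = (x - eps * x.grad.sign()).clamp(0, 1).detach()
    return x_adv - x.detach()   # the perturbation carrying "substitute" features
" + }, + { + "type": "text", + "content": ""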
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.181, + 0.828, + 0.293 + ], + "angle": 0, + "content": "Motivated by this observation, we can construct different transformations by using \( k \times (k - 1) \) targeted attacks (since each label can be attacked toward the other \( k - 1 \) labels); these attacks can yield different substitute features, and then we use these features to replace the corresponding non-robust features in the original dataset (based on Hint (i) in Section 3); finally, the \( M \) transformed datasets are obtained via an allocation algorithm, where each substitute feature is captured by at least \( M / 2 + 1 \) datasets (based on Hint (ii) in Section 3). Overall, due to the completeness of the \( k \times (k - 1) \) targeted attacks, the \( M \) sub-models trained on those datasets can guarantee the robustness of the final ensemble solution. We introduce some definitions for our transformation first." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.305, + 0.826, + 0.349 + ], + "angle": 0, + "content": "Definition 4.1 (Strengthen a dataset) Let \( y_{1} \neq y_{2} \in \mathcal{Y} \). If a given training dataset \( P \) contains at least one adversarial example that has the original label \( y_{1} \) but is misclassified as \( y_{2} \), we say that \( P \) has been strengthened by the attack direction from \( y_{1} \) to \( y_{2} \) ("\( \overrightarrow{y_{1}y_{2}} \)-direction" for short)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.36, + 0.825, + 0.418 + ], + "angle": 0, + "content": "In other words, if \( P \) is not strengthened in the \( \overrightarrow{y_1y_2} \)-direction, the model trained on \( P \) is more likely to be vulnerable to the targeted attacks from \( y_1 \) to \( y_2 \). Also, the dataset \( P \) may not have been strengthened in multiple different directions. So we define its "weakness set" \( \mathcal{W} = \{\overrightarrow{y_1y_2} \mid 1 \leq y_1, y_2 \leq k, y_1 \neq y_2, \text{ and } P \text{ has not been strengthened in } \overrightarrow{y_1y_2} \text{-direction}\} \)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.43, + 0.827, + 0.459 + ], + "angle": 0, + "content": "Definition 4.2 (Diversity of weakness sets) Given \(M\) datasets \(\{P_1, P_2, \dots, P_M\}\) with their corresponding weakness sets \(\{\mathcal{W}_1, \mathcal{W}_2, \dots, \mathcal{W}_M\}\), we define their diversity:" + }, + { + "type": "equation", + "bbox": [ + 0.302, + 0.465, + 0.697, + 0.5 + ], + "angle": 0, + "content": "\[\n\boldsymbol {D i v} (P _ {1}, P _ {2}, \dots , P _ {M}) = 1 - \frac {| \mathcal {W} _ {1} \cap \mathcal {W} _ {2} \cap \cdots \cap \mathcal {W} _ {M} |}{\max \{| \mathcal {W} _ {1} | , | \mathcal {W} _ {2} | , \cdots , | \mathcal {W} _ {M} | \}}.\n\]" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.513, + 0.825, + 0.584 + ], + "angle": 0, + "content": "It is easy to see that the higher the value \( \mathbf{Div}(P_1, P_2, \dots, P_M) \), the more diverse the corresponding weakness sets. A higher diversity suggests that the vulnerabilities of the \( M \) sub-models trained on those datasets are more likely to be different. To achieve good performance in terms of both accuracy and robustness, we need to take the diversity function "Div" into account when designing the transformations. 
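" + }, + { + "type": "text", + "content": "Definition 4.2 translates directly into code; a small sketch with a worked example (weakness sets are represented as Python sets of directed label pairs, which is our encoding choice):" + }, + { + "type": "code", + "content": "
def diversity(weakness_sets):
    # Div from Definition 4.2: 1 minus the shared-weakness ratio.
    common = set.intersection(*weakness_sets)
    largest = max(len(w) for w in weakness_sets)
    if largest == 0:
        return 1.0  # all weakness sets empty: nothing is shared
    return 1.0 - len(common) / largest

# Example: three datasets with no common weakness direction have Div = 1.
w1, w2, w3 = {(0, 1), (1, 2)}, {(1, 2), (2, 0)}, {(2, 0), (0, 1)}
assert diversity([w1, w2, w3]) == 1.0
" + }, + { + "type": "text", + "content": "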
The basic principle is:" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.59, + 0.825, + 0.661 + ], + "angle": 0, + "content": "On the one hand, our transformed datasets should have a sufficiently large number of diverse substitute features, so that one adversarial attack cannot easily capture more than half of the \\(M\\) sub-models. On the other hand, the datasets should also maintain the major information of the original input as much as possible, since otherwise the clean accuracy may decline due to the added substitute features." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.666, + 0.825, + 0.699 + ], + "angle": 0, + "content": "To provide an appropriate trade-off, we propose the following constrained optimization objective: let \\(\\mathbb{C}\\) be the set of all the \\(\\binom{M}{\\lceil M/2 \\rceil}\\) combinations of \\(\\lceil M/2 \\rceil\\)-size subsets from \\(\\{1, 2, \\dots, M\\}\\), and then" + }, + { + "type": "equation", + "bbox": [ + 0.398, + 0.699, + 0.825, + 0.72 + ], + "angle": 0, + "content": "\\[\n\\max _ {P _ {1}, P _ {2}, \\dots , P _ {M}} \\quad \\min \\left\\{\\left| \\mathcal {W} _ {1} \\right|, \\left| \\mathcal {W} _ {2} \\right|, \\dots , \\left| \\mathcal {W} _ {M} \\right| \\right\\} \\tag {10}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.271, + 0.724, + 0.825, + 0.742 + ], + "angle": 0, + "content": "\\[\n\\mathrm {s . t .} \\forall \\left\\{i _ {1}, i _ {2}, \\dots , i _ {\\lceil M / 2 \\rceil} \\right\\} \\in \\mathbb {C}, \\quad \\boldsymbol {D i v} \\left(P _ {i _ {1}}, P _ {i _ {2}}, \\dots , P _ {i _ {\\lceil M / 2 \\rceil}}\\right) = 1. \\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.746, + 0.827, + 0.858 + ], + "angle": 0, + "content": "We maximize the objective function of (10) because we want to minimize the modification degree for each transformed dataset. Intuitively, a large weakness set indicates that the corresponding dataset is not changed significantly by the transformation, and thus the clean accuracy is likely to be well preserved. The constraint (11) guarantees that any \\(\\lceil M/2\\rceil\\) datasets have the intersection \\(\\mathcal{W}_{i_1} \\cap \\mathcal{W}_{i_2} \\cap \\dots \\cap \\mathcal{W}_{i_{\\lceil M/2 \\rceil}} = \\emptyset\\), that is, they do not share any common direction. Consequently, the ensemble solution should be robust to any attack direction. To achieve this twofold goal, we design an efficient allocation strategy together with an attack-guided transformation on the training data. Specifically, the procedure consists of the following two stages." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.864, + 0.826, + 0.926 + ], + "angle": 0, + "content": "Stage (1): allocating the weakness sets to the sub-models. For each \\(\\overrightarrow{y_1y_2}\\)-direction, \\(1 \\leq y_1, y_2 \\leq k\\), there are at most \\(\\lceil \\frac{M}{2} \\rceil - 1\\) sets that contain this direction (due to the constraint (11)), so the sum \\(\\sum_{1 \\leq i \\leq M} |\\mathcal{W}_i|\\) is no larger than \\(k(k - 1) * (\\lceil \\frac{M}{2} \\rceil - 1)\\). 
Therefore, the maximum value of Eq (10) is no larger than" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "7" + } ], [ { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "equation", + "bbox": [ + 0.407, + 0.1, + 0.826, + 0.129 + ], + "angle": 0, + "content": "\[\nk (k - 1) * \left(\left\lceil \frac {M}{2} \right\rceil - 1\right) / M \tag {12}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.132, + 0.827, + 0.178 + ], + "angle": 0, + "content": "based on the pigeonhole principle. We assign the total \( k(k - 1)*\left(\lceil \frac{M}{2}\rceil -1\right) \) directions (each direction is duplicated to be \( \lceil \frac{M}{2}\rceil -1 \) copies) to \( M \) sets in a round-robin way, where the number of directions assigned to each set is no larger than the upper bound (12). Please refer to Figure 2 for an example." + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.189, + 0.825, + 0.355 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.192, + 0.381, + 0.799, + 0.397 + ], + "angle": 0, + "content": "Figure 2: Assign the attack directions to five sub-models for a three-class classification task." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.413, + 0.828, + 0.633 + ], + "angle": 0, + "content": "Stage (2): constructing the new datasets. Following the allocation, we transform the original dataset, denoted by \( P_{\mathrm{ori}} \), to align with the assigned weakness sets for the \( M \) sub-models correspondingly. Using the same notations of Definition 4.2, we denote the to-be-constructed dataset for the \( m \)-th sub-model as \( P_{m} \) (which is initialized to be \( \emptyset \)), \( 1 \leq m \leq M \). First, we divide \( P_{\mathrm{ori}} \) into \( k \) subsets \( C_1, C_2, \dots, C_k \), where each \( C_j \) corresponds to the label \( j \), for \( 1 \leq j \leq k \); further, each \( C_j \) is equally partitioned into \( k - 1 \) disjoint parts \( \{C_{j,1}, C_{j,2}, \dots, C_{j,k - 1}\} \) at random. For each data \( (x,j) \) in \( C_{j,i} \), we attack it from \( j \) to \( h \) (let \( h = i + j \mod k \)) to obtain the adversarial perturbation; then we only substitute the low-amplitude frequencies of \( x \) with the perturbation, and other frequencies (which have their amplitudes higher than the aforementioned threshold \( \tau \)) remain unchanged. We denote the new dataset as \( C_{j,i}' \). Finally, we add \( C_{j,i}' \) to \( P_{m} \) if the \( \overrightarrow{jh} \)-direction is not in the weakness set \( \mathcal{W}_m \). From the construction method of the weakness sets, we know that the \( \overrightarrow{jh} \)-direction can appear in at most \( \lceil \frac{M}{2} \rceil - 1 \) weakness sets. So, the set \( C_{j,i}' \) can be added to at least \( \lceil \frac{M}{2} \rceil \) different \( P_m \)'s. Consequently, the completeness for defending the \( k(k - 1) \) attack directions can be guaranteed, i.e., the constraint (11) is satisfied. Figure 3 shows the schematic diagram of the construction process, and the full details are shown in Appendix D."
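 + }, + { + "type": "text", + "content": "A minimal sketch of the Stage (1) round-robin allocation. The concrete iteration order is our choice for illustration; any assignment with the same counting properties would do. Because each direction gets ceil(M/2)-1 < M copies placed in consecutive slots, it never lands twice in the same set, which is exactly what constraint (11) needs." + }, + { + "type": "code", + "content": "
from math import ceil

def allocate_weakness_sets(k, M):
    # Stage (1) sketch: distribute ceil(M/2)-1 copies of each of the k(k-1)
    # attack directions over M weakness sets in round-robin order.
    copies = ceil(M / 2) - 1
    directions = [(y1, y2) for y1 in range(k) for y2 in range(k) if y1 != y2]
    W = [set() for _ in range(M)]
    slot = 0
    for d in directions:
        for _ in range(copies):
            W[slot % M].add(d)   # consecutive slots differ, so the same
            slot += 1            # direction never repeats within one set
    return W

# Example matching Figure 2: k = 3 classes, M = 5 sub-models gives
# 6 directions x 2 copies spread over 5 weakness sets.
print(allocate_weakness_sets(3, 5))
" + }, + { + "type": "text", + "content": ""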
+ }, + { + "type": "image", + "bbox": [ + 0.236, + 0.643, + 0.763, + 0.832 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.843, + 0.825, + 0.879 + ], + "angle": 0, + "content": "Figure 3: A schematic diagram of the construction process. In the allocation stage, each \\( C_{j,i}^{\\prime} \\) is added to \\( P_{m} \\) if the \\( i \\vec{h} \\)-direction is not in the weakness set \\( \\mathcal{W}_{m} \\), \\( h = i + j \\mod k \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.826, + 0.926 + ], + "angle": 0, + "content": "Remark 4.3 We are aware of some previous robust learning approaches that also depend on data modification (Allen-Zhu & Li, 2022; Tsipras et al., 2018). But their approaches usually tend to" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.828, + 0.204 + ], + "angle": 0, + "content": "completely eliminate non-robust features. Our method is quite different, where the goal is to leverage the carefully selected non-robust features to weaken the transferability among sub-models. For each sub-model, we only modify the non-robust features corresponding to certain directions, rather than all non-robust features, and therefore the modification yields relatively lower impact on clean accuracy. Moreover, we partition each class \\( C_j \\) into \\( k - 1 \\) subsets \\( \\{C_{j,1}, C_{j,2}, \\dots, C_{j,k - 1}\\} \\), with each subset being attacked to a specified class. This step eliminates the need to attack each data point across all classes, thereby reducing the computational complexity of constructing the new datasets." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.222, + 0.33, + 0.238 + ], + "angle": 0, + "content": "5 EXPERIMENTS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.243, + 0.828, + 0.385 + ], + "angle": 0, + "content": "We conduct our experiments on the widely used image datasets CIFAR-10, CIFAR-100 (Krizhevsky & Hinton, 2009), and Tiny-ImageNet-200 (Deng et al., 2009). As for the baselines, we reproduce the existing ensemble models including ADP (Pang et al., 2019), GAL (Kariyappa & Qureshi, 2019), DVERGE (Yang et al., 2020), and TRS (Yang et al., 2021), with their released codes and recommended hyperparameter settings. As for our approach, \"FDT-random\" and \"FDT-target\" respectively denote the methods utilizing random noise based transformation and target-attack transformation; \"FDT-hybrid\" represents the method that combines both, that is, we set two frequency selection thresholds \\(\\tau_{1}\\) and \\(\\tau_{2}\\) (\\(\\tau_{1} < \\tau_{2}\\)), and perform random and target-attack transformations on the frequencies less than \\(\\tau_{1}\\) and the frequencies between \\(\\tau_{1}\\) and \\(\\tau_{2}\\), respectively (due to the space limit, more details are shown in Appendix E). Our code will be available at https://github.com/ideven123/FDT." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.389, + 0.827, + 0.502 + ], + "angle": 0, + "content": "We train each sub-model based on ResNet-20 (He et al., 2016) and use Adam optimizer (Kingma & Ba, 2015) with an initial learning rate of 0.001 for 200 epochs. 
To further test their performance on neural networks at a larger scale, we also use WideResNet28-10 (Zagoruyko & Komodakis, 2016) to train the sub-models, and the results are placed in our supplement. All the experiments are implemented with PyTorch (Paszke et al., 2017) on a single NVIDIA GeForce RTX 3090 with 24GB of memory and 1TB of storage. We assess the performance of our models through 5 repeated runs and compute error bars. Utilizing the numpy library, we calculate the standard deviation and subsequently derive the standard error of the mean (SEM)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.508, + 0.828, + 0.607 + ], + "angle": 0, + "content": "Varying the number of sub-models. We take the ResNet-20 model trained on CIFAR-10 as an example and test the performance of FDT with different numbers of sub-models in the ensemble. In this experiment, we set the frequency selection threshold \(\tau_{1}\) to be 0.2 and \(\tau_{2}\) to be 0.8. Then we evaluate the performance of FDT-hybrid under the FGSM (Goodfellow et al., 2015), PGD (Madry et al., 2018), and AutoAttack (AA) (Croce & Hein, 2020) attack methods with \(l_{\infty}\) perturbations of size \(\epsilon = 0.02\). The results in Table 1 indicate that the clean accuracy changes relatively little as the number of sub-models increases, while the robust accuracy is substantially improved from 3 to 20 sub-models." + }, + { + "type": "table_caption", + "bbox": [ + 0.214, + 0.62, + 0.783, + 0.634 + ], + "angle": 0, + "content": "Table 1: Performance of FDT-hybrid with different sub-model numbers on CIFAR-10." + }, + { + "type": "table", + "bbox": [ + 0.177, + 0.635, + 0.819, + 0.713 + ], + "content": "
Sub-model numbers | 3 | 5 | 8 | 12 | 20
Clean accuracy | 90.20 ± 0.03 | 90.75 ± 0.03 | 91.35 ± 0.05 | 91.51 ± 0.06 | 91.86 ± 0.07
FGSM (ε=0.02) | 58.04 ± 0.13 | 61.66 ± 0.15 | 62.41 ± 0.11 | 63.96 ± 0.12 | 64.27 ± 0.14
PGD (ε=0.02) | 20.01 ± 0.04 | 26.10 ± 0.07 | 29.20 ± 0.05 | 29.78 ± 0.08 | 29.71 ± 0.07
AutoAttack (ε=0.02) | 19.42 ± 0.04 | 25.37 ± 0.05 | 27.33 ± 0.04 | 28.12 ± 0.07 | 28.92 ± 0.07
" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.722, + 0.827, + 0.849 + ], + "angle": 0, + "content": "Results for white-box attack. To maintain consistency with the baseline ensemble methods from the literature, we ensemble three ResNet-20 sub-models here and evaluate the robust accuracy using \\(\\epsilon = 0.01\\) and \\(\\epsilon = 0.02\\). In this experiment, we set the frequency selection threshold \\(\\tau_{1}\\) to be 0.2 and \\(\\tau_{2}\\) to be 0.8. In the white-box attack setting, the attacker has full knowledge of the models, including model parameters, architecture, and ensemble training strategy. To evaluate the adversarial robustness of the ensemble, we conduct the following white-box attacks: PGD, FGSM, BIM (Goodfellow et al., 2015), MIM (Dong et al., 2018), C&W (Carlini & Wagner, 2017) and AutoAttack (AA). The attacks are implemented using AdverTorch (Ding et al., 2019). We take the robust and clean accuracies, and average training time per epoch as the evaluation metrics." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.855, + 0.828, + 0.926 + ], + "angle": 0, + "content": "Table 2 presents the obtained robust accuracies of the baseline ensemble methods on CIFAR-10 and CIFAR-100. In addition, we show the average training time per epoch of different ensemble methods. The experimental results suggest that our FDT-random method can achieve higher adversarial robustness over other baselines on both CIFAR-10 and CIFAR-100, with the training time only higher than ADP (and much lower than other baselines). Furthermore, the FDT-hybrid ensemble method" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.479, + 0.048 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.113, + 0.825, + 0.169 + ], + "angle": 0, + "content": "Table 2: Robust and Clean Accuracy (\\%) and average training time of different ensemble methods against white-box attacks on CIFAR-10 and CIFAR-100. “\\(\\epsilon\\)” and “\\(\\lambda\\)” stand for the \\(l_{\\infty}\\) norm of the adversarial perturbation and the coefficient of C&W attack respectively. The TRS results are reported in the original paper Yang et al. (2021), with “-” indicating results not provided." + }, + { + "type": "table", + "bbox": [ + 0.186, + 0.17, + 0.812, + 0.554 + ], + "angle": 0, + "content": "
CIFAR-10 | ADP | GAL | DVERGE | TRS | FDT-random | FDT-target | FDT-hybrid
Clean accuracy | 91.84 | 91.81 | 91.37 | - | 89.88 ± 0.02 | 90.16 ± 0.04 | 90.20 ± 0.03
FGSM (ε=0.01) | 59.48 | 44.97 | 70.05 | - | 66.96 ± 0.12 | 72.88 ± 0.12 | 72.24 ± 0.12
FGSM (ε=0.02) | 53.38 | 30.58 | 56.33 | 44.2 | 46.28 ± 0.10 | 55.54 ± 0.09 | 58.04 ± 0.13
PGD (ε=0.01) | 14.45 | 1.35 | 40.55 | 50.5 | 45.42 ± 0.09 | 46.58 ± 0.07 | 48.48 ± 0.09
PGD (ε=0.02) | 2.95 | 0.34 | 11.49 | 15.1 | 12.24 ± 0.03 | 15.08 ± 0.05 | 20.01 ± 0.04
BIM (ε=0.01) | 14.15 | 1.37 | 40.51 | 50.6 | 45.24 ± 0.03 | 46.86 ± 0.04 | 48.57 ± 0.05
BIM (ε=0.02) | 3.01 | 0.27 | 10.65 | 15.8 | 11.68 ± 0.03 | 14.86 ± 0.03 | 16.63 ± 0.02
MIM (ε=0.01) | 20.38 | 2.05 | 44.74 | 51.5 | 47.73 ± 0.05 | 49.97 ± 0.06 | 51.50 ± 0.07
MIM (ε=0.02) | 5.11 | 0.69 | 14.76 | 17.2 | 15.14 ± 0.04 | 18.27 ± 0.02 | 20.09 ± 0.03
AA (ε=0.01) | 1.80 | 0.00 | 43.34 | - | 46.09 ± 0.09 | 48.83 ± 0.08 | 51.56 ± 0.08
AA (ε=0.02) | 0.00 | 0.00 | 13.72 | - | 9.38 ± 0.05 | 15.70 ± 0.05 | 19.42 ± 0.04
C&W (λ=0.1) | 20.96 | 31.57 | 52.35 | 58.1 | 45.01 ± 0.10 | 55.48 ± 0.10 | 56.08 ± 0.11
CIFAR-100 | ADP | GAL | DVERGE | TRS | FDT-random | FDT-target | FDT-hybrid
Clean accuracy | 67.04 | 67.70 | 66.16 | - | 66.29 ± 0.11 | 67.64 ± 0.08 | 66.70 ± 0.09
FGSM (ε=0.01) | 17.82 | 16.89 | 33.94 | - | 35.42 ± 0.12 | 40.46 ± 0.14 | 39.85 ± 0.14
FGSM (ε=0.02) | 10.53 | 7.80 | 26.61 | 19.3 | 22.40 ± 0.05 | 32.30 ± 0.06 | 30.27 ± 0.08
PGD (ε=0.01) | 0.80 | 0.11 | 14.62 | 23.0 | 21.54 ± 0.06 | 22.19 ± 0.05 | 24.93 ± 0.05
PGD (ε=0.02) | 0.01 | 0.02 | 4.25 | 5.3 | 4.84 ± 0.03 | 7.27 ± 0.02 | 8.63 ± 0.03
BIM (ε=0.01) | 0.68 | 0.23 | 14.85 | 22.9 | 21.10 ± 0.09 | 22.39 ± 0.07 | 24.35 ± 0.06
BIM (ε=0.02) | 0.02 | 0.0 | 4.07 | 5.4 | 4.80 ± 0.04 | 6.80 ± 0.05 | 8.40 ± 0.05
MIM (ε=0.01) | 0.78 | 0.12 | 16.82 | 23.4 | 23.14 ± 0.06 | 24.68 ± 0.10 | 27.09 ± 0.09
MIM (ε=0.02) | 0.01 | 0.02 | 5.31 | 6.2 | 6.47 ± 0.03 | 8.87 ± 0.04 | 10.19 ± 0.05
AA (ε=0.01) | 0.01 | 0.00 | 11.23 | - | 16.02 ± 0.09 | 16.03 ± 0.09 | 16.41 ± 0.12
AA (ε=0.02) | 0.00 | 0.00 | 2.72 | - | 3.12 ± 0.04 | 4.54 ± 0.05 | 5.47 ± 0.07
C&W (λ=0.1) | 0.74 | 3.70 | 10.68 | 26.9 | 25.07 ± 0.10 | 29.43 ± 0.09 | 30.66 ± 0.13
" + }, + { + "type": "table", + "bbox": [ + 0.207, + 0.558, + 0.787, + 0.608 + ], + "angle": 0, + "content": "
Time (s) | ADP | GAL | DVERGE | TRS | FDT-random | FDT-target | FDT-hybrid
CIFAR-10 | 30.15 | 69.92 | 134.33 | 350.42 | 37.04 | 108.22 | 114.23
CIFAR-100 | 30.34 | 69.71 | 129.25 | 344.92 | 37.12 | 108.43 | 113.87
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.635, + 0.825, + 0.663 + ], + "angle": 0, + "content": "achieves an even better robustness than FDT-random, though its running time is higher since it needs to perform the target-attack transformation." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.67, + 0.827, + 0.782 + ], + "angle": 0, + "content": "Summary on other experimental results placed in Appendix F. We also conduct the experiments to examine the performance of FDT under black-box attack, and assess the transferability of our method across various sub-models. The results indicate the competitive robustness of our method in defending against black-box attacks. Then, we evaluate the trade-off between clean accuracy and robust accuracy by varying the frequency selection threshold \\(\\tau\\). The result shows that the ensemble model has lower clean accuracy and higher robust accuracy with the increasing of \\(\\tau\\). Moreover, we included some ablation studies on datasets and model architectures. These experiments demonstrate that our method performs the best among ensemble-based baseline methods." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.8, + 0.496, + 0.815 + ], + "angle": 0, + "content": "6 CONCLUSION AND FUTURE WORK" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.827, + 0.825, + 0.926 + ], + "angle": 0, + "content": "In this paper, we present a novel data transformation approach to improve the robustness of ensemble models against adversarial attacks. By leveraging the frequency based features and strategically allocating adversarial examples, we demonstrate the effectiveness of our method in enhancing adversarial robustness while maintaining high accuracy on clean data. As for the future work, we can consider other types of transformation methods (e.g., beyond using frequency) to improve the ensemble robustness. Also, it is interesting to consider more complicated scenarios for ensemble training, such as federated learning with concerning the privacy issue." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.105, + 0.33, + 0.119 + ], + "angle": 0, + "content": "ACKNOWLEDGMENTS" + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.129, + 0.829, + 0.187 + ], + "angle": 0, + "content": "The authors would like to thank the reviewers for their constructive comments and suggestions. This work was partially supported by the National Natural Science Foundation of China (No. 62272432 and No. 62432016), the National Key Research and Development Program of China (No. 2021YFA1000900), and the Natural Science Foundation of Anhui Province (No. 2208085MF163)." + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.206, + 0.289, + 0.222 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.228, + 0.826, + 0.273 + ], + "angle": 0, + "content": "Zeyuan Allen-Zhu and Yanzhi Li. Feature purification: How adversarial training performs robust deep learning. In 2021 IEEE 62nd Annual Symposium on Foundations of Computer Science (FOCS), pp. 977-988. IEEE, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.281, + 0.827, + 0.325 + ], + "angle": 0, + "content": "MaungMaung AprilPyone and Hitoshi Kiya. 
Block-wise image transformation with secret key for adversarially robust defense. IEEE Transactions on Information Forensics and Security, 16: 2709-2723, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.333, + 0.826, + 0.377 + ], + "angle": 0, + "content": "Anish Athalye, Nicholas Carlini, and David Wagner. Obfuscated gradients give a false sense of security: Circumventing defenses to adversarial examples. In International conference on machine learning, pp. 274-283. PMLR, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.385, + 0.827, + 0.442 + ], + "angle": 0, + "content": "Philipp Benz, Chaoning Zhang, and In So Kweon. Batch normalization increases adversarial vulnerability and decreases adversarial transferability: A non-robust feature perspective. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 7818-7827, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.452, + 0.827, + 0.508 + ], + "angle": 0, + "content": "Rémi Bernhard, Pierre-Alain Moëllic, Martial Mermillod, Yannick Bourrier, Romain Cohendet, Miguel Solinas, and Marina Reyboz. Impact of spatial frequency based constraints on adversarial robustness. In 2021 International Joint Conference on Neural Networks (IJCNN), pp. 1-8. IEEE, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.518, + 0.826, + 0.549 + ], + "angle": 0, + "content": "Nicholas Carlini and David Wagner. Towards evaluating the robustness of neural networks. In 2017, IEEE symposium on security and privacy (sp), pp. 39-57. IEEE, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.557, + 0.827, + 0.601 + ], + "angle": 0, + "content": "Nicholas Carlini, Anish Athalye, Nicolas Papernot, Wieland Brendel, Jonas Rauber, Dimitris Tsipras, Ian Goodfellow, Aleksander Madry, and Alexey Kurakin. On evaluating adversarial robustness. arXiv preprint arXiv:1902.06705, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.609, + 0.827, + 0.652 + ], + "angle": 0, + "content": "Yair Carmon, Aditi Raghunathan, Ludwig Schmidt, John C Duchi, and Percy S Liang. Unlabeled data improves adversarial robustness. Advances in Neural Information Processing Systems, 32, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.661, + 0.827, + 0.704 + ], + "angle": 0, + "content": "Francesco Croce and Matthias Hein. Reliable evaluation of adversarial robustness with an ensemble of diverse parameter-free attacks. In International conference on machine learning, pp. 2206-2216. PMLR, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.713, + 0.827, + 0.758 + ], + "angle": 0, + "content": "Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pp. 248-255. IEEE, 2009." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.766, + 0.825, + 0.797 + ], + "angle": 0, + "content": "Gavin Weiguang Ding, Luyu Wang, and Xiaomeng Jin. Advertorch v0.1: An adversarial robustness toolbox based on pytorch. arXiv preprint arXiv:1902.07623, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.804, + 0.825, + 0.849 + ], + "angle": 0, + "content": "Yinpeng Dong, Fangzhou Liao, Tianyu Pang, Hang Su, Jun Zhu, Xiaolin Hu, and Jianguo Li. Boosting adversarial attacks with momentum. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 9185-9193, 2018." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.857, + 0.825, + 0.888 + ], + "angle": 0, + "content": "Xitong Gao, Cheng-Zhong Xu, et al. Mora: Improving ensemble robustness evaluation with model reweighing attack. Advances in Neural Information Processing Systems, 35:26955-26965, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.895, + 0.825, + 0.926 + ], + "angle": 0, + "content": "Ian J Goodfellow, Jonathon Shlens, and Christian Szegedy. Explaining and harnessing adversarial examples. stat, 1050:20, 2015." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.228, + 0.827, + 0.926 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.961 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.134 + ], + "angle": 0, + "content": "Ian J. Goodfellow, Yoshua Bengio, and Aaron C. Courville. Deep Learning. Adaptive computation and machine learning. MIT Press, 2016. ISBN 978-0-262-03561-3." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.142, + 0.826, + 0.185 + ], + "angle": 0, + "content": "Sven Gowal, Sylvestre-Alvise Rebuffi, Olivia Wiles, Florian Stimberg, Dan Andrei Calian, and Timothy A Mann. Improving robustness using generated data. Advances in Neural Information Processing Systems, 34:4218-4233, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.194, + 0.826, + 0.252 + ], + "angle": 0, + "content": "Chuan Guo, Mayank Rana, Moustapha Cissé, and Laurens van der Maaten. Countering adversarial images using input transformations. In 6th International Conference on Learning Representations, ICLR 2018, Vancouver, BC, Canada, April 30 - May 3, 2018, Conference Track Proceedings. OpenReview.net, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.26, + 0.826, + 0.305 + ], + "angle": 0, + "content": "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 770-778, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.312, + 0.803, + 0.328 + ], + "angle": 0, + "content": "Douglas Heaven. Why deep-learning ais are so easy to fool. Nature, 574(7777):163-166, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.336, + 0.826, + 0.38 + ], + "angle": 0, + "content": "Andrew Ilyas, Shibani Santurkar, Dimitris Tsipras, Logan Engstrom, Brandon Tran, and Aleksander Madry. Adversarial examples are not bugs, they are features. Advances in neural information processing systems, 32, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.388, + 0.826, + 0.419 + ], + "angle": 0, + "content": "Sanjay Kariyappa and Moinuddin K Qureshi. Improving adversarial robustness of ensembles with diversity training. arXiv e-prints, pp. arXiv-1901, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.427, + 0.826, + 0.471 + ], + "angle": 0, + "content": "Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In Yoshua Bengio and Yann LeCun (eds.), 3rd International Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings, 2015." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.478, + 0.826, + 0.495 + ], + "angle": 0, + "content": "Alex Krizhevsky and Geoffrey Hinton. Learning multiple layers of features from tiny images. 2009." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.503, + 0.826, + 0.56 + ], + "angle": 0, + "content": "Aleksander Madry, Aleksandar Makelov, Ludwig Schmidt, Dimitris Tsipras, and Adrian Vladu. Towards deep learning models resistant to adversarial attacks. In 6th International Conference on Learning Representations, ICLR 2018, Vancouver, BC, Canada, April 30 - May 3, 2018, Conference Track Proceedings. OpenReview.net, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.569, + 0.826, + 0.599 + ], + "angle": 0, + "content": "Shishira R. Maiya, Max Ehrlich, Vatsal Agarwal, Ser-Nam Lim, Tom Goldstein, and Abhinav Shrivastava. A frequency perspective of adversarial robustness. CoRR, abs/2111.00861, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.607, + 0.826, + 0.651 + ], + "angle": 0, + "content": "AKM Iqtidar Newaz, Nur Imtiazul Haque, Amit Kumar Sikder, Mohammad Ashiqur Rahman, and A Selcuk Uluagac. Adversarial attacks to machine learning-based smart healthcare systems. In GLOBECOM 2020-2020 IEEE Global Communications Conference, pp. 1-6. IEEE, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.66, + 0.826, + 0.703 + ], + "angle": 0, + "content": "Tianyu Pang, Kun Xu, Chao Du, Ning Chen, and Jun Zhu. Improving adversarial robustness via promoting ensemble diversity. In International Conference on Machine Learning, pp. 4970-4979. PMLR, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.712, + 0.826, + 0.755 + ], + "angle": 0, + "content": "Adam Paszke, Sam Gross, Soumith Chintala, Gregory Chanan, Edward Yang, Zachary DeVito, Zeming Lin, Alban Desmaison, Luca Antiga, and Adam Lerer. Automatic differentiation in pytorch. 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.763, + 0.826, + 0.807 + ], + "angle": 0, + "content": "Rahul Rade and Seyed-Mohsen Moosavi-Dezfooli. *Helper-based adversarial training: Reducing excessive margin to achieve a better accuracy vs. robustness trade-off*. In ICML 2021 Workshop on Adversarial Machine Learning, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.816, + 0.826, + 0.859 + ], + "angle": 0, + "content": "Edward Raff, Jared Sylvester, Steven Forsyth, and Mark McLean. Barrage of random transforms for adversarially robust defense. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 6528-6537, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.868, + 0.826, + 0.924 + ], + "angle": 0, + "content": "Giulio Rossolini, Federico Nesti, Gianluca D'Amico, Saasha Nair, Alessandro Biondi, and Giorgio Buttazzo. On the real-world adversarial robustness of real-time semantic segmentation models for autonomous driving. IEEE Transactions on Neural Networks and Learning Systems, pp. 1-15, 2023. doi: 10.1109/TNNLS.2023.3314512." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.924 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.961 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.828, + 0.175 + ], + "angle": 0, + "content": "Andrei A. 
Rusu, Dan Andrei Calian, Sven Gowal, and Raia Hadsell. Hindering adversarial attacks with implicit neural representations. In Kamalika Chaudhuri, Stefanie Jegelka, Le Song, Csaba Szepesvári, Gang Niu, and Sivan Sabato (eds.), International Conference on Machine Learning, ICML 2022, 17-23 July 2022, Baltimore, Maryland, USA, volume 162 of Proceedings of Machine Learning Research, pp. 18910-18934. PMLR, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.184, + 0.827, + 0.24 + ], + "angle": 0, + "content": "Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. In Yoshua Bengio and Yann LeCun (eds.), 3rd International Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.251, + 0.825, + 0.28 + ], + "angle": 0, + "content": "James C Spall. Multivariate stochastic approximation using a simultaneous perturbation gradient approximation. IEEE transactions on automatic control, 37(3):332-341, 1992." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.29, + 0.827, + 0.332 + ], + "angle": 0, + "content": "Jacob Springer, Melanie Mitchell, and Garrett Kenyon. A little robustness goes a long way: Leveraging robust features for targeted transfer attacks. Advances in Neural Information Processing Systems, 34:9759-9773, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.342, + 0.827, + 0.385 + ], + "angle": 0, + "content": "Christian Szegedy, Wojciech Zaremba, Ilya Sutskever, Joan Bruna, Dumitru Erhan, Ian Goodfellow, and Rob Fergus. Intriguing properties of neural networks. In 2nd International Conference on Learning Representations, ICLR 2014, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.395, + 0.827, + 0.438 + ], + "angle": 0, + "content": "Florian Tramèr, Alexey Kurakin, Nicolas Papernot, Ian Goodfellow, Dan Boneh, and Patrick McDaniel. Ensemble adversarial training: Attacks and defenses. In International Conference on Learning Representations, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.447, + 0.827, + 0.489 + ], + "angle": 0, + "content": "Dimitris Tsipras, Shibani Santurkar, Logan Engstrom, Alexander Turner, and Aleksander Madry. Robustness may be at odds with accuracy. In International Conference on Learning Representations, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.5, + 0.827, + 0.571 + ], + "angle": 0, + "content": "Zekai Wang, Tianyu Pang, Chao Du, Min Lin, Weiwei Liu, and Shuicheng Yan. Better diffusion models further improve adversarial training. In Andreas Krause, Emma Brunskill, Kyunghyun Cho, Barbara Engelhardt, Sivan Sabato, and Jonathan Scarlett (eds.), International Conference on Machine Learning, ICML 2023, 23-29 July 2023, Honolulu, Hawaii, USA, volume 202 of Proceedings of Machine Learning Research, pp. 36246-36263. PMLR, 2023a." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.581, + 0.827, + 0.623 + ], + "angle": 0, + "content": "Zekai Wang, Tianyu Pang, Chao Du, Min Lin, Weiwei Liu, and Shuicheng Yan. Better diffusion models further improve adversarial training. In International Conference on Machine Learning, pp. 36246-36263. PMLR, 2023b." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.633, + 0.825, + 0.662 + ], + "angle": 0, + "content": "Zifan Wang, Yilin Yang, Ankit Shrivastava, Varun Rawal, and Zihao Ding. Towards frequency-based explanation for robust cnn. arXiv preprint arXiv:2005.03141, 2020." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.672, + 0.827, + 0.728 + ], + "angle": 0, + "content": "Futa Waseda, Sosuke Nishikawa, Trung-Nghia Le, Huy H Nguyen, and Isao Echizen. Closer look at the transferability of adversarial examples: How they fool different models differently. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pp. 1360-1368, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.738, + 0.827, + 0.753 + ], + "angle": 0, + "content": "Sven-Ake Wegner. Lecture notes on high-dimensional data. arXiv preprint arXiv:2101.05841, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.763, + 0.825, + 0.807 + ], + "angle": 0, + "content": "Yuancheng Xu, Yanchao Sun, Micah Goldblum, Tom Goldstein, and Furong Huang. Exploring and exploiting decision boundary dynamics for adversarial robustness. In The Eleventh International Conference on Learning Representations, ICLR 2023, Kigali, Rwanda, May 1-5, 2023, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.816, + 0.827, + 0.872 + ], + "angle": 0, + "content": "Huanrui Yang, Jingyang Zhang, Hongliang Dong, Nathan Inkawhich, Andrew Gardner, Andrew Touchet, Wesley Wilkes, Heath Berry, and Hai Li. DVERGE: Diversifying vulnerabilities for enhanced robust generation of ensembles. Advances in Neural Information Processing Systems, 33:5505-5515, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.882, + 0.827, + 0.924 + ], + "angle": 0, + "content": "Ruijie Yang, Yuanfang Guo, Junfu Wang, Jiantao Zhou, and Yunhong Wang. Common knowledge learning for generating transferable adversarial examples. Frontiers Comput. Sci., 19(10):1910359, 2025. doi: 10.1007/S11704-024-40533-4." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.828, + 0.924 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "13" + } ], [ { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.103, + 0.829, + 0.148 + ], + "angle": 0, + "content": "Zhuolin Yang, Linyi Li, Xiaojun Xu, Shiliang Zuo, Qian Chen, Pan Zhou, Benjamin Rubinstein, Ce Zhang, and Bo Li. TRS: Transferability reduced ensemble via promoting gradient diversity and model smoothness. Advances in Neural Information Processing Systems, 34:17642-17655, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.155, + 0.829, + 0.2 + ], + "angle": 0, + "content": "Mehmet Kerim Yucel, Ramazan Gokberk Cinbis, and Pinar Duygulu. Hybridaugment++: Unified frequency spectra perturbations for model robustness. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 5718-5728, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.207, + 0.819, + 0.224 + ], + "angle": 0, + "content": "Sergey Zagoruyko and Nikos Komodakis. Wide residual networks. CoRR, abs/1605.07146, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.231, + 0.829, + 0.303 + ], + "angle": 0, + "content": "Hongyang Zhang, Yaodong Yu, Jiantao Jiao, Eric P. Xing, Laurent El Ghaoui, and Michael I. Jordan. Theoretically principled trade-off between robustness and accuracy. 
In Kamalika Chaudhuri and Ruslan Salakhutdinov (eds.), Proceedings of the 36th International Conference on Machine Learning, ICML 2019, 9-15 June 2019, Long Beach, California, USA, volume 97 of Proceedings of Machine Learning Research, pp. 7472-7482. PMLR, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.31, + 0.829, + 0.354 + ], + "angle": 0, + "content": "Wen Zhou, Xin Hou, Yongjun Chen, Mengyun Tang, Xiangqi Huang, Xiang Gan, and Yong Yang. Transferable adversarial perturbations. In Proceedings of the European Conference on Computer Vision (ECCV), pp. 452-467, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.362, + 0.829, + 0.405 + ], + "angle": 0, + "content": "Yi Zhu, Chenglin Miao, Tianhang Zheng, Foad Hajiaghajani, Lu Su, and Chunming Qiao. Can we use arbitrary objects to attack lidar perception in autonomous driving? In Proceedings of the 2021 ACM SIGSAC Conference on Computer and Communications Security, pp. 1945-1960, 2021." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.829, + 0.405 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.103, + 0.364, + 0.119 + ], + "angle": 0, + "content": "A OMITTED PROOFS" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.138, + 0.414, + 0.154 + ], + "angle": 0, + "content": "A.1 PROOF OF INEQUALITY (6):" + }, + { + "type": "equation", + "bbox": [ + 0.175, + 0.166, + 0.589, + 0.232 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\sum_ {y _ {t} \\in \\mathcal {Y}} \\operatorname {V r} \\left(F _ {\\mathrm {E}}, y _ {t}\\right) \\\\ = \\sum_ {y _ {t} \\in \\mathcal {Y}} \\mathbb {E} _ {(x, y) \\sim \\mathcal {D}} \\left[ \\mathbb {I} \\left\\{F _ {\\mathrm {E}} (x) = y \\wedge F _ {\\mathrm {E}} (\\mathcal {A} (x)) = y _ {t} \\right\\} \\right] \\\\ = \\sum_ {y _ {t} \\in \\mathcal {Y}} \\sum_ {(x, y) \\in \\mathcal {D}} p _ {(x, y)} \\left[ \\mathbb {I} \\left\\{F _ {\\mathrm {E}} (x) = y \\wedge F _ {\\mathrm {E}} (\\mathcal {A} (x)) = y _ {t} \\right\\} \\right]. \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.237, + 0.724, + 0.252 + ], + "angle": 0, + "content": "Then, we interchange the order of summation, and so the above equation is equal to" + }, + { + "type": "equation", + "bbox": [ + 0.175, + 0.259, + 0.569, + 0.315 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\left. \\sum_ {(x, y) \\in \\mathcal {D}} p _ {(x, y)} \\sum_ {y _ {t} \\in \\mathcal {Y}} \\left[ \\mathbb {I} \\left\\{F _ {\\mathrm {E}} (x) = y \\wedge F _ {\\mathrm {E}} (\\mathcal {A} (x)) = y _ {t} \\right\\} \\right] \\right. \\\\ = \\mathbb {E} _ {(x, y) \\sim \\mathcal {D}} \\Big [ \\sum_ {y _ {t} \\in \\mathcal {Y}} \\mathbb {I} \\big \\{F _ {\\mathrm {E}} (x) = y \\wedge F _ {\\mathrm {E}} (\\mathcal {A} (x)) = y _ {t} \\big \\} \\Big ]. \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.321, + 0.825, + 0.368 + ], + "angle": 0, + "content": "For each \\((x,y)\\), without loss of generality, let \\(F_{\\mathrm{E}}(\\mathcal{A}(x)) = y_0\\). For \\(y_t \\neq y_0\\), \\(\\mathbb{I}\\big\\{F_{\\mathrm{E}}(x) = y \\wedge F_{\\mathrm{E}}(\\mathcal{A}(x)) = y_t\\big\\} = 0\\). 
For \\(y_t = y_0\\), \\(\\mathbb{I}\\big\\{F_{\\mathrm{E}}(x) = y \\wedge F_{\\mathrm{E}}(\\mathcal{A}(x)) = y_t\\big\\} = \\mathbb{I}\\big\\{F_{\\mathrm{E}}(x) = y\\big\\}\\). So the above equation is equal to" + }, + { + "type": "equation", + "bbox": [ + 0.175, + 0.374, + 0.72, + 0.461 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathbb {E} _ {(x, y) \\sim \\mathcal {D}} \\left[ \\mathbb {I} \\left\\{F _ {\\mathrm {E}} (x) = y \\right\\} \\right] \\\\ = \\mathbb {E} _ {(x, y) \\sim \\mathcal {D}} \\left[ \\mathbb {I} \\left\\{F _ {\\mathrm {E}} (x) = y \\wedge \\left(F _ {\\mathrm {E}} (\\mathcal {A} (x)) \\neq y \\vee F _ {\\mathrm {E}} (\\mathcal {A} (x)) = y\\right) \\right\\} \\right] \\\\ = \\mathbb {E} _ {(x, y) \\sim \\mathcal {D}} \\left[ \\mathbb {I} \\left\\{F _ {\\mathrm {E}} (x) = y \\wedge F _ {\\mathrm {E}} (\\mathcal {A} (x)) \\neq y \\right\\} + \\mathbb {I} \\left\\{F _ {\\mathrm {E}} (x) = y \\wedge F _ {\\mathrm {E}} (\\mathcal {A} (x)) = y \\right\\} \\right]. \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.467, + 0.825, + 0.496 + ], + "angle": 0, + "content": "We split \\(\\mathbb{I}(.)\\) because \\(F_{\\mathrm{E}}(\\mathcal{A}(x)) \\neq y\\) and \\(F_{\\mathrm{E}}(\\mathcal{A}(x)) = y\\) are mutually exclusive. Then, the above equation is equal to" + }, + { + "type": "equation", + "bbox": [ + 0.175, + 0.503, + 0.533, + 0.549 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\operatorname {V r} \\left(F _ {\\mathrm {E}}\\right) + \\mathbb {E} _ {(x, y) \\sim \\mathcal {D}} \\left[ \\mathbb {I} \\left\\{F _ {\\mathrm {E}} (x) = y \\wedge F _ {\\mathrm {E}} (\\mathcal {A} (x)) = y \\right\\} \\right] \\\\ \\geq \\operatorname {V r} \\left(F _ {\\mathrm {E}}\\right) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.554, + 0.644, + 0.573 + ], + "angle": 0, + "content": "Overall, we obtain the inequality (6): \\(\\sum_{y_t\\in \\mathcal{Y}}\\mathrm{Vr}(F_{\\mathrm{E}},y_t)\\geq \\mathrm{Vr}(F_{\\mathrm{E}})\\)" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.596, + 0.414, + 0.612 + ], + "angle": 0, + "content": "B FREQUENCY SELECTION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.626, + 0.827, + 0.71 + ], + "angle": 0, + "content": "Figure 4 illustrates an example to show that, if we keep high-amplitude frequencies and remove low-amplitude ones, the image is changed slightly even with adding certain noise (i.e., we can still recognize the ground truth from the modified image). On the other hand, if we keep the low-amplitude frequencies only, the semantic information is almost missing. This observation suggests that high-amplitude frequency features are more strongly related to the semantic information of image." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.737, + 0.566, + 0.751 + ], + "angle": 0, + "content": "C RANDOM NOISE BASED TRANSFORMATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.772, + 0.827, + 0.925 + ], + "angle": 0, + "content": "Random noise based transformation: This approach substitutes the identified non-robust frequencies with Gaussian noise. For an \\( N \\times N \\) image, we take the non-robust frequencies based on the pre-specified threshold \\( \\tau \\), and replace them with random vector for each sub-model in our experiment. In particular, to further increase the randomness, we perform this transformation for each epoch in the training stage. 
If we select the top \\( s \\) non-robust frequencies, the overall dimensionality of the edited random feature should be \\( s \\times E \\) (we concatenate those \\( s \\)-dimensional features together), where \\( E \\) is the number of epochs. For example, if \\( N = 32 \\), \\( s = N^2 / 2 \\), and \\( E = 200 \\), the overall dimensionality can be as large as \\( 10^5 \\). Because these \\( M \\) features are random and have high dimensions, they are very likely to be nearly orthogonal with each other (this phenomenon in high-dimensional geometry can be proved by the central limit theorem (Wegner, 2021)). As a consequence, they tend to yield diverse training results for the sub-models." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "image", + "bbox": [ + 0.307, + 0.105, + 0.691, + 0.273 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.286, + 0.828, + 0.373 + ], + "angle": 0, + "content": "Figure 4: The first and second rows are the figures by adding random noise to high-amplitude and low-amplitude frequencies, respectively. \"20% changed\" for the first row means we remove the 20% lowest-amplitudes frequencies, and add small noise to the remaining high-amplitude frequencies. \"20% changed\" for the second row means we remove the 20% highest-amplitude frequencies, and add small noise to the remaining low-amplitude frequencies. \"50% changed\" and \"80% changed\" follow the same procedure as \"20% changed\"." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.414, + 0.828, + 0.513 + ], + "angle": 0, + "content": "The implementation details are as follows. Given an image \\( x \\), we perform Fourier Transform on \\( x \\) and also on a generated Gaussian noise \\( n_0 \\). Then, we can obtain the low-amplitude frequencies and high-amplitude frequencies of \\( x \\) by setting an amplitude threshold. Next, we generate two masks \\( (M_1 \\) and \\( M_2 \\)) to select high-amplitude frequencies and low-amplitude frequencies. We add the low-amplitude frequencies of \\( n_0 \\) (i.e., \\( M_2(n_0) \\)) to high-amplitude frequencies of \\( x \\) (i.e., \\( M_1(x) \\)), and obtain the transformation of \\( x \\) (denoted as \\( \\pi(x) \\)). Finally, we transform \\( \\pi(x) \\) to time domain by inverse Fourier transform and train the model with \\( \\pi(x) \\)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.546, + 0.388, + 0.561 + ], + "angle": 0, + "content": "D ALGORITHM OF FDT" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.585, + 0.825, + 0.614 + ], + "angle": 0, + "content": "Algorithm 1 shows the overall framework of training an ensemble model with FDT. It illustrates that our data transformation is performed at each iteration." 
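Before the pseudocode, a minimal Python sketch of the same loop may be helpful. The helpers `targeted_attack_transformation` (corresponding to Algorithm 2) and `train_one_epoch` (a standard training pass) are hypothetical names used only for illustration, not part of a released implementation:

```python
# A sketch of Algorithm 1 under assumed interfaces (not the paper's released code).
def train_fdt_ensemble(dataset, sub_models, epochs,
                       targeted_attack_transformation, train_one_epoch):
    """Train M sub-models with FDT, re-running the data transformation every epoch."""
    M = len(sub_models)
    for _ in range(epochs):
        # Algorithm 2: rebuild the transformed datasets P_1, ..., P_M from scratch.
        P = targeted_attack_transformation(dataset, M)
        for beta_j, P_j in zip(sub_models, P):
            train_one_epoch(beta_j, P_j)  # sub-models train independently, no communication
    return sub_models
```

Re-running the transformation at each epoch is what injects fresh randomness into the low-amplitude band, which underlies the near-orthogonality argument in Appendix C.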
+ }, + { + "type": "code_caption", + "bbox": [ + 0.174, + 0.642, + 0.512, + 0.656 + ], + "angle": 0, + "content": "Algorithm 1 Training ensemble model with FDT" + }, + { + "type": "code", + "bbox": [ + 0.174, + 0.658, + 0.713, + 0.771 + ], + "angle": 0, + "content": "Input: dataset \\(\\mathcal{X}\\times \\mathcal{Y}\\) , the number of sub-models \\(M\\) , and the epoch number \\(E\\) Output: sub-model \\(\\beta_{1},\\beta_{2},\\dots ,\\beta_{M}\\) \nfor \\(i = 1\\) to \\(E\\) do Run Targeted-attack Transformation and obtain \\(P_{1},P_{2},\\dots ,P_{M}\\) . for \\(j = 1\\) to \\(M\\) do train \\(\\beta_{j}\\) on \\(P_{j}\\) \nend for \nend for" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.799, + 0.828, + 0.925 + ], + "angle": 0, + "content": "Algorithm 2 shows the details of targeted-attack transformation method on the whole dataset. For each specific image \\( x \\), we obtain the targeted class according to the allocation scheme mentioned in \"Stage (1)\". Then, we use targeted PGD attack to obtain the adversarial sample \\( x' \\). After that, we perform Fourier Transform on \\( x \\) and \\( x' \\), and we can obtain the low-amplitude frequencies and high-amplitude frequencies of \\( x \\) by setting an amplitude threshold. Next, we generate two masks \\( (M_1 \\) and \\( M_2 \\)) to select high-amplitude frequencies and low-amplitude frequencies. We add the low-amplitude frequencies of \\( x' \\) (i.e., \\( M_2(x') \\)) to high-amplitude frequencies of \\( x \\) (i.e., \\( M_1(x) \\)), and obtain the transformation of \\( x \\) (denoted as \\( \\pi(x) \\)). Finally, we transform \\( \\pi(x) \\) to time domain by inverse Fourier transform." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "code_caption", + "bbox": [ + 0.174, + 0.104, + 0.489, + 0.119 + ], + "angle": 0, + "content": "Algorithm 2 Targeted-attack Transformation" + }, + { + "type": "code", + "bbox": [ + 0.174, + 0.121, + 0.825, + 0.345 + ], + "angle": 0, + "content": "Input: dataset \\(P_{ori}\\) , number M, steps s, class number k \nOutput: Transformed data \\(P_{1},P_{2},\\dots ,P_{M}\\) \nDivide the dataset \\(P_{ori}\\) into k parts \\(\\{C_1,C_2,\\dots ,C_k\\}\\) according to labels \nRandomly partition the dataset \\(C_j\\) equally into disjoint \\(k - 1\\) parts \\(\\{C_{j,1},C_{j,2},\\dots ,C_{j,k - 1}\\}\\) \nInitialize \\(P_{1},P_{2},\\dots ,P_{M}\\) with empty set; \n\\(m\\gets 0\\) \nfor \\(j = 1\\) to k do for \\(i = 1\\) to \\(k - 1\\) do \\(C_{j,i}^{\\prime}\\gets\\) calculate targeted attack example in \\(C_{j,i}\\) with label \\(i + j\\) mod \\(k\\) and perform data transformation on each image; for \\(s = 1\\) to \\(\\lceil \\frac{M}{2}\\rceil +1\\) do \\(m\\gets m + 1\\) mod M; Append \\(C_{j,i}^{\\prime}\\) to \\(P_{m}\\) end for end for end for" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.376, + 0.316, + 0.391 + ], + "angle": 0, + "content": "E IMPLEMENT" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.411, + 0.828, + 0.537 + ], + "angle": 0, + "content": "In this section, we provide more experimental details. In our work, we utilize the CIFAR-10 (Krizhevsky & Hinton, 2009), CIFAR-100 (Krizhevsky & Hinton, 2009), and Tiny-ImageNet-200 (Deng et al., 2009). 
In the testing process, the primary reason for selecting FGSM (Madry et al., 2018), PGD (Carlini et al., 2019), BIM (Goodfellow et al., 2015), MIM (Dong et al., 2018), CW(Carlini & Wagner, 2017) as attack methods is to keep consistent with the baseline methods from the literature. Further, we select AA (Croce & Hein, 2020) because it is also a popular attack method and more powerful than those base methods. To reduce the computational complexity of targeted attacks, we leverage the transferability of adversarial examples and utilize a pre-trained simple network (VGG11(Simonyan & Zisserman, 2015)) structure for targeted attacks." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.542, + 0.828, + 0.74 + ], + "angle": 0, + "content": "Further, we introduce the implement of \"FDT-random\", \"FDT-target\" and \"FDT-hybrid\" here. For \"FDT-random\", we perform Fourier Transform on \\( x \\) and also on a randomly sampled standard Gaussian noise \\( n_0 \\). Then, we can obtain the low-amplitude frequencies and high-amplitude frequencies of \\( x \\) by setting an amplitude threshold. Next, we generate two masks \\( (M_1 \\) and \\( M_2 \\)) to select high-amplitude frequencies and low-amplitude frequencies. We add the low-amplitude frequencies of \\( n_0 \\) (i.e., \\( M_2(n_0) \\)) to high-amplitude frequencies of \\( x \\) (i.e., \\( M_1(x) \\)), and obtain the transformation of \\( x \\) (denoted as \\( \\pi(x) \\)). Finally, we transform \\( \\pi(x) \\) to time domain by inverse Fourier transform and train the model with \\( \\pi(x) \\). For \"FDT-target\", we obtain the targeted class according to the allocation scheme mentioned in \"Stage (1)\". Then, we use targeted PGD attack to obtain the adversarial sample \\( x' \\). After that, perform the same steps as with FDT-random (we substitute \\( n_0 \\) with \\( x' \\)). For FDT-hybrid, we set two frequency selection thresholds \\( \\tau_1 \\) and \\( \\tau_2 \\) (\\( \\tau_1 < \\tau_2 \\)), and generate three masks to select the frequencies: \\( M_1 \\) for the high-amplitude frequencies (amplitude \\( > \\tau_2 \\)), \\( M_2 \\) for the middle part (\\( \\tau_1 < \\) amplitude \\( < \\tau_2 \\)), and \\( M_3 \\) for the small part (amplitude \\( < \\tau_1 \\)). Next, we combine \\( M_1(x), M_2(x') \\) and \\( M_3(n_0) \\) to obtain the transformation \\( \\pi(x) \\)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.764, + 0.53, + 0.78 + ], + "angle": 0, + "content": "F ADDITIONAL EXPERIMENTAL RESULTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.799, + 0.827, + 0.926 + ], + "angle": 0, + "content": "In this section, we provide more experimental results. Firstly, we extend our experiments to SVHN, Tiny-ImageNet-200, and WideResNet-28-10 in Appendix F.1. We also conduct the ablation studies on weakness set allocation method, amplitude-based selection threshold and model architecture in Appendix F.1. Then, we evaluate the performance of FDT under black-box attacks on the CIFAR-10 and CIFAR-100 in Appendix F.2. Then we present the trade-off between clean accuracy and robust accuracy on the CIFAR-100 using FDT method in Appendix F.3. This trade-off sheds light on the effectiveness of FDT with changing the trade-off parameter. Additionally, in Appendix F.4, we compare the transferability across various sub-models with the baseline methods. Furthermore, we compare our method with more related methods in Appendix F.5." 
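The mask construction shared by the three variants above can be summarized in a short NumPy sketch. The function below follows the FDT-hybrid description; normalizing amplitudes by their mean is our own assumption (the thresholds in Appendix F.3 exceed 1, which suggests a relative scale), so treat this as a sketch rather than the reference implementation:

```python
import numpy as np

def fdt_hybrid_transform(x, x_adv, tau1, tau2, rng=None):
    """FDT-hybrid sketch: combine M1(x) + M2(x') + M3(n0) in the frequency domain.

    x, x_adv : HxW arrays (clean image and its targeted-attack version x').
    tau1 < tau2 : amplitude thresholds on a mean-normalized scale (assumption).
    """
    rng = np.random.default_rng() if rng is None else rng
    n0 = rng.standard_normal(x.shape)                  # Gaussian noise image

    fx, fadv, fn0 = np.fft.fft2(x), np.fft.fft2(x_adv), np.fft.fft2(n0)

    amp = np.abs(fx)
    amp = amp / amp.mean()                             # assumed normalization
    m1 = amp > tau2                                    # high amplitude: keep x
    m2 = (amp > tau1) & ~m1                            # middle band: substitute from x'
    m3 = ~(m1 | m2)                                    # low amplitude: random noise

    combined = m1 * fx + m2 * fadv + m3 * fn0
    return np.real(np.fft.ifft2(combined))             # back to the time domain
```

With `tau1 = tau2` the middle band vanishes and the function reduces to FDT-random; with `tau1 = 0` the noise band vanishes and it reduces to FDT-target.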
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.105, + 0.353, + 0.119 + ], + "angle": 0, + "content": "F.1 ABLATION STUDIES" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.131, + 0.825, + 0.174 + ], + "angle": 0, + "content": "In this section, we extend our experiments to additional datasets (SVHN, Tiny-ImageNet-200) and architecture (WideResNet-28-10). We also explore the ablation studies on weakness set allocation method, amplitude-based selection threshold and model architecture." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.18, + 0.827, + 0.251 + ], + "angle": 0, + "content": "Table 3 presents the performance of ensemble methods trained with ResNet-20 on SVHN against several widely used white-box attacks. The experimental results demonstrate that all ensemble models achieve comparable levels of clean accuracy. Specifically, the FDT approach exhibits better robust accuracy than the other methods. These observations highlight the effectiveness of FDT in achieving favorable clean accuracy and robustness of ensemble models." + }, + { + "type": "table_caption", + "bbox": [ + 0.17, + 0.275, + 0.828, + 0.319 + ], + "angle": 0, + "content": "Table 3: Robust Accuracy (%) of different ensemble methods against white-box attacks on SVHN. The \\(\\epsilon\\) and \\(\\lambda\\) stand for the \\(l_{\\infty}\\) norm of the adversarial perturbation and the coefficient of C&W attack respectively. The last column is the ensemble model trained with FDT-hybrid." + }, + { + "type": "table", + "bbox": [ + 0.256, + 0.33, + 0.744, + 0.523 + ], + "angle": 0, + "content": "
<table><tr><td>SVHN</td><td>ADP</td><td>GAL</td><td>DVERGE</td><td>TRS</td><td>FDT-hybrid</td></tr>
<tr><td>clean accuracy</td><td>96.83</td><td>94.66</td><td>96.28</td><td>94.52</td><td>96.73 ± 0.12</td></tr>
<tr><td>FGSM (ε = 0.01)</td><td>84.38</td><td>80.2</td><td>85.6</td><td>72.87</td><td>90.13 ± 0.09</td></tr>
<tr><td>FGSM (ε = 0.02)</td><td>78.08</td><td>41.5</td><td>81.4</td><td>53.9</td><td>86.78 ± 0.07</td></tr>
<tr><td>PGD (ε = 0.01)</td><td>51.01</td><td>50.1</td><td>53.31</td><td>54.43</td><td>59.42 ± 0.07</td></tr>
<tr><td>PGD (ε = 0.02)</td><td>17.74</td><td>8.24</td><td>17.42</td><td>18.86</td><td>22.74 ± 0.04</td></tr>
<tr><td>BIM (ε = 0.01)</td><td>54.38</td><td>47.73</td><td>52.08</td><td>53.71</td><td>57.91 ± 0.08</td></tr>
<tr><td>BIM (ε = 0.02)</td><td>21.26</td><td>8.1</td><td>14.58</td><td>18.05</td><td>20.23 ± 0.05</td></tr>
<tr><td>MIM (ε = 0.01)</td><td>61.24</td><td>51.96</td><td>58.51</td><td>56.32</td><td>62.14 ± 0.08</td></tr>
<tr><td>MIM (ε = 0.02)</td><td>24.84</td><td>5.14</td><td>23.22</td><td>21.95</td><td>25.37 ± 0.04</td></tr>
<tr><td>AA (ε = 0.01)</td><td>49.92</td><td>48.39</td><td>52.02</td><td>52.83</td><td>57.54 ± 0.09</td></tr>
<tr><td>AA (ε = 0.02)</td><td>16.13</td><td>6.90</td><td>16.95</td><td>17.48</td><td>20.12 ± 0.05</td></tr>
<tr><td>C&W (λ = 0.1)</td><td>55.81</td><td>49.94</td><td>66.82</td><td>52.74</td><td>72.14 ± 0.11</td></tr></table>
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.54, + 0.828, + 0.598 + ], + "angle": 0, + "content": "We also extend our experiment to the sub-models trained with WideResNet-28-10 on CIFAR-10. Table 4 shows the performance of the models facing various whitebox attacks. The results indicate that FDT maintains good performance even on more complex network structures. We also evaluated the robustness of an ensemble of eight sub-models, with the results presented in Table 5." + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.622, + 0.829, + 0.665 + ], + "angle": 0, + "content": "Table 4: Robust Accuracy \\((\\%)\\) of different ensemble methods against white-box attacks on CIFAR-10. The \\(\\epsilon\\) and \\(\\lambda\\) stand for the \\(l_{\\infty}\\) norm of the adversarial perturbation and the coefficient of C&W attack respectively. The architecture of sub-model is WRN-28-10." + }, + { + "type": "table", + "bbox": [ + 0.278, + 0.677, + 0.719, + 0.87 + ], + "angle": 0, + "content": "
<table><tr><td>CIFAR-10</td><td>ADP</td><td>GAL</td><td>DVERGE</td><td>FDT-hybrid</td></tr>
<tr><td>clean accuracy</td><td>92.99</td><td>82.14</td><td>94.32</td><td>94.18 ± 0.06</td></tr>
<tr><td>FGSM (ε = 0.01)</td><td>60.04</td><td>44.94</td><td>71.01</td><td>80.64 ± 0.05</td></tr>
<tr><td>FGSM (ε = 0.02)</td><td>51.69</td><td>36.83</td><td>50.43</td><td>60.09 ± 0.05</td></tr>
<tr><td>PGD (ε = 0.01)</td><td>11.09</td><td>22.10</td><td>44.25</td><td>64.64 ± 0.07</td></tr>
<tr><td>PGD (ε = 0.02)</td><td>2.54</td><td>5.06</td><td>13.27</td><td>26.0 ± 0.03</td></tr>
<tr><td>BIM (ε = 0.01)</td><td>15.81</td><td>22.62</td><td>46.53</td><td>67.36 ± 0.10</td></tr>
<tr><td>BIM (ε = 0.02)</td><td>4.50</td><td>5.43</td><td>17.38</td><td>32.36 ± 0.06</td></tr>
<tr><td>MIM (ε = 0.01)</td><td>18.18</td><td>25.97</td><td>44.21</td><td>64.36 ± 0.08</td></tr>
<tr><td>MIM (ε = 0.02)</td><td>4.72</td><td>7.81</td><td>12.83</td><td>25.64 ± 0.05</td></tr>
<tr><td>AA (ε = 0.01)</td><td>9.38</td><td>19.34</td><td>43.23</td><td>63.45 ± 0.08</td></tr>
<tr><td>AA (ε = 0.02)</td><td>1.17</td><td>3.93</td><td>12.49</td><td>25.23 ± 0.04</td></tr>
<tr><td>C&W (λ = 0.1)</td><td>37.81</td><td>19.05</td><td>46.32</td><td>47.23 ± 0.10</td></tr></table>
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.829, + 0.927 + ], + "angle": 0, + "content": "Table 6 is the result of ensemble methods trained with WideResNet-28-10 on Tiny-ImageNet-200. We test the robustness of different methods under widely used white-box attacks. Due to the high" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.113, + 0.828, + 0.157 + ], + "angle": 0, + "content": "Table 5: Robust Accuracy (\\%) of an ensemble of eight sub-models against white-box attacks on CIFAR-10. The \\(\\epsilon\\) and \\(\\lambda\\) stand for the \\(l_{\\infty}\\) norm of the adversarial perturbation and the coefficient of C&W attack respectively. The architecture of sub-model is WRN-28-10." + }, + { + "type": "table", + "bbox": [ + 0.38, + 0.167, + 0.615, + 0.361 + ], + "angle": 0, + "content": "
<table><tr><td>CIFAR-10</td><td>FDT-hybrid</td></tr>
<tr><td>clean accuracy</td><td>93.72 ± 0.11</td></tr>
<tr><td>FGSM (ε = 0.01)</td><td>86.31 ± 0.07</td></tr>
<tr><td>FGSM (ε = 0.02)</td><td>67.29 ± 0.06</td></tr>
<tr><td>PGD (ε = 0.01)</td><td>72.02 ± 0.07</td></tr>
<tr><td>PGD (ε = 0.02)</td><td>45.42 ± 0.05</td></tr>
<tr><td>BIM (ε = 0.01)</td><td>73.68 ± 0.10</td></tr>
<tr><td>BIM (ε = 0.02)</td><td>44.53 ± 0.06</td></tr>
<tr><td>MIM (ε = 0.01)</td><td>71.36 ± 0.06</td></tr>
<tr><td>MIM (ε = 0.02)</td><td>45.24 ± 0.06</td></tr>
<tr><td>AA (ε = 0.01)</td><td>70.45 ± 0.08</td></tr>
<tr><td>AA (ε = 0.02)</td><td>44.23 ± 0.07</td></tr>
<tr><td>C&W (λ = 0.1)</td><td>72.37 ± 0.11</td></tr></table>
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.396, + 0.825, + 0.44 + ], + "angle": 0, + "content": "time complexity of the TRS, we do not compare with it here. The experimental results show that all ensemble models achieve comparable levels of clean accuracy while FDT-hybrid achieves better robust accuracy than other methods." + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.47, + 0.827, + 0.514 + ], + "angle": 0, + "content": "Table 6: Robust Accuracy (%) of different ensemble methods against white-box attacks on TinyImageNet-200. The \\(\\epsilon\\) and \\(\\lambda\\) stand for the \\(l_{\\infty}\\) norm of the adversarial perturbation and the coefficient of C&W attack respectively. The last column is the ensemble model trained with FDT-hybrid." + }, + { + "type": "table", + "bbox": [ + 0.268, + 0.514, + 0.727, + 0.705 + ], + "angle": 0, + "content": "
<table><tr><td>Tiny-ImageNet-200</td><td>ADP</td><td>GAL</td><td>DVERGE</td><td>FDT-hybrid</td></tr>
<tr><td>clean accuracy</td><td>49.88</td><td>45.7</td><td>51.46</td><td>64.21 ± 0.06</td></tr>
<tr><td>FGSM (ε = 0.01)</td><td>10.46</td><td>1.24</td><td>22.82</td><td>21.73 ± 0.04</td></tr>
<tr><td>FGSM (ε = 0.02)</td><td>4.38</td><td>0.59</td><td>18.42</td><td>19.28 ± 0.04</td></tr>
<tr><td>PGD (ε = 0.01)</td><td>0.02</td><td>0.02</td><td>3.6</td><td>4.76 ± 0.02</td></tr>
<tr><td>PGD (ε = 0.02)</td><td>0.02</td><td>0.01</td><td>0.34</td><td>0.45 ± 0.01</td></tr>
<tr><td>BIM (ε = 0.01)</td><td>0.07</td><td>0.02</td><td>3.35</td><td>4.81 ± 0.03</td></tr>
<tr><td>BIM (ε = 0.02)</td><td>0.03</td><td>0.01</td><td>0.28</td><td>0.32 ± 0.00</td></tr>
<tr><td>MIM (ε = 0.01)</td><td>0.11</td><td>0.02</td><td>4.36</td><td>6.13 ± 0.03</td></tr>
<tr><td>MIM (ε = 0.02)</td><td>0.03</td><td>0.01</td><td>0.41</td><td>0.48 ± 0.00</td></tr>
<tr><td>AA (ε = 0.01)</td><td>0</td><td>0</td><td>0</td><td>2.66 ± 0.02</td></tr>
<tr><td>AA (ε = 0.02)</td><td>0</td><td>0</td><td>0</td><td>0.02 ± 0.00</td></tr>
<tr><td>C&W (λ = 0.01)</td><td>2.36</td><td>0.13</td><td>9.54</td><td>19.47 ± 0.06</td></tr></table>
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.729, + 0.828, + 0.788 + ], + "angle": 0, + "content": "Ablation study on model architectures. Table 7 presents the results across different model architectures, including ResNet20, ResNet50, WRN28-10, and WRN34-10. While larger models generally achieve higher clean and robust accuracy, the results suggest that our method consistently enhances robustness under various attack scenarios, demonstrating its applicability across diverse architectures." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.791, + 0.825, + 0.849 + ], + "angle": 0, + "content": "Ablation study on allocation methods. Table 8 compares the performance of FDT-hybrid with different weakness set allocation methods on CIFAR-10. The results indicate that our proposed allocation method achieves better clean accuracy and robustness under various attack scenarios than randomly uniform allocation." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.855, + 0.826, + 0.927 + ], + "angle": 0, + "content": "Ablation study on \\(\\tau_{1}\\) and \\(\\tau_{2}\\). Table 9 presents the results of FDT-hybrid with various combinations of selection thresholds \\(\\tau_{1}\\) and \\(\\tau_{2}\\) on CIFAR-10. The experiments reveal the impact of different thresholds on both clean accuracy and robustness under adversarial attacks. As \\(\\tau_{2}\\) increases, robustness improves across all metrics, but clean accuracy decreases. For a fixed \\(\\tau_{2}\\), increasing \\(\\tau_{1}\\) generally leads to a trade-off between clean accuracy and robustness. Setting \\(\\tau_{1} = 0.2\\) and \\(\\tau_{2} = 0.8\\) achieves a relatively" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.133 + ], + "angle": 0, + "content": "balanced performance, maintaining both competitive clean accuracy and robust accuracy under various attacks." + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.16, + 0.827, + 0.202 + ], + "angle": 0, + "content": "Table 7: Robust Accuracy \\((\\%)\\) of different model architectures against white-box attacks on Cifar10. The \\(\\epsilon\\) and \\(\\lambda\\) stand for the \\(l_{\\infty}\\) norm of the adversarial perturbation and the coefficient of C&W attack respectively." + }, + { + "type": "table", + "bbox": [ + 0.249, + 0.203, + 0.75, + 0.394 + ], + "angle": 0, + "content": "
<table><tr><td>CIFAR-10</td><td>ResNet20</td><td>ResNet50</td><td>WRN28-10</td><td>WRN34-10</td></tr>
<tr><td>clean accuracy</td><td>90.02</td><td>93.23</td><td>94.18</td><td>94.63</td></tr>
<tr><td>FGSM (ε = 0.01)</td><td>72.24</td><td>76.65</td><td>80.64</td><td>81.04</td></tr>
<tr><td>FGSM (ε = 0.02)</td><td>58.04</td><td>58.59</td><td>60.09</td><td>60.92</td></tr>
<tr><td>PGD (ε = 0.01)</td><td>48.48</td><td>60.23</td><td>64.64</td><td>65.38</td></tr>
<tr><td>PGD (ε = 0.02)</td><td>20.01</td><td>24.35</td><td>26.00</td><td>27.42</td></tr>
<tr><td>BIM (ε = 0.01)</td><td>48.57</td><td>60.43</td><td>67.36</td><td>68.29</td></tr>
<tr><td>BIM (ε = 0.02)</td><td>16.63</td><td>23.57</td><td>32.36</td><td>33.86</td></tr>
<tr><td>MIM (ε = 0.01)</td><td>51.48</td><td>60.81</td><td>64.36</td><td>64.71</td></tr>
<tr><td>MIM (ε = 0.02)</td><td>20.09</td><td>24.54</td><td>25.64</td><td>26.42</td></tr>
<tr><td>AA (ε = 0.01)</td><td>51.56</td><td>60.48</td><td>63.45</td><td>64.01</td></tr>
<tr><td>AA (ε = 0.02)</td><td>19.42</td><td>24.21</td><td>25.23</td><td>26.39</td></tr>
<tr><td>C&W (λ = 0.01)</td><td>56.08</td><td>56.55</td><td>57.23</td><td>57.52</td></tr></table>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.44, + 0.827, + 0.468 + ], + "angle": 0, + "content": "Table 8: Performance of FDT-hybrid with different weakness set allocation method on CIFAR-10. The other settings are consistent with those in Table 1." + }, + { + "type": "table", + "bbox": [ + 0.186, + 0.469, + 0.809, + 0.52 + ], + "angle": 0, + "content": "
<table><tr><td>Allocation method</td><td>Clean accuracy</td><td>FGSM (ε = 0.02)</td><td>PGD (ε = 0.02)</td><td>AutoAttack (ε = 0.02)</td></tr>
<tr><td>Uniform Random</td><td>89.32</td><td>56.20</td><td>18.24</td><td>17.89</td></tr>
<tr><td>Ours</td><td>90.20</td><td>58.04</td><td>20.01</td><td>19.42</td></tr></table>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.565, + 0.825, + 0.594 + ], + "angle": 0, + "content": "Table 9: Performance of FDT-hybrid with different selection thresholds \\(\\tau_{1}\\) and \\(\\tau_{2}\\) on CIFAR-10. The other settings are consistent with those in Table 1." + }, + { + "type": "table", + "bbox": [ + 0.19, + 0.594, + 0.807, + 0.742 + ], + "angle": 0, + "content": "
<table><tr><td>Thresholds</td><td>Clean accuracy</td><td>FGSM (ε = 0.02)</td><td>PGD (ε = 0.02)</td><td>AutoAttack (ε = 0.02)</td></tr>
<tr><td>τ1 = 0.2, τ2 = 0.7</td><td>91.03</td><td>56.62</td><td>17.74</td><td>17.60</td></tr>
<tr><td>τ1 = 0.2, τ2 = 0.8</td><td>90.20</td><td>58.04</td><td>20.01</td><td>19.42</td></tr>
<tr><td>τ1 = 0.2, τ2 = 0.9</td><td>89.46</td><td>58.48</td><td>20.12</td><td>19.57</td></tr>
<tr><td>τ1 = 0.4, τ2 = 0.7</td><td>89.75</td><td>56.89</td><td>17.93</td><td>17.82</td></tr>
<tr><td>τ1 = 0.4, τ2 = 0.8</td><td>89.08</td><td>58.21</td><td>20.01</td><td>19.47</td></tr>
<tr><td>τ1 = 0.4, τ2 = 0.9</td><td>88.44</td><td>58.53</td><td>20.09</td><td>19.61</td></tr>
<tr><td>τ1 = 0.6, τ2 = 0.7</td><td>89.62</td><td>53.35</td><td>15.27</td><td>14.63</td></tr>
<tr><td>τ1 = 0.6, τ2 = 0.8</td><td>88.84</td><td>55.33</td><td>15.42</td><td>15.24</td></tr>
<tr><td>τ1 = 0.6, τ2 = 0.9</td><td>88.12</td><td>55.46</td><td>15.83</td><td>15.47</td></tr></table>
" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.785, + 0.457, + 0.799 + ], + "angle": 0, + "content": "F.2 RESULTS FOR BLACK-BOX ATTACK" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.813, + 0.827, + 0.925 + ], + "angle": 0, + "content": "In the black-box setting, the attacker's knowledge usually is limited to the original training dataset and has no information about the model. This setting represents a more practical attack scenario. The attacker can train a surrogate model to generate transferable adversarial examples and transfer them to the target ensemble model. We utilize a single ResNet-20 model as the surrogate model. Adversarial examples are generated on the surrogate model using the SPSA algorithm (Spall, 1992). Figure 5 shows the robust accuracy of ensemble models against black-box attacks under different degrees of perturbation. As we can see, FDT-hybrid ensemble training strategies outperform the other ensemble training strategy against black-box attacks both on CIFAR10 and CIFAR100." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.129, + 0.493, + 0.266 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.324, + 0.272, + 0.344, + 0.284 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.5, + 0.128, + 0.813, + 0.266 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.643, + 0.272, + 0.664, + 0.284 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.301, + 0.825, + 0.33 + ], + "angle": 0, + "content": "Figure 5: Robust Accuracy for different ensemble models against black-box attack with different perturbation scale \\(\\epsilon\\)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.352, + 0.598, + 0.366 + ], + "angle": 0, + "content": "F.3 TRADE-OFF BETWEEN CLEAN AND ROBUST ACCURACY" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.381, + 0.825, + 0.466 + ], + "angle": 0, + "content": "In this section, we explore the trade-off between clean accuracy and robust accuracy by varying the frequency selection threshold \\(\\tau_{2}\\) (as mentioned in Section 4.2). And we set \\(\\tau_{1}\\) to be 0.1. To assess the adversarial robustness, we utilize the PGD attack under \\(l_{\\infty}\\) perturbations of size \\(\\epsilon = 0.01\\) as a benchmark. We train a set of ResNet-20 FDT-hybrid models on CIFAR-10 and CIFAR-100 with various frequency selection threshold \\(\\tau_{2} \\in \\{0.4, 0.6, 0.8, 1.0, 1.2, 1.6\\}\\). Figure 6 shows that the ensemble model has lower clean accuracy and higher robust accuracy with the increasing of \\(\\tau_{2}\\)." 
+ }, + { + "type": "image", + "bbox": [ + 0.18, + 0.514, + 0.49, + 0.668 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.325, + 0.675, + 0.344, + 0.687 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.5, + 0.513, + 0.811, + 0.668 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.643, + 0.674, + 0.664, + 0.687 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.703, + 0.825, + 0.731 + ], + "angle": 0, + "content": "Figure 6: (a) shows the trade-off on CIFAR-10 while (b) on CIFAR-100. From left to right, we decrease the trade-off parameter \\(\\tau_{2}\\) for FDT." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.77, + 0.571, + 0.784 + ], + "angle": 0, + "content": "F.4 TRANSFERABILITY ACROSS VARIOUS SUB-MODELS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.799, + 0.827, + 0.924 + ], + "angle": 0, + "content": "To further investigate the diversity between sub-models, we conduct an analysis by generating adversarial examples using one sub-model and evaluating their accuracy on other target sub-models. The transferability of these adversarial examples among sub-models is visualized in Figure 7, considering different ensemble training methods on the CIFAR10 dataset. We generate adversarial examples from \"base model\" and test the accuracy of \"target model\". The experimental results indicate that FDT exhibits comparable performance to DVERGE and TRS in reducing the transferability of adversarial samples across different sub-models. This demonstrates that FDT not only enhances the diversity of weaknesses within the dataset but also weakens the transferability of adversarial examples between sub-models." 
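A compact sketch of how such a pair-wise matrix can be computed; `attack` and `accuracy` are assumed callables (e.g., PGD with ε = 0.02 and standard top-1 accuracy), not functions from the paper's codebase:

```python
import numpy as np

def transferability_matrix(sub_models, images, labels, attack, accuracy):
    """success[i, j]: rate at which examples crafted on sub-model i fool sub-model j.

    attack(model, images, labels)   -> adversarial examples crafted on `model`
    accuracy(model, images, labels) -> accuracy in [0, 1]
    """
    M = len(sub_models)
    success = np.zeros((M, M))
    for i, base in enumerate(sub_models):
        adv = attack(base, images, labels)                 # craft on the base model
        for j, target in enumerate(sub_models):
            success[i, j] = 1.0 - accuracy(target, adv, labels)
    return success
```

Low off-diagonal values indicate that adversarial examples do not transfer between sub-models, which is the diversity property Figure 7 visualizes.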
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.048 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "image", + "bbox": [ + 0.242, + 0.109, + 0.412, + 0.246 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.315, + 0.253, + 0.334, + 0.264 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.416, + 0.109, + 0.585, + 0.246 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.487, + 0.253, + 0.506, + 0.264 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image", + "bbox": [ + 0.587, + 0.109, + 0.757, + 0.246 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.66, + 0.253, + 0.678, + 0.265 + ], + "angle": 0, + "content": "(c)" + }, + { + "type": "image", + "bbox": [ + 0.24, + 0.277, + 0.41, + 0.415 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.312, + 0.421, + 0.332, + 0.434 + ], + "angle": 0, + "content": "(d)" + }, + { + "type": "image", + "bbox": [ + 0.414, + 0.278, + 0.583, + 0.414 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.486, + 0.421, + 0.504, + 0.434 + ], + "angle": 0, + "content": "(e)" + }, + { + "type": "image", + "bbox": [ + 0.587, + 0.278, + 0.754, + 0.415 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.659, + 0.421, + 0.675, + 0.434 + ], + "angle": 0, + "content": "(f)" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.45, + 0.825, + 0.493 + ], + "angle": 0, + "content": "Figure 7: Pair-wise adversarial transferability between sub-models against PGD attack with \\(\\epsilon = 0.02\\) on CIFAR-10. The value represents the success rate of adversarial examples generated by the base model in attacking the target model." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.506, + 0.504, + 0.52 + ], + "angle": 0, + "content": "F.5 COMPARE WITH ADVERSARIAL TRAINING" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.532, + 0.827, + 0.769 + ], + "angle": 0, + "content": "We use target attacks in our data transformation, which differs significantly from adversarial training. First, we employ a simple pre-trained network (VGG11 in our experiment) to compute adversarial examples, thereby accelerating the training process. Second, we only utilize the low amplitude part of the adversarial examples for data transformation, which helps maintain the model's clean accuracy. We compare our method with several popular approaches (Wang et al., 2023b; Rade & Moosavi-Dezfooli, 2021; Xu et al., 2023) on CIFAR-10 using AutoAttack under \\( l_{\\infty} \\) perturbations (\\( \\epsilon = 8 / 255 \\)). Wang et al. (2023b) generated training datasets using a diffusion model, followed by adversarial training on these datasets. For fairness, we compare our method with the version proposed by Wang et al. (2023b) that uses 50k generated images. Rade & Moosavi-Dezfooli (2021) used \"helper example\" to help the adversarial training. Xu et al. (2023) proposed Dynamics-Aware Robust Training, which encourages the decision boundary to adjust in a way that prioritizes increasing smaller margins. We use WideResNet-28-10 as the sub-model and ensemble eight sub-models without using generated data. 
The results in Table 10 indicate that, although the robustness of our method is not the highest, it maintains clean accuracy with almost no decline. Moreover, our method does not require additional generated data or adversarial training, and even with the need for ensembling, the training efficiency remains relatively high. This suggests a potential way to enhance robustness while minimizing the decrease in clean accuracy." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.775, + 0.828, + 0.887 + ], + "angle": 0, + "content": "To further illustrate our method's advantage, we conduct additional experiments to compare the \"robustness-clean accuracy\" trade-off curves of our method and AT under different settings. Fig. 8 compares the trade-off curves obtained by HAT Rade & Moosavi-Dezfooli (2021) with that of FDT-hybrid. For HAT, we fix \\(\\gamma = 0.25\\) and vary \\(\\beta \\in \\{0.1, 0.5, 2.5, 3.0, 4.0, 5.0\\}\\) (\\(\\beta\\) is the coefficient of the robustness loss, and higher \\(\\beta\\) indicates higher robust accuracy); for FDT-hybrid, we fix \\(\\tau_{1} = 0.2\\) and vary \\(\\tau_{2} \\in \\{0.5, 0.7, 0.9, 1.1, 1.3, 1.5\\}\\). We observe that HAT's robustness declines rapidly when the \\(\\beta\\) parameter is small (as increasing the clean accuracy). This result shows the significant advantage of our method when a clean accuracy above \\(90\\%\\) is required." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "image_caption", + "bbox": [ + 0.345, + 0.24, + 0.654, + 0.253 + ], + "angle": 0, + "content": "Trade-off between clean accuracy and robust accuracy" + }, + { + "type": "image", + "bbox": [ + 0.345, + 0.253, + 0.627, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.417, + 0.828, + 0.447 + ], + "angle": 0, + "content": "Figure 8: It shows the trade-off curves on CIFAR-10. From left to right, we decrease the trade-off parameter \\(\\tau_{2}\\) for FDT, and decrease the trade-off parameter \\(\\beta\\) for HAT." + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.689, + 0.825, + 0.719 + ], + "angle": 0, + "content": "Table 10: Clean accuracy and robust accuracy (\\%) of different methods against AutoAttack under \\( l_{\\infty} \\) perturbations (\\( \\epsilon = 8/255 \\)) on CIFAR-10." + }, + { + "type": "table", + "bbox": [ + 0.256, + 0.731, + 0.744, + 0.81 + ], + "angle": 0, + "content": "
<table><tr><td>CIFAR-10</td><td>clean accuracy</td><td>robust accuracy</td></tr>
<tr><td>(Wang et al., 2023b)</td><td>86.15</td><td>55.71</td></tr>
<tr><td>(Rade & Moosavi-Dezfooli, 2021)</td><td>84.90</td><td>49.08</td></tr>
<tr><td>(Xu et al., 2023)</td><td>85.55</td><td>54.69</td></tr>
<tr><td>OURS (FDT-hybrid)</td><td>93.72</td><td>34.61</td></tr></table>
" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "23" + } + ] +] \ No newline at end of file diff --git a/2025/To Tackle Adversarial Transferability_ A Novel Ensemble Training Method with Fourier Transformation/11b8de53-d193-4b48-bf31-fc86f1bab485_origin.pdf b/2025/To Tackle Adversarial Transferability_ A Novel Ensemble Training Method with Fourier Transformation/11b8de53-d193-4b48-bf31-fc86f1bab485_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..07b7dd71567c1da172df2dea90ffd0069cf19647 --- /dev/null +++ b/2025/To Tackle Adversarial Transferability_ A Novel Ensemble Training Method with Fourier Transformation/11b8de53-d193-4b48-bf31-fc86f1bab485_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0b33a94141a686bef595afe7b18417a17973e68cac348dc36f131eebb891c194 +size 4657239 diff --git a/2025/To Tackle Adversarial Transferability_ A Novel Ensemble Training Method with Fourier Transformation/full.md b/2025/To Tackle Adversarial Transferability_ A Novel Ensemble Training Method with Fourier Transformation/full.md new file mode 100644 index 0000000000000000000000000000000000000000..2b63ff2d102e03c707696818eaeed81eb7797491 --- /dev/null +++ b/2025/To Tackle Adversarial Transferability_ A Novel Ensemble Training Method with Fourier Transformation/full.md @@ -0,0 +1,495 @@ +# TO TACKLE ADVERSARIAL TRANSFERABILITY: A NOVEL ENSEMBLE TRAINING METHOD WITH FOURIER TRANSFORMATION + +Wanlin Zhang $^{1,3}$ , Weichen Lin $^{2}$ , Ruomin Huang $^{4}$ , Shihong Song $^{1}$ , Hu Ding $^{1*}$ + +$^{1}$ School of Computer Science and Technology, University of Science and Technology of China + $^{2}$ School of Artificial Intelligence and Data Science, University of Science and Technology of China + $^{3}$ Shanghai Innovation Institute $^{4}$ Department of Computer Science, Duke University +{ideven, linweichen, shihongsong}@mail.ustc.edu.cn +ruomin.huang@duke.edu,HUDING@ustc.edu.cn + +# ABSTRACT + +Ensemble methods are commonly used for enhancing robustness in machine learning. However, due to the "transferability" of adversarial examples, the performance of an ensemble model can be seriously affected even it contains a set of independently trained sub-models. To address this issue, we propose an efficient data transformation method based on a cute "weakness allocation" strategy, to diversify non-robust features. Our approach relies on a fine-grained analysis on the relation between non-robust features and adversarial attack directions. Moreover, our approach enjoys several other advantages, e.g., it does not require any communication between sub-models and the construction complexity is also quite low. We conduct a set of experiments to evaluate the performance of our proposed method and compare it with several popular baselines. The results suggest that our approach can achieve significantly improved robust accuracy over most existing ensemble methods, and meanwhile preserve high clean accuracy. + +# 1 INTRODUCTION + +In the past decade, Deep neural networks (DNNs) have achieved prominent performance on a broad range of real-world tasks (Goodfellow et al., 2016). However, a number of previous works show that DNNs are susceptible to carefully-crafted manipulations, where the manipulated data are called "adversarial examples" (Szegedy et al., 2014; Zhou et al., 2018; Heaven, 2019). 
The existence of adversarial examples severely impedes the application of DNNs in security-conscious scenarios, such as self-driving car (Rossolini et al., 2023; Zhu et al., 2021) and heath care (Newaz et al., 2020). + +The adversarial training approach (Wang et al., 2023a; Madry et al., 2018) has gained significant attention due to its great effectiveness for defending against adversarial examples. However, the adversarial training approach often necessitates considerably high training time and large training dataset (Gowal et al., 2021; Carmon et al., 2019). Moreover, it has been observed that adversarial training is likely to incur certain decline in the accuracy on clean data, which also hinders the trained model to be applied for many practical tasks (Tsipras et al., 2018; Zhang et al., 2019). + +Another important approach to enhance adversarial robustness is ensemble training (Tramér et al., 2018). But recent studies (Yang et al., 2025; Gao et al., 2022; Waseda et al., 2023) demonstrated that an adversarial example can attack different models even they are trained independently, and this phenomenon is the so-called "transferability" of adversarial examples. Hence, the strategy that simply integrates different models trained on the same original dataset is not sufficient to guarantee the overall robustness. To resolve this issue, different approaches have been proposed for maximizing the "diversity" among sub-models; in general, these approaches can be categorized into two classes: "simultaneous training" and "individual training" (Pang et al., 2019). + +To reduce the similarity among sub-models, most existing "simultaneous training" methods attempt to incorporate some penalty during each epoch of parameter updates. Kariyappa & Qureshi (2019) + +proposed the "Gradient Alignment Loss (GAL)" method to minimize the gradient similarity between sub-models directly. Further, Yang et al. (2021) proposed the "Transferability Reduced Smooth (TRS)" method to improve GAL by adding a regularization term to increase the smoothness, as the models with a smoother loss function can reduce the "transferability" of attacks. Yang et al. (2020) aimed to isolate the adversarial vulnerability in each sub-model by distilling non-robust features, where the sub-models can then generate diverse outputs being resilient against transfer attacks. Despite their effectiveness for defending adversarial attacks, the simultaneous training methods often require a substantial amount of memory since all the sub-models need to be stored in the GPUs in the training stage, which could be prohibitive if the number of sub-models is not small (say, more than 10) and/or their sizes are large. Additionally, the information interaction in parallel training can also cause extra large communication cost. + +Different from simultaneous training, most "individual training" methods train each sub-model independently on a randomly transformed version of the given training dataset (Pang et al., 2019; AprilPyone & Kiya, 2021). This "random transformation" strategy yields diverse datasets, and thus different sub-models trained on these datasets can present diverse performances when confronting an adversarial attack. The individual training approach has higher flexibility and also requires less GPU memory, because the sub-models do not need to be stored simultaneously. Since there is no communication between sub-models, individual training methods are more suitable for parallel training with multiple GPUs. 
But unfortunately, recent studies showed that the commonly used random transformations (e.g. image cropping and rescaling) are not that effective under adversarial attacks (Athalye et al., 2018). The major cause of suppressing the performance of individual training is that the "transferability" problem is still not well addressed. + +Our contributions. To tackle the transferability obstacle, we consider developing a new data transformation method for ensemble training. Our main contributions are summarized as follows: + +- First, we propose a fine-grained analysis on the relation between non-robust features and adversarial attack directions (Section 3). Being different from the previous analysis on non-robust features, our new analysis provides us the hints that are particularly useful to allocate the potential vulnerability directions to a set of sub-models, and therefore paves the way for designing our ensemble training strategy. + +- Second, we propose a data transform framework that can effectively promote the diversity of training data for robust ensemble training. The framework consists of two steps: "frequency selection" and "frequency transformation", where the frequency is based on the Fourier transformation on the images. We propose two efficient frequency transformations with low complexities on the identified non-robust features. The first one is based on simple random noise, and the second one is a cute "targeted attack transformation" that can modify the non-robust features more effectively (Section 4.2). + +- Finally, we conduct a set of experiments to evaluate the adversarial robustness of our approach on several benchmark datasets under the widely used attack algorithms. We also compare our approach with several open-source ensemble methods, such as ADP (Pang et al., 2019), GAL (Kariyappa & Qureshi, 2019), DVERGE (Yang et al., 2020), and TRS (Yang et al., 2021). Compared with those baselines, the experimental results suggest that our proposed approach can significantly outperform most of them in robust accuracy and also preserve comparable high clean accuracy. + +# 1.1 OTHER RELATED WORKS + +Data transformation for ensemble training. Guo et al. (2018) and Raff et al. (2019) proposed the transformations that preserve semantic information to reduce the impact of adversarial perturbation. AprilPyone & Kiya (2021) developed a training method that employs block-wise data transformations, where the input image is partitioned into blocks based on some private key. LINAC (Rusu et al., 2022) uses a predetermined random seed (private key) to initialize and train a DNN to encode the input data, serving as an encrypted input transformation. + +Adversarial attack from frequency perspective. Wang et al. (2020) explained that the model's vulnerability to small distortions may be due to its dependence on high-frequency features. Yucel et al. (2023) proposed a data augmentation method that reduces the reliance on high-frequency components, so as to improve model's robustness while maintaining clean accuracy. Maiya et al. (2021) and Bernhard et al. (2021) respectively showed that to fully understand the vulnerability, we should consider the distribution of the entire dataset with high and low frequencies. + +# 2 PRELIMINARIES + +Some notations. We consider the $k$ -classification task: $\mathcal{X} \to \mathcal{Y}$ where $\mathcal{X}$ is the input data space and $\mathcal{Y} = \{1,2,\dots,k\}$ is the set of labels. 
A soft-classification model $f(\cdot;\beta)$ maps each $x \in \mathcal{X}$ to a vector $f(x;\beta) \in \mathbb{R}^k$ , where $\beta$ is the parameter vector that needs to be trained. Its associated hard-classification model is $F(x;\beta) = \arg \max_i [f(x;\beta)]_i$ where $[\cdot]_i$ stands for the $i$ -th coordinate. The model $f$ is usually equipped with a loss function $\ell(f(x;\beta), y)$ , $x \in \mathcal{X}$ and $y \in \mathcal{Y}$ , which is differentiable on $\beta$ (e.g., cross-entropy loss). We refer to the accuracy on the original dataset as "clean accuracy" and the accuracy on adversarial examples as "robust accuracy". We denote the one-hot $k$ -dimensional vector that corresponds to the target label $y$ as $h(y)$ . + +Definition 2.1 (Ensemble Model) Let $\mathcal{M} = \{f_1, \dots, f_M\}$ be a set of sub-models for a $k$ -classification task. We build the ensemble model with the following function: + +$$ +f _ {\mathrm {E}} (x; \beta_ {[ 1: M ]}) = \frac {1}{M} \sum_ {m \in [ M ]} \widehat {F} _ {m} (x; \beta_ {m}), \tag {1} +$$ + +where $\beta_{[1:M]} = \{\beta_m \mid 1 \leq m \leq M\}$ , and $\widehat{F}_m(x; \beta_m)$ is the one-hot $k$ -dimensional vector of the hard-classification model $F_m(x; \beta_m)$ of $f_m$ . + +Definition 2.2 (Adversarial Attack and Targeted Attack) Given a model $f(\cdot; \beta)$ and an input $(x, y) \in \mathcal{X} \times \mathcal{Y}$ , the adversarial attack algorithm $\mathcal{A}$ returns a perturbed data $x'$ inside the $l_p$ ball of radius $\epsilon > 0$ , which maximizes the loss function $\ell(f(\cdot; \beta), \cdot)$ , or minimizes the loss function $\ell(f(\cdot; \beta), y_t)$ if given a target label $y_t \neq y$ . For the latter one, we say it is a "targeted attack from $y$ to $y_t$ ". Usually we set $p = 2$ or $p = \infty$ . + +As mentioned in Section 1, because our proposed approach is based on Fourier transform, we introduce several necessary notations below. Given an image $x$ of size $L \times N$ , the corresponding two-dimensional discrete Fourier transform can be written as: for any $0 \leq u \leq L - 1$ and $0 \leq v \leq N - 1$ , + +$$ +\tilde {x} [ u, v ] = \sum_ {s = 0} ^ {L - 1} \sum_ {t = 0} ^ {N - 1} x [ s, t ] \cdot e ^ {- 2 \mathrm {j} \pi \left(\frac {u _ {s}}{L} + \frac {v t}{N}\right)}, \tag {2} +$$ + +where “ $j$ ” denotes the imaginary unit, and “ $\tilde{x}[u,v]$ ” is the entry in the $u$ -th column and $v$ -th row of the Fourier matrix $\tilde{x}$ (“ $x[s,t]$ ” is defined similarly for the original image $x$ ). The pixels of the image $x$ form the time domain, and the entries of $\tilde{x}$ form the frequency domain. For a frequency $(u,v)$ , the amplitude is the absolute value $|\tilde{x}[u,v]|$ . We call a frequency $(u,v)$ as a frequency feature. + +# 3 FINE-GRAINED ANALYSIS ON ENSEMBLE MODEL VULNERABILITY + +The previous work (Ilyas et al., 2019) categorizes the features learned by a model into robust and non-robust features. It shows that adversarial vulnerability is a natural consequence of the presence of highly predictive but non-robust features. Moreover, different models trained on the same dataset often have similar non-robust features, and therefore an adversarial example usually exhibits the "transferability" property among them. Several other works also presented detailed discussions on the impact of non-robust features (Benz et al., 2021; Springer et al., 2021). 
Following those studies, a natural idea for tackling the transferability issue is to ensure that the sub-models should have diverse non-robust features. In this section, we provide a fine-grained analysis on the vulnerability of ensemble models and then conclude two important hints for achieving this "diversity" goal. + +The following definitions are inspired by (Ilyas et al., 2019). Note that different from the term "feature" used in their article, we use "feature extractor" instead in our paper, since "feature" will be particularly used for referring to image feature in time or frequency domain. Specifically, we define a "feature extractor" as a function that maps the input $x \in \mathcal{X}$ to a vector in $\mathbb{R}^k$ . A model $f$ is composed of a set of different feature extractors, with each feature extractor focusing on distinct feature. The combination of outputs from these feature extractors forms the model's final output. We then further define the "useful feature extractors". + +Definition 3.1 (Useful feature extractor) For a given data distribution $\mathcal{D} = \mathcal{X}\times \mathcal{Y}$ , a feature extractor $\theta :\mathcal{X}\to \mathbb{R}^k$ is useful, if we have + +$$ +\mathbb {E} _ {(x, y) \sim \mathcal {D}} [ h (y) ^ {\top} \theta (x) ] > \frac {1}{k}. \tag {3} +$$ + +Recall that $h(y)$ is the one-hot $k$ -dimensional vector of the label $y$ . Roughly speaking, the inequality (3) implies that the expected contribution of a useful feature extractor to the model's correct prediction is higher than the average contribution over all the $k$ classes. + +Definition 3.2 (robust and non-robust feature extractor) We use $\mathcal{A}(x)$ to denote the adversarial example of a data item $x$ as described in Definition 2.2. Let $\theta$ be a useful feature extractor. (1) We say $\theta$ is robust if the following condition holds for any $i$ ( $1 \leq i \leq k$ ): + +$$ +\mathbb {E} _ {(x, y) \sim \mathcal {D} _ {i}} \left[ \theta (\mathcal {A} (x)) \right] _ {i} > \frac {1}{k} +$$ + +where $\mathcal{D}_i$ represents the $i$ -th class data. We denote the set of these robust feature extractors as $\Theta_{R}$ . + +(2) The remaining useful feature extractors are non-robust. We assign these non-robust extractors to $k(k - 1)$ sets: $\{\Theta_{i,j} \mid 1 \leq i \neq j \leq k\}$ as follows. Initially, all these $k(k - 1)$ sets are empty. Then we go through all the non-robust feature extractors. For each non-robust $\theta$ , there must exist at least an index "i" such that + +$$ +\mathbb {E} _ {(x, y) \sim \mathcal {D} _ {i}} [ \theta (\mathcal {A} (x)) ] _ {i} \leq 1 / k; +$$ + +we let $j = \arg \max_{s} \mathbb{E}_{(x,y) \sim \mathcal{D}_i}[\theta(\mathcal{A}(x))]_s$ and assign $\theta$ to $\Theta_{i,j}$ (note that $j$ should be not equal to $i$ , or there are multiple indices achieving the maximum expectation and at least one is not equal to $i$ , since otherwise $\sum_{s=1}^{k} \mathbb{E}_{(x,y) \sim \mathcal{D}_i}[\theta(\mathcal{A}(x))]_s$ will less than 1). Eventually, these $k(k-1)$ sets are constructed, where each $\Theta_{i,j}$ contains the feature extractors that are not robust to the attack from $i$ to $j$ . + +Remark 3.3 Intuitively, if a feature extractor is robust, it should have the capability to preserve its contribution to the correct prediction even under perturbation. It is also worth noting that a non-robust feature extractor $\theta$ could be assigned to multiple $\Theta_{i,j}s$ . 
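The assignment rule of Definition 3.2 can be made operational on finite samples. The following sketch estimates the expectations empirically; it is our own illustration (not code from this work), and it assumes every class appears in the sample and that `theta_adv[i]` stores $\theta(\mathcal{A}(x_i))$:

```python
import numpy as np

def assign_feature_extractor(theta_adv, labels, k):
    """Empirical version of Definition 3.2 (a sketch under assumed inputs).

    theta_adv : (n, k) array of theta(A(x)) over n samples.
    labels    : (n,) true classes in {0, ..., k-1}.
    Returns "robust", or the list of pairs (i, j) whose sets Theta_{i,j} this extractor joins.
    """
    pairs = []
    for i in range(k):
        mean_i = theta_adv[labels == i].mean(axis=0)   # estimates E_{D_i}[theta(A(x))]
        if mean_i[i] <= 1.0 / k:                       # fails the robustness test on class i
            j = int(np.argmax(mean_i))
            if j == i:                                 # tie case: pick the best index != i
                j = int(np.argsort(mean_i)[-2])
            pairs.append((i, j))
    return "robust" if not pairs else pairs
```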
+ +Assume we have a standardly trained model $f$ consisting of a set of useful feature extractors, and we denote it as $\Theta_f$ . Each of them can be classified as robust or non-robust as Definition 3.2. Similar with the formulation proposed in (Ilyas et al., 2019), we can represent the model as + +$$ +f (x) = \sum_ {\theta \in \Theta_ {R} \cap \Theta_ {f}} w _ {\theta} \theta (x) + \sum_ {i, j = 1, i \neq j} ^ {k} \sum_ {\theta \in \Theta_ {i, j} \cap \Theta_ {f}} w _ {\theta} \theta (x), \tag {4} +$$ + +where each $\theta$ has a coefficient $w_{\theta} \in \mathbb{R}$ . We then conduct our analysis based on Equation (4). Some recent works reveal that adversarial training method can obtain robust model through reducing the dependence on non-robust feature extractors (Allen-Zhu & Li, 2022; Tsipras et al., 2018). However, this strategy may cause certain downgrade performance on clean accuracy (because the non-robust feature extractors also contribute to obtaining correct prediction). Fortunately, we are able to avoid this dilemma in the context of ensemble training. Namely, we just need to keep the non-robust features as diverse as possible, instead of entirely eliminating the dependence on those non-robust feature extractors. To pave the way for realizing this goal, we introduce the definition of vulnerability of ensemble model below. + +Definition 3.4 (Vulnerability of ensemble model) Suppose $f_{\mathrm{E}}$ is an ensemble model as described in Definition 2.1, and its associated hard-classification model is denoted by $F_{\mathrm{E}}$ : $\forall x$ , $F_{\mathrm{E}}(x) = \arg \max_{i}[f_{\mathrm{E}}(x)]_{i}$ . Given the data distribution $\mathcal{D} = \mathcal{X} \times \mathcal{Y}$ , the vulnerability of $F_{\mathrm{E}}$ is defined as: + +$$ +\operatorname {V r} \left(F _ {\mathrm {E}}\right) = \mathbb {E} _ {(x, y) \sim \mathcal {D}} \left[ \mathbb {I} \left\{F _ {\mathrm {E}} (x) = y \wedge F _ {\mathrm {E}} (\mathcal {A} (x)) \neq y \right\} \right], \tag {5} +$$ + +where $\mathbb{I}(\cdot)$ represents the indicator function. Furthermore, for any target class $y_{t}$ , we can define the vulnerability towards $y_{t}$ as $\mathrm{Vr}(F_{\mathrm{E}}, y_{t}) = \mathbb{E}_{(x,y) \sim \mathcal{D}}\left[\mathbb{I}\{F_{\mathrm{E}}(x) = y \wedge F_{\mathrm{E}}(\mathcal{A}(x)) = y_{t}\}\right]$ . + +The vulnerability of Definition 3.4 describes the success probability of an attack $\mathcal{A}$ to the ensemble model $F_{\mathrm{E}}$ . We have the following key inequality, which indicates that $\operatorname{Vr}(F_{\mathrm{E}})$ is bounded by considering all the attack directions, i.e., + +$$ +\operatorname {V r} \left(F _ {\mathrm {E}}\right) \leq \sum_ {y _ {t} \in \mathcal {Y}} \operatorname {V r} \left(F _ {\mathrm {E}}, y _ {t}\right). \tag {6} +$$ + +The proof of Inequality (6) is placed in Appendix A.1. Moreover, if $F_{\mathrm{E}}(\mathcal{A}(x)) = y_t$ , there are at least $M / k$ sub-models returning the wrong label $y_t$ due to the pigeonhole principle. Namely, " $\sum_{m=1}^{M} \mathbb{I}\left([f_m(\mathcal{A}(x))]_y < [f_m(\mathcal{A}(x))]_{y_t}\right) > \frac{M}{k}$ " should be a necessary condition for successfully attacking from $y$ to $y_t$ . So it implies + +$$ +\operatorname {V r} \left(F _ {\mathrm {E}}, y _ {t}\right) \leq \mathbb {E} _ {(x, y) \sim \mathcal {D}} \left[ \mathbb {I} \left(\sum_ {m = 1} ^ {M} \mathbb {I} \left([ f _ {m} (\mathcal {A} (x)) ] _ {y} < [ f _ {m} (\mathcal {A} (x)) ] _ {y _ {t}}\right) > \frac {M}{k}\right) \right]. 
\tag {7} +$$ + +From the upper bound (6), we can decrease the total vulnerability by reducing $\mathrm{Vr}(F_{\mathrm{E}},y_t)$ for each $y_{t}$ . Also, from (7) we know that $\mathrm{Vr}(F_{\mathrm{E}},y_{t})$ can be reduced by decreasing the chance of “ $[f_m(\mathcal{A}(x))]_y < [f_m(\mathcal{A}(x))]_{y_t}$ ” over $m\in \{1,2,\dots ,M\}$ . According to the Equation (4), the inequality $\left[f_m(\mathcal{A}(x))\right]_y < \left[f_m(\mathcal{A}(x))\right]_{y_t}$ ” can be rewritten as + +$$ +\left[ \sum_ {\theta \in \Theta_ {R} ^ {m}} w _ {\theta} \theta (\mathcal {A} (x)) + \sum_ {i, j = 1, i \neq j} ^ {k} \sum_ {\theta \in \Theta_ {i, j} ^ {m}} w _ {\theta} \theta (\mathcal {A} (x)) \right] _ {y} < \left[ \sum_ {\theta \in \Theta_ {R} ^ {m}} w _ {\theta} \theta (\mathcal {A} (x)) + \sum_ {i, j = 1, i \neq j} ^ {k} \sum_ {\theta \in \Theta_ {i, j} ^ {m}} w _ {\theta} \theta (\mathcal {A} (x)) \right] _ {y _ {t}}, \tag {8} +$$ + +where $\Theta_R^m$ and $\Theta_{i,j}^{m}$ respectively denote the sets of robust and non-robust feature extractors for the sub-model $f_{m}$ . Moreover, the set $\Theta_{y,y_t}^m$ should have relatively larger influence to the right-hand side of (8) than other feature extractor set $\Theta_{y,j}^{m}$ with $j\neq y_{t}$ , due to the outer operator “ $[\cdot ]_{y_t}$ . Therefore, we conclude our first hint as an intuition for reducing $\mathrm{Vr}(F_{\mathrm{E}})$ . + +Hint (i): To decrease the vulnerability in the attack direction $y_{t}$ (i.e., each term $\mathrm{Vr}(F_{\mathrm{E}},y_{t})$ in the upper bound of (6)), it is reasonable to decrease the influence from the non-robust feature extractors of $\Theta_{y,y_t}^m$ . + +In Hint (i), a major difference from the previous analysis (Ilyas et al., 2019; Allen-Zhu & Li, 2022) is that, we in particular relate each attack direction $y_{t}$ to some specific non-robust feature extractors, where the benefit is that these correspondences can effectively help us to build the diverse ensemble model. Moreover, According to the principle of ensemble methods, as long as at least $M / 2 + 1$ sub-models are not successfully attacked, the ensemble model will successfully defend against the attack. So we conclude the second hint that is also important for designing our approach. + +Hint (ii): For each attack direction $y_{t}$ , we only need to consider manipulating the training data of $M / 2 + 1$ sub-models instead of all the $M$ sub-models. + +Overall, the above Hint (i) & (ii) play the key roles for inspiring our data transformation method in Section 4. + +# 4 OUR ENSEMBLE TRAINING METHOD + +We first introduce our model and high-level idea in Section 4.1, and then elaborate on the technical details for the data transformations in Section 4.2. + +# 4.1 OVERVIEW OF OUR FRAMEWORK + +Note that the feature extractors of a model depend on the given training data. Namely, any modification on the features of the training data can implicitly influence the model. Thus, in this section we follow the Hint (i) & (ii) of Section 3 to design an effective data transformation method. The transformation is expected to modify the features of the training data, so as to enhance the robustness of the trained ensemble model. We train a set of distinct sub-models on the transformed training data; these sub-models can be integrated into an ensemble model being robust against adversarial attacks, while preserving the clean accuracy of each sub-model as much as possible. 
+We use "$\pi_{m}$" to denote the transformation for the $m$-th sub-model, $1 \leq m \leq M$, and formulate the following problem by slightly modifying Definition 2.1 (replacing $x$ by the adversarial example $\mathcal{A}(x)$ for each sub-model):
+
+$$
+\min \mathbb {E} _ {(x, y) \sim \mathcal {X} \times \mathcal {Y}} \ell \left(\frac {1}{M} \sum_ {m \in [ M ]} \widehat {F} _ {m} (\mathcal {A} (x); \beta_ {m}), y\right) \tag {9}
+$$
+
+where $\beta_{m}$ is obtained by training on the transformed data, i.e., $\beta_{m} = \operatorname*{argmin}_{\beta}\mathbb{E}_{(x,y)\sim \mathcal{X}\times \mathcal{Y}}\ell (f_{m}(\pi_{m}(x);\beta),y)$ for each $m\in \{1,2,\dots ,M\}$.
+
+The major challenge in solving the above problem (9) is how to design a set of appropriate transformations $\{\pi_m\mid 1\leq m\leq M\}$, so that the obtained parameters $\beta_{[1:M]}$ can yield sufficiently diverse sub-models. To address this issue, we leverage transformations in the frequency domain to guide the non-robust features of each sub-model to be as diverse as possible. Specifically, we introduce a method called "Frequency Domain Transformation (FDT)" for constructing the set of diverse training datasets $\{\pi_1(\mathcal{X}),\pi_2(\mathcal{X}),\dots ,\pi_M(\mathcal{X})\}$. FDT relies on a key "weakness allocation" strategy. Roughly speaking, the strategy aims to promote the diversity of the constructed datasets while ensuring that the overall clean accuracy of the ensemble is not sacrificed. The details are presented in the next section.
+
+# 4.2 FREQUENCY DOMAIN TRANSFORMATION
+
+Before performing the transformation, we need to select a set of non-robust features. In the time (spatial) domain, a simple observation is that an image feature is usually invariant under spatial translation, e.g., it can appear at different positions in an image. This property makes it challenging to directly identify and represent non-robust features in the time domain. Thus we turn our attention to the frequency domain. Moreover, some previous studies on robust learning have already revealed that robust and non-robust features are often deeply related to the frequency domain (Wang et al., 2020; Bernhard et al., 2021; Maiya et al., 2021).
+
+Amplitude based selection. To identify the non-robust frequencies, a straightforward idea is to test the robustness of each individual frequency and select the non-robust ones. Nevertheless, this may incur a large computational cost since the number of frequencies is high (e.g., if the input image is $64 \times 64$, the number of frequencies is also $64 \times 64 \approx 4 \times 10^{3}$). We propose an easy-to-implement selection idea based on the amplitudes, since the amplitudes can be directly obtained via the Fourier transformation with low complexity. According to previous research (Ilyas et al., 2019; Benz et al., 2021; Springer et al., 2021), a feature can be regarded as "robust" if it cannot be easily manipulated by small perturbations. We observe that high-amplitude frequency features usually determine the recognizable content (i.e., the ground truth) of an image. Figure 4 in our Appendix illustrates an example showing that, if we keep high-amplitude frequencies and remove low-amplitude ones, the image changes only slightly even after adding a certain amount of noise (i.e., we can still recognize the ground truth from the modified image). This observation suggests that high-amplitude frequency features are more strongly related to the semantic information of an image.
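+
+In code, this amplitude-based selection needs only one FFT per image. A minimal sketch (ours), where we interpret $\tau$ as a quantile of the amplitudes for concreteness (the paper's thresholds live on the amplitude scale itself, so this normalization is an assumption):
+
+```python
+import torch
+
+def amplitude_masks(x, tau):
+    """Split the 2-D spectrum of an image x (shape C x H x W) into
+    high-amplitude ("robust") and low-amplitude ("non-robust") frequencies."""
+    amp = torch.fft.fft2(x).abs().mean(dim=0)       # amplitude per frequency
+    low = amp < torch.quantile(amp.flatten(), tau)  # non-robust frequencies
+    return ~low, low                                # masks M1 (high) and M2 (low)
+```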
+So in our following approach, we maintain the high-amplitude frequency features as "robust features", and select the frequencies with low amplitudes (by setting a threshold "$\tau$") to transform. Moreover, we can conveniently observe how the performance changes by varying the threshold $\tau$ in our experiments. Figure 1 illustrates the amplitude-based selection.
+
+![](images/0d7e14b81dd688209bf8bee950ed47a465fdf9705d30a2b85cd269279036a2fb.jpg)
+Figure 1: We use a $5 \times 5$ image as a toy example, where the intensity of the color indicates the magnitude of the amplitude. In our amplitude-based selection, we retain the high-amplitude frequencies (i.e., the darker regions) and perform data transformations on the low-amplitude frequencies (i.e., the white regions).
+
+Following our frequency selection, we propose two transformation methods for promoting the diversity by using the identified non-robust features. Our first approach follows a straightforward idea: simply replace the non-robust features by random noise (due to the space limit, we leave the details to Appendix C). This method is very easy to implement in practice. Though it can achieve a certain degree of improvement upon previous ensemble training methods, the performance is still not very promising (as shown in our experiments). To further improve the effectiveness, we propose a more sophisticated approach called "targeted-attack transformation", which constructs a set of different "substitute" features by attacking the images towards different target classes, and then uses them to replace the selected non-robust frequencies.
+
+Targeted-attack transformation: We briefly explain our intuition first. It has been shown that adversarial attacks have the capability to manipulate non-robust features (Ilyas et al., 2019; Yang et al., 2020). In particular, a targeted attack, as introduced in Definition 2.2, aims at modifying non-robust features that are associated with a specific target label. For instance, let us consider a data point $(x,y)$ in the original dataset $\mathcal{X} \times \mathcal{Y}$; we set the target label as $y_{t}$ and obtain the corresponding adversarial example $x^{\prime}$ ($x^{\prime}$ contains the modified non-robust features that are associated with $y_{t}$). When training a model using $(x^{\prime}, y)$, intuitively this can be viewed as an "immunization" against the attack from $y$ to $y_{t}$; consequently, the chance of obtaining the wrong label $y_{t}$ for data with label $y$ decreases. In other words, it becomes more difficult to attack the images with label $y$ towards $y_{t}$ than towards the other classes. We call the modified non-robust feature a "substitute" feature derived by the targeted attack.
+
+Motivated by this observation, we can construct different transformations by using $k \times (k - 1)$ targeted attacks (since each label can be attacked towards the other $k - 1$ labels); these attacks can yield different substitute features, and then we use these features to replace the corresponding non-robust features in the original dataset (based on Hint (i) in Section 3); finally, the $M$ transformed datasets are obtained via an allocation algorithm, where each substitute feature is captured by at least $M / 2 + 1$ datasets (based on Hint (ii) in Section 3). Overall, due to the completeness of the $k \times (k - 1)$ targeted attacks, the $M$ sub-models trained on those datasets can guarantee the robustness of the final ensemble solution.
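+
+A sketch of the resulting per-image transformation (ours; `x_adv` denotes a targeted adversarial example $x'$ produced by any targeted attack of Definition 2.2, and $\tau$ is again treated as an amplitude quantile):
+
+```python
+import torch
+
+def targeted_attack_transform(x, x_adv, tau):
+    """pi(x): keep the high-amplitude frequencies of x and substitute the
+    selected low-amplitude frequencies with those of x_adv."""
+    spec_x, spec_adv = torch.fft.fft2(x), torch.fft.fft2(x_adv)
+    amp = spec_x.abs().mean(dim=0)
+    low = amp < torch.quantile(amp.flatten(), tau)  # non-robust frequencies
+    mixed = spec_x * ~low + spec_adv * low          # M1(x) + M2(x')
+    return torch.fft.ifft2(mixed).real              # back to the spatial domain
+```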
+We first introduce some definitions for our transformation.
+
+Definition 4.1 (Strengthen a dataset) Let $y_{1} \neq y_{2} \in \mathcal{Y}$. If a given training dataset $P$ contains at least one adversarial example that has the original label $y_{1}$ but is misclassified as $y_{2}$, we say that $P$ has been strengthened by the attack direction from $y_{1}$ to $y_{2}$ ("$\overrightarrow{y_{1}y_{2}}$-direction" for short).
+
+In other words, if $P$ is not strengthened in the $\overrightarrow{y_1y_2}$-direction, the model trained on $P$ is more likely to be vulnerable to the targeted attacks from $y_1$ to $y_2$. Also, the dataset $P$ may not have been strengthened in multiple different directions. So we define its "weakness set" $\mathcal{W} = \{\overrightarrow{y_1y_2} \mid 1 \leq y_1, y_2 \leq k, y_1 \neq y_2, \text{ and } P \text{ has not been strengthened in the } \overrightarrow{y_1y_2} \text{-direction}\}$.
+
+Definition 4.2 (Diversity of weakness sets) Given $M$ datasets $\{P_1, P_2, \dots, P_M\}$ with their corresponding weakness sets $\{\mathcal{W}_1, \mathcal{W}_2, \dots, \mathcal{W}_M\}$, we define their diversity:
+
+$$
+\boldsymbol {D i v} (P _ {1}, P _ {2}, \dots , P _ {M}) = 1 - \frac {| \mathcal {W} _ {1} \cap \mathcal {W} _ {2} \cap \cdots \cap \mathcal {W} _ {M} |}{\max \{| \mathcal {W} _ {1} | , | \mathcal {W} _ {2} | , \cdots , | \mathcal {W} _ {M} | \}}.
+$$
+
+It is easy to see that the higher the value $\mathbf{Div}(P_1, P_2, \dots, P_M)$, the more diverse the corresponding weakness sets. A higher diversity suggests that the vulnerabilities of the $M$ sub-models trained on those datasets are more likely to be different. To achieve a nice performance in terms of both accuracy and robustness, we need to take the diversity function "Div" into account when designing the transformations. The basic principle is:
+
+On the one hand, our transformed datasets should have a sufficiently large number of diverse substitute features, so that one adversarial attack cannot easily capture more than half of the $M$ sub-models. On the other hand, the datasets should also maintain the major information of the original input as much as possible, since otherwise the clean accuracy may decline due to the added substitute features.
+
+To provide an appropriate trade-off, we propose the following constrained optimization objective: let $\mathbb{C}$ be the set of all the $\binom{M}{\lceil M/2 \rceil}$ combinations of $\lceil M/2 \rceil$-size subsets from $\{1, 2, \dots, M\}$, and then
+
+$$
+\max _ {P _ {1}, P _ {2}, \dots , P _ {M}} \quad \min \left\{\left| \mathcal {W} _ {1} \right|, \left| \mathcal {W} _ {2} \right|, \dots , \left| \mathcal {W} _ {M} \right| \right\} \tag {10}
+$$
+
+$$
+\mathrm {s . t .} \forall \left\{i _ {1}, i _ {2}, \dots , i _ {\lceil M / 2 \rceil} \right\} \in \mathbb {C}, \quad \boldsymbol {D i v} \left(P _ {i _ {1}}, P _ {i _ {2}}, \dots , P _ {i _ {\lceil M / 2 \rceil}}\right) = 1. \tag {11}
+$$
+
+We maximize the objective function of (10) because we want to minimize the degree of modification for each transformed dataset. Intuitively, a large weakness set indicates that the corresponding dataset is not changed significantly by the transformation, and thus the clean accuracy is likely to be well preserved. The constraint (11) guarantees that any $\lceil M/2\rceil$ datasets have the intersection $\mathcal{W}_{i_1} \cap \mathcal{W}_{i_2} \cap \dots \cap \mathcal{W}_{i_{\lceil M/2 \rceil}} = \emptyset$, that is, they do not share any common direction. Consequently, the ensemble solution should be robust to any attack direction.
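+
+Both Definition 4.2 and the constraint (11) are easy to check programmatically. A small sketch (ours), where each weakness set is a Python set of direction pairs $(y_1, y_2)$ and at least one set is assumed non-empty:
+
+```python
+from itertools import combinations
+from math import ceil
+
+def div(weakness_sets):
+    """Diversity of Definition 4.2; Div = 1 iff the sets share no direction."""
+    common = set.intersection(*weakness_sets)
+    return 1.0 - len(common) / max(len(w) for w in weakness_sets)
+
+def satisfies_constraint_11(weakness_sets):
+    """Check that every ceil(M/2)-subset of the M weakness sets has Div = 1."""
+    m = len(weakness_sets)
+    return all(div([weakness_sets[i] for i in c]) == 1.0
+               for c in combinations(range(m), ceil(m / 2)))
+```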
+To achieve this twofold goal, we design an efficient allocation strategy together with an attack-guided transformation of the training data. Specifically, the procedure consists of the following two stages.
+
+Stage (1): allocating the weakness sets to the sub-models. For each $\overrightarrow{y_1y_2}$-direction, $1 \leq y_1 \neq y_2 \leq k$, there are at most $\lceil \frac{M}{2} \rceil - 1$ sets that contain this direction (due to the constraint (11)), so the sum $\sum_{1 \leq i \leq M} |\mathcal{W}_i|$ is no larger than $k(k - 1) \cdot (\lceil \frac{M}{2} \rceil - 1)$. Therefore, the maximum value of Eq. (10) is no larger than
+
+$$
+k (k - 1) \cdot \left(\left\lceil \frac {M}{2} \right\rceil - 1\right) / M \tag {12}
+$$
+
+based on the pigeonhole principle. We assign the total $k(k - 1)\cdot\left(\lceil \frac{M}{2}\rceil -1\right)$ directions (each direction is duplicated into $\lceil \frac{M}{2}\rceil -1$ copies) to the $M$ sets in a round-robin way, so that the sizes of the sets differ by at most one and the minimum size matches the upper bound (12) up to rounding. Please refer to Figure 2 for an example.
+
+![](images/81da266492f525e16623c8d9c4838cae2ed0700f7989ac6d5225781028064ca7.jpg)
+Figure 2: Assign the attack directions to five sub-models for a three-class classification task.
+
+Stage (2): constructing the new datasets. Following the allocation, we transform the original dataset, denoted by $P_{\mathrm{ori}}$, to align with the assigned weakness sets of the $M$ sub-models. Using the same notations as Definition 4.2, we denote the waiting-to-construct dataset for the $m$-th sub-model as $P_{m}$ (which is initialized to be $\emptyset$), $1 \leq m \leq M$. First, we divide $P_{\mathrm{ori}}$ into $k$ subsets $C_1, C_2, \dots, C_k$, where each $C_j$ corresponds to the label $j$, for $1 \leq j \leq k$; further, each $C_j$ is equally partitioned into $k - 1$ disjoint parts $\{C_{j,1}, C_{j,2}, \dots, C_{j,k - 1}\}$ at random. For each data point $(x,j)$ in $C_{j,i}$, we attack it from $j$ to $h$ (where $h = i + j \mod k$) to obtain the adversarial perturbation; then we only substitute the low-amplitude frequencies of $x$ with the perturbation, while the other frequencies (whose amplitudes are higher than the aforementioned threshold $\tau$) remain unchanged. We denote the new dataset as $C_{j,i}'$. Finally, we add $C_{j,i}'$ to $P_{m}$ if the $\overrightarrow{jh}$-direction is not in the weakness set $\mathcal{W}_m$. From the construction method of the weakness sets, we know that the $\overrightarrow{jh}$-direction can appear in at most $\lceil \frac{M}{2} \rceil - 1$ weakness sets. So, the set $C_{j,i}'$ can be added to at least $M - (\lceil \frac{M}{2} \rceil - 1) = \lfloor \frac{M}{2} \rfloor + 1 \geq \lceil \frac{M}{2} \rceil$ different $P_m$'s. Consequently, the completeness for defending against the $k(k - 1)$ attack directions can be guaranteed, i.e., the constraint (11) is satisfied. Figure 3 shows the schematic diagram of the construction process, and the full details are shown in Appendix D.
+
+![](images/59d02788a1cae134e8012c9728787df7de52ced148d3576dd7f45ebf2927d6cd.jpg)
+Figure 3: A schematic diagram of the construction process. In the allocation stage, each $C_{j,i}^{\prime}$ is added to $P_{m}$ if the $\overrightarrow{jh}$-direction is not in the weakness set $\mathcal{W}_{m}$, where $h = i + j \mod k$.
+
+Remark 4.3 We are aware of some previous robust learning approaches that also depend on data modification (Allen-Zhu & Li, 2022; Tsipras et al., 2018). But their approaches usually tend to completely eliminate non-robust features. Our method is quite different: the goal is to leverage the carefully selected non-robust features to weaken the transferability among sub-models. For each sub-model, we only modify the non-robust features corresponding to certain directions, rather than all non-robust features, and therefore the modification has a relatively lower impact on clean accuracy. Moreover, we partition each class $C_j$ into $k - 1$ subsets $\{C_{j,1}, C_{j,2}, \dots, C_{j,k - 1}\}$, with each subset being attacked towards a specified class. This step eliminates the need to attack each data point across all classes, thereby reducing the computational complexity of constructing the new datasets.
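+
+As a concrete illustration of the Stage (1) allocation, here is a short sketch (ours) that deals the duplicated directions round-robin over the $M$ weakness sets:
+
+```python
+from math import ceil
+
+def allocate_weakness_sets(k, M):
+    """Duplicate every attack direction (y1 -> y2) into ceil(M/2) - 1 copies and
+    deal the copies round-robin over the M weakness sets. Consecutive copies land
+    in distinct sets, so each direction appears in at most ceil(M/2) - 1 sets
+    (constraint (11)), and the set sizes differ by at most one (cf. bound (12))."""
+    directions = [(y1, y2) for y1 in range(k) for y2 in range(k) if y1 != y2]
+    weakness = [set() for _ in range(M)]
+    slot = 0
+    for d in directions:
+        for _ in range(ceil(M / 2) - 1):
+            weakness[slot % M].add(d)
+            slot += 1
+    return weakness
+```
+
+For instance, `allocate_weakness_sets(3, 5)` reproduces the situation of Figure 2: the 6 directions are duplicated into 12 copies and spread over 5 weakness sets of size 2 or 3.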
+
+# 5 EXPERIMENTS
+
+We conduct our experiments on the widely used image datasets CIFAR-10, CIFAR-100 (Krizhevsky & Hinton, 2009), and Tiny-ImageNet-200 (Deng et al., 2009). As for the baselines, we reproduce the existing ensemble models including ADP (Pang et al., 2019), GAL (Kariyappa & Qureshi, 2019), DVERGE (Yang et al., 2020), and TRS (Yang et al., 2021), with their released code and recommended hyperparameter settings. As for our approach, "FDT-random" and "FDT-target" respectively denote the methods utilizing the random noise based transformation and the targeted-attack transformation; "FDT-hybrid" represents the method that combines both, that is, we set two frequency selection thresholds $\tau_{1}$ and $\tau_{2}$ ($\tau_{1} < \tau_{2}$), and perform the random and targeted-attack transformations on the frequencies with amplitudes below $\tau_{1}$ and those with amplitudes between $\tau_{1}$ and $\tau_{2}$, respectively (due to the space limit, more details are shown in Appendix E). Our code will be available at https://github.com/ideven123/FDT.
+
+We train each sub-model based on ResNet-20 (He et al., 2016) and use the Adam optimizer (Kingma & Ba, 2015) with an initial learning rate of 0.001 for 200 epochs. To further test the performance on larger-scale neural networks, we also use WideResNet-28-10 (Zagoruyko & Komodakis, 2016) to train the sub-models; the results are placed in Appendix F.1. All the experiments are implemented with PyTorch (Paszke et al., 2017) on a single NVIDIA GeForce RTX 3090 with 24GB of memory and 1TB of storage. We assess the performance of our models over 5 repeated runs and report error bars: using the numpy library, we calculate the standard deviation and derive the standard error of the mean (SEM).
+
+Varying the number of sub-models. We take the ResNet-20 model trained on CIFAR-10 as an example and test the performance of FDT with different numbers of sub-models in the ensemble. In this experiment, we set the frequency selection threshold $\tau_{1}$ to be 0.2 and $\tau_{2}$ to be 0.8. Then we evaluate the performance of FDT-hybrid under the FGSM (Goodfellow et al., 2015), PGD (Madry et al., 2018), and AutoAttack (AA) (Croce & Hein, 2020) attack methods with $l_{\infty}$ perturbations of size $\epsilon = 0.02$. The results in Table 1 indicate that the clean accuracy changes relatively little as the number of sub-models increases, while the robust accuracy improves substantially as the ensemble grows from 3 to 20 sub-models.
+
+Table 1: Performance of FDT-hybrid with different sub-model numbers on CIFAR-10.
+
+| Sub-model numbers | 3 | 5 | 8 | 12 | 20 |
+| --- | --- | --- | --- | --- | --- |
+| Clean accuracy | 90.20 ± 0.03 | 90.75 ± 0.03 | 91.35 ± 0.05 | 91.51 ± 0.06 | 91.86 ± 0.07 |
+| FGSM (ε=0.02) | 58.04 ± 0.13 | 61.66 ± 0.15 | 62.41 ± 0.11 | 63.96 ± 0.12 | 64.27 ± 0.14 |
+| PGD (ε=0.02) | 20.01 ± 0.04 | 26.10 ± 0.07 | 29.20 ± 0.05 | 29.78 ± 0.08 | 29.71 ± 0.07 |
+| AutoAttack (ε=0.02) | 19.42 ± 0.04 | 25.37 ± 0.05 | 27.33 ± 0.04 | 28.12 ± 0.07 | 28.92 ± 0.07 |
+
+Results for white-box attack. To maintain consistency with the baseline ensemble methods from the literature, we ensemble three ResNet-20 sub-models here and evaluate the robust accuracy using $\epsilon = 0.01$ and $\epsilon = 0.02$. In this experiment, we set the frequency selection threshold $\tau_{1}$ to be 0.2 and $\tau_{2}$ to be 0.8. In the white-box attack setting, the attacker has full knowledge of the models, including model parameters, architecture, and ensemble training strategy. To evaluate the adversarial robustness of the ensemble, we conduct the following white-box attacks: PGD (Madry et al., 2018), FGSM (Goodfellow et al., 2015), BIM, MIM (Dong et al., 2018), C&W (Carlini & Wagner, 2017), and AutoAttack (AA) (Croce & Hein, 2020). The attacks are implemented using AdverTorch (Ding et al., 2019). We take the robust and clean accuracies, as well as the average training time per epoch, as the evaluation metrics.
+
+Table 2 presents the obtained robust accuracies of the baseline ensemble methods on CIFAR-10 and CIFAR-100. In addition, we show the average training time per epoch of the different ensemble methods. The experimental results suggest that our FDT-random method can achieve higher adversarial robustness than the other baselines on both CIFAR-10 and CIFAR-100, with a training time only higher than ADP (and much lower than the other baselines). Furthermore, the FDT-hybrid ensemble method achieves an even better robustness than FDT-random, though its running time is higher since it needs to perform the targeted-attack transformation.
+
+Table 2: Robust and Clean Accuracy (\%) and average training time of different ensemble methods against white-box attacks on CIFAR-10 and CIFAR-100. "$\epsilon$" and "$\lambda$" stand for the $l_{\infty}$ norm of the adversarial perturbation and the coefficient of the C&W attack respectively. The TRS results are taken from the original paper (Yang et al., 2021), with "-" indicating results not provided.
+
+| CIFAR-10 | ADP | GAL | DVERGE | TRS | FDT-random | FDT-target | FDT-hybrid |
+| --- | --- | --- | --- | --- | --- | --- | --- |
+| Clean accuracy | 91.84 | 91.81 | 91.37 | - | 89.88 ± 0.02 | 90.16 ± 0.04 | 90.20 ± 0.03 |
+| FGSM (ε=0.01) | 59.48 | 44.97 | 70.05 | - | 66.96 ± 0.12 | 72.88 ± 0.12 | 72.24 ± 0.12 |
+| FGSM (ε=0.02) | 53.38 | 30.58 | 56.33 | 44.2 | 46.28 ± 0.10 | 55.54 ± 0.09 | 58.04 ± 0.13 |
+| PGD (ε=0.01) | 14.45 | 1.35 | 40.55 | 50.5 | 45.42 ± 0.09 | 46.58 ± 0.07 | 48.48 ± 0.09 |
+| PGD (ε=0.02) | 2.95 | 0.34 | 11.49 | 15.1 | 12.24 ± 0.03 | 15.08 ± 0.05 | 20.01 ± 0.04 |
+| BIM (ε=0.01) | 14.15 | 1.37 | 40.51 | 50.6 | 45.24 ± 0.03 | 46.86 ± 0.04 | 48.57 ± 0.05 |
+| BIM (ε=0.02) | 3.01 | 0.27 | 10.65 | 15.8 | 11.68 ± 0.03 | 14.86 ± 0.03 | 16.63 ± 0.02 |
+| MIM (ε=0.01) | 20.38 | 2.05 | 44.74 | 51.5 | 47.73 ± 0.05 | 49.97 ± 0.06 | 51.50 ± 0.07 |
+| MIM (ε=0.02) | 5.11 | 0.69 | 14.76 | 17.2 | 15.14 ± 0.04 | 18.27 ± 0.02 | 20.09 ± 0.03 |
+| AA (ε=0.01) | 1.80 | 0.00 | 43.34 | - | 46.09 ± 0.09 | 48.83 ± 0.08 | 51.56 ± 0.08 |
+| AA (ε=0.02) | 0.00 | 0.00 | 13.72 | - | 9.38 ± 0.05 | 15.70 ± 0.05 | 19.42 ± 0.04 |
+| C&W (λ=0.1) | 20.96 | 31.57 | 52.35 | 58.1 | 45.01 ± 0.10 | 55.48 ± 0.10 | 56.08 ± 0.11 |
+
+| CIFAR-100 | ADP | GAL | DVERGE | TRS | FDT-random | FDT-target | FDT-hybrid |
+| --- | --- | --- | --- | --- | --- | --- | --- |
+| Clean accuracy | 67.04 | 67.70 | 66.16 | - | 66.29 ± 0.11 | 67.64 ± 0.08 | 66.70 ± 0.09 |
+| FGSM (ε=0.01) | 17.82 | 16.89 | 33.94 | - | 35.42 ± 0.12 | 40.46 ± 0.14 | 39.85 ± 0.14 |
+| FGSM (ε=0.02) | 10.53 | 7.80 | 26.61 | 19.3 | 22.40 ± 0.05 | 32.30 ± 0.06 | 30.27 ± 0.08 |
+| PGD (ε=0.01) | 0.80 | 0.11 | 14.62 | 23.0 | 21.54 ± 0.06 | 22.19 ± 0.05 | 24.93 ± 0.05 |
+| PGD (ε=0.02) | 0.01 | 0.02 | 4.25 | 5.3 | 4.84 ± 0.03 | 7.27 ± 0.02 | 8.63 ± 0.03 |
+| BIM (ε=0.01) | 0.68 | 0.23 | 14.85 | 22.9 | 21.10 ± 0.09 | 22.39 ± 0.07 | 24.35 ± 0.06 |
+| BIM (ε=0.02) | 0.02 | 0.0 | 4.07 | 5.4 | 4.80 ± 0.04 | 6.80 ± 0.05 | 8.40 ± 0.05 |
+| MIM (ε=0.01) | 0.78 | 0.12 | 16.82 | 23.4 | 23.14 ± 0.06 | 24.68 ± 0.10 | 27.09 ± 0.09 |
+| MIM (ε=0.02) | 0.01 | 0.02 | 5.31 | 6.2 | 6.47 ± 0.03 | 8.87 ± 0.04 | 10.19 ± 0.05 |
+| AA (ε=0.01) | 0.01 | 0.00 | 11.23 | - | 16.02 ± 0.09 | 16.03 ± 0.09 | 16.41 ± 0.12 |
+| AA (ε=0.02) | 0.00 | 0.00 | 2.72 | - | 3.12 ± 0.04 | 4.54 ± 0.05 | 5.47 ± 0.07 |
+| C&W (λ=0.1) | 0.74 | 3.70 | 10.68 | 26.9 | 25.07 ± 0.10 | 29.43 ± 0.09 | 30.66 ± 0.13 |
+
+| Time (s) | ADP | GAL | DVERGE | TRS | FDT-random | FDT-target | FDT-hybrid |
+| --- | --- | --- | --- | --- | --- | --- | --- |
+| CIFAR-10 | 30.15 | 69.92 | 134.33 | 350.42 | 37.04 | 108.22 | 114.23 |
+| CIFAR-100 | 30.34 | 69.71 | 129.25 | 344.92 | 37.12 | 108.43 | 113.87 |
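+
+The robust accuracies above are computed by attacking the whole ensemble with AdverTorch. A minimal evaluation sketch (ours; it assumes `ensemble` is a differentiable module returning the ensemble's class scores, and the PGD iteration count and step size below are illustrative assumptions, not the exact settings):
+
+```python
+import torch
+from advertorch.attacks import LinfPGDAttack
+
+def robust_accuracy(ensemble, loader, eps=0.02, device="cuda"):
+    """Accuracy (%) of the ensemble on l_inf PGD adversarial examples A(x)."""
+    adversary = LinfPGDAttack(
+        ensemble, loss_fn=torch.nn.CrossEntropyLoss(reduction="sum"),
+        eps=eps, nb_iter=10, eps_iter=eps / 4, rand_init=True,
+        clip_min=0.0, clip_max=1.0, targeted=False)
+    correct = total = 0
+    for x, y in loader:
+        x, y = x.to(device), y.to(device)
+        x_adv = adversary.perturb(x, y)
+        correct += (ensemble(x_adv).argmax(dim=1) == y).sum().item()
+        total += y.numel()
+    return 100.0 * correct / total
+```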
+
+Summary of other experimental results (placed in Appendix F). We also conduct experiments to examine the performance of FDT under black-box attacks, and assess the transferability of adversarial examples across the sub-models. The results indicate the competitive robustness of our method in defending against black-box attacks. Then, we evaluate the trade-off between clean accuracy and robust accuracy by varying the frequency selection threshold $\tau$. The result shows that the ensemble model has lower clean accuracy and higher robust accuracy as $\tau$ increases. Moreover, we include some ablation studies on datasets and model architectures. These experiments demonstrate that our method performs the best among the ensemble-based baseline methods.
+
+# 6 CONCLUSION AND FUTURE WORK
+
+In this paper, we present a novel data transformation approach to improve the robustness of ensemble models against adversarial attacks. By leveraging frequency-based features and strategically allocating adversarial examples, we demonstrate the effectiveness of our method in enhancing adversarial robustness while maintaining high accuracy on clean data. As for future work, we can consider other types of transformation methods (e.g., beyond using frequency) to improve the ensemble robustness. Also, it is interesting to consider more complicated scenarios for ensemble training, such as federated learning with privacy concerns.
+
+# ACKNOWLEDGMENTS
+
+The authors would like to thank the reviewers for their constructive comments and suggestions. This work was partially supported by the National Natural Science Foundation of China (No. 62272432 and No. 62432016), the National Key Research and Development Program of China (No. 2021YFA1000900), and the Natural Science Foundation of Anhui Province (No. 2208085MF163).
+
+# REFERENCES
+
+Zeyuan Allen-Zhu and Yuanzhi Li. Feature purification: How adversarial training performs robust deep learning. In 2021 IEEE 62nd Annual Symposium on Foundations of Computer Science (FOCS), pp. 977-988. IEEE, 2022.
+MaungMaung AprilPyone and Hitoshi Kiya. Block-wise image transformation with secret key for adversarially robust defense. IEEE Transactions on Information Forensics and Security, 16: 2709-2723, 2021.
+Anish Athalye, Nicholas Carlini, and David Wagner. Obfuscated gradients give a false sense of security: Circumventing defenses to adversarial examples. In International conference on machine learning, pp. 274-283. PMLR, 2018.
+Philipp Benz, Chaoning Zhang, and In So Kweon. Batch normalization increases adversarial vulnerability and decreases adversarial transferability: A non-robust feature perspective. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 7818-7827, 2021.
+Rémi Bernhard, Pierre-Alain Moëllic, Martial Mermillod, Yannick Bourrier, Romain Cohendet, Miguel Solinas, and Marina Reyboz. Impact of spatial frequency based constraints on adversarial robustness. In 2021 International Joint Conference on Neural Networks (IJCNN), pp. 1-8. IEEE, 2021.
+Nicholas Carlini and David Wagner. Towards evaluating the robustness of neural networks. In 2017 IEEE Symposium on Security and Privacy (SP), pp. 39-57. IEEE, 2017.
+Nicholas Carlini, Anish Athalye, Nicolas Papernot, Wieland Brendel, Jonas Rauber, Dimitris Tsipras, Ian Goodfellow, Aleksander Madry, and Alexey Kurakin. On evaluating adversarial robustness. arXiv preprint arXiv:1902.06705, 2019.
+Yair Carmon, Aditi Raghunathan, Ludwig Schmidt, John C Duchi, and Percy S Liang. Unlabeled data improves adversarial robustness. Advances in Neural Information Processing Systems, 32, 2019.
+Francesco Croce and Matthias Hein. Reliable evaluation of adversarial robustness with an ensemble of diverse parameter-free attacks. In International conference on machine learning, pp. 2206-2216. PMLR, 2020.
+Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pp. 248-255. IEEE, 2009.
+Gavin Weiguang Ding, Luyu Wang, and Xiaomeng Jin. AdverTorch v0.1: An adversarial robustness toolbox based on PyTorch. arXiv preprint arXiv:1902.07623, 2019.
+Yinpeng Dong, Fangzhou Liao, Tianyu Pang, Hang Su, Jun Zhu, Xiaolin Hu, and Jianguo Li. Boosting adversarial attacks with momentum. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 9185-9193, 2018.
+Xitong Gao, Cheng-Zhong Xu, et al. MORA: Improving ensemble robustness evaluation with model reweighing attack. Advances in Neural Information Processing Systems, 35:26955-26965, 2022.
+Ian J Goodfellow, Jonathon Shlens, and Christian Szegedy. Explaining and harnessing adversarial examples. In 3rd International Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings, 2015.
+Ian J. Goodfellow, Yoshua Bengio, and Aaron C. Courville. Deep Learning. Adaptive computation and machine learning. MIT Press, 2016. ISBN 978-0-262-03561-3.
+Sven Gowal, Sylvestre-Alvise Rebuffi, Olivia Wiles, Florian Stimberg, Dan Andrei Calian, and Timothy A Mann. Improving robustness using generated data. Advances in Neural Information Processing Systems, 34:4218-4233, 2021.
+Chuan Guo, Mayank Rana, Moustapha Cissé, and Laurens van der Maaten. Countering adversarial images using input transformations. In 6th International Conference on Learning Representations, ICLR 2018, Vancouver, BC, Canada, April 30 - May 3, 2018, Conference Track Proceedings. OpenReview.net, 2018.
+Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 770-778, 2016.
+Douglas Heaven. Why deep-learning AIs are so easy to fool. Nature, 574(7777):163-166, 2019.
+Andrew Ilyas, Shibani Santurkar, Dimitris Tsipras, Logan Engstrom, Brandon Tran, and Aleksander Madry. Adversarial examples are not bugs, they are features. Advances in neural information processing systems, 32, 2019.
+Sanjay Kariyappa and Moinuddin K Qureshi. Improving adversarial robustness of ensembles with diversity training. arXiv preprint arXiv:1901.09981, 2019.
+Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In Yoshua Bengio and Yann LeCun (eds.), 3rd International Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings, 2015.
+Alex Krizhevsky and Geoffrey Hinton. Learning multiple layers of features from tiny images. 2009.
+Aleksander Madry, Aleksandar Makelov, Ludwig Schmidt, Dimitris Tsipras, and Adrian Vladu. Towards deep learning models resistant to adversarial attacks. In 6th International Conference on Learning Representations, ICLR 2018, Vancouver, BC, Canada, April 30 - May 3, 2018, Conference Track Proceedings. OpenReview.net, 2018.
+Shishira R. Maiya, Max Ehrlich, Vatsal Agarwal, Ser-Nam Lim, Tom Goldstein, and Abhinav Shrivastava. A frequency perspective of adversarial robustness. CoRR, abs/2111.00861, 2021.
+AKM Iqtidar Newaz, Nur Imtiazul Haque, Amit Kumar Sikder, Mohammad Ashiqur Rahman, and A Selcuk Uluagac. Adversarial attacks to machine learning-based smart healthcare systems. In GLOBECOM 2020-2020 IEEE Global Communications Conference, pp. 1-6. IEEE, 2020.
+Tianyu Pang, Kun Xu, Chao Du, Ning Chen, and Jun Zhu. Improving adversarial robustness via promoting ensemble diversity. In International Conference on Machine Learning, pp. 4970-4979. PMLR, 2019.
+Adam Paszke, Sam Gross, Soumith Chintala, Gregory Chanan, Edward Yang, Zachary DeVito, Zeming Lin, Alban Desmaison, Luca Antiga, and Adam Lerer. Automatic differentiation in pytorch. 2017.
+Rahul Rade and Seyed-Mohsen Moosavi-Dezfooli. Helper-based adversarial training: Reducing excessive margin to achieve a better accuracy vs. robustness trade-off. In ICML 2021 Workshop on Adversarial Machine Learning, 2021.
+Edward Raff, Jared Sylvester, Steven Forsyth, and Mark McLean. Barrage of random transforms for adversarially robust defense. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 6528-6537, 2019.
+Giulio Rossolini, Federico Nesti, Gianluca D'Amico, Saasha Nair, Alessandro Biondi, and Giorgio Buttazzo. On the real-world adversarial robustness of real-time semantic segmentation models for autonomous driving. IEEE Transactions on Neural Networks and Learning Systems, pp. 1-15, 2023. doi: 10.1109/TNNLS.2023.3314512.
+Andrei A. Rusu, Dan Andrei Calian, Sven Gowal, and Raia Hadsell. Hindering adversarial attacks with implicit neural representations. In Kamalika Chaudhuri, Stefanie Jegelka, Le Song, Csaba Szepesvári, Gang Niu, and Sivan Sabato (eds.), International Conference on Machine Learning, ICML 2022, 17-23 July 2022, Baltimore, Maryland, USA, volume 162 of Proceedings of Machine Learning Research, pp. 18910-18934. PMLR, 2022.
+Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. In Yoshua Bengio and Yann LeCun (eds.), 3rd International Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings, 2015.
+James C Spall. Multivariate stochastic approximation using a simultaneous perturbation gradient approximation. IEEE transactions on automatic control, 37(3):332-341, 1992.
+Jacob Springer, Melanie Mitchell, and Garrett Kenyon. A little robustness goes a long way: Leveraging robust features for targeted transfer attacks. Advances in Neural Information Processing Systems, 34:9759-9773, 2021.
+Christian Szegedy, Wojciech Zaremba, Ilya Sutskever, Joan Bruna, Dumitru Erhan, Ian Goodfellow, and Rob Fergus. Intriguing properties of neural networks. In 2nd International Conference on Learning Representations, ICLR 2014, 2014.
+Florian Tramèr, Alexey Kurakin, Nicolas Papernot, Ian Goodfellow, Dan Boneh, and Patrick McDaniel. Ensemble adversarial training: Attacks and defenses. In International Conference on Learning Representations, 2018.
+Dimitris Tsipras, Shibani Santurkar, Logan Engstrom, Alexander Turner, and Aleksander Madry. Robustness may be at odds with accuracy. In International Conference on Learning Representations, 2018.
+Zekai Wang, Tianyu Pang, Chao Du, Min Lin, Weiwei Liu, and Shuicheng Yan. Better diffusion models further improve adversarial training. In Andreas Krause, Emma Brunskill, Kyunghyun Cho, Barbara Engelhardt, Sivan Sabato, and Jonathan Scarlett (eds.), International Conference on Machine Learning, ICML 2023, 23-29 July 2023, Honolulu, Hawaii, USA, volume 202 of Proceedings of Machine Learning Research, pp. 36246-36263. PMLR, 2023.
+Zifan Wang, Yilin Yang, Ankit Shrivastava, Varun Rawal, and Zihao Ding. Towards frequency-based explanation for robust cnn. arXiv preprint arXiv:2005.03141, 2020.
+Futa Waseda, Sosuke Nishikawa, Trung-Nghia Le, Huy H Nguyen, and Isao Echizen. Closer look at the transferability of adversarial examples: How they fool different models differently. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pp. 1360-1368, 2023.
+Sven-Ake Wegner. Lecture notes on high-dimensional data. arXiv preprint arXiv:2101.05841, 2021.
+Yuancheng Xu, Yanchao Sun, Micah Goldblum, Tom Goldstein, and Furong Huang. Exploring and exploiting decision boundary dynamics for adversarial robustness. In The Eleventh International Conference on Learning Representations, ICLR 2023, Kigali, Rwanda, May 1-5, 2023, 2023.
+Huanrui Yang, Jingyang Zhang, Hongliang Dong, Nathan Inkawhich, Andrew Gardner, Andrew Touchet, Wesley Wilkes, Heath Berry, and Hai Li. DVERGE: Diversifying vulnerabilities for enhanced robust generation of ensembles. Advances in Neural Information Processing Systems, 33:5505-5515, 2020.
+Ruijie Yang, Yuanfang Guo, Junfu Wang, Jiantao Zhou, and Yunhong Wang. Common knowledge learning for generating transferable adversarial examples. Frontiers Comput. Sci., 19(10):1910359, 2025. doi: 10.1007/S11704-024-40533-4.
+Zhuolin Yang, Linyi Li, Xiaojun Xu, Shiliang Zuo, Qian Chen, Pan Zhou, Benjamin Rubinstein, Ce Zhang, and Bo Li. TRS: Transferability reduced ensemble via promoting gradient diversity and model smoothness. Advances in Neural Information Processing Systems, 34:17642-17655, 2021.
+Mehmet Kerim Yucel, Ramazan Gokberk Cinbis, and Pinar Duygulu. HybridAugment++: Unified frequency spectra perturbations for model robustness. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 5718-5728, 2023.
+Sergey Zagoruyko and Nikos Komodakis. Wide residual networks. CoRR, abs/1605.07146, 2016.
+Hongyang Zhang, Yaodong Yu, Jiantao Jiao, Eric P. Xing, Laurent El Ghaoui, and Michael I. Jordan. Theoretically principled trade-off between robustness and accuracy. In Kamalika Chaudhuri and Ruslan Salakhutdinov (eds.), Proceedings of the 36th International Conference on Machine Learning, ICML 2019, 9-15 June 2019, Long Beach, California, USA, volume 97 of Proceedings of Machine Learning Research, pp. 7472-7482. PMLR, 2019.
+Wen Zhou, Xin Hou, Yongjun Chen, Mengyun Tang, Xiangqi Huang, Xiang Gan, and Yong Yang. Transferable adversarial perturbations. In Proceedings of the European Conference on Computer Vision (ECCV), pp. 452-467, 2018.
+Yi Zhu, Chenglin Miao, Tianhang Zheng, Foad Hajiaghajani, Lu Su, and Chunming Qiao. Can we use arbitrary objects to attack lidar perception in autonomous driving? In Proceedings of the 2021 ACM SIGSAC Conference on Computer and Communications Security, pp. 1945-1960, 2021.
+
+# A OMITTED PROOFS
+
+A.1 PROOF OF INEQUALITY (6):
+
+$$
+\begin{array}{l} \sum_ {y _ {t} \in \mathcal {Y}} \operatorname {V r} \left(F _ {\mathrm {E}}, y _ {t}\right) \\ = \sum_ {y _ {t} \in \mathcal {Y}} \mathbb {E} _ {(x, y) \sim \mathcal {D}} \left[ \mathbb {I} \left\{F _ {\mathrm {E}} (x) = y \wedge F _ {\mathrm {E}} (\mathcal {A} (x)) = y _ {t} \right\} \right] \\ = \sum_ {y _ {t} \in \mathcal {Y}} \sum_ {(x, y) \in \mathcal {D}} p _ {(x, y)} \left[ \mathbb {I} \left\{F _ {\mathrm {E}} (x) = y \wedge F _ {\mathrm {E}} (\mathcal {A} (x)) = y _ {t} \right\} \right]. \\ \end{array}
+$$
+
+Then, we interchange the order of summation, so the above quantity is equal to
+
+$$
+\begin{array}{l} \sum_ {(x, y) \in \mathcal {D}} p _ {(x, y)} \sum_ {y _ {t} \in \mathcal {Y}} \left[ \mathbb {I} \left\{F _ {\mathrm {E}} (x) = y \wedge F _ {\mathrm {E}} (\mathcal {A} (x)) = y _ {t} \right\} \right] \\ = \mathbb {E} _ {(x, y) \sim \mathcal {D}} \Big [ \sum_ {y _ {t} \in \mathcal {Y}} \mathbb {I} \big \{F _ {\mathrm {E}} (x) = y \wedge F _ {\mathrm {E}} (\mathcal {A} (x)) = y _ {t} \big \} \Big ]. \\ \end{array}
+$$
+
+For each $(x,y)$ , without loss of generality, let $F_{\mathrm{E}}(\mathcal{A}(x)) = y_0$ . For $y_t \neq y_0$ , $\mathbb{I}\big\{F_{\mathrm{E}}(x) = y \wedge F_{\mathrm{E}}(\mathcal{A}(x)) = y_t\big\} = 0$ . For $y_t = y_0$ , $\mathbb{I}\big\{F_{\mathrm{E}}(x) = y \wedge F_{\mathrm{E}}(\mathcal{A}(x)) = y_t\big\} = \mathbb{I}\big\{F_{\mathrm{E}}(x) = y\big\}$ . So the above quantity is equal to
+
+$$
+\begin{array}{l} \mathbb {E} _ {(x, y) \sim \mathcal {D}} \left[ \mathbb {I} \left\{F _ {\mathrm {E}} (x) = y \right\} \right] \\ = \mathbb {E} _ {(x, y) \sim \mathcal {D}} \left[ \mathbb {I} \left\{F _ {\mathrm {E}} (x) = y \wedge \left(F _ {\mathrm {E}} (\mathcal {A} (x)) \neq y \vee F _ {\mathrm {E}} (\mathcal {A} (x)) = y\right) \right\} \right] \\ = \mathbb {E} _ {(x, y) \sim \mathcal {D}} \left[ \mathbb {I} \left\{F _ {\mathrm {E}} (x) = y \wedge F _ {\mathrm {E}} (\mathcal {A} (x)) \neq y \right\} + \mathbb {I} \left\{F _ {\mathrm {E}} (x) = y \wedge F _ {\mathrm {E}} (\mathcal {A} (x)) = y \right\} \right]. \\ \end{array}
+$$
+
+We split $\mathbb{I}(\cdot)$ because $F_{\mathrm{E}}(\mathcal{A}(x)) \neq y$ and $F_{\mathrm{E}}(\mathcal{A}(x)) = y$ are mutually exclusive. Then, the above quantity is equal to
+
+$$
+\begin{array}{l} \operatorname {V r} \left(F _ {\mathrm {E}}\right) + \mathbb {E} _ {(x, y) \sim \mathcal {D}} \left[ \mathbb {I} \left\{F _ {\mathrm {E}} (x) = y \wedge F _ {\mathrm {E}} (\mathcal {A} (x)) = y \right\} \right] \\ \geq \operatorname {V r} \left(F _ {\mathrm {E}}\right). \\ \end{array}
+$$
+
+Overall, we obtain the inequality (6): $\sum_{y_t\in \mathcal{Y}}\mathrm{Vr}(F_{\mathrm{E}},y_t)\geq \mathrm{Vr}(F_{\mathrm{E}})$ .
+
+# B FREQUENCY SELECTION
+
+Figure 4 illustrates an example showing that, if we keep high-amplitude frequencies and remove low-amplitude ones, the image changes only slightly even after adding a certain amount of noise (i.e., we can still recognize the ground truth from the modified image). On the other hand, if we keep the low-amplitude frequencies only, the semantic information is almost entirely lost. This observation suggests that high-amplitude frequency features are more strongly related to the semantic information of an image.
+
+# C RANDOM NOISE BASED TRANSFORMATION
+
+Random noise based transformation: This approach substitutes the identified non-robust frequencies with Gaussian noise.
+For an $N \times N$ image, we take the non-robust frequencies based on the pre-specified threshold $\tau$ , and replace them with a random vector for each sub-model in our experiments. In particular, to further increase the randomness, we perform this transformation at each epoch of the training stage. If we select the top $s$ non-robust frequencies, the overall dimensionality of the edited random feature is $s \times E$ (we concatenate those $s$ -dimensional features together), where $E$ is the number of epochs. For example, if $N = 32$ , $s = N^2 / 2$ , and $E = 200$ , the overall dimensionality can be as large as $10^5$ . Because these $M$ features are random and have high dimensions, they are very likely to be nearly orthogonal to each other (this phenomenon in high-dimensional geometry can be proved by the central limit theorem (Wegner, 2021)). As a consequence, they tend to yield diverse training results for the sub-models.
+
+![](images/4f4ad8a9ae15fd8ff6ef622e7b947bd3b49e9657db4fb8f2ef987138577adde8.jpg)
+Figure 4: The first and second rows are the figures obtained by adding random noise to the high-amplitude and low-amplitude frequencies, respectively. "20% changed" for the first row means we remove the 20% lowest-amplitude frequencies and add small noise to the remaining high-amplitude frequencies. "20% changed" for the second row means we remove the 20% highest-amplitude frequencies and add small noise to the remaining low-amplitude frequencies. "50% changed" and "80% changed" follow the same procedure as "20% changed".
+
+The implementation details are as follows. Given an image $x$ , we perform the Fourier transform on $x$ and also on a generated Gaussian noise $n_0$ . Then, we can obtain the low-amplitude frequencies and high-amplitude frequencies of $x$ by setting an amplitude threshold. Next, we generate two masks ( $M_1$ and $M_2$ ) to select the high-amplitude frequencies and low-amplitude frequencies. We add the low-amplitude frequencies of $n_0$ (i.e., $M_2(n_0)$ ) to the high-amplitude frequencies of $x$ (i.e., $M_1(x)$ ), and obtain the transformation of $x$ (denoted as $\pi(x)$ ). Finally, we transform $\pi(x)$ back to the time domain by the inverse Fourier transform and train the model with $\pi(x)$ .
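+
+Put together, one application of this transformation looks as follows (a sketch of ours, under the same quantile interpretation of the threshold $\tau$ used earlier; the paper's thresholds are set on the amplitude scale directly):
+
+```python
+import torch
+
+def fdt_random(x, tau):
+    """pi(x) = IFFT(M1(x) + M2(n0)): keep the high-amplitude frequencies of x
+    and replace the low-amplitude ones with those of fresh Gaussian noise n0."""
+    spec_x = torch.fft.fft2(x)
+    spec_n = torch.fft.fft2(torch.randn_like(x))    # a new n0 at every epoch
+    amp = spec_x.abs().mean(dim=0)
+    low = amp < torch.quantile(amp.flatten(), tau)  # mask M2
+    return torch.fft.ifft2(spec_x * ~low + spec_n * low).real
+```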
+
+# D ALGORITHM OF FDT
+
+Algorithm 1 shows the overall framework of training an ensemble model with FDT. It illustrates that our data transformation is performed at each epoch.
+
+Algorithm 1 Training ensemble model with FDT
+```txt
+Input: dataset $\mathcal{X}\times \mathcal{Y}$, the number of sub-models $M$, and the epoch number $E$
+Output: sub-models $\beta_{1},\beta_{2},\dots ,\beta_{M}$
+for $i = 1$ to $E$ do
+    Run Targeted-attack Transformation and obtain $P_{1},P_{2},\dots ,P_{M}$
+    for $j = 1$ to $M$ do
+        train $\beta_{j}$ on $P_{j}$
+    end for
+end for
+```
+
+Algorithm 2 shows the details of the targeted-attack transformation method on the whole dataset. For each specific image $x$ , we obtain the target class according to the allocation scheme mentioned in "Stage (1)". Then, we use the targeted PGD attack to obtain the adversarial sample $x'$ . After that, we perform the Fourier transform on $x$ and $x'$ , and we can obtain the low-amplitude frequencies and high-amplitude frequencies of $x$ by setting an amplitude threshold. Next, we generate two masks ( $M_1$ and $M_2$ ) to select the high-amplitude frequencies and low-amplitude frequencies. We add the low-amplitude frequencies of $x'$ (i.e., $M_2(x')$ ) to the high-amplitude frequencies of $x$ (i.e., $M_1(x)$ ), and obtain the transformation of $x$ (denoted as $\pi(x)$ ). Finally, we transform $\pi(x)$ back to the time domain by the inverse Fourier transform.
+
+Algorithm 2 Targeted-attack Transformation
+```txt
+Input: dataset $P_{ori}$, number of sub-models $M$, steps $s$, class number $k$
+Output: transformed datasets $P_{1},P_{2},\dots ,P_{M}$
+Divide the dataset $P_{ori}$ into $k$ parts $\{C_1,C_2,\dots ,C_k\}$ according to labels
+Randomly partition each $C_j$ equally into $k - 1$ disjoint parts $\{C_{j,1},C_{j,2},\dots ,C_{j,k - 1}\}$
+Initialize $P_{1},P_{2},\dots ,P_{M}$ with empty sets; $m\gets 0$
+for $j = 1$ to $k$ do
+    for $i = 1$ to $k - 1$ do
+        $C_{j,i}^{\prime}\gets$ compute targeted attack examples in $C_{j,i}$ with target label $i + j \bmod k$,
+            and perform the data transformation on each image
+        for $s = 1$ to $\lceil \frac{M}{2}\rceil + 1$ do
+            $m\gets m + 1 \bmod M$
+            Append $C_{j,i}^{\prime}$ to $P_{m}$
+        end for
+    end for
+end for
+```
+
+# E IMPLEMENTATION DETAILS
+
+In this section, we provide more experimental details. In our work, we utilize the CIFAR-10, CIFAR-100 (Krizhevsky & Hinton, 2009), and Tiny-ImageNet-200 (Deng et al., 2009) datasets. In the testing process, the primary reason for selecting FGSM (Goodfellow et al., 2015), PGD (Madry et al., 2018), BIM, MIM (Dong et al., 2018), and C&W (Carlini & Wagner, 2017) as attack methods is to keep consistency with the baseline methods from the literature. Further, we select AA (Croce & Hein, 2020) because it is also a popular attack method and is more powerful than those base methods. To reduce the computational complexity of the targeted attacks, we leverage the transferability of adversarial examples and utilize a simple pre-trained network (VGG11 (Simonyan & Zisserman, 2015)) for the targeted attacks.
+
+Further, we introduce the implementations of "FDT-random", "FDT-target" and "FDT-hybrid" here. For "FDT-random", we perform the Fourier transform on $x$ and also on a randomly sampled standard Gaussian noise $n_0$ . Then, we can obtain the low-amplitude frequencies and high-amplitude frequencies of $x$ by setting an amplitude threshold. Next, we generate two masks ( $M_1$ and $M_2$ ) to select the high-amplitude frequencies and low-amplitude frequencies. We add the low-amplitude frequencies of $n_0$ (i.e., $M_2(n_0)$ ) to the high-amplitude frequencies of $x$ (i.e., $M_1(x)$ ), and obtain the transformation of $x$ (denoted as $\pi(x)$ ). Finally, we transform $\pi(x)$ back to the time domain by the inverse Fourier transform and train the model with $\pi(x)$ . For "FDT-target", we obtain the target class according to the allocation scheme mentioned in "Stage (1)". Then, we use the targeted PGD attack to obtain the adversarial sample $x'$ . After that, we perform the same steps as in FDT-random (substituting $x'$ for $n_0$ ). For FDT-hybrid, we set two frequency selection thresholds $\tau_1$ and $\tau_2$ ( $\tau_1 < \tau_2$ ), and generate three masks to select the frequencies: $M_1$ for the high-amplitude frequencies (amplitude $> \tau_2$ ), $M_2$ for the middle part ( $\tau_1 <$ amplitude $< \tau_2$ ), and $M_3$ for the low part (amplitude $< \tau_1$ ). Next, we combine $M_1(x), M_2(x')$ and $M_3(n_0)$ to obtain the transformation $\pi(x)$ .
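+
+A sketch of the FDT-hybrid combination (ours; `x_adv` is the targeted adversarial example $x'$ , and the two thresholds are interpreted as amplitude quantiles for concreteness, whereas the paper applies them on the amplitude scale directly):
+
+```python
+import torch
+
+def fdt_hybrid(x, x_adv, tau1, tau2):
+    """pi(x) = IFFT(M1(x) + M2(x') + M3(n0)) with tau1 < tau2: high amplitudes
+    from x, the middle band from x', and the lowest band from Gaussian noise."""
+    spec_x, spec_adv = torch.fft.fft2(x), torch.fft.fft2(x_adv)
+    spec_n = torch.fft.fft2(torch.randn_like(x))
+    amp = spec_x.abs().mean(dim=0)
+    q1 = torch.quantile(amp.flatten(), tau1)
+    q2 = torch.quantile(amp.flatten(), tau2)
+    m3, m2 = amp < q1, (amp >= q1) & (amp < q2)  # noise band, attack band
+    m1 = amp >= q2                               # robust band, kept from x
+    return torch.fft.ifft2(spec_x * m1 + spec_adv * m2 + spec_n * m3).real
+```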
+
+# F ADDITIONAL EXPERIMENTAL RESULTS
+
+In this section, we provide more experimental results. Firstly, we extend our experiments to SVHN, Tiny-ImageNet-200, and WideResNet-28-10 in Appendix F.1, where we also conduct ablation studies on the weakness set allocation method, the amplitude-based selection threshold, and the model architecture. Then, we evaluate the performance of FDT under black-box attacks on CIFAR-10 and CIFAR-100 in Appendix F.2, and we present the trade-off between clean accuracy and robust accuracy of the FDT method in Appendix F.3. This trade-off sheds light on the effectiveness of FDT as the trade-off parameter changes. Additionally, in Appendix F.4, we compare the transferability across various sub-models with the baseline methods. Furthermore, we compare our method with more related methods in Appendix F.5.
+
+# F.1 ABLATION STUDIES
+
+In this section, we extend our experiments to additional datasets (SVHN, Tiny-ImageNet-200) and an additional architecture (WideResNet-28-10). We also explore ablation studies on the weakness set allocation method, the amplitude-based selection threshold, and the model architecture.
+
+Table 3 presents the performance of ensemble methods trained with ResNet-20 on SVHN against several widely used white-box attacks. The experimental results demonstrate that all ensemble models achieve comparable levels of clean accuracy. Specifically, the FDT approach exhibits better robust accuracy than the other methods. These observations highlight the effectiveness of FDT in achieving favorable clean accuracy and robustness of ensemble models.
+
+Table 3: Robust Accuracy (%) of different ensemble methods against white-box attacks on SVHN. The $\epsilon$ and $\lambda$ stand for the $l_{\infty}$ norm of the adversarial perturbation and the coefficient of the C&W attack respectively. The last column is the ensemble model trained with FDT-hybrid.
+
+| SVHN | ADP | GAL | DVERGE | TRS | FDT-hybrid |
+| --- | --- | --- | --- | --- | --- |
+| Clean accuracy | 96.83 | 94.66 | 96.28 | 94.52 | 96.73 ± 0.12 |
+| FGSM (ε=0.01) | 84.38 | 80.2 | 85.6 | 72.87 | 90.13 ± 0.09 |
+| FGSM (ε=0.02) | 78.08 | 41.5 | 81.4 | 53.9 | 86.78 ± 0.07 |
+| PGD (ε=0.01) | 51.01 | 50.1 | 53.31 | 54.43 | 59.42 ± 0.07 |
+| PGD (ε=0.02) | 17.74 | 8.24 | 17.42 | 18.86 | 22.74 ± 0.04 |
+| BIM (ε=0.01) | 54.38 | 47.73 | 52.08 | 53.71 | 57.91 ± 0.08 |
+| BIM (ε=0.02) | 21.26 | 8.1 | 14.58 | 18.05 | 20.23 ± 0.05 |
+| MIM (ε=0.01) | 61.24 | 51.96 | 58.51 | 56.32 | 62.14 ± 0.08 |
+| MIM (ε=0.02) | 24.84 | 5.14 | 23.22 | 21.95 | 25.37 ± 0.04 |
+| AA (ε=0.01) | 49.92 | 48.39 | 52.02 | 52.83 | 57.54 ± 0.09 |
+| AA (ε=0.02) | 16.13 | 6.90 | 16.95 | 17.48 | 20.12 ± 0.05 |
+| C&W (λ=0.1) | 55.81 | 49.94 | 66.82 | 52.74 | 72.14 ± 0.11 |
+
+We also extend our experiments to sub-models trained with WideResNet-28-10 on CIFAR-10. Table 4 shows the performance of the models under various white-box attacks. The results indicate that FDT maintains good performance even on more complex network structures. We also evaluate the robustness of an ensemble of eight sub-models, with the results presented in Table 5.
+
+Table 4: Robust Accuracy (%) of different ensemble methods against white-box attacks on CIFAR-10. The $\epsilon$ and $\lambda$ stand for the $l_{\infty}$ norm of the adversarial perturbation and the coefficient of the C&W attack respectively. The architecture of each sub-model is WRN-28-10.
+
+| CIFAR-10 | ADP | GAL | DVERGE | FDT-hybrid |
+| --- | --- | --- | --- | --- |
+| Clean accuracy | 92.99 | 82.14 | 94.32 | 94.18 ± 0.06 |
+| FGSM (ε=0.01) | 60.04 | 44.94 | 71.01 | 80.64 ± 0.05 |
+| FGSM (ε=0.02) | 51.69 | 36.83 | 50.43 | 60.09 ± 0.05 |
+| PGD (ε=0.01) | 11.09 | 22.10 | 44.25 | 64.64 ± 0.07 |
+| PGD (ε=0.02) | 2.54 | 5.06 | 13.27 | 26.0 ± 0.03 |
+| BIM (ε=0.01) | 15.81 | 22.62 | 46.53 | 67.36 ± 0.10 |
+| BIM (ε=0.02) | 4.50 | 5.43 | 17.38 | 32.36 ± 0.06 |
+| MIM (ε=0.01) | 18.18 | 25.97 | 44.21 | 64.36 ± 0.08 |
+| MIM (ε=0.02) | 4.72 | 7.81 | 12.83 | 25.64 ± 0.05 |
+| AA (ε=0.01) | 9.38 | 19.34 | 43.23 | 63.45 ± 0.08 |
+| AA (ε=0.02) | 1.17 | 3.93 | 12.49 | 25.23 ± 0.04 |
+| C&W (λ=0.1) | 37.81 | 19.05 | 46.32 | 47.23 ± 0.10 |
+
+Table 6 presents the results of ensemble methods trained with WideResNet-28-10 on Tiny-ImageNet-200, where we test the robustness of the different methods under widely used white-box attacks. Due to the high time complexity of TRS, we do not compare with it here. The experimental results show that all ensemble models achieve comparable levels of clean accuracy, while FDT-hybrid achieves better robust accuracy than the other methods.
+
+Table 5: Robust Accuracy (%) of an ensemble of eight sub-models against white-box attacks on CIFAR-10. The $\epsilon$ and $\lambda$ stand for the $l_{\infty}$ norm of the adversarial perturbation and the coefficient of the C&W attack respectively. The architecture of each sub-model is WRN-28-10.
+
+| CIFAR-10 | FDT-hybrid |
+| --- | --- |
+| Clean accuracy | 93.72 ± 0.11 |
+| FGSM (ε=0.01) | 86.31 ± 0.07 |
+| FGSM (ε=0.02) | 67.29 ± 0.06 |
+| PGD (ε=0.01) | 72.02 ± 0.07 |
+| PGD (ε=0.02) | 45.42 ± 0.05 |
+| BIM (ε=0.01) | 73.68 ± 0.10 |
+| BIM (ε=0.02) | 44.53 ± 0.06 |
+| MIM (ε=0.01) | 71.36 ± 0.06 |
+| MIM (ε=0.02) | 45.24 ± 0.06 |
+| AA (ε=0.01) | 70.45 ± 0.08 |
+| AA (ε=0.02) | 44.23 ± 0.07 |
+| C&W (λ=0.1) | 72.37 ± 0.11 |
+
+Table 6: Robust Accuracy (%) of different ensemble methods against white-box attacks on Tiny-ImageNet-200. The $\epsilon$ and $\lambda$ stand for the $l_{\infty}$ norm of the adversarial perturbation and the coefficient of the C&W attack respectively. The last column is the ensemble model trained with FDT-hybrid.
+
+| Tiny-ImageNet-200 | ADP | GAL | DVERGE | FDT-hybrid |
+| --- | --- | --- | --- | --- |
+| Clean accuracy | 49.88 | 45.7 | 51.46 | 64.21 ± 0.06 |
+| FGSM (ε=0.01) | 10.46 | 1.24 | 22.82 | 21.73 ± 0.04 |
+| FGSM (ε=0.02) | 4.38 | 0.59 | 18.42 | 19.28 ± 0.04 |
+| PGD (ε=0.01) | 0.02 | 0.02 | 3.6 | 4.76 ± 0.02 |
+| PGD (ε=0.02) | 0.02 | 0.01 | 0.34 | 0.45 ± 0.01 |
+| BIM (ε=0.01) | 0.07 | 0.02 | 3.35 | 4.81 ± 0.03 |
+| BIM (ε=0.02) | 0.03 | 0.01 | 0.28 | 0.32 ± 0.00 |
+| MIM (ε=0.01) | 0.11 | 0.02 | 4.36 | 6.13 ± 0.03 |
+| MIM (ε=0.02) | 0.03 | 0.01 | 0.41 | 0.48 ± 0.00 |
+| AA (ε=0.01) | 0 | 0 | 0 | 2.66 ± 0.02 |
+| AA (ε=0.02) | 0 | 0 | 0 | 0.02 ± 0.00 |
+| C&W (λ=0.01) | 2.36 | 0.13 | 9.54 | 19.47 ± 0.06 |
+
+Ablation study on model architectures. Table 7 presents the results across different model architectures, including ResNet-20, ResNet-50, WRN-28-10, and WRN-34-10. While larger models generally achieve higher clean and robust accuracy, the results suggest that our method consistently enhances robustness under various attack scenarios, demonstrating its applicability across diverse architectures.
+
+Ablation study on allocation methods. Table 8 compares the performance of FDT-hybrid with different weakness set allocation methods on CIFAR-10. The results indicate that our proposed allocation method achieves better clean accuracy and robustness under various attack scenarios than a uniformly random allocation.
+
+Ablation study on $\tau_{1}$ and $\tau_{2}$ . Table 9 presents the results of FDT-hybrid with various combinations of the selection thresholds $\tau_{1}$ and $\tau_{2}$ on CIFAR-10. The experiments reveal the impact of different thresholds on both clean accuracy and robustness under adversarial attacks. As $\tau_{2}$ increases, robustness improves across all metrics, but clean accuracy decreases. For a fixed $\tau_{2}$ , increasing $\tau_{1}$ generally leads to a trade-off between clean accuracy and robustness. Setting $\tau_{1} = 0.2$ and $\tau_{2} = 0.8$ achieves a relatively balanced performance, maintaining both competitive clean accuracy and robust accuracy under various attacks.
+
+Table 7: Robust Accuracy (%) of different model architectures against white-box attacks on CIFAR-10. The $\epsilon$ and $\lambda$ stand for the $l_{\infty}$ norm of the adversarial perturbation and the coefficient of the C&W attack respectively.
+
+| CIFAR-10 | ResNet-20 | ResNet-50 | WRN-28-10 | WRN-34-10 |
+| --- | --- | --- | --- | --- |
+| Clean accuracy | 90.02 | 93.23 | 94.18 | 94.63 |
+| FGSM (ε=0.01) | 72.24 | 76.65 | 80.64 | 81.04 |
+| FGSM (ε=0.02) | 58.04 | 58.59 | 60.09 | 60.92 |
+| PGD (ε=0.01) | 48.48 | 60.23 | 64.64 | 65.38 |
+| PGD (ε=0.02) | 20.01 | 24.35 | 26.00 | 27.42 |
+| BIM (ε=0.01) | 48.57 | 60.43 | 67.36 | 68.29 |
+| BIM (ε=0.02) | 16.63 | 23.57 | 32.36 | 33.86 |
+| MIM (ε=0.01) | 51.48 | 60.81 | 64.36 | 64.71 |
+| MIM (ε=0.02) | 20.09 | 24.54 | 25.64 | 26.42 |
+| AA (ε=0.01) | 51.56 | 60.48 | 63.45 | 64.01 |
+| AA (ε=0.02) | 19.42 | 24.21 | 25.23 | 26.39 |
+| C&W (λ=0.01) | 56.08 | 56.55 | 57.23 | 57.52 |
+
+Table 8: Performance of FDT-hybrid with different weakness set allocation methods on CIFAR-10. The other settings are consistent with those in Table 1.
+
+| Allocation method | Clean accuracy | FGSM (ε=0.02) | PGD (ε=0.02) | AutoAttack (ε=0.02) |
+| --- | --- | --- | --- | --- |
+| Uniform random | 89.32 | 56.20 | 18.24 | 17.89 |
+| Ours | 90.20 | 58.04 | 20.01 | 19.42 |
+ +Table 9: Performance of FDT-hybrid with different selection thresholds $\tau_{1}$ and $\tau_{2}$ on CIFAR-10. The other settings are consistent with those in Table 1. + +
+| Thresholds | Clean accuracy | FGSM (ε=0.02) | PGD (ε=0.02) | AutoAttack (ε=0.02) |
+| --- | --- | --- | --- | --- |
+| τ1=0.2, τ2=0.7 | 91.03 | 56.62 | 17.74 | 17.60 |
+| τ1=0.2, τ2=0.8 | 90.20 | 58.04 | 20.01 | 19.42 |
+| τ1=0.2, τ2=0.9 | 89.46 | 58.48 | 20.12 | 19.57 |
+| τ1=0.4, τ2=0.7 | 89.75 | 56.89 | 17.93 | 17.82 |
+| τ1=0.4, τ2=0.8 | 89.08 | 58.21 | 20.01 | 19.47 |
+| τ1=0.4, τ2=0.9 | 88.44 | 58.53 | 20.09 | 19.61 |
+| τ1=0.6, τ2=0.7 | 89.62 | 53.35 | 15.27 | 14.63 |
+| τ1=0.6, τ2=0.8 | 88.84 | 55.33 | 15.42 | 15.24 |
+| τ1=0.6, τ2=0.9 | 88.12 | 55.46 | 15.83 | 15.47 |
+
+# F.2 RESULTS FOR BLACK-BOX ATTACK
+
+In the black-box setting, the attacker's knowledge is usually limited to the original training dataset, with no information about the model. This setting represents a more practical attack scenario. The attacker can train a surrogate model to generate transferable adversarial examples and transfer them to the target ensemble model. We utilize a single ResNet-20 model as the surrogate model. Adversarial examples are generated on the surrogate model using the SPSA algorithm (Spall, 1992). Figure 5 shows the robust accuracy of the ensemble models against black-box attacks under different degrees of perturbation. As we can see, the FDT-hybrid ensemble training strategy outperforms the other ensemble training strategies against black-box attacks on both CIFAR-10 and CIFAR-100.
+
+![](images/3a9bbed4c976150c6e83147e047a6e57ab5122f2f57653ad1f7a35b88ed612c5.jpg)
+(a)
+
+![](images/41ef6eb8c4ed9db8cc8dfb81e794e29dc4f266be742cbaf143ed3a489bfd37bb.jpg)
+(b)
+Figure 5: Robust Accuracy for different ensemble models against black-box attacks with different perturbation scales $\epsilon$ .
+
+# F.3 TRADE-OFF BETWEEN CLEAN AND ROBUST ACCURACY
+
+In this section, we explore the trade-off between clean accuracy and robust accuracy by varying the frequency selection threshold $\tau_{2}$ (as mentioned in Section 4.2), while setting $\tau_{1}$ to 0.1. To assess the adversarial robustness, we utilize the PGD attack under $l_{\infty}$ perturbations of size $\epsilon = 0.01$ as a benchmark. We train a set of ResNet-20 FDT-hybrid models on CIFAR-10 and CIFAR-100 with various frequency selection thresholds $\tau_{2} \in \{0.4, 0.6, 0.8, 1.0, 1.2, 1.6\}$ . Figure 6 shows that the ensemble model has lower clean accuracy and higher robust accuracy as $\tau_{2}$ increases.
+
+![](images/96556ece08cecab0da72a8399d072088c7c47234f3802bd3d1de1d7d1b1dad8.jpg)
+(a)
+
+![](images/1840eb354f1c0d1bf3c15cbd742aca09b7c5a17ae284709ea962b17eb2e3a2b4.jpg)
+(b)
+Figure 6: (a) shows the trade-off on CIFAR-10 while (b) shows it on CIFAR-100. From left to right, we decrease the trade-off parameter $\tau_{2}$ for FDT.
+
+# F.4 TRANSFERABILITY ACROSS VARIOUS SUB-MODELS
+
+To further investigate the diversity between sub-models, we conduct an analysis by generating adversarial examples using one sub-model and evaluating their accuracy on the other target sub-models. The transferability of these adversarial examples among sub-models is visualized in Figure 7, considering different ensemble training methods on the CIFAR-10 dataset. We generate adversarial examples from the "base model" and test the accuracy of the "target model". The experimental results indicate that FDT exhibits comparable performance to DVERGE and TRS in reducing the transferability of adversarial examples across different sub-models. This demonstrates that FDT not only enhances the diversity of weaknesses within the datasets but also weakens the transferability of adversarial examples between sub-models.
+
+![](images/487827d0491099c648319eac4b498797d48312a2c70fa6583e223a6fb92e0575.jpg)
+(a)
+
+![](images/57ddf9ef19e594195508f3ae1c10e54a8dbe8b1e70cc0fa19353407b75b3f5fb.jpg)
+(b)
+
+![](images/e2d764b96904022fe7482aa65ff6f3445097fd2fb937afc6e30e43965fbcf8b6.jpg)
+(c)
+
+![](images/2ad9216a92efe39f3176dc41ec4a12ae77535776bdeea00c191a7bbbed2cc1c2.jpg)
+(d)
+
+![](images/509f5d101d695c95d266c4e7aed497ab6ccf09a4c35a5562d07b8530693c888f.jpg)
+(e)
+
+![](images/9a7aa25d9254e905c5f8bc033b282eaab1dd2a4c750012ce267b3c828efa9408.jpg)
+(f)
+Figure 7: Pair-wise adversarial transferability between sub-models against the PGD attack with $\epsilon = 0.02$ on CIFAR-10. The value represents the success rate of adversarial examples generated by the base model in attacking the target model.
+
+# F.5 COMPARE WITH ADVERSARIAL TRAINING
+
+We use targeted attacks in our data transformation, which differs significantly from adversarial training. First, we employ a simple pre-trained network (VGG11 in our experiments) to compute the adversarial examples, thereby accelerating the training process. Second, we only utilize the low-amplitude part of the adversarial examples for the data transformation, which helps maintain the model's clean accuracy. We compare our method with several popular approaches (Wang et al., 2023; Rade & Moosavi-Dezfooli, 2021; Xu et al., 2023) on CIFAR-10 using AutoAttack under $l_{\infty}$ perturbations ( $\epsilon = 8 / 255$ ). Wang et al. (2023) generated training datasets using a diffusion model, followed by adversarial training on these datasets. For fairness, we compare our method with the version proposed by Wang et al. (2023) that uses 50k generated images. Rade & Moosavi-Dezfooli (2021) used "helper examples" to aid the adversarial training. Xu et al. (2023) proposed Dynamics-Aware Robust Training, which encourages the decision boundary to adjust in a way that prioritizes increasing smaller margins. We use WideResNet-28-10 as the sub-model and ensemble eight sub-models without using generated data. The results in Table 10 indicate that, although the robustness of our method is not the highest, it maintains clean accuracy with almost no decline. Moreover, our method does not require additional generated data or adversarial training, and even with the need for ensembling, the training efficiency remains relatively high. This suggests a potential way to enhance robustness while minimizing the decrease in clean accuracy.
+
+To further illustrate our method's advantage, we conduct additional experiments to compare the "robustness vs. clean accuracy" trade-off curves of our method and adversarial training under different settings. Figure 8 compares the trade-off curves obtained by HAT (Rade & Moosavi-Dezfooli, 2021) with those of FDT-hybrid. For HAT, we fix $\gamma = 0.25$ and vary $\beta \in \{0.1, 0.5, 2.5, 3.0, 4.0, 5.0\}$ ( $\beta$ is the coefficient of the robustness loss, and a higher $\beta$ indicates a higher robust accuracy); for FDT-hybrid, we fix $\tau_{1} = 0.2$ and vary $\tau_{2} \in \{0.5, 0.7, 0.9, 1.1, 1.3, 1.5\}$ . We observe that HAT's robustness declines rapidly when $\beta$ is small (i.e., as the clean accuracy increases). This result shows the significant advantage of our method when a clean accuracy above $90\%$ is required.
+
+![](images/6373b177569abf83ef1d4ee46c795b8921d8725a1ce03bdf96e33c8e603fc9a8.jpg)
+Figure 8: Trade-off curves between clean accuracy and robust accuracy on CIFAR-10.
Table 10: Clean accuracy and robust accuracy (\%) of different methods against AutoAttack under $l_{\infty}$ perturbations ($\epsilon = 8/255$) on CIFAR-10.

| CIFAR-10 | Clean accuracy | Robust accuracy |
| --- | --- | --- |
| Wang et al. (2023b) | 86.15 | 55.71 |
| Rade & Moosavi-Dezfooli (2021) | 84.90 | 49.08 |
| Xu et al. (2023) | 85.55 | 54.69 |
| OURS (FDT-hybrid) | 93.72 | 34.61 |
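For reference, robust accuracies of this kind can be measured with the standard AutoAttack package; a minimal sketch follows, where `model`, `x_test`, and `y_test` are placeholders for the evaluated network and test tensors. Note that a hard-voting ensemble is not differentiable, so in practice one would wrap the ensemble as a single module returning averaged sub-model logits before attacking it:

```python
# pip install git+https://github.com/fra31/auto-attack
import torch
from autoattack import AutoAttack

def robust_accuracy(model, x_test, y_test, eps=8/255, batch_size=128):
    """Standard AutoAttack evaluation under l_inf perturbations."""
    model.eval()
    adversary = AutoAttack(model, norm='Linf', eps=eps, version='standard')
    x_adv = adversary.run_standard_evaluation(x_test, y_test, bs=batch_size)
    with torch.no_grad():
        preds = model(x_adv).argmax(dim=1)
    return (preds == y_test).float().mean().item()
```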
\ No newline at end of file diff --git a/2025/To Tackle Adversarial Transferability_ A Novel Ensemble Training Method with Fourier Transformation/images.zip b/2025/To Tackle Adversarial Transferability_ A Novel Ensemble Training Method with Fourier Transformation/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..95829874f5870f4ce6a52feedf640418aaa2aeca --- /dev/null +++ b/2025/To Tackle Adversarial Transferability_ A Novel Ensemble Training Method with Fourier Transformation/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:53bf42e85414c10a31e7c455a66adc7a8b470a9aa608bee4b6c9e154f0cb538a +size 1222151 diff --git a/2025/To Tackle Adversarial Transferability_ A Novel Ensemble Training Method with Fourier Transformation/layout.json b/2025/To Tackle Adversarial Transferability_ A Novel Ensemble Training Method with Fourier Transformation/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..a5feda7876e202ffb20ae0afe9482b95329998a0 --- /dev/null +++ b/2025/To Tackle Adversarial Transferability_ A Novel Ensemble Training Method with Fourier Transformation/layout.json @@ -0,0 +1,19311 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 105, + 79, + 506, + 135 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 79, + 506, + 135 + ], + "spans": [ + { + "bbox": [ + 105, + 79, + 506, + 135 + ], + "type": "text", + "content": "TO TACKLE ADVERSARIAL TRANSFERABILITY: A NOVEL ENSEMBLE TRAINING METHOD WITH FOURIER TRANSFORMATION" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 110, + 153, + 441, + 166 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 153, + 441, + 166 + ], + "spans": [ + { + "bbox": [ + 110, + 153, + 441, + 166 + ], + "type": "text", + "content": "Wanlin Zhang" + }, + { + "bbox": [ + 110, + 153, + 441, + 166 + ], + "type": "inline_equation", + "content": "^{1,3}" + }, + { + "bbox": [ + 110, + 153, + 441, + 166 + ], + "type": "text", + "content": ", Weichen Lin" + }, + { + "bbox": [ + 110, + 153, + 441, + 166 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 110, + 153, + 441, + 166 + ], + "type": "text", + "content": ", Ruomin Huang" + }, + { + "bbox": [ + 110, + 153, + 441, + 166 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 110, + 153, + 441, + 166 + ], + "type": "text", + "content": ", Shihong Song" + }, + { + "bbox": [ + 110, + 153, + 441, + 166 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 110, + 153, + 441, + 166 + ], + "type": "text", + "content": ", Hu Ding" + }, + { + "bbox": [ + 110, + 153, + 441, + 166 + ], + "type": "inline_equation", + "content": "^{1*}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 110, + 166, + 511, + 223 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 166, + 511, + 223 + ], + "spans": [ + { + "bbox": [ + 110, + 166, + 511, + 223 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 110, + 166, + 511, + 223 + ], + "type": "text", + "content": "School of Computer Science and Technology, University of Science and Technology of China \n" + }, + { + "bbox": [ + 110, + 166, + 511, + 223 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 110, + 166, + 511, + 223 + ], + "type": "text", + "content": "School of Artificial Intelligence and Data Science, University of Science and Technology of China \n" + }, + { + "bbox": [ + 110, + 166, + 511, + 
223 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 110, + 166, + 511, + 223 + ], + "type": "text", + "content": "Shanghai Innovation Institute " + }, + { + "bbox": [ + 110, + 166, + 511, + 223 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 110, + 166, + 511, + 223 + ], + "type": "text", + "content": "Department of Computer Science, Duke University \n{ideven, linweichen, shihongsong}@mail.ustc.edu.cn \nruomin.huang@duke.edu,HUDING@ustc.edu.cn" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 276, + 251, + 334, + 262 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 251, + 334, + 262 + ], + "spans": [ + { + "bbox": [ + 276, + 251, + 334, + 262 + ], + "type": "text", + "content": "ABSTRACT" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 140, + 275, + 470, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 275, + 470, + 418 + ], + "spans": [ + { + "bbox": [ + 140, + 275, + 470, + 418 + ], + "type": "text", + "content": "Ensemble methods are commonly used for enhancing robustness in machine learning. However, due to the \"transferability\" of adversarial examples, the performance of an ensemble model can be seriously affected even it contains a set of independently trained sub-models. To address this issue, we propose an efficient data transformation method based on a cute \"weakness allocation\" strategy, to diversify non-robust features. Our approach relies on a fine-grained analysis on the relation between non-robust features and adversarial attack directions. Moreover, our approach enjoys several other advantages, e.g., it does not require any communication between sub-models and the construction complexity is also quite low. We conduct a set of experiments to evaluate the performance of our proposed method and compare it with several popular baselines. The results suggest that our approach can achieve significantly improved robust accuracy over most existing ensemble methods, and meanwhile preserve high clean accuracy." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 438, + 206, + 449 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 438, + 206, + 449 + ], + "spans": [ + { + "bbox": [ + 105, + 438, + 206, + 449 + ], + "type": "text", + "content": "1 INTRODUCTION" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 455, + 506, + 522 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 455, + 506, + 522 + ], + "spans": [ + { + "bbox": [ + 104, + 455, + 506, + 522 + ], + "type": "text", + "content": "In the past decade, Deep neural networks (DNNs) have achieved prominent performance on a broad range of real-world tasks (Goodfellow et al., 2016). However, a number of previous works show that DNNs are susceptible to carefully-crafted manipulations, where the manipulated data are called \"adversarial examples\" (Szegedy et al., 2014; Zhou et al., 2018; Heaven, 2019). The existence of adversarial examples severely impedes the application of DNNs in security-conscious scenarios, such as self-driving car (Rossolini et al., 2023; Zhu et al., 2021) and heath care (Newaz et al., 2020)." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 527, + 504, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 527, + 504, + 594 + ], + "spans": [ + { + "bbox": [ + 104, + 527, + 504, + 594 + ], + "type": "text", + "content": "The adversarial training approach (Wang et al., 2023a; Madry et al., 2018) has gained significant attention due to its great effectiveness for defending against adversarial examples. However, the adversarial training approach often necessitates considerably high training time and large training dataset (Gowal et al., 2021; Carmon et al., 2019). Moreover, it has been observed that adversarial training is likely to incur certain decline in the accuracy on clean data, which also hinders the trained model to be applied for many practical tasks (Tsipras et al., 2018; Zhang et al., 2019)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 599, + 506, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 599, + 506, + 687 + ], + "spans": [ + { + "bbox": [ + 104, + 599, + 506, + 687 + ], + "type": "text", + "content": "Another important approach to enhance adversarial robustness is ensemble training (Tramér et al., 2018). But recent studies (Yang et al., 2025; Gao et al., 2022; Waseda et al., 2023) demonstrated that an adversarial example can attack different models even they are trained independently, and this phenomenon is the so-called \"transferability\" of adversarial examples. Hence, the strategy that simply integrates different models trained on the same original dataset is not sufficient to guarantee the overall robustness. To resolve this issue, different approaches have been proposed for maximizing the \"diversity\" among sub-models; in general, these approaches can be categorized into two classes: \"simultaneous training\" and \"individual training\" (Pang et al., 2019)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 693, + 504, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 693, + 504, + 715 + ], + "spans": [ + { + "bbox": [ + 104, + 693, + 504, + 715 + ], + "type": "text", + "content": "To reduce the similarity among sub-models, most existing \"simultaneous training\" methods attempt to incorporate some penalty during each epoch of parameter updates. Kariyappa & Qureshi (2019)" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 721, + 204, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 721, + 204, + 732 + ], + "spans": [ + { + "bbox": [ + 116, + 721, + 204, + 732 + ], + "type": "text", + "content": "*Corresponding author." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 203 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 203 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 203 + ], + "type": "text", + "content": "proposed the \"Gradient Alignment Loss (GAL)\" method to minimize the gradient similarity between sub-models directly. Further, Yang et al. (2021) proposed the \"Transferability Reduced Smooth (TRS)\" method to improve GAL by adding a regularization term to increase the smoothness, as the models with a smoother loss function can reduce the \"transferability\" of attacks. Yang et al. (2020) aimed to isolate the adversarial vulnerability in each sub-model by distilling non-robust features, where the sub-models can then generate diverse outputs being resilient against transfer attacks. Despite their effectiveness for defending adversarial attacks, the simultaneous training methods often require a substantial amount of memory since all the sub-models need to be stored in the GPUs in the training stage, which could be prohibitive if the number of sub-models is not small (say, more than 10) and/or their sizes are large. Additionally, the information interaction in parallel training can also cause extra large communication cost." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 209, + 506, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 209, + 506, + 331 + ], + "spans": [ + { + "bbox": [ + 104, + 209, + 506, + 331 + ], + "type": "text", + "content": "Different from simultaneous training, most \"individual training\" methods train each sub-model independently on a randomly transformed version of the given training dataset (Pang et al., 2019; AprilPyone & Kiya, 2021). This \"random transformation\" strategy yields diverse datasets, and thus different sub-models trained on these datasets can present diverse performances when confronting an adversarial attack. The individual training approach has higher flexibility and also requires less GPU memory, because the sub-models do not need to be stored simultaneously. Since there is no communication between sub-models, individual training methods are more suitable for parallel training with multiple GPUs. But unfortunately, recent studies showed that the commonly used random transformations (e.g. image cropping and rescaling) are not that effective under adversarial attacks (Athalye et al., 2018). The major cause of suppressing the performance of individual training is that the \"transferability\" problem is still not well addressed." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 335, + 504, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 335, + 504, + 357 + ], + "spans": [ + { + "bbox": [ + 104, + 335, + 504, + 357 + ], + "type": "text", + "content": "Our contributions. To tackle the transferability obstacle, we consider developing a new data transformation method for ensemble training. 
Our main contributions are summarized as follows:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 103, + 363, + 504, + 419 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 103, + 363, + 504, + 419 + ], + "spans": [ + { + "bbox": [ + 103, + 363, + 504, + 419 + ], + "type": "text", + "content": "- First, we propose a fine-grained analysis on the relation between non-robust features and adversarial attack directions (Section 3). Being different from the previous analysis on non-robust features, our new analysis provides us the hints that are particularly useful to allocate the potential vulnerability directions to a set of sub-models, and therefore paves the way for designing our ensemble training strategy." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 103, + 424, + 506, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 103, + 424, + 506, + 491 + ], + "spans": [ + { + "bbox": [ + 103, + 424, + 506, + 491 + ], + "type": "text", + "content": "- Second, we propose a data transform framework that can effectively promote the diversity of training data for robust ensemble training. The framework consists of two steps: \"frequency selection\" and \"frequency transformation\", where the frequency is based on the Fourier transformation on the images. We propose two efficient frequency transformations with low complexities on the identified non-robust features. The first one is based on simple random noise, and the second one is a cute \"targeted attack transformation\" that can modify the non-robust features more effectively (Section 4.2)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 103, + 496, + 506, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 103, + 496, + 506, + 563 + ], + "spans": [ + { + "bbox": [ + 103, + 496, + 506, + 563 + ], + "type": "text", + "content": "- Finally, we conduct a set of experiments to evaluate the adversarial robustness of our approach on several benchmark datasets under the widely used attack algorithms. We also compare our approach with several open-source ensemble methods, such as ADP (Pang et al., 2019), GAL (Kariyappa & Qureshi, 2019), DVERGE (Yang et al., 2020), and TRS (Yang et al., 2021). Compared with those baselines, the experimental results suggest that our proposed approach can significantly outperform most of them in robust accuracy and also preserve comparable high clean accuracy." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 574, + 237, + 586 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 574, + 237, + 586 + ], + "spans": [ + { + "bbox": [ + 105, + 574, + 237, + 586 + ], + "type": "text", + "content": "1.1 OTHER RELATED WORKS" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 594, + 506, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 594, + 506, + 661 + ], + "spans": [ + { + "bbox": [ + 104, + 594, + 506, + 661 + ], + "type": "text", + "content": "Data transformation for ensemble training. Guo et al. (2018) and Raff et al. (2019) proposed the transformations that preserve semantic information to reduce the impact of adversarial perturbation. AprilPyone & Kiya (2021) developed a training method that employs block-wise data transformations, where the input image is partitioned into blocks based on some private key. 
LINAC (Rusu et al., 2022) uses a predetermined random seed (private key) to initialize and train a DNN to encode the input data, serving as an encrypted input transformation." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 666, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 666, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 666, + 506, + 733 + ], + "type": "text", + "content": "Adversarial attack from frequency perspective. Wang et al. (2020) explained that the model's vulnerability to small distortions may be due to its dependence on high-frequency features. Yucel et al. (2023) proposed a data augmentation method that reduces the reliance on high-frequency components, so as to improve model's robustness while maintaining clean accuracy. Maiya et al. (2021) and Bernhard et al. (2021) respectively showed that to fully understand the vulnerability, we should consider the distribution of the entire dataset with high and low frequencies." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 209, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 209, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 209, + 94 + ], + "type": "text", + "content": "2 PRELIMINARIES" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 99, + 506, + 189 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 99, + 506, + 189 + ], + "spans": [ + { + "bbox": [ + 104, + 99, + 506, + 189 + ], + "type": "text", + "content": "Some notations. We consider the " + }, + { + "bbox": [ + 104, + 99, + 506, + 189 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 99, + 506, + 189 + ], + "type": "text", + "content": "-classification task: " + }, + { + "bbox": [ + 104, + 99, + 506, + 189 + ], + "type": "inline_equation", + "content": "\\mathcal{X} \\to \\mathcal{Y}" + }, + { + "bbox": [ + 104, + 99, + 506, + 189 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 99, + 506, + 189 + ], + "type": "inline_equation", + "content": "\\mathcal{X}" + }, + { + "bbox": [ + 104, + 99, + 506, + 189 + ], + "type": "text", + "content": " is the input data space and " + }, + { + "bbox": [ + 104, + 99, + 506, + 189 + ], + "type": "inline_equation", + "content": "\\mathcal{Y} = \\{1,2,\\dots,k\\}" + }, + { + "bbox": [ + 104, + 99, + 506, + 189 + ], + "type": "text", + "content": " is the set of labels. 
A soft-classification model " + }, + { + "bbox": [ + 104, + 99, + 506, + 189 + ], + "type": "inline_equation", + "content": "f(\\cdot;\\beta)" + }, + { + "bbox": [ + 104, + 99, + 506, + 189 + ], + "type": "text", + "content": " maps each " + }, + { + "bbox": [ + 104, + 99, + 506, + 189 + ], + "type": "inline_equation", + "content": "x \\in \\mathcal{X}" + }, + { + "bbox": [ + 104, + 99, + 506, + 189 + ], + "type": "text", + "content": " to a vector " + }, + { + "bbox": [ + 104, + 99, + 506, + 189 + ], + "type": "inline_equation", + "content": "f(x;\\beta) \\in \\mathbb{R}^k" + }, + { + "bbox": [ + 104, + 99, + 506, + 189 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 99, + 506, + 189 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 104, + 99, + 506, + 189 + ], + "type": "text", + "content": " is the parameter vector that needs to be trained. Its associated hard-classification model is " + }, + { + "bbox": [ + 104, + 99, + 506, + 189 + ], + "type": "inline_equation", + "content": "F(x;\\beta) = \\arg \\max_i [f(x;\\beta)]_i" + }, + { + "bbox": [ + 104, + 99, + 506, + 189 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 99, + 506, + 189 + ], + "type": "inline_equation", + "content": "[\\cdot]_i" + }, + { + "bbox": [ + 104, + 99, + 506, + 189 + ], + "type": "text", + "content": " stands for the " + }, + { + "bbox": [ + 104, + 99, + 506, + 189 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 99, + 506, + 189 + ], + "type": "text", + "content": "-th coordinate. The model " + }, + { + "bbox": [ + 104, + 99, + 506, + 189 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 99, + 506, + 189 + ], + "type": "text", + "content": " is usually equipped with a loss function " + }, + { + "bbox": [ + 104, + 99, + 506, + 189 + ], + "type": "inline_equation", + "content": "\\ell(f(x;\\beta), y)" + }, + { + "bbox": [ + 104, + 99, + 506, + 189 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 99, + 506, + 189 + ], + "type": "inline_equation", + "content": "x \\in \\mathcal{X}" + }, + { + "bbox": [ + 104, + 99, + 506, + 189 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 99, + 506, + 189 + ], + "type": "inline_equation", + "content": "y \\in \\mathcal{Y}" + }, + { + "bbox": [ + 104, + 99, + 506, + 189 + ], + "type": "text", + "content": ", which is differentiable on " + }, + { + "bbox": [ + 104, + 99, + 506, + 189 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 104, + 99, + 506, + 189 + ], + "type": "text", + "content": " (e.g., cross-entropy loss). We refer to the accuracy on the original dataset as \"clean accuracy\" and the accuracy on adversarial examples as \"robust accuracy\". We denote the one-hot " + }, + { + "bbox": [ + 104, + 99, + 506, + 189 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 99, + 506, + 189 + ], + "type": "text", + "content": "-dimensional vector that corresponds to the target label " + }, + { + "bbox": [ + 104, + 99, + 506, + 189 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 104, + 99, + 506, + 189 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 104, + 99, + 506, + 189 + ], + "type": "inline_equation", + "content": "h(y)" + }, + { + "bbox": [ + 104, + 99, + 506, + 189 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 193, + 506, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 193, + 506, + 217 + ], + "spans": [ + { + "bbox": [ + 104, + 193, + 506, + 217 + ], + "type": "text", + "content": "Definition 2.1 (Ensemble Model) Let " + }, + { + "bbox": [ + 104, + 193, + 506, + 217 + ], + "type": "inline_equation", + "content": "\\mathcal{M} = \\{f_1, \\dots, f_M\\}" + }, + { + "bbox": [ + 104, + 193, + 506, + 217 + ], + "type": "text", + "content": " be a set of sub-models for a " + }, + { + "bbox": [ + 104, + 193, + 506, + 217 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 193, + 506, + 217 + ], + "type": "text", + "content": "-classification task. We build the ensemble model with the following function:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 224, + 218, + 505, + 249 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 224, + 218, + 505, + 249 + ], + "spans": [ + { + "bbox": [ + 224, + 218, + 505, + 249 + ], + "type": "interline_equation", + "content": "f _ {\\mathrm {E}} (x; \\beta_ {[ 1: M ]}) = \\frac {1}{M} \\sum_ {m \\in [ M ]} \\widehat {F} _ {m} (x; \\beta_ {m}), \\tag {1}", + "image_path": "36e88bb2baac363a72816987de3be3fc0aaca723b69c763415a11c6902a2c7cd.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 255, + 504, + 282 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 255, + 504, + 282 + ], + "spans": [ + { + "bbox": [ + 104, + 255, + 504, + 282 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 255, + 504, + 282 + ], + "type": "inline_equation", + "content": "\\beta_{[1:M]} = \\{\\beta_m \\mid 1 \\leq m \\leq M\\}" + }, + { + "bbox": [ + 104, + 255, + 504, + 282 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 255, + 504, + 282 + ], + "type": "inline_equation", + "content": "\\widehat{F}_m(x; \\beta_m)" + }, + { + "bbox": [ + 104, + 255, + 504, + 282 + ], + "type": "text", + "content": " is the one-hot " + }, + { + "bbox": [ + 104, + 255, + 504, + 282 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 255, + 504, + 282 + ], + "type": "text", + "content": "-dimensional vector of the hard-classification model " + }, + { + "bbox": [ + 104, + 255, + 504, + 282 + ], + "type": "inline_equation", + "content": "F_m(x; \\beta_m)" + }, + { + "bbox": [ + 104, + 255, + 504, + 282 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 104, + 255, + 504, + 282 + ], + "type": "inline_equation", + "content": "f_m" + }, + { + "bbox": [ + 104, + 255, + 504, + 282 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 291, + 505, + 347 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 291, + 505, + 347 + ], + "spans": [ + { + "bbox": [ + 104, + 291, + 505, + 347 + ], + "type": "text", + "content": "Definition 2.2 (Adversarial Attack and Targeted Attack) Given a model " + }, + { + "bbox": [ + 104, + 291, + 505, + 347 + ], + "type": "inline_equation", + "content": "f(\\cdot; \\beta)" + }, + { + "bbox": [ + 104, + 291, + 505, + 347 + ], + "type": "text", + "content": " and an input " + }, + { + "bbox": [ + 104, + 291, + 505, + 347 + ], + "type": "inline_equation", + "content": "(x, y) \\in \\mathcal{X} \\times \\mathcal{Y}" + }, + { + "bbox": [ + 104, + 291, + 505, + 347 + ], + "type": "text", + "content": ", the adversarial attack algorithm " + }, + { + "bbox": [ + 104, + 291, + 505, + 347 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 104, + 291, + 505, + 347 + ], + "type": "text", + "content": " returns a perturbed data " + }, + { + "bbox": [ + 104, + 291, + 505, + 347 + ], + "type": "inline_equation", + "content": "x'" + }, + { + "bbox": [ + 104, + 291, + 505, + 347 + ], + "type": "text", + "content": " inside the " + }, + { + "bbox": [ + 104, + 291, + 505, + 347 + ], + "type": "inline_equation", + "content": "l_p" + }, + { + "bbox": [ + 104, + 291, + 505, + 347 + ], + "type": "text", + "content": " ball of radius " + }, + { + "bbox": [ + 104, + 291, + 505, + 347 + ], + "type": "inline_equation", + "content": "\\epsilon > 0" + }, + { + "bbox": [ + 104, + 291, + 505, + 347 + ], + "type": "text", + "content": ", which maximizes the loss function " + }, + { + "bbox": [ + 104, + 291, + 505, + 347 + ], + "type": "inline_equation", + "content": "\\ell(f(\\cdot; \\beta), \\cdot)" + }, + { + "bbox": [ + 104, + 291, + 505, + 347 + ], + "type": "text", + "content": ", or minimizes the loss function " + }, + { + "bbox": [ + 104, + 291, + 505, + 347 + ], + "type": "inline_equation", + "content": "\\ell(f(\\cdot; \\beta), y_t)" + }, + { + "bbox": [ + 104, + 291, + 505, + 347 + ], + "type": "text", + "content": " if given a target label " + }, + { + "bbox": [ + 104, + 291, + 505, + 347 + ], + "type": "inline_equation", + "content": "y_t \\neq y" + }, + { + "bbox": [ + 104, + 291, + 505, + 347 + ], + "type": "text", + "content": ". For the latter one, we say it is a \"targeted attack from " + }, + { + "bbox": [ + 104, + 291, + 505, + 347 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 104, + 291, + 505, + 347 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 291, + 505, + 347 + ], + "type": "inline_equation", + "content": "y_t" + }, + { + "bbox": [ + 104, + 291, + 505, + 347 + ], + "type": "text", + "content": "\". Usually we set " + }, + { + "bbox": [ + 104, + 291, + 505, + 347 + ], + "type": "inline_equation", + "content": "p = 2" + }, + { + "bbox": [ + 104, + 291, + 505, + 347 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 104, + 291, + 505, + 347 + ], + "type": "inline_equation", + "content": "p = \\infty" + }, + { + "bbox": [ + 104, + 291, + 505, + 347 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 350, + 506, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 350, + 506, + 384 + ], + "spans": [ + { + "bbox": [ + 104, + 350, + 506, + 384 + ], + "type": "text", + "content": "As mentioned in Section 1, because our proposed approach is based on Fourier transform, we introduce several necessary notations below. Given an image " + }, + { + "bbox": [ + 104, + 350, + 506, + 384 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 350, + 506, + 384 + ], + "type": "text", + "content": " of size " + }, + { + "bbox": [ + 104, + 350, + 506, + 384 + ], + "type": "inline_equation", + "content": "L \\times N" + }, + { + "bbox": [ + 104, + 350, + 506, + 384 + ], + "type": "text", + "content": ", the corresponding two-dimensional discrete Fourier transform can be written as: for any " + }, + { + "bbox": [ + 104, + 350, + 506, + 384 + ], + "type": "inline_equation", + "content": "0 \\leq u \\leq L - 1" + }, + { + "bbox": [ + 104, + 350, + 506, + 384 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 350, + 506, + 384 + ], + "type": "inline_equation", + "content": "0 \\leq v \\leq N - 1" + }, + { + "bbox": [ + 104, + 350, + 506, + 384 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 219, + 391, + 505, + 423 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 219, + 391, + 505, + 423 + ], + "spans": [ + { + "bbox": [ + 219, + 391, + 505, + 423 + ], + "type": "interline_equation", + "content": "\\tilde {x} [ u, v ] = \\sum_ {s = 0} ^ {L - 1} \\sum_ {t = 0} ^ {N - 1} x [ s, t ] \\cdot e ^ {- 2 \\mathrm {j} \\pi \\left(\\frac {u _ {s}}{L} + \\frac {v t}{N}\\right)}, \\tag {2}", + "image_path": "54c664b22dc0eb173862c8266dc8af7d432e26117ebf87d10ab206e0baad9724.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 429, + 506, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 429, + 506, + 477 + ], + "spans": [ + { + "bbox": [ + 104, + 429, + 506, + 477 + ], + "type": "text", + "content": "where “" + }, + { + "bbox": [ + 104, + 429, + 506, + 477 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 104, + 429, + 506, + 477 + ], + "type": "text", + "content": "” denotes the imaginary unit, and “" + }, + { + "bbox": [ + 104, + 429, + 506, + 477 + ], + "type": "inline_equation", + "content": "\\tilde{x}[u,v]" + }, + { + "bbox": [ + 104, + 429, + 506, + 477 + ], + "type": "text", + "content": "” is the entry in the " + }, + { + "bbox": [ + 104, + 429, + 506, + 477 + ], + "type": "inline_equation", + "content": "u" + }, + { + "bbox": [ + 104, + 429, + 506, + 477 + ], + "type": "text", + "content": "-th column and " + }, + { + "bbox": [ + 104, + 429, + 506, + 477 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 104, + 429, + 506, + 477 + ], + "type": "text", + "content": "-th row of the Fourier matrix " + }, + { + "bbox": [ + 104, + 429, + 506, + 477 + ], + "type": "inline_equation", + "content": "\\tilde{x}" + }, + { + "bbox": [ + 104, + 429, + 506, + 477 + ], + "type": "text", + "content": " (“" + }, + { + "bbox": [ + 104, + 429, + 506, + 477 + ], + "type": "inline_equation", + "content": "x[s,t]" + }, + { + "bbox": [ + 104, + 429, + 506, + 477 + ], + "type": "text", + "content": "” is defined similarly for the original image " + }, + { + "bbox": [ + 104, + 429, + 506, + 477 + ], + "type": "inline_equation", + 
"content": "x" + }, + { + "bbox": [ + 104, + 429, + 506, + 477 + ], + "type": "text", + "content": "). The pixels of the image " + }, + { + "bbox": [ + 104, + 429, + 506, + 477 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 429, + 506, + 477 + ], + "type": "text", + "content": " form the time domain, and the entries of " + }, + { + "bbox": [ + 104, + 429, + 506, + 477 + ], + "type": "inline_equation", + "content": "\\tilde{x}" + }, + { + "bbox": [ + 104, + 429, + 506, + 477 + ], + "type": "text", + "content": " form the frequency domain. For a frequency " + }, + { + "bbox": [ + 104, + 429, + 506, + 477 + ], + "type": "inline_equation", + "content": "(u,v)" + }, + { + "bbox": [ + 104, + 429, + 506, + 477 + ], + "type": "text", + "content": ", the amplitude is the absolute value " + }, + { + "bbox": [ + 104, + 429, + 506, + 477 + ], + "type": "inline_equation", + "content": "|\\tilde{x}[u,v]|" + }, + { + "bbox": [ + 104, + 429, + 506, + 477 + ], + "type": "text", + "content": ". We call a frequency " + }, + { + "bbox": [ + 104, + 429, + 506, + 477 + ], + "type": "inline_equation", + "content": "(u,v)" + }, + { + "bbox": [ + 104, + 429, + 506, + 477 + ], + "type": "text", + "content": " as a frequency feature." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 483, + 462, + 496 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 483, + 462, + 496 + ], + "spans": [ + { + "bbox": [ + 104, + 483, + 462, + 496 + ], + "type": "text", + "content": "3 FINE-GRAINED ANALYSIS ON ENSEMBLE MODEL VULNERABILITY" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 504, + 506, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 504, + 506, + 605 + ], + "spans": [ + { + "bbox": [ + 104, + 504, + 506, + 605 + ], + "type": "text", + "content": "The previous work (Ilyas et al., 2019) categorizes the features learned by a model into robust and non-robust features. It shows that adversarial vulnerability is a natural consequence of the presence of highly predictive but non-robust features. Moreover, different models trained on the same dataset often have similar non-robust features, and therefore an adversarial example usually exhibits the \"transferability\" property among them. Several other works also presented detailed discussions on the impact of non-robust features (Benz et al., 2021; Springer et al., 2021). Following those studies, a natural idea for tackling the transferability issue is to ensure that the sub-models should have diverse non-robust features. In this section, we provide a fine-grained analysis on the vulnerability of ensemble models and then conclude two important hints for achieving this \"diversity\" goal." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 609, + 505, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 609, + 505, + 687 + ], + "spans": [ + { + "bbox": [ + 104, + 609, + 505, + 687 + ], + "type": "text", + "content": "The following definitions are inspired by (Ilyas et al., 2019). Note that different from the term \"feature\" used in their article, we use \"feature extractor\" instead in our paper, since \"feature\" will be particularly used for referring to image feature in time or frequency domain. 
Specifically, we define a \"feature extractor\" as a function that maps the input " + }, + { + "bbox": [ + 104, + 609, + 505, + 687 + ], + "type": "inline_equation", + "content": "x \\in \\mathcal{X}" + }, + { + "bbox": [ + 104, + 609, + 505, + 687 + ], + "type": "text", + "content": " to a vector in " + }, + { + "bbox": [ + 104, + 609, + 505, + 687 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^k" + }, + { + "bbox": [ + 104, + 609, + 505, + 687 + ], + "type": "text", + "content": ". A model " + }, + { + "bbox": [ + 104, + 609, + 505, + 687 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 609, + 505, + 687 + ], + "type": "text", + "content": " is composed of a set of different feature extractors, with each feature extractor focusing on distinct feature. The combination of outputs from these feature extractors forms the model's final output. We then further define the \"useful feature extractors\"." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 692, + 504, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 692, + 504, + 714 + ], + "spans": [ + { + "bbox": [ + 104, + 692, + 504, + 714 + ], + "type": "text", + "content": "Definition 3.1 (Useful feature extractor) For a given data distribution " + }, + { + "bbox": [ + 104, + 692, + 504, + 714 + ], + "type": "inline_equation", + "content": "\\mathcal{D} = \\mathcal{X}\\times \\mathcal{Y}" + }, + { + "bbox": [ + 104, + 692, + 504, + 714 + ], + "type": "text", + "content": ", a feature extractor " + }, + { + "bbox": [ + 104, + 692, + 504, + 714 + ], + "type": "inline_equation", + "content": "\\theta :\\mathcal{X}\\to \\mathbb{R}^k" + }, + { + "bbox": [ + 104, + 692, + 504, + 714 + ], + "type": "text", + "content": " is useful, if we have" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 245, + 713, + 505, + 736 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 245, + 713, + 505, + 736 + ], + "spans": [ + { + "bbox": [ + 245, + 713, + 505, + 736 + ], + "type": "interline_equation", + "content": "\\mathbb {E} _ {(x, y) \\sim \\mathcal {D}} [ h (y) ^ {\\top} \\theta (x) ] > \\frac {1}{k}. 
\\tag {3}", + "image_path": "0637e80601b691d16b65c19cf6b80fc2ffb94ff43420e8f61a6cf766e52d46e6.jpg" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "text", + "content": "Recall that " + }, + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "inline_equation", + "content": "h(y)" + }, + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "text", + "content": " is the one-hot " + }, + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "text", + "content": "-dimensional vector of the label " + }, + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "text", + "content": ". Roughly speaking, the inequality (3) implies that the expected contribution of a useful feature extractor to the model's correct prediction is higher than the average contribution over all the " + }, + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "text", + "content": " classes." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 125, + 505, + 159 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 125, + 505, + 159 + ], + "spans": [ + { + "bbox": [ + 104, + 125, + 505, + 159 + ], + "type": "text", + "content": "Definition 3.2 (robust and non-robust feature extractor) We use " + }, + { + "bbox": [ + 104, + 125, + 505, + 159 + ], + "type": "inline_equation", + "content": "\\mathcal{A}(x)" + }, + { + "bbox": [ + 104, + 125, + 505, + 159 + ], + "type": "text", + "content": " to denote the adversarial example of a data item " + }, + { + "bbox": [ + 104, + 125, + 505, + 159 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 125, + 505, + 159 + ], + "type": "text", + "content": " as described in Definition 2.2. Let " + }, + { + "bbox": [ + 104, + 125, + 505, + 159 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 125, + 505, + 159 + ], + "type": "text", + "content": " be a useful feature extractor. 
(1) We say " + }, + { + "bbox": [ + 104, + 125, + 505, + 159 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 125, + 505, + 159 + ], + "type": "text", + "content": " is robust if the following condition holds for any " + }, + { + "bbox": [ + 104, + 125, + 505, + 159 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 125, + 505, + 159 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 104, + 125, + 505, + 159 + ], + "type": "inline_equation", + "content": "1 \\leq i \\leq k" + }, + { + "bbox": [ + 104, + 125, + 505, + 159 + ], + "type": "text", + "content": "):" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 249, + 159, + 360, + 182 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 249, + 159, + 360, + 182 + ], + "spans": [ + { + "bbox": [ + 249, + 159, + 360, + 182 + ], + "type": "interline_equation", + "content": "\\mathbb {E} _ {(x, y) \\sim \\mathcal {D} _ {i}} \\left[ \\theta (\\mathcal {A} (x)) \\right] _ {i} > \\frac {1}{k}", + "image_path": "9a35794c9b482062ca662630ecb9382d8de23d8c822e3cf0f656314e74e67926.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 185, + 504, + 198 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 185, + 504, + 198 + ], + "spans": [ + { + "bbox": [ + 104, + 185, + 504, + 198 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 185, + 504, + 198 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_i" + }, + { + "bbox": [ + 104, + 185, + 504, + 198 + ], + "type": "text", + "content": " represents the " + }, + { + "bbox": [ + 104, + 185, + 504, + 198 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 185, + 504, + 198 + ], + "type": "text", + "content": "-th class data. We denote the set of these robust feature extractors as " + }, + { + "bbox": [ + 104, + 185, + 504, + 198 + ], + "type": "inline_equation", + "content": "\\Theta_{R}" + }, + { + "bbox": [ + 104, + 185, + 504, + 198 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 202, + 506, + 246 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 202, + 506, + 246 + ], + "spans": [ + { + "bbox": [ + 104, + 202, + 506, + 246 + ], + "type": "text", + "content": "(2) The remaining useful feature extractors are non-robust. We assign these non-robust extractors to " + }, + { + "bbox": [ + 104, + 202, + 506, + 246 + ], + "type": "inline_equation", + "content": "k(k - 1)" + }, + { + "bbox": [ + 104, + 202, + 506, + 246 + ], + "type": "text", + "content": " sets: " + }, + { + "bbox": [ + 104, + 202, + 506, + 246 + ], + "type": "inline_equation", + "content": "\\{\\Theta_{i,j} \\mid 1 \\leq i \\neq j \\leq k\\}" + }, + { + "bbox": [ + 104, + 202, + 506, + 246 + ], + "type": "text", + "content": " as follows. Initially, all these " + }, + { + "bbox": [ + 104, + 202, + 506, + 246 + ], + "type": "inline_equation", + "content": "k(k - 1)" + }, + { + "bbox": [ + 104, + 202, + 506, + 246 + ], + "type": "text", + "content": " sets are empty. Then we go through all the non-robust feature extractors. 
For each non-robust " + }, + { + "bbox": [ + 104, + 202, + 506, + 246 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 202, + 506, + 246 + ], + "type": "text", + "content": ", there must exist at least an index \"i\" such that" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 235, + 247, + 354, + 261 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 235, + 247, + 354, + 261 + ], + "spans": [ + { + "bbox": [ + 235, + 247, + 354, + 261 + ], + "type": "interline_equation", + "content": "\\mathbb {E} _ {(x, y) \\sim \\mathcal {D} _ {i}} [ \\theta (\\mathcal {A} (x)) ] _ {i} \\leq 1 / k;", + "image_path": "865fd382fd4c316fdee8553bcf37dd6e83610a1bc72c7849d027e19ddf3b5484.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 265, + 504, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 265, + 504, + 323 + ], + "spans": [ + { + "bbox": [ + 104, + 265, + 504, + 323 + ], + "type": "text", + "content": "we let " + }, + { + "bbox": [ + 104, + 265, + 504, + 323 + ], + "type": "inline_equation", + "content": "j = \\arg \\max_{s} \\mathbb{E}_{(x,y) \\sim \\mathcal{D}_i}[\\theta(\\mathcal{A}(x))]_s" + }, + { + "bbox": [ + 104, + 265, + 504, + 323 + ], + "type": "text", + "content": " and assign " + }, + { + "bbox": [ + 104, + 265, + 504, + 323 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 265, + 504, + 323 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 265, + 504, + 323 + ], + "type": "inline_equation", + "content": "\\Theta_{i,j}" + }, + { + "bbox": [ + 104, + 265, + 504, + 323 + ], + "type": "text", + "content": " (note that " + }, + { + "bbox": [ + 104, + 265, + 504, + 323 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 104, + 265, + 504, + 323 + ], + "type": "text", + "content": " should be not equal to " + }, + { + "bbox": [ + 104, + 265, + 504, + 323 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 265, + 504, + 323 + ], + "type": "text", + "content": ", or there are multiple indices achieving the maximum expectation and at least one is not equal to " + }, + { + "bbox": [ + 104, + 265, + 504, + 323 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 265, + 504, + 323 + ], + "type": "text", + "content": ", since otherwise " + }, + { + "bbox": [ + 104, + 265, + 504, + 323 + ], + "type": "inline_equation", + "content": "\\sum_{s=1}^{k} \\mathbb{E}_{(x,y) \\sim \\mathcal{D}_i}[\\theta(\\mathcal{A}(x))]_s" + }, + { + "bbox": [ + 104, + 265, + 504, + 323 + ], + "type": "text", + "content": " will less than 1). 
Eventually, these " + }, + { + "bbox": [ + 104, + 265, + 504, + 323 + ], + "type": "inline_equation", + "content": "k(k-1)" + }, + { + "bbox": [ + 104, + 265, + 504, + 323 + ], + "type": "text", + "content": " sets are constructed, where each " + }, + { + "bbox": [ + 104, + 265, + 504, + 323 + ], + "type": "inline_equation", + "content": "\\Theta_{i,j}" + }, + { + "bbox": [ + 104, + 265, + 504, + 323 + ], + "type": "text", + "content": " contains the feature extractors that are not robust to the attack from " + }, + { + "bbox": [ + 104, + 265, + 504, + 323 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 265, + 504, + 323 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 265, + 504, + 323 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 104, + 265, + 504, + 323 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 332, + 505, + 368 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 332, + 505, + 368 + ], + "spans": [ + { + "bbox": [ + 104, + 332, + 505, + 368 + ], + "type": "text", + "content": "Remark 3.3 Intuitively, if a feature extractor is robust, it should have the capability to preserve its contribution to the correct prediction even under perturbation. It is also worth noting that a non-robust feature extractor " + }, + { + "bbox": [ + 104, + 332, + 505, + 368 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 332, + 505, + 368 + ], + "type": "text", + "content": " could be assigned to multiple " + }, + { + "bbox": [ + 104, + 332, + 505, + 368 + ], + "type": "inline_equation", + "content": "\\Theta_{i,j}s" + }, + { + "bbox": [ + 104, + 332, + 505, + 368 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 376, + 505, + 410 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 376, + 505, + 410 + ], + "spans": [ + { + "bbox": [ + 104, + 376, + 505, + 410 + ], + "type": "text", + "content": "Assume we have a standardly trained model " + }, + { + "bbox": [ + 104, + 376, + 505, + 410 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 376, + 505, + 410 + ], + "type": "text", + "content": " consisting of a set of useful feature extractors, and we denote it as " + }, + { + "bbox": [ + 104, + 376, + 505, + 410 + ], + "type": "inline_equation", + "content": "\\Theta_f" + }, + { + "bbox": [ + 104, + 376, + 505, + 410 + ], + "type": "text", + "content": ". Each of them can be classified as robust or non-robust as Definition 3.2. 
Similar with the formulation proposed in (Ilyas et al., 2019), we can represent the model as" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 185, + 415, + 505, + 449 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 185, + 415, + 505, + 449 + ], + "spans": [ + { + "bbox": [ + 185, + 415, + 505, + 449 + ], + "type": "interline_equation", + "content": "f (x) = \\sum_ {\\theta \\in \\Theta_ {R} \\cap \\Theta_ {f}} w _ {\\theta} \\theta (x) + \\sum_ {i, j = 1, i \\neq j} ^ {k} \\sum_ {\\theta \\in \\Theta_ {i, j} \\cap \\Theta_ {f}} w _ {\\theta} \\theta (x), \\tag {4}", + "image_path": "55e2592e18e0e48dbf3ae20d4f6d3a25df257a7d62217c79f8bd5e28de9a9e36.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 453, + 506, + 554 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 453, + 506, + 554 + ], + "spans": [ + { + "bbox": [ + 104, + 453, + 506, + 554 + ], + "type": "text", + "content": "where each " + }, + { + "bbox": [ + 104, + 453, + 506, + 554 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 453, + 506, + 554 + ], + "type": "text", + "content": " has a coefficient " + }, + { + "bbox": [ + 104, + 453, + 506, + 554 + ], + "type": "inline_equation", + "content": "w_{\\theta} \\in \\mathbb{R}" + }, + { + "bbox": [ + 104, + 453, + 506, + 554 + ], + "type": "text", + "content": ". We then conduct our analysis based on Equation (4). Some recent works reveal that adversarial training method can obtain robust model through reducing the dependence on non-robust feature extractors (Allen-Zhu & Li, 2022; Tsipras et al., 2018). However, this strategy may cause certain downgrade performance on clean accuracy (because the non-robust feature extractors also contribute to obtaining correct prediction). Fortunately, we are able to avoid this dilemma in the context of ensemble training. Namely, we just need to keep the non-robust features as diverse as possible, instead of entirely eliminating the dependence on those non-robust feature extractors. To pave the way for realizing this goal, we introduce the definition of vulnerability of ensemble model below." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 562, + 505, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 562, + 505, + 597 + ], + "spans": [ + { + "bbox": [ + 104, + 562, + 505, + 597 + ], + "type": "text", + "content": "Definition 3.4 (Vulnerability of ensemble model) Suppose " + }, + { + "bbox": [ + 104, + 562, + 505, + 597 + ], + "type": "inline_equation", + "content": "f_{\\mathrm{E}}" + }, + { + "bbox": [ + 104, + 562, + 505, + 597 + ], + "type": "text", + "content": " is an ensemble model as described in Definition 2.1, and its associated hard-classification model is denoted by " + }, + { + "bbox": [ + 104, + 562, + 505, + 597 + ], + "type": "inline_equation", + "content": "F_{\\mathrm{E}}" + }, + { + "bbox": [ + 104, + 562, + 505, + 597 + ], + "type": "text", + "content": ": " + }, + { + "bbox": [ + 104, + 562, + 505, + 597 + ], + "type": "inline_equation", + "content": "\\forall x" + }, + { + "bbox": [ + 104, + 562, + 505, + 597 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 562, + 505, + 597 + ], + "type": "inline_equation", + "content": "F_{\\mathrm{E}}(x) = \\arg \\max_{i}[f_{\\mathrm{E}}(x)]_{i}" + }, + { + "bbox": [ + 104, + 562, + 505, + 597 + ], + "type": "text", + "content": ". 
Given the data distribution " + }, + { + "bbox": [ + 104, + 562, + 505, + 597 + ], + "type": "inline_equation", + "content": "\\mathcal{D} = \\mathcal{X} \\times \\mathcal{Y}" + }, + { + "bbox": [ + 104, + 562, + 505, + 597 + ], + "type": "text", + "content": ", the vulnerability of " + }, + { + "bbox": [ + 104, + 562, + 505, + 597 + ], + "type": "inline_equation", + "content": "F_{\\mathrm{E}}" + }, + { + "bbox": [ + 104, + 562, + 505, + 597 + ], + "type": "text", + "content": " is defined as:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 190, + 601, + 505, + 622 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 190, + 601, + 505, + 622 + ], + "spans": [ + { + "bbox": [ + 190, + 601, + 505, + 622 + ], + "type": "interline_equation", + "content": "\\operatorname {V r} \\left(F _ {\\mathrm {E}}\\right) = \\mathbb {E} _ {(x, y) \\sim \\mathcal {D}} \\left[ \\mathbb {I} \\left\\{F _ {\\mathrm {E}} (x) = y \\wedge F _ {\\mathrm {E}} (\\mathcal {A} (x)) \\neq y \\right\\} \\right], \\tag {5}", + "image_path": "766f544831d48509546beb7ab404ed49896faeee29935f01dcd968a8e66a4d50.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 625, + 504, + 657 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 625, + 504, + 657 + ], + "spans": [ + { + "bbox": [ + 105, + 625, + 504, + 657 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 105, + 625, + 504, + 657 + ], + "type": "inline_equation", + "content": "\\mathbb{I}(\\cdot)" + }, + { + "bbox": [ + 105, + 625, + 504, + 657 + ], + "type": "text", + "content": " represents the indicator function. Furthermore, for any target class " + }, + { + "bbox": [ + 105, + 625, + 504, + 657 + ], + "type": "inline_equation", + "content": "y_{t}" + }, + { + "bbox": [ + 105, + 625, + 504, + 657 + ], + "type": "text", + "content": ", we can define the vulnerability towards " + }, + { + "bbox": [ + 105, + 625, + 504, + 657 + ], + "type": "inline_equation", + "content": "y_{t}" + }, + { + "bbox": [ + 105, + 625, + 504, + 657 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 105, + 625, + 504, + 657 + ], + "type": "inline_equation", + "content": "\\mathrm{Vr}(F_{\\mathrm{E}}, y_{t}) = \\mathbb{E}_{(x,y) \\sim \\mathcal{D}}\\left[\\mathbb{I}\\{F_{\\mathrm{E}}(x) = y \\wedge F_{\\mathrm{E}}(\\mathcal{A}(x)) = y_{t}\\}\\right]" + }, + { + "bbox": [ + 105, + 625, + 504, + 657 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 665, + 505, + 699 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 665, + 505, + 699 + ], + "spans": [ + { + "bbox": [ + 104, + 665, + 505, + 699 + ], + "type": "text", + "content": "The vulnerability of Definition 3.4 describes the success probability of an attack " + }, + { + "bbox": [ + 104, + 665, + 505, + 699 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 104, + 665, + 505, + 699 + ], + "type": "text", + "content": " to the ensemble model " + }, + { + "bbox": [ + 104, + 665, + 505, + 699 + ], + "type": "inline_equation", + "content": "F_{\\mathrm{E}}" + }, + { + "bbox": [ + 104, + 665, + 505, + 699 + ], + "type": "text", + "content": ". 
We have the following key inequality, which indicates that $\mathrm{Vr}(F_{\mathrm{E}})$ is bounded by considering all the attack directions, i.e.,

$$ \mathrm{Vr}(F_{\mathrm{E}}) \leq \sum_{y_t \in \mathcal{Y}} \mathrm{Vr}(F_{\mathrm{E}}, y_t). \tag{6} $$

The proof of Inequality (6) is placed in Appendix A.1. Moreover, if $F_{\mathrm{E}}(\mathcal{A}(x)) = y_t$, there are at least $M/k$ sub-models returning the wrong label $y_t$ due to the pigeonhole principle. Namely, "$\sum_{m=1}^{M} \mathbb{I}\left([f_m(\mathcal{A}(x))]_y < [f_m(\mathcal{A}(x))]_{y_t}\right) > \frac{M}{k}$" should be a necessary condition for successfully attacking from $y$ to $y_t$. So it implies

$$ \mathrm{Vr}(F_{\mathrm{E}}, y_t) \leq \mathbb{E}_{(x,y)\sim\mathcal{D}}\left[\mathbb{I}\left(\sum_{m=1}^{M} \mathbb{I}\left([f_m(\mathcal{A}(x))]_y < [f_m(\mathcal{A}(x))]_{y_t}\right) > \frac{M}{k}\right)\right]. \tag{7} $$

From the upper bound (6), we can decrease the total vulnerability by reducing $\mathrm{Vr}(F_{\mathrm{E}}, y_t)$ for each $y_t$. Also, from (7) we know that $\mathrm{Vr}(F_{\mathrm{E}}, y_t)$ can be reduced by decreasing the chance of "$[f_m(\mathcal{A}(x))]_y < [f_m(\mathcal{A}(x))]_{y_t}$" over $m \in \{1, 2, \dots, M\}$.
According to Equation (4), the inequality "$[f_m(\mathcal{A}(x))]_y < [f_m(\mathcal{A}(x))]_{y_t}$" can be rewritten as

$$ \left[\sum_{\theta \in \Theta_R^m} w_\theta \theta(\mathcal{A}(x)) + \sum_{i,j=1, i \neq j}^{k} \sum_{\theta \in \Theta_{i,j}^m} w_\theta \theta(\mathcal{A}(x))\right]_y < \left[\sum_{\theta \in \Theta_R^m} w_\theta \theta(\mathcal{A}(x)) + \sum_{i,j=1, i \neq j}^{k} \sum_{\theta \in \Theta_{i,j}^m} w_\theta \theta(\mathcal{A}(x))\right]_{y_t}, \tag{8} $$

where $\Theta_R^m$ and $\Theta_{i,j}^m$ respectively denote the sets of robust and non-robust feature extractors of the sub-model $f_m$. Moreover, the set $\Theta_{y,y_t}^m$ should have a relatively larger influence on the right-hand side of (8) than the other feature extractor sets $\Theta_{y,j}^m$ with $j \neq y_t$, due to the outer operator "$[\cdot]_{y_t}$". Therefore, we conclude our first hint as an intuition for reducing $\mathrm{Vr}(F_{\mathrm{E}})$.

Hint (i): To decrease the vulnerability in the attack direction $y_t$ (i.e., each term $\mathrm{Vr}(F_{\mathrm{E}}, y_t)$ in the upper bound of (6)), it is reasonable to decrease the influence of the non-robust feature extractors in $\Theta_{y,y_t}^m$.

In Hint (i), a major difference from the previous analyses (Ilyas et al., 2019; Allen-Zhu & Li, 2022) is that we relate each attack direction $y_t$ to some specific non-robust feature extractors; the benefit is that these correspondences can effectively help us build a diverse ensemble model. Moreover, according to the principle of ensemble methods, as long as at least $M/2 + 1$ sub-models are not successfully attacked, the ensemble model will successfully defend against the attack. So we conclude the second hint, which is also important for designing our approach.

Hint (ii): For each attack direction $y_t$, we only need to consider manipulating the training data of $M/2 + 1$ sub-models instead of all the $M$ sub-models.
Overall, the above Hints (i) & (ii) play the key role in inspiring our data transformation method in Section 4.

4 OUR ENSEMBLE TRAINING METHOD

We first introduce our model and high-level idea in Section 4.1, and then elaborate on the technical details of the data transformations in Section 4.2.

4.1 OVERVIEW OF OUR FRAMEWORK

Note that the feature extractors of a model depend on the given training data; namely, any modification of the features of the training data can implicitly influence the model. Thus, in this section we follow Hints (i) & (ii) of Section 3 to design an effective data transformation method. The transformation is expected to modify the features of the training data so as to enhance the robustness of the trained ensemble model. We train a set of distinct sub-models on the transformed training data; these sub-models can be integrated into an ensemble model that is robust against adversarial attacks, while preserving the clean accuracy of each sub-model as much as possible. We use "$\pi_m$" to denote the transformation for the $m$-th sub-model, $1 \leq m \leq M$, and formulate the following problem by slightly modifying Definition 2.1 (replace $x$ by the adversarial example $\mathcal{A}(x)$ for each sub-model):

$$ \min \ \mathbb{E}_{(x,y)\sim\mathcal{X}\times\mathcal{Y}} \ \ell\left(\frac{1}{M}\sum_{m \in [M]} \widehat{F}_m(\mathcal{A}(x); \beta_m), y\right) \tag{9} $$

where $\beta_m$ is obtained by training on the transformed data, i.e., $\beta_m = \operatorname{argmin}_\beta \mathbb{E}_{(x,y)\sim\mathcal{X}\times\mathcal{Y}} \ \ell(f_m(\pi_m(x); \beta), y)$ for each $m \in \{1, 2, \dots, M\}$.
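Problem (9) can be read as a nested procedure: first fit each sub-model on its own transformed dataset (the inner $\operatorname{argmin}$), then evaluate the averaged ensemble on attacked inputs. A minimal PyTorch sketch follows; it is ours rather than the released implementation, and it assumes, as one common instantiation of Definition 2.1 (whose exact form lies outside this excerpt), that the ensemble averages the sub-models' softmax outputs. Evaluating per (9) then amounts to calling `ensemble_predict` on attacked batches $\mathcal{A}(x)$.

```python
import torch
import torch.nn.functional as F

def train_submodels(models, loaders, epochs=10, lr=1e-3, device="cpu"):
    """Inner problem of (9): fit each sub-model f_m on its transformed data,
    i.e. beta_m = argmin_beta E[ loss(f_m(pi_m(x); beta), y) ]."""
    for f_m, loader in zip(models, loaders):
        f_m.to(device).train()
        opt = torch.optim.Adam(f_m.parameters(), lr=lr)
        for _ in range(epochs):
            for x, y in loader:        # loader yields already-transformed pi_m(x)
                x, y = x.to(device), y.to(device)
                opt.zero_grad()
                F.cross_entropy(f_m(x), y).backward()
                opt.step()

def ensemble_predict(models, x):
    """Outer objective of (9): the averaged soft output (1/M) * sum_m F_hat_m(x)."""
    with torch.no_grad():
        return torch.stack([F.softmax(f_m(x), dim=1) for f_m in models]).mean(0)
```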
The major challenge in solving the above problem (9) is how to design a set of appropriate transformations $\{\pi_m \mid 1 \leq m \leq M\}$ so that the obtained parameters $\beta_{[1:M]}$ yield sufficiently diverse sub-models. To address this issue, we leverage transformations in the frequency domain to guide the non-robust features of each sub-model to be as diverse as possible. Specifically, we introduce a method called "Frequency Domain Transformation (FDT)" for constructing the set of diverse training datasets $\{\pi_1(\mathcal{X}), \pi_2(\mathcal{X}), \dots, \pi_M(\mathcal{X})\}$. FDT relies on a key "weakness allocation" strategy. Roughly speaking, the strategy aims to promote the diversity of the constructed datasets while ensuring that the overall clean accuracy of the ensemble is not sacrificed. The details are presented in the next section.

4.2 FREQUENCY DOMAIN TRANSFORMATION

Before performing the transformation, we need to select a set of non-robust features. In the time domain, a simple observation is that an image feature is usually invariant under spatial translation, e.g., it can appear at different positions in images. This property makes it challenging to directly identify and represent non-robust features in the time domain, so we turn our attention to the frequency domain. Moreover, some previous studies on robust learning already revealed that robust and non-robust features are often deeply related to the frequency domain (Wang et al., 2020; Bernhard et al., 2021; Maiya et al., 2021).

Amplitude-based selection. To identify the non-robust frequencies, a straightforward idea is to test the robustness of each individual frequency and select the non-robust ones. Nevertheless, this may incur a large computational cost since the number of frequencies is high (e.g., if the input image is $64 \times 64$, the number of frequencies is also $64 \times 64 \approx 4 \times 10^3$). We propose an easy-to-implement selection idea based on the amplitudes, since the amplitudes can be directly obtained via the Fourier transform with low complexity. According to previous research (Ilyas et al., 2019; Benz et al., 2021; Springer et al., 2021), a feature can be regarded as "robust" if it cannot be easily manipulated by small perturbations. We observe that high-amplitude frequency features usually dominate the ground truth of an image. Figure 4 in our Appendix illustrates an example showing that, if we keep the high-amplitude frequencies and remove the low-amplitude ones, the image changes only slightly even when certain noise is added (i.e., we can still recognize the ground truth from the modified image). This observation suggests that high-amplitude frequency features are more strongly related to the semantic information of an image. So in our following approach, we keep the high-amplitude frequency features as "robust features", and select the frequencies with low amplitudes (by setting a threshold "$\tau$") to transform. Moreover, we can conveniently observe how the performance changes by varying the threshold $\tau$ in our experiments. Figure 1 illustrates the amplitude-based selection.
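A minimal sketch of this selection, assuming a single-channel image and an absolute amplitude threshold $\tau$ (the paper leaves such implementation details to the appendices), together with our guess at the random-noise variant mentioned below (FDT-random, Appendix C):

```python
import numpy as np

def low_amplitude_mask(x, tau):
    """Amplitude-based selection: FFT the (H, W) image and mark the
    low-amplitude ('non-robust') frequencies below the threshold tau."""
    spec = np.fft.fft2(x)            # complex spectrum of the image
    mask = np.abs(spec) < tau        # True where the frequency is transformable
    return spec, mask

def fdt_random(x, tau, noise_scale=1.0, seed=0):
    """One natural reading of the random-noise transformation (Appendix C):
    overwrite the selected frequencies with complex Gaussian noise and
    return to the pixel domain."""
    rng = np.random.default_rng(seed)
    spec, mask = low_amplitude_mask(x, tau)
    noise = noise_scale * (rng.standard_normal(x.shape)
                           + 1j * rng.standard_normal(x.shape))
    spec = np.where(mask, noise, spec)   # high-amplitude frequencies intact
    # Arbitrary noise breaks conjugate symmetry, so keep only the real part.
    return np.fft.ifft2(spec).real
```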
Figure 1: We use a $5 \times 5$ image as a toy example, where the intensity of the color indicates the magnitude of the amplitude. In our amplitude-based selection, we retain the high-amplitude frequencies (i.e., the darker regions) and perform data transformations on the low-amplitude frequencies (i.e., the white regions).

Following our frequency selection, we propose two transformation methods for promoting diversity via the identified non-robust features. Our first approach follows a straightforward idea, which is simply to replace the non-robust features by random noise (due to the space limit, we leave the details to Appendix C). This method is very easy to implement in practice. Though it can achieve a certain degree of improvement upon previous ensemble training methods, its performance is still not very promising (as shown in our experiments). To further improve the effectiveness, we propose a more sophisticated approach called "targeted-attack transformation", which constructs a set of different "substitute" features by attacking the images towards different target classes, and then uses them to replace the selected non-robust frequencies.

Targeted-attack transformation: We briefly explain our intuition first. It was shown that adversarial attacks have the capability to manipulate non-robust features (Ilyas et al., 2019; Yang et al., 2020). In particular, a targeted attack, as introduced in Definition 2.2, aims at modifying the non-robust features that are associated with a specific target label. For instance, let us consider a data point $(x, y)$ in the original dataset $\mathcal{X} \times \mathcal{Y}$; we set the target label as $y_t$ and obtain the corresponding adversarial example $x'$ ($x'$ contains the modified non-robust features that are associated with $y_t$). When training a model using $(x', y)$, this can intuitively be viewed as an "immunization" against the attack from $y$ to $y_t$; consequently, the chance of obtaining the wrong label $y_t$ for data with label $y$ decreases.
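Definition 2.2 lies outside this excerpt, so as one common instantiation of a targeted attack we sketch targeted PGD below; the hyperparameters are illustrative, not the paper's:

```python
import torch
import torch.nn.functional as F

def targeted_pgd(model, x, y_t, eps=8/255, alpha=2/255, steps=10):
    """Minimal targeted PGD sketch: push each input in x (values in [0, 1])
    towards the target labels y_t under an l_inf budget eps."""
    x_adv = x.clone().detach()
    for _ in range(steps):
        x_adv.requires_grad_(True)
        loss = F.cross_entropy(model(x_adv), y_t)   # loss w.r.t. the target
        grad = torch.autograd.grad(loss, x_adv)[0]
        # Descend the target-class loss, i.e. move towards predicting y_t.
        x_adv = x_adv.detach() - alpha * grad.sign()
        x_adv = x + (x_adv - x).clamp(-eps, eps)    # project onto the eps-ball
        x_adv = x_adv.clamp(0.0, 1.0)               # stay a valid image
    return x_adv.detach()
```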
In other words, it becomes more difficult to attack images with label $y$ towards $y_t$ than towards the other classes. We call the modified non-robust feature a "substitute" feature derived by the targeted attack.

Motivated by this observation, we can construct different transformations using $k \times (k-1)$ targeted attacks (since each label can be attacked towards the other $k-1$ labels); these attacks can yield different substitute features, and we then use these features to replace the corresponding non-robust features in the original dataset (based on Hint (i) in Section 3); finally, the $M$ transformed datasets are obtained via an allocation algorithm, where each substitute feature is captured by at least $M/2 + 1$ datasets (based on Hint (ii) in Section 3). Overall, due to the completeness of the $k \times (k-1)$ targeted attacks, the $M$ sub-models trained on those datasets can guarantee the robustness of the final ensemble solution. We first introduce some definitions for our transformation.

Definition 4.1 (Strengthening a dataset) Let $y_1 \neq y_2 \in \mathcal{Y}$. If a given training dataset $P$ contains at least one adversarial example that has the original label $y_1$ but is misclassified as $y_2$, we say that $P$ has been strengthened in the attack direction from $y_1$ to $y_2$ (the "$\overrightarrow{y_1 y_2}$-direction" for short).

In other words, if $P$ is not strengthened in the $\overrightarrow{y_1 y_2}$-direction, the model trained on $P$ is more likely to be fragile to targeted attacks from $y_1$ to $y_2$. Also, the dataset $P$ may not have been strengthened in multiple different directions, so we define its "weakness set" $\mathcal{W} = \{\overrightarrow{y_1 y_2} \mid 1 \leq y_1, y_2 \leq k,\ y_1 \neq y_2,\ \text{and } P \text{ has not been strengthened in the } \overrightarrow{y_1 y_2}\text{-direction}\}$.

Definition 4.2 (Diversity of weakness sets) Given $M$ datasets $\{P_1, P_2, \dots, P_M\}$ with their corresponding weakness sets $\{\mathcal{W}_1, \mathcal{W}_2, \dots, \mathcal{W}_M\}$, we define their diversity:

$$ \mathbf{Div}(P_1, P_2, \dots, P_M) = 1 - \frac{|\mathcal{W}_1 \cap \mathcal{W}_2 \cap \cdots \cap \mathcal{W}_M|}{\max\{|\mathcal{W}_1|, |\mathcal{W}_2|, \dots, |\mathcal{W}_M|\}}. $$

It is easy to see that the higher the value of $\mathbf{Div}(P_1, P_2, \dots, P_M)$, the more diverse the corresponding weakness sets. A higher diversity suggests that the vulnerabilities of the $M$ sub-models trained on those datasets are more likely to be different. To achieve good performance in terms of both accuracy and robustness, we need to take the diversity function "Div" into account when designing the transformations.
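Definition 4.2 translates directly into code; the following small sketch (ours) represents each direction as an ordered label pair:

```python
def div(weakness_sets):
    """Div from Definition 4.2; each weakness set is a set of direction
    pairs (y1, y2) with y1 != y2. Assumes at least one set is non-empty."""
    common = set.intersection(*weakness_sets)
    return 1.0 - len(common) / max(len(w) for w in weakness_sets)

# Toy example with k = 3: every pair of weakness sets overlaps, yet no
# direction is a weakness of all three datasets, so the diversity is maximal.
W1, W2, W3 = {(0, 1), (1, 2)}, {(1, 2), (2, 0)}, {(2, 0), (0, 1)}
print(div([W1, W2, W3]))   # -> 1.0
```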
The basic principle is as follows. On the one hand, our transformed datasets should have a sufficiently large number of diverse substitute features, so that a single adversarial attack cannot easily capture more than half of the $M$ sub-models. On the other hand, the datasets should also retain the major information of the original input as much as possible, since otherwise the clean accuracy may decline due to the added substitute features.

To provide an appropriate trade-off, we propose the following constrained optimization objective: let $\mathbb{C}$ be the set of all $\binom{M}{\lceil M/2 \rceil}$ combinations of $\lceil M/2 \rceil$-size subsets of $\{1, 2, \dots, M\}$, and then

$$ \max_{P_1, P_2, \dots, P_M} \ \min\{|\mathcal{W}_1|, |\mathcal{W}_2|, \dots, |\mathcal{W}_M|\} \tag{10} $$
$$ \text{s.t.} \ \forall \{i_1, i_2, \dots, i_{\lceil M/2 \rceil}\} \in \mathbb{C}, \quad \mathbf{Div}\left(P_{i_1}, P_{i_2}, \dots, P_{i_{\lceil M/2 \rceil}}\right) = 1. \tag{11} $$

We maximize the objective function of (10) because we want to minimize the degree of modification of each transformed dataset. Intuitively, a large weakness set indicates that the corresponding dataset is not changed significantly by the transformation, and thus the clean accuracy is likely to be well preserved. The constraint (11) guarantees that any $\lceil M/2 \rceil$ datasets have the intersection $\mathcal{W}_{i_1} \cap \mathcal{W}_{i_2} \cap \cdots \cap \mathcal{W}_{i_{\lceil M/2 \rceil}} = \emptyset$, that is, they do not share any common direction. Consequently, the ensemble solution should be robust to any attack direction. To achieve this twofold goal, we design an efficient allocation strategy together with an attack-guided transformation of the training data. Specifically, the procedure consists of the following two stages.

Stage (1): allocating the weakness sets to the sub-models. For each $\overrightarrow{y_1 y_2}$-direction, $1 \leq y_1, y_2 \leq k$, there are at most $\lceil \frac{M}{2} \rceil - 1$ sets that contain this direction (due to the constraint (11)), so the sum $\sum_{1 \leq i \leq M} |\mathcal{W}_i|$ is no larger than $k(k-1) \cdot (\lceil \frac{M}{2} \rceil - 1)$. Therefore, the maximum value of Eq. (10) is no larger than

$$ k(k-1) \cdot \left(\left\lceil \frac{M}{2} \right\rceil - 1\right) / M \tag{12} $$

by the pigeonhole principle. We assign the total $k(k-1) \cdot (\lceil \frac{M}{2} \rceil - 1)$ direction copies (each direction is duplicated into $\lceil \frac{M}{2} \rceil - 1$ copies) to the $M$ sets in a round-robin way, where the number of directions assigned to each set is no larger than the upper bound (12). Please refer to Figure 2 for an example.

Figure 2: Assigning the attack directions to five sub-models for a three-class classification task.
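To make Stage (1) concrete, here is a minimal Python sketch (ours; `allocate_weakness_sets` is a hypothetical helper name) of the round-robin allocation, checked against the Figure 2 setting of $k=3$, $M=5$:

```python
import math
from itertools import combinations, permutations

def allocate_weakness_sets(k, M):
    """Stage (1) sketch (assumes M >= 3): deal each of the k*(k-1) attack
    directions, duplicated ceil(M/2)-1 times, to the M weakness sets in a
    round-robin way. Since ceil(M/2)-1 < M, the copies of one direction land
    in distinct sets, so no direction appears in ceil(M/2) or more sets."""
    copies = math.ceil(M / 2) - 1
    weakness_sets = [set() for _ in range(M)]
    slot = 0
    for direction in permutations(range(k), 2):  # all (y1, y2) with y1 != y2
        for _ in range(copies):
            weakness_sets[slot % M].add(direction)
            slot += 1
    return weakness_sets

# The Figure 2 setting: k = 3 classes, M = 5 sub-models.
W = allocate_weakness_sets(3, 5)
# Constraint (11): any ceil(M/2) = 3 weakness sets share no common direction.
assert all(not set.intersection(W[a], W[b], W[c])
           for a, b, c in combinations(range(5), 3))
```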
Stage (2): constructing the new datasets. Following the allocation, we transform the original dataset, denoted by $P_{\mathrm{ori}}$, to align with the weakness sets assigned to the $M$ sub-models. Using the same notation as Definition 4.2, we denote the to-be-constructed dataset for the $m$-th sub-model by $P_m$ (initialized to $\emptyset$), $1 \leq m \leq M$. First, we divide $P_{\mathrm{ori}}$ into $k$ subsets $C_1, C_2, \dots, C_k$, where each $C_j$ corresponds to the label $j$, for $1 \leq j \leq k$; further, each $C_j$ is equally partitioned into $k-1$ disjoint parts $\{C_{j,1}, C_{j,2}, \dots, C_{j,k-1}\}$ at random. For each data point $(x, j)$ in $C_{j,i}$, we attack it from $j$ to $h$ (letting $h = i + j \bmod k$) to obtain the adversarial perturbation; then we substitute only the low-amplitude frequencies of $x$ with the perturbation, while the other frequencies (whose amplitudes are higher than the aforementioned threshold $\tau$) remain unchanged; a minimal sketch of this substitution step is given below.
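The sketch takes the low-amplitude band from the attacked image, which carries the perturbation; this is only our reading of the step, and the exact recipe is deferred to Appendix D:

```python
import numpy as np

def substitute_low_frequencies(x, x_adv, tau):
    """Keep the robust (high-amplitude) frequencies of the original image x
    and take the low-amplitude band from the targeted adversarial example
    x_adv produced for the chosen attack direction."""
    spec_x, spec_adv = np.fft.fft2(x), np.fft.fft2(x_adv)
    mask = np.abs(spec_x) < tau           # low-amplitude band of the original
    merged = np.where(mask, spec_adv, spec_x)
    return np.fft.ifft2(merged).real
```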
We denote the new dataset as " + }, + { + "bbox": [ + 104, + 327, + 506, + 501 + ], + "type": "inline_equation", + "content": "C_{j,i}'" + }, + { + "bbox": [ + 104, + 327, + 506, + 501 + ], + "type": "text", + "content": ". Finally, we add " + }, + { + "bbox": [ + 104, + 327, + 506, + 501 + ], + "type": "inline_equation", + "content": "C_{j,i}'" + }, + { + "bbox": [ + 104, + 327, + 506, + 501 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 327, + 506, + 501 + ], + "type": "inline_equation", + "content": "P_{m}" + }, + { + "bbox": [ + 104, + 327, + 506, + 501 + ], + "type": "text", + "content": " if the " + }, + { + "bbox": [ + 104, + 327, + 506, + 501 + ], + "type": "inline_equation", + "content": "i \\overrightarrow{h}" + }, + { + "bbox": [ + 104, + 327, + 506, + 501 + ], + "type": "text", + "content": "-direction is not in the weakness set " + }, + { + "bbox": [ + 104, + 327, + 506, + 501 + ], + "type": "inline_equation", + "content": "\\mathcal{W}_m" + }, + { + "bbox": [ + 104, + 327, + 506, + 501 + ], + "type": "text", + "content": ". From the construction method of the weakness sets, we know that the " + }, + { + "bbox": [ + 104, + 327, + 506, + 501 + ], + "type": "inline_equation", + "content": "i \\overrightarrow{h}" + }, + { + "bbox": [ + 104, + 327, + 506, + 501 + ], + "type": "text", + "content": "-direction can appear in at most " + }, + { + "bbox": [ + 104, + 327, + 506, + 501 + ], + "type": "inline_equation", + "content": "\\lceil \\frac{M}{2} \\rceil - 1" + }, + { + "bbox": [ + 104, + 327, + 506, + 501 + ], + "type": "text", + "content": " weakness sets. So, the set " + }, + { + "bbox": [ + 104, + 327, + 506, + 501 + ], + "type": "inline_equation", + "content": "C_{i,j}'" + }, + { + "bbox": [ + 104, + 327, + 506, + 501 + ], + "type": "text", + "content": " can be added to at least " + }, + { + "bbox": [ + 104, + 327, + 506, + 501 + ], + "type": "inline_equation", + "content": "\\lceil \\frac{M}{2} \\rceil" + }, + { + "bbox": [ + 104, + 327, + 506, + 501 + ], + "type": "text", + "content": " different " + }, + { + "bbox": [ + 104, + 327, + 506, + 501 + ], + "type": "inline_equation", + "content": "P_m s" + }, + { + "bbox": [ + 104, + 327, + 506, + 501 + ], + "type": "text", + "content": ". Consequently, the completeness for defending the " + }, + { + "bbox": [ + 104, + 327, + 506, + 501 + ], + "type": "inline_equation", + "content": "k(k - 1)" + }, + { + "bbox": [ + 104, + 327, + 506, + 501 + ], + "type": "text", + "content": " attack directions can be guaranteed, i.e., the constraint (11) is satisfied. Figure 3 shows the schematic diagram of the construction process, and the full details are shown in Appendix D." + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 144, + 509, + 466, + 658 + ], + "blocks": [ + { + "bbox": [ + 144, + 509, + 466, + 658 + ], + "lines": [ + { + "bbox": [ + 144, + 509, + 466, + 658 + ], + "spans": [ + { + "bbox": [ + 144, + 509, + 466, + 658 + ], + "type": "image", + "image_path": "59d02788a1cae134e8012c9728787df7de52ced148d3576dd7f45ebf2927d6cd.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 667, + 504, + 696 + ], + "lines": [ + { + "bbox": [ + 104, + 667, + 504, + 696 + ], + "spans": [ + { + "bbox": [ + 104, + 667, + 504, + 696 + ], + "type": "text", + "content": "Figure 3: A schematic diagram of the construction process. 
In the allocation stage, each " },
+ { "bbox": [104, 667, 504, 696], "type": "inline_equation", "content": "C_{j,i}^{\\prime}" },
+ { "bbox": [104, 667, 504, 696], "type": "text", "content": " is added to " },
+ { "bbox": [104, 667, 504, 696], "type": "inline_equation", "content": "P_{m}" },
+ { "bbox": [104, 667, 504, 696], "type": "text", "content": " if the " },
+ { "bbox": [104, 667, 504, 696], "type": "inline_equation", "content": "\\overrightarrow{jh}" },
+ { "bbox": [104, 667, 504, 696], "type": "text", "content": "-direction is not in the weakness set " },
+ { "bbox": [104, 667, 504, 696], "type": "inline_equation", "content": "\\mathcal{W}_{m}" },
+ { "bbox": [104, 667, 504, 696], "type": "text", "content": ", where " },
+ { "bbox": [104, 667, 504, 696], "type": "inline_equation", "content": "h = (i + j) \\bmod k" },
+ { "bbox": [104, 667, 504, 696], "type": "text", "content": "." }
+ ] } ], "index": 7, "angle": 0, "type": "image_caption" } ], "index": 6 },
+ { "bbox": [104, 709, 505, 733], "type": "text", "angle": 0, "lines": [ { "bbox": [104, 709, 505, 733], "spans": [
+ { "bbox": [104, 709, 505, 733], "type": "text", "content": "Remark 4.3 We are aware of some previous robust learning approaches that also depend on data modification (Allen-Zhu & Li, 2022; Tsipras et al., 2018). But their approaches usually tend to" }
+ ] } ], "index": 8 }
+ ], "discarded_blocks": [
+ { "bbox": [105, 26, 293, 38], "type": "header", "angle": 0, "lines": [ { "bbox": [105, 26, 293, 38], "spans": [ { "bbox": [105, 26, 293, 38], "type": "text", "content": "Published as a conference paper at ICLR 2025" } ] } ], "index": 0 },
+ { "bbox": [302, 751, 308, 760], "type": "page_number", "angle": 0, "lines": [ { "bbox": [302, 751, 308, 760], "spans": [ { "bbox": [302, 751, 308, 760], "type": "text", "content": "8" } ] } ], "index": 9 }
+ ], "page_size": [612, 792], "page_idx": 7 },
+ { "para_blocks": [
+ { "bbox": [104, 82, 506, 161], "type": "text", "angle": 0, "lines": [ { "bbox": [104, 82, 506, 161], "spans": [
+ { "bbox": [104, 82, 506, 161], "type": "text", "content": "completely eliminate non-robust features. Our method is quite different, where the goal is to leverage the carefully selected non-robust features to weaken the transferability among sub-models. For each sub-model, we only modify the non-robust features corresponding to certain directions, rather than all non-robust features, and therefore the modification has a relatively small impact on clean accuracy. 
Moreover, we partition each class " },
+ { "bbox": [104, 82, 506, 161], "type": "inline_equation", "content": "C_j" },
+ { "bbox": [104, 82, 506, 161], "type": "text", "content": " into " },
+ { "bbox": [104, 82, 506, 161], "type": "inline_equation", "content": "k - 1" },
+ { "bbox": [104, 82, 506, 161], "type": "text", "content": " subsets " },
+ { "bbox": [104, 82, 506, 161], "type": "inline_equation", "content": "\\{C_{j,1}, C_{j,2}, \\dots, C_{j,k - 1}\\}" },
+ { "bbox": [104, 82, 506, 161], "type": "text", "content": ", with each subset being attacked toward one specified target class. This step eliminates the need to attack each data point across all classes, thereby reducing the computational complexity of constructing the new datasets." }
+ ] } ], "index": 1 },
+ { "bbox": [105, 175, 201, 188], "type": "title", "angle": 0, "lines": [ { "bbox": [105, 175, 201, 188], "spans": [ { "bbox": [105, 175, 201, 188], "type": "text", "content": "5 EXPERIMENTS" } ] } ], "index": 2 },
+ { "bbox": [104, 192, 506, 304], "type": "text", "angle": 0, "lines": [ { "bbox": [104, 192, 506, 304], "spans": [
+ { "bbox": [104, 192, 506, 304], "type": "text", "content": "We conduct our experiments on the widely used image datasets CIFAR-10, CIFAR-100 (Krizhevsky & Hinton, 2009), and Tiny-ImageNet-200 (Deng et al., 2009). As for the baselines, we reproduce the existing ensemble methods ADP (Pang et al., 2019), GAL (Kariyappa & Qureshi, 2019), DVERGE (Yang et al., 2020), and TRS (Yang et al., 2021), using their released code and recommended hyperparameter settings. As for our approach, \"FDT-random\" and \"FDT-target\" respectively denote the methods using the random noise based transformation and the target-attack transformation; \"FDT-hybrid\" denotes the method that combines both, i.e., we set two frequency selection thresholds " },
+ { "bbox": [104, 192, 506, 304], "type": "inline_equation", "content": "\\tau_{1}" },
+ { "bbox": [104, 192, 506, 304], "type": "text", "content": " and " },
+ { "bbox": [104, 192, 506, 304], "type": "inline_equation", "content": "\\tau_{2}" },
+ { "bbox": [104, 192, 506, 304], "type": "text", "content": " (" },
+ { "bbox": [104, 192, 506, 304], "type": "inline_equation", "content": "\\tau_{1} < \\tau_{2}" },
+ { "bbox": [104, 192, 506, 304], "type": "text", "content": "), and perform the random and target-attack transformations on the frequencies with amplitudes below " },
+ { "bbox": [104, 192, 506, 304], "type": "inline_equation", "content": "\\tau_{1}" },
+ { "bbox": [104, 192, 506, 304], "type": "text", "content": " and the frequencies with amplitudes between " },
+ { "bbox": [104, 192, 506, 304], "type": "inline_equation", "content": "\\tau_{1}" },
+ { "bbox": [104, 192, 506, 304], "type": "text", "content": " and " },
+ { "bbox": [104, 192, 506, 304], "type": "inline_equation", "content": "\\tau_{2}" },
+ { "bbox": [104, 192, 506, 304], "type": "text", "content": ", respectively (due to the space limit, more details are given in Appendix E). Our code will be available at https://github.com/ideven123/FDT."
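For concreteness, the construction and allocation stages described above can be sketched in a few lines. This is a minimal Python sketch under our own assumptions, not the authors' released code: `targeted_attack` and `substitute_low_amplitude` are hypothetical helpers standing in for the paper's targeted attack and frequency substitution (with the threshold tau baked into the latter), and each weakness set W[m] is assumed to be stored as a set of (j, h) direction pairs.

```python
import numpy as np

def build_submodel_datasets(C, M, W, targeted_attack, substitute_low_amplitude, seed=0):
    """C: list of k class subsets; M: number of sub-models; W[m]: weakness set of (j, h) pairs."""
    rng = np.random.default_rng(seed)
    k = len(C)
    P = [[] for _ in range(M)]  # one training set per sub-model
    for j in range(k):
        # Randomly split class j into k - 1 disjoint, (roughly) equal parts C_{j,1..k-1}.
        parts = np.array_split(rng.permutation(len(C[j])), k - 1)
        for i, part in enumerate(parts, start=1):
            h = (i + j) % k  # target label for part C_{j,i}; h != j since 1 <= i <= k - 1
            transformed = []
            for p in part:
                x = C[j][p]
                x_adv = targeted_attack(x, source=j, target=h)  # hypothetical helper
                # Splice only the low-amplitude frequencies of the perturbed image into x;
                # frequencies with amplitude above the threshold tau stay unchanged.
                transformed.append((substitute_low_amplitude(x, x_adv), j))
            # Allocation: C'_{j,i} goes to every sub-model whose weakness set does not
            # contain the jh-direction, i.e., to at least ceil(M/2) of the P_m's.
            for m in range(M):
                if (j, h) not in W[m]:
                    P[m].extend(transformed)
    return P
```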
+ } ] } ], "index": 3 },
+ { "bbox": [104, 308, 506, 397], "type": "text", "angle": 0, "lines": [ { "bbox": [104, 308, 506, 397], "spans": [
+ { "bbox": [104, 308, 506, 397], "type": "text", "content": "We train each sub-model based on ResNet-20 (He et al., 2016) and use the Adam optimizer (Kingma & Ba, 2015) with an initial learning rate of 0.001 for 200 epochs. To further test performance on larger-scale neural networks, we also use WideResNet28-10 (Zagoruyko & Komodakis, 2016) to train the sub-models; those results are provided in our supplement. All the experiments are implemented with PyTorch (Paszke et al., 2017) on a single NVIDIA GeForce RTX 3090 with 24GB of memory and 1TB of storage. We assess the performance of our models over 5 repeated runs and report error bars: using the numpy library, we calculate the standard deviation and derive the standard error of the mean (SEM)." }
+ ] } ], "index": 4 },
+ { "bbox": [104, 402, 506, 480], "type": "text", "angle": 0, "lines": [ { "bbox": [104, 402, 506, 480], "spans": [
+ { "bbox": [104, 402, 506, 480], "type": "text", "content": "Varying the number of sub-models. We take the ResNet-20 model trained on CIFAR-10 as an example and test the performance of FDT with different numbers of sub-models in the ensemble. In this experiment, we set the frequency selection threshold " },
+ { "bbox": [104, 402, 506, 480], "type": "inline_equation", "content": "\\tau_{1}" },
+ { "bbox": [104, 402, 506, 480], "type": "text", "content": " to be 0.2 and " },
+ { "bbox": [104, 402, 506, 480], "type": "inline_equation", "content": "\\tau_{2}" },
+ { "bbox": [104, 402, 506, 480], "type": "text", "content": " to be 0.8. Then we evaluate the performance of FDT-hybrid under the FGSM (Goodfellow et al., 2015), PGD (Madry et al., 2018), and AutoAttack (AA) (Croce & Hein, 2020) attack methods with " },
+ { "bbox": [104, 402, 506, 480], "type": "inline_equation", "content": "l_{\\infty}" },
+ { "bbox": [104, 402, 506, 480], "type": "text", "content": " perturbations of size " },
+ { "bbox": [104, 402, 506, 480], "type": "inline_equation", "content": "\\epsilon = 0.02" },
+ { "bbox": [104, 402, 506, 480], "type": "text", "content": ". The results in Table 1 show that clean accuracy changes only slightly as the number of sub-models increases, while robust accuracy improves substantially from 3 to 20 sub-models." }
+ ] } ], "index": 5 },
+ { "type": "table", "bbox": [108, 502, 501, 564], "blocks": [
+ { "bbox": [130, 491, 479, 502], "lines": [ { "bbox": [130, 491, 479, 502], "spans": [ { "bbox": [130, 491, 479, 502], "type": "text", "content": "Table 1: Performance of FDT-hybrid with different sub-model numbers on CIFAR-10." } ] } ], "index": 6, "angle": 0, "type": "table_caption" },
+ { "bbox": [108, 502, 501, 564], "lines": [ { "bbox": [108, 502, 501, 564], "spans": [ { "bbox": [108, 502, 501, 564], "type": "table", "html": "
<table><tr><td>Sub-model numbers</td><td>3</td><td>5</td><td>8</td><td>12</td><td>20</td></tr>
<tr><td>Clean accuracy</td><td>90.20±0.03</td><td>90.75±0.03</td><td>91.35±0.05</td><td>91.51±0.06</td><td>91.86±0.07</td></tr>
<tr><td>FGSM (ε=0.02)</td><td>58.04±0.13</td><td>61.66±0.15</td><td>62.41±0.11</td><td>63.96±0.12</td><td>64.27±0.14</td></tr>
<tr><td>PGD (ε=0.02)</td><td>20.01±0.04</td><td>26.10±0.07</td><td>29.20±0.05</td><td>29.78±0.08</td><td>29.71±0.07</td></tr>
<tr><td>AutoAttack (ε=0.02)</td><td>19.42±0.04</td><td>25.37±0.05</td><td>27.33±0.04</td><td>28.12±0.07</td><td>28.92±0.07</td></tr></table>
", + "image_path": "719405a5d2d8cbb9f19328bc5857d634c80b85130360abd317e4feadec2be521.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 571, + 506, + 672 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 571, + 506, + 672 + ], + "spans": [ + { + "bbox": [ + 104, + 571, + 506, + 672 + ], + "type": "text", + "content": "Results for white-box attack. To maintain consistency with the baseline ensemble methods from the literature, we ensemble three ResNet-20 sub-models here and evaluate the robust accuracy using " + }, + { + "bbox": [ + 104, + 571, + 506, + 672 + ], + "type": "inline_equation", + "content": "\\epsilon = 0.01" + }, + { + "bbox": [ + 104, + 571, + 506, + 672 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 571, + 506, + 672 + ], + "type": "inline_equation", + "content": "\\epsilon = 0.02" + }, + { + "bbox": [ + 104, + 571, + 506, + 672 + ], + "type": "text", + "content": ". In this experiment, we set the frequency selection threshold " + }, + { + "bbox": [ + 104, + 571, + 506, + 672 + ], + "type": "inline_equation", + "content": "\\tau_{1}" + }, + { + "bbox": [ + 104, + 571, + 506, + 672 + ], + "type": "text", + "content": " to be 0.2 and " + }, + { + "bbox": [ + 104, + 571, + 506, + 672 + ], + "type": "inline_equation", + "content": "\\tau_{2}" + }, + { + "bbox": [ + 104, + 571, + 506, + 672 + ], + "type": "text", + "content": " to be 0.8. In the white-box attack setting, the attacker has full knowledge of the models, including model parameters, architecture, and ensemble training strategy. To evaluate the adversarial robustness of the ensemble, we conduct the following white-box attacks: PGD, FGSM, BIM (Goodfellow et al., 2015), MIM (Dong et al., 2018), C&W (Carlini & Wagner, 2017) and AutoAttack (AA). The attacks are implemented using AdverTorch (Ding et al., 2019). We take the robust and clean accuracies, and average training time per epoch as the evaluation metrics." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 677, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 506, + 733 + ], + "type": "text", + "content": "Table 2 presents the obtained robust accuracies of the baseline ensemble methods on CIFAR-10 and CIFAR-100. In addition, we show the average training time per epoch of different ensemble methods. The experimental results suggest that our FDT-random method can achieve higher adversarial robustness over other baselines on both CIFAR-10 and CIFAR-100, with the training time only higher than ADP (and much lower than other baselines). 
Furthermore, the FDT-hybrid ensemble method" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 113, + 134, + 496, + 438 + ], + "blocks": [ + { + "bbox": [ + 104, + 89, + 504, + 133 + ], + "lines": [ + { + "bbox": [ + 104, + 89, + 504, + 133 + ], + "spans": [ + { + "bbox": [ + 104, + 89, + 504, + 133 + ], + "type": "text", + "content": "Table 2: Robust and Clean Accuracy (\\%) and average training time of different ensemble methods against white-box attacks on CIFAR-10 and CIFAR-100. “" + }, + { + "bbox": [ + 104, + 89, + 504, + 133 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 104, + 89, + 504, + 133 + ], + "type": "text", + "content": "” and “" + }, + { + "bbox": [ + 104, + 89, + 504, + 133 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 104, + 89, + 504, + 133 + ], + "type": "text", + "content": "” stand for the " + }, + { + "bbox": [ + 104, + 89, + 504, + 133 + ], + "type": "inline_equation", + "content": "l_{\\infty}" + }, + { + "bbox": [ + 104, + 89, + 504, + 133 + ], + "type": "text", + "content": " norm of the adversarial perturbation and the coefficient of C&W attack respectively. The TRS results are reported in the original paper Yang et al. (2021), with “-” indicating results not provided." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 113, + 134, + 496, + 438 + ], + "lines": [ + { + "bbox": [ + 113, + 134, + 496, + 438 + ], + "spans": [ + { + "bbox": [ + 113, + 134, + 496, + 438 + ], + "type": "table", + "html": "
<table><tr><td>CIFAR-10</td><td>ADP</td><td>GAL</td><td>DVERGE</td><td>TRS</td><td>FDT-random</td><td>FDT-target</td><td>FDT-hybrid</td></tr>
<tr><td>Clean accuracy</td><td>91.84</td><td>91.81</td><td>91.37</td><td>-</td><td>89.88±0.02</td><td>90.16±0.04</td><td>90.20±0.03</td></tr>
<tr><td>FGSM (ε=0.01)</td><td>59.48</td><td>44.97</td><td>70.05</td><td>-</td><td>66.96±0.12</td><td>72.88±0.12</td><td>72.24±0.12</td></tr>
<tr><td>FGSM (ε=0.02)</td><td>53.38</td><td>30.58</td><td>56.33</td><td>44.2</td><td>46.28±0.10</td><td>55.54±0.09</td><td>58.04±0.13</td></tr>
<tr><td>PGD (ε=0.01)</td><td>14.45</td><td>1.35</td><td>40.55</td><td>50.5</td><td>45.42±0.09</td><td>46.58±0.07</td><td>48.48±0.09</td></tr>
<tr><td>PGD (ε=0.02)</td><td>2.95</td><td>0.34</td><td>11.49</td><td>15.1</td><td>12.24±0.03</td><td>15.08±0.05</td><td>20.01±0.04</td></tr>
<tr><td>BIM (ε=0.01)</td><td>14.15</td><td>1.37</td><td>40.51</td><td>50.6</td><td>45.24±0.03</td><td>46.86±0.04</td><td>48.57±0.05</td></tr>
<tr><td>BIM (ε=0.02)</td><td>3.01</td><td>0.27</td><td>10.65</td><td>15.8</td><td>11.68±0.03</td><td>14.86±0.03</td><td>16.63±0.02</td></tr>
<tr><td>MIM (ε=0.01)</td><td>20.38</td><td>2.05</td><td>44.74</td><td>51.5</td><td>47.73±0.05</td><td>49.97±0.06</td><td>51.50±0.07</td></tr>
<tr><td>MIM (ε=0.02)</td><td>5.11</td><td>0.69</td><td>14.76</td><td>17.2</td><td>15.14±0.04</td><td>18.27±0.02</td><td>20.09±0.03</td></tr>
<tr><td>AA (ε=0.01)</td><td>1.80</td><td>0.00</td><td>43.34</td><td>-</td><td>46.09±0.09</td><td>48.83±0.08</td><td>51.56±0.08</td></tr>
<tr><td>AA (ε=0.02)</td><td>0.00</td><td>0.00</td><td>13.72</td><td>-</td><td>9.38±0.05</td><td>15.70±0.05</td><td>19.42±0.04</td></tr>
<tr><td>C&W (λ=0.1)</td><td>20.96</td><td>31.57</td><td>52.35</td><td>58.1</td><td>45.01±0.10</td><td>55.48±0.10</td><td>56.08±0.11</td></tr>
<tr><td>CIFAR-100</td><td>ADP</td><td>GAL</td><td>DVERGE</td><td>TRS</td><td>FDT-random</td><td>FDT-target</td><td>FDT-hybrid</td></tr>
<tr><td>Clean accuracy</td><td>67.04</td><td>67.70</td><td>66.16</td><td>-</td><td>66.29±0.11</td><td>67.64±0.08</td><td>66.70±0.09</td></tr>
<tr><td>FGSM (ε=0.01)</td><td>17.82</td><td>16.89</td><td>33.94</td><td>-</td><td>35.42±0.12</td><td>40.46±0.14</td><td>39.85±0.14</td></tr>
<tr><td>FGSM (ε=0.02)</td><td>10.53</td><td>7.80</td><td>26.61</td><td>19.3</td><td>22.40±0.05</td><td>32.30±0.06</td><td>30.27±0.08</td></tr>
<tr><td>PGD (ε=0.01)</td><td>0.80</td><td>0.11</td><td>14.62</td><td>23.0</td><td>21.54±0.06</td><td>22.19±0.05</td><td>24.93±0.05</td></tr>
<tr><td>PGD (ε=0.02)</td><td>0.01</td><td>0.02</td><td>4.25</td><td>5.3</td><td>4.84±0.03</td><td>7.27±0.02</td><td>8.63±0.03</td></tr>
<tr><td>BIM (ε=0.01)</td><td>0.68</td><td>0.23</td><td>14.85</td><td>22.9</td><td>21.10±0.09</td><td>22.39±0.07</td><td>24.35±0.06</td></tr>
<tr><td>BIM (ε=0.02)</td><td>0.02</td><td>0.00</td><td>4.07</td><td>5.4</td><td>4.80±0.04</td><td>6.80±0.05</td><td>8.40±0.05</td></tr>
<tr><td>MIM (ε=0.01)</td><td>0.78</td><td>0.12</td><td>16.82</td><td>23.4</td><td>23.14±0.06</td><td>24.68±0.10</td><td>27.09±0.09</td></tr>
<tr><td>MIM (ε=0.02)</td><td>0.01</td><td>0.02</td><td>5.31</td><td>6.2</td><td>6.47±0.03</td><td>8.87±0.04</td><td>10.19±0.05</td></tr>
<tr><td>AA (ε=0.01)</td><td>0.01</td><td>0.00</td><td>11.23</td><td>-</td><td>16.02±0.09</td><td>16.03±0.09</td><td>16.41±0.12</td></tr>
<tr><td>AA (ε=0.02)</td><td>0.00</td><td>0.00</td><td>2.72</td><td>-</td><td>3.12±0.04</td><td>4.54±0.05</td><td>5.47±0.07</td></tr>
<tr><td>C&W (λ=0.1)</td><td>0.74</td><td>3.70</td><td>10.68</td><td>26.9</td><td>25.07±0.10</td><td>29.43±0.09</td><td>30.66±0.13</td></tr></table>
", + "image_path": "55e1a9fcdd1e91daee42c95bed2f183be402b94549e910b3159a1ea61cc33fe0.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 126, + 441, + 481, + 481 + ], + "blocks": [ + { + "bbox": [ + 126, + 441, + 481, + 481 + ], + "lines": [ + { + "bbox": [ + 126, + 441, + 481, + 481 + ], + "spans": [ + { + "bbox": [ + 126, + 441, + 481, + 481 + ], + "type": "table", + "html": "
<table><tr><td>Time (s)</td><td>ADP</td><td>GAL</td><td>DVERGE</td><td>TRS</td><td>FDT-random</td><td>FDT-target</td><td>FDT-hybrid</td></tr>
<tr><td>CIFAR-10</td><td>30.15</td><td>69.92</td><td>134.33</td><td>350.42</td><td>37.04</td><td>108.22</td><td>114.23</td></tr>
<tr><td>CIFAR-100</td><td>30.34</td><td>69.71</td><td>129.25</td><td>344.92</td><td>37.12</td><td>108.43</td><td>113.87</td></tr></table>
", + "image_path": "33367032556425bfd27a7ff1d5b581121cbba297437e9fd41abecae725ac9758.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 502, + 504, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 502, + 504, + 525 + ], + "spans": [ + { + "bbox": [ + 104, + 502, + 504, + 525 + ], + "type": "text", + "content": "achieves an even better robustness than FDT-random, though its running time is higher since it needs to perform the target-attack transformation." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 530, + 506, + 619 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 530, + 506, + 619 + ], + "spans": [ + { + "bbox": [ + 104, + 530, + 506, + 619 + ], + "type": "text", + "content": "Summary on other experimental results placed in Appendix F. We also conduct the experiments to examine the performance of FDT under black-box attack, and assess the transferability of our method across various sub-models. The results indicate the competitive robustness of our method in defending against black-box attacks. Then, we evaluate the trade-off between clean accuracy and robust accuracy by varying the frequency selection threshold " + }, + { + "bbox": [ + 104, + 530, + 506, + 619 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 530, + 506, + 619 + ], + "type": "text", + "content": ". The result shows that the ensemble model has lower clean accuracy and higher robust accuracy with the increasing of " + }, + { + "bbox": [ + 104, + 530, + 506, + 619 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 530, + 506, + 619 + ], + "type": "text", + "content": ". Moreover, we included some ablation studies on datasets and model architectures. These experiments demonstrate that our method performs the best among ensemble-based baseline methods." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 633, + 303, + 645 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 633, + 303, + 645 + ], + "spans": [ + { + "bbox": [ + 105, + 633, + 303, + 645 + ], + "type": "text", + "content": "6 CONCLUSION AND FUTURE WORK" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 654, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 654, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 654, + 504, + 733 + ], + "type": "text", + "content": "In this paper, we present a novel data transformation approach to improve the robustness of ensemble models against adversarial attacks. By leveraging the frequency based features and strategically allocating adversarial examples, we demonstrate the effectiveness of our method in enhancing adversarial robustness while maintaining high accuracy on clean data. As for the future work, we can consider other types of transformation methods (e.g., beyond using frequency) to improve the ensemble robustness. Also, it is interesting to consider more complicated scenarios for ensemble training, such as federated learning with concerning the privacy issue." 
+ } ] } ], "index": 7 }
+ ], "discarded_blocks": [
+ { "bbox": [106, 26, 293, 38], "type": "header", "angle": 0, "lines": [ { "bbox": [106, 26, 293, 38], "spans": [ { "bbox": [106, 26, 293, 38], "type": "text", "content": "Published as a conference paper at ICLR 2025" } ] } ], "index": 0 },
+ { "bbox": [299, 750, 311, 760], "type": "page_number", "angle": 0, "lines": [ { "bbox": [299, 750, 311, 760], "spans": [ { "bbox": [299, 750, 311, 760], "type": "text", "content": "10" } ] } ], "index": 8 }
+ ], "page_size": [612, 792], "page_idx": 9 },
+ { "para_blocks": [
+ { "bbox": [105, 83, 201, 94], "type": "title", "angle": 0, "lines": [ { "bbox": [105, 83, 201, 94], "spans": [ { "bbox": [105, 83, 201, 94], "type": "text", "content": "ACKNOWLEDGMENTS" } ] } ], "index": 1 },
+ { "bbox": [105, 102, 507, 148], "type": "ref_text", "angle": 0, "lines": [ { "bbox": [105, 102, 507, 148], "spans": [ { "bbox": [105, 102, 507, 148], "type": "text", "content": "The authors would like to thank the reviewers for their constructive comments and suggestions. This work was partially supported by the National Natural Science Foundation of China (No. 62272432 and No. 62432016), the National Key Research and Development Program of China (No. 2021YFA1000900), and the Natural Science Foundation of Anhui Province (No. 2208085MF163)." } ] } ], "index": 2 },
+ { "bbox": [106, 163, 176, 175], "type": "title", "angle": 0, "lines": [ { "bbox": [106, 163, 176, 175], "spans": [ { "bbox": [106, 163, 176, 175], "type": "text", "content": "REFERENCES" } ] } ], "index": 3 },
+ { "bbox": [105, 180, 506, 733], "type": "list", "angle": 0, "index": 18, "blocks": [
+ { "bbox": [105, 180, 505, 216], "type": "ref_text", "angle": 0, "lines": [ { "bbox": [105, 180, 505, 216], "spans": [ { "bbox": [105, 180, 505, 216], "type": "text", "content": "Zeyuan Allen-Zhu and Yuanzhi Li. Feature purification: How adversarial training performs robust deep learning. In 2021 IEEE 62nd Annual Symposium on Foundations of Computer Science (FOCS), pp. 977-988. IEEE, 2022." } ] } ], "index": 4 },
+ { "bbox": [105, 222, 506, 257], "type": "ref_text", "angle": 0, "lines": [ { "bbox": [105, 222, 506, 257], "spans": [ { "bbox": [105, 222, 506, 257], "type": "text", "content": "MaungMaung AprilPyone and Hitoshi Kiya. Block-wise image transformation with secret key for adversarially robust defense. IEEE Transactions on Information Forensics and Security, 16:2709-2723, 2021." } ] } ], "index": 5 },
+ { "bbox": [105, 263, 505, 298], "type": "ref_text", "angle": 0, "lines": [ { "bbox": [105, 263, 505, 298], "spans": [ { "bbox": [105, 263, 505, 298], "type": "text", "content": "Anish Athalye, Nicholas Carlini, and David Wagner. Obfuscated gradients give a false sense of security: Circumventing defenses to adversarial examples. In International conference on machine learning, pp. 274-283. PMLR, 2018."
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 304, + 506, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 304, + 506, + 350 + ], + "spans": [ + { + "bbox": [ + 105, + 304, + 506, + 350 + ], + "type": "text", + "content": "Philipp Benz, Chaoning Zhang, and In So Kweon. Batch normalization increases adversarial vulnerability and decreases adversarial transferability: A non-robust feature perspective. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 7818-7827, 2021." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 357, + 506, + 402 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 357, + 506, + 402 + ], + "spans": [ + { + "bbox": [ + 105, + 357, + 506, + 402 + ], + "type": "text", + "content": "Rémi Bernhard, Pierre-Alain Moëllic, Martial Mermillod, Yannick Bourrier, Romain Cohendet, Miguel Solinas, and Marina Reyboz. Impact of spatial frequency based constraints on adversarial robustness. In 2021 International Joint Conference on Neural Networks (IJCNN), pp. 1-8. IEEE, 2021." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 410, + 505, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 410, + 505, + 434 + ], + "spans": [ + { + "bbox": [ + 105, + 410, + 505, + 434 + ], + "type": "text", + "content": "Nicholas Carlini and David Wagner. Towards evaluating the robustness of neural networks. In 2017, IEEE symposium on security and privacy (sp), pp. 39-57. IEEE, 2017." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 441, + 506, + 475 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 441, + 506, + 475 + ], + "spans": [ + { + "bbox": [ + 105, + 441, + 506, + 475 + ], + "type": "text", + "content": "Nicholas Carlini, Anish Athalye, Nicolas Papernot, Wieland Brendel, Jonas Rauber, Dimitris Tsipras, Ian Goodfellow, Aleksander Madry, and Alexey Kurakin. On evaluating adversarial robustness. arXiv preprint arXiv:1902.06705, 2019." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 482, + 506, + 516 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 482, + 506, + 516 + ], + "spans": [ + { + "bbox": [ + 105, + 482, + 506, + 516 + ], + "type": "text", + "content": "Yair Carmon, Aditi Raghunathan, Ludwig Schmidt, John C Duchi, and Percy S Liang. Unlabeled data improves adversarial robustness. Advances in Neural Information Processing Systems, 32, 2019." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 523, + 506, + 557 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 523, + 506, + 557 + ], + "spans": [ + { + "bbox": [ + 105, + 523, + 506, + 557 + ], + "type": "text", + "content": "Francesco Croce and Matthias Hein. Reliable evaluation of adversarial robustness with an ensemble of diverse parameter-free attacks. In International conference on machine learning, pp. 2206-2216. PMLR, 2020." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 564, + 506, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 564, + 506, + 600 + ], + "spans": [ + { + "bbox": [ + 105, + 564, + 506, + 600 + ], + "type": "text", + "content": "Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pp. 248-255. IEEE, 2009." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 606, + 504, + 631 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 606, + 504, + 631 + ], + "spans": [ + { + "bbox": [ + 105, + 606, + 504, + 631 + ], + "type": "text", + "content": "Gavin Weiguang Ding, Luyu Wang, and Xiaomeng Jin. Advertorch v0.1: An adversarial robustness toolbox based on pytorch. arXiv preprint arXiv:1902.07623, 2019." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 636, + 504, + 672 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 636, + 504, + 672 + ], + "spans": [ + { + "bbox": [ + 105, + 636, + 504, + 672 + ], + "type": "text", + "content": "Yinpeng Dong, Fangzhou Liao, Tianyu Pang, Hang Su, Jun Zhu, Xiaolin Hu, and Jianguo Li. Boosting adversarial attacks with momentum. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 9185-9193, 2018." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 678, + 504, + 703 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 678, + 504, + 703 + ], + "spans": [ + { + "bbox": [ + 105, + 678, + 504, + 703 + ], + "type": "text", + "content": "Xitong Gao, Cheng-Zhong Xu, et al. Mora: Improving ensemble robustness evaluation with model reweighing attack. Advances in Neural Information Processing Systems, 35:26955-26965, 2022." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 708, + 504, + 733 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 708, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 105, + 708, + 504, + 733 + ], + "type": "text", + "content": "Ian J Goodfellow, Jonathon Shlens, and Christian Szegedy. Explaining and harnessing adversarial examples. stat, 1050:20, 2015." + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 731 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 505, + 106 + ], + "type": "text", + "content": "Ian J. Goodfellow, Yoshua Bengio, and Aaron C. Courville. Deep Learning. Adaptive computation and machine learning. MIT Press, 2016. ISBN 978-0-262-03561-3." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 112, + 505, + 146 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 112, + 505, + 146 + ], + "spans": [ + { + "bbox": [ + 105, + 112, + 505, + 146 + ], + "type": "text", + "content": "Sven Gowal, Sylvestre-Alvise Rebuffi, Olivia Wiles, Florian Stimberg, Dan Andrei Calian, and Timothy A Mann. Improving robustness using generated data. 
Advances in Neural Information Processing Systems, 34:4218-4233, 2021." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 153, + 505, + 199 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 153, + 505, + 199 + ], + "spans": [ + { + "bbox": [ + 105, + 153, + 505, + 199 + ], + "type": "text", + "content": "Chuan Guo, Mayank Rana, Moustapha Cissé, and Laurens van der Maaten. Countering adversarial images using input transformations. In 6th International Conference on Learning Representations, ICLR 2018, Vancouver, BC, Canada, April 30 - May 3, 2018, Conference Track Proceedings. OpenReview.net, 2018." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 205, + 505, + 241 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 205, + 505, + 241 + ], + "spans": [ + { + "bbox": [ + 105, + 205, + 505, + 241 + ], + "type": "text", + "content": "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 770-778, 2016." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 247, + 491, + 259 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 247, + 491, + 259 + ], + "spans": [ + { + "bbox": [ + 105, + 247, + 491, + 259 + ], + "type": "text", + "content": "Douglas Heaven. Why deep-learning ais are so easy to fool. Nature, 574(7777):163-166, 2019." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 266, + 505, + 300 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 266, + 505, + 300 + ], + "spans": [ + { + "bbox": [ + 105, + 266, + 505, + 300 + ], + "type": "text", + "content": "Andrew Ilyas, Shibani Santurkar, Dimitris Tsipras, Logan Engstrom, Brandon Tran, and Aleksander Madry. Adversarial examples are not bugs, they are features. Advances in neural information processing systems, 32, 2019." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 307, + 505, + 331 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 307, + 505, + 331 + ], + "spans": [ + { + "bbox": [ + 105, + 307, + 505, + 331 + ], + "type": "text", + "content": "Sanjay Kariyappa and Moinuddin K Qureshi. Improving adversarial robustness of ensembles with diversity training. arXiv e-prints, pp. arXiv-1901, 2019." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 338, + 505, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 338, + 505, + 373 + ], + "spans": [ + { + "bbox": [ + 105, + 338, + 505, + 373 + ], + "type": "text", + "content": "Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In Yoshua Bengio and Yann LeCun (eds.), 3rd International Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings, 2015." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 378, + 505, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 378, + 505, + 392 + ], + "spans": [ + { + "bbox": [ + 105, + 378, + 505, + 392 + ], + "type": "text", + "content": "Alex Krizhevsky and Geoffrey Hinton. Learning multiple layers of features from tiny images. 2009." 
+ } ] } ], "index": 9 },
+ { "bbox": [105, 398, 505, 443], "type": "ref_text", "angle": 0, "lines": [ { "bbox": [105, 398, 505, 443], "spans": [ { "bbox": [105, 398, 505, 443], "type": "text", "content": "Aleksander Madry, Aleksandar Makelov, Ludwig Schmidt, Dimitris Tsipras, and Adrian Vladu. Towards deep learning models resistant to adversarial attacks. In 6th International Conference on Learning Representations, ICLR 2018, Vancouver, BC, Canada, April 30 - May 3, 2018, Conference Track Proceedings. OpenReview.net, 2018." } ] } ], "index": 10 },
+ { "bbox": [105, 450, 505, 474], "type": "ref_text", "angle": 0, "lines": [ { "bbox": [105, 450, 505, 474], "spans": [ { "bbox": [105, 450, 505, 474], "type": "text", "content": "Shishira R. Maiya, Max Ehrlich, Vatsal Agarwal, Ser-Nam Lim, Tom Goldstein, and Abhinav Shrivastava. A frequency perspective of adversarial robustness. CoRR, abs/2111.00861, 2021." } ] } ], "index": 11 },
+ { "bbox": [105, 480, 505, 515], "type": "ref_text", "angle": 0, "lines": [ { "bbox": [105, 480, 505, 515], "spans": [ { "bbox": [105, 480, 505, 515], "type": "text", "content": "AKM Iqtidar Newaz, Nur Imtiazul Haque, Amit Kumar Sikder, Mohammad Ashiqur Rahman, and A Selcuk Uluagac. Adversarial attacks to machine learning-based smart healthcare systems. In GLOBECOM 2020-2020 IEEE Global Communications Conference, pp. 1-6. IEEE, 2020." } ] } ], "index": 12 },
+ { "bbox": [105, 522, 505, 556], "type": "ref_text", "angle": 0, "lines": [ { "bbox": [105, 522, 505, 556], "spans": [ { "bbox": [105, 522, 505, 556], "type": "text", "content": "Tianyu Pang, Kun Xu, Chao Du, Ning Chen, and Jun Zhu. Improving adversarial robustness via promoting ensemble diversity. In International Conference on Machine Learning, pp. 4970-4979. PMLR, 2019." } ] } ], "index": 13 },
+ { "bbox": [105, 563, 505, 597], "type": "ref_text", "angle": 0, "lines": [ { "bbox": [105, 563, 505, 597], "spans": [ { "bbox": [105, 563, 505, 597], "type": "text", "content": "Adam Paszke, Sam Gross, Soumith Chintala, Gregory Chanan, Edward Yang, Zachary DeVito, Zeming Lin, Alban Desmaison, Luca Antiga, and Adam Lerer. Automatic differentiation in pytorch. 2017." } ] } ], "index": 14 },
+ { "bbox": [105, 604, 505, 639], "type": "ref_text", "angle": 0, "lines": [ { "bbox": [105, 604, 505, 639], "spans": [ { "bbox": [105, 604, 505, 639], "type": "text", "content": "Rahul Rade and Seyed-Mohsen Moosavi-Dezfooli. Helper-based adversarial training: Reducing excessive margin to achieve a better accuracy vs. robustness trade-off. In ICML 2021 Workshop on Adversarial Machine Learning, 2021." } ] } ], "index": 15 },
+ { "bbox": [105, 646, 505, 680], "type": "ref_text", "angle": 0, "lines": [ { "bbox": [105, 646, 505, 680], "spans": [ { "bbox": [105, 646, 505, 680], "type": "text", "content": "Edward Raff, Jared Sylvester, Steven Forsyth, and Mark McLean. Barrage of random transforms for adversarially robust defense. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 6528-6537, 2019."
+ } ] } ], "index": 16 },
+ { "bbox": [105, 687, 505, 731], "type": "ref_text", "angle": 0, "lines": [ { "bbox": [105, 687, 505, 731], "spans": [ { "bbox": [105, 687, 505, 731], "type": "text", "content": "Giulio Rossolini, Federico Nesti, Gianluca D'Amico, Saasha Nair, Alessandro Biondi, and Giorgio Buttazzo. On the real-world adversarial robustness of real-time semantic segmentation models for autonomous driving. IEEE Transactions on Neural Networks and Learning Systems, pp. 1-15, 2023. doi: 10.1109/TNNLS.2023.3314512." } ] } ], "index": 17 }
+ ], "sub_type": "ref_text" }
+ ], "discarded_blocks": [
+ { "bbox": [105, 26, 293, 38], "type": "header", "angle": 0, "lines": [ { "bbox": [105, 26, 293, 38], "spans": [ { "bbox": [105, 26, 293, 38], "type": "text", "content": "Published as a conference paper at ICLR 2025" } ] } ], "index": 0 },
+ { "bbox": [299, 750, 311, 761], "type": "page_number", "angle": 0, "lines": [ { "bbox": [299, 750, 311, 761], "spans": [ { "bbox": [299, 750, 311, 761], "type": "text", "content": "12" } ] } ], "index": 19 }
+ ], "page_size": [612, 792], "page_idx": 11 },
+ { "para_blocks": [
+ { "bbox": [105, 81, 505, 731], "type": "list", "angle": 0, "index": 16, "blocks": [
+ { "bbox": [107, 81, 506, 138], "type": "ref_text", "angle": 0, "lines": [ { "bbox": [107, 81, 506, 138], "spans": [ { "bbox": [107, 81, 506, 138], "type": "text", "content": "Andrei A. Rusu, Dan Andrei Calian, Sven Gowal, and Raia Hadsell. Hindering adversarial attacks with implicit neural representations. In Kamalika Chaudhuri, Stefanie Jegelka, Le Song, Csaba Szepesvári, Gang Niu, and Sivan Sabato (eds.), International Conference on Machine Learning, ICML 2022, 17-23 July 2022, Baltimore, Maryland, USA, volume 162 of Proceedings of Machine Learning Research, pp. 18910-18934. PMLR, 2022." } ] } ], "index": 1 },
+ { "bbox": [105, 145, 506, 190], "type": "ref_text", "angle": 0, "lines": [ { "bbox": [105, 145, 506, 190], "spans": [ { "bbox": [105, 145, 506, 190], "type": "text", "content": "Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. In Yoshua Bengio and Yann LeCun (eds.), 3rd International Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings, 2015." } ] } ], "index": 2 },
+ { "bbox": [107, 198, 504, 221], "type": "ref_text", "angle": 0, "lines": [ { "bbox": [107, 198, 504, 221], "spans": [ { "bbox": [107, 198, 504, 221], "type": "text", "content": "James C Spall. Multivariate stochastic approximation using a simultaneous perturbation gradient approximation. IEEE transactions on automatic control, 37(3):332-341, 1992." } ] } ], "index": 3 },
+ { "bbox": [106, 229, 506, 262], "type": "ref_text", "angle": 0, "lines": [ { "bbox": [106, 229, 506, 262], "spans": [ { "bbox": [106, 229, 506, 262], "type": "text", "content": "Jacob Springer, Melanie Mitchell, and Garrett Kenyon. 
A little robustness goes a long way: Leveraging robust features for targeted transfer attacks. Advances in Neural Information Processing Systems, 34:9759-9773, 2021." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 270, + 506, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 270, + 506, + 304 + ], + "spans": [ + { + "bbox": [ + 107, + 270, + 506, + 304 + ], + "type": "text", + "content": "Christian Szegedy, Wojciech Zaremba, Ilya Sutskever, Joan Bruna, Dumitru Erhan, Ian Goodfellow, and Rob Fergus. Intriguing properties of neural networks. In 2nd International Conference on Learning Representations, ICLR 2014, 2014." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 312, + 506, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 312, + 506, + 346 + ], + "spans": [ + { + "bbox": [ + 107, + 312, + 506, + 346 + ], + "type": "text", + "content": "Florian Tramér, Alexey Kurakin, Nicolas Papernot, Ian Goodfellow, Dan Boneh, and Patrick McDaniel. Ensemble adversarial training: Attacks and defenses. In International Conference on Learning Representations, 2018." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 354, + 506, + 387 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 354, + 506, + 387 + ], + "spans": [ + { + "bbox": [ + 107, + 354, + 506, + 387 + ], + "type": "text", + "content": "Dimitris Tsipras, Shibani Santurkar, Logan Engstrom, Alexander Turner, and Aleksander Madry. Robustness may be at odds with accuracy. In International Conference on Learning Representations, 2018." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 396, + 506, + 452 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 396, + 506, + 452 + ], + "spans": [ + { + "bbox": [ + 107, + 396, + 506, + 452 + ], + "type": "text", + "content": "Zekai Wang, Tianyu Pang, Chao Du, Min Lin, Weiwei Liu, and Shuicheng Yan. Better diffusion models further improve adversarial training. In Andreas Krause, Emma Brunskill, Kyunghyun Cho, Barbara Engelhardt, Sivan Sabato, and Jonathan Scarlett (eds.), International Conference on Machine Learning, ICML 2023, 23-29 July 2023, Honolulu, Hawaii, USA, volume 202 of Proceedings of Machine Learning Research, pp. 36246-36263. PMLR, 2023a." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 460, + 506, + 493 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 460, + 506, + 493 + ], + "spans": [ + { + "bbox": [ + 107, + 460, + 506, + 493 + ], + "type": "text", + "content": "Zekai Wang, Tianyu Pang, Chao Du, Min Lin, Weiwei Liu, and Shuicheng Yan. Better diffusion models further improve adversarial training. In International Conference on Machine Learning, pp. 36246-36263. PMLR, 2023b." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 501, + 504, + 524 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 501, + 504, + 524 + ], + "spans": [ + { + "bbox": [ + 107, + 501, + 504, + 524 + ], + "type": "text", + "content": "Zifan Wang, Yilin Yang, Ankit Shrivastava, Varun Rawal, and Zihao Ding. Towards frequency-based explanation for robust cnn. arXiv preprint arXiv:2005.03141, 2020." 
+ } ] } ], "index": 10 },
+ { "bbox": [107, 532, 506, 576], "type": "ref_text", "angle": 0, "lines": [ { "bbox": [107, 532, 506, 576], "spans": [ { "bbox": [107, 532, 506, 576], "type": "text", "content": "Futa Waseda, Sosuke Nishikawa, Trung-Nghia Le, Huy H Nguyen, and Isao Echizen. Closer look at the transferability of adversarial examples: How they fool different models differently. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pp. 1360-1368, 2023." } ] } ], "index": 11 },
+ { "bbox": [107, 584, 506, 596], "type": "ref_text", "angle": 0, "lines": [ { "bbox": [107, 584, 506, 596], "spans": [ { "bbox": [107, 584, 506, 596], "type": "text", "content": "Sven-Ake Wegner. Lecture notes on high-dimensional data. arXiv preprint arXiv:2101.05841, 2021." } ] } ], "index": 12 },
+ { "bbox": [107, 604, 504, 639], "type": "ref_text", "angle": 0, "lines": [ { "bbox": [107, 604, 504, 639], "spans": [ { "bbox": [107, 604, 504, 639], "type": "text", "content": "Yuancheng Xu, Yanchao Sun, Micah Goldblum, Tom Goldstein, and Furong Huang. Exploring and exploiting decision boundary dynamics for adversarial robustness. In The Eleventh International Conference on Learning Representations, ICLR 2023, Kigali, Rwanda, May 1-5, 2023, 2023." } ] } ], "index": 13 },
+ { "bbox": [107, 646, 506, 690], "type": "ref_text", "angle": 0, "lines": [ { "bbox": [107, 646, 506, 690], "spans": [ { "bbox": [107, 646, 506, 690], "type": "text", "content": "Huanrui Yang, Jingyang Zhang, Hongliang Dong, Nathan Inkawhich, Andrew Gardner, Andrew Touchet, Wesley Wilkes, Heath Berry, and Hai Li. DVERGE: diversifying vulnerabilities for enhanced robust generation of ensembles. Advances in Neural Information Processing Systems, 33:5505-5515, 2020." } ] } ], "index": 14 },
+ { "bbox": [107, 698, 506, 731], "type": "ref_text", "angle": 0, "lines": [ { "bbox": [107, 698, 506, 731], "spans": [ { "bbox": [107, 698, 506, 731], "type": "text", "content": "Ruijie Yang, Yuanfang Guo, Junfu Wang, Jiantao Zhou, and Yunhong Wang. Common knowledge learning for generating transferable adversarial examples. Frontiers Comput. Sci., 19(10):1910359, 2025. doi: 10.1007/S11704-024-40533-4."
+ } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 507, + 320 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 105, + 81, + 507, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 507, + 117 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 507, + 117 + ], + "type": "text", + "content": "Zhuolin Yang, Linyi Li, Xiaojun Xu, Shiliang Zuo, Qian Chen, Pan Zhou, Benjamin Rubinstein, Ce Zhang, and Bo Li. Trs: Transferability reduced ensemble via promoting gradient diversity and model smoothness. Advances in Neural Information Processing Systems, 34:17642-17655, 2021." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 122, + 507, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 122, + 507, + 158 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 507, + 158 + ], + "type": "text", + "content": "Mehmet Kerim Yucel, Ramazan Gokberk Cinbis, and Pinar Duygulu. Hybridaugment++: Unified frequency spectra perturbations for model robustness. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 5718-5728, 2023." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 163, + 501, + 177 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 163, + 501, + 177 + ], + "spans": [ + { + "bbox": [ + 105, + 163, + 501, + 177 + ], + "type": "text", + "content": "Sergey Zagoruyko and Nikos Komodakis. Wide residual networks. CoRR, abs/1605.07146, 2016." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 182, + 507, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 182, + 507, + 239 + ], + "spans": [ + { + "bbox": [ + 105, + 182, + 507, + 239 + ], + "type": "text", + "content": "Hongyang Zhang, Yaodong Yu, Jiantao Jiao, Eric P. Xing, Laurent El Ghaoui, and Michael I. Jordan. Theoretically principled trade-off between robustness and accuracy. In Kamalika Chaudhuri and Ruslan Salakhutdinov (eds.), Proceedings of the 36th International Conference on Machine Learning, ICML 2019, 9-15 June 2019, Long Beach, California, USA, volume 97 of Proceedings of Machine Learning Research, pp. 7472-7482. PMLR, 2019." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 245, + 507, + 280 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 245, + 507, + 280 + ], + "spans": [ + { + "bbox": [ + 105, + 245, + 507, + 280 + ], + "type": "text", + "content": "Wen Zhou, Xin Hou, Yongjun Chen, Mengyun Tang, Xiangqi Huang, Xiang Gan, and Yong Yang. Transferable adversarial perturbations. In Proceedings of the European Conference on Computer Vision (ECCV), pp. 452-467, 2018." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 286, + 507, + 320 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 286, + 507, + 320 + ], + "spans": [ + { + "bbox": [ + 105, + 286, + 507, + 320 + ], + "type": "text", + "content": "Yi Zhu, Chenglin Miao, Tianhang Zheng, Foad Hajiaghajani, Lu Su, and Chunming Qiao. Can we use arbitrary objects to attack lidar perception in autonomous driving? In Proceedings of the 2021 ACM SIGSAC Conference on Computer and Communications Security, pp. 1945-1960, 2021." + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 222, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 222, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 222, + 94 + ], + "type": "text", + "content": "A OMITTED PROOFS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 109, + 253, + 121 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 109, + 253, + 121 + ], + "spans": [ + { + "bbox": [ + 105, + 109, + 253, + 121 + ], + "type": "text", + "content": "A.1 PROOF OF INEQUALITY (6):" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 131, + 360, + 183 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 131, + 360, + 183 + ], + "spans": [ + { + "bbox": [ + 107, + 131, + 360, + 183 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\sum_ {y _ {t} \\in \\mathcal {Y}} \\operatorname {V r} \\left(F _ {\\mathrm {E}}, y _ {t}\\right) \\\\ = \\sum_ {y _ {t} \\in \\mathcal {Y}} \\mathbb {E} _ {(x, y) \\sim \\mathcal {D}} \\left[ \\mathbb {I} \\left\\{F _ {\\mathrm {E}} (x) = y \\wedge F _ {\\mathrm {E}} (\\mathcal {A} (x)) = y _ {t} \\right\\} \\right] \\\\ = \\sum_ {y _ {t} \\in \\mathcal {Y}} \\sum_ {(x, y) \\in \\mathcal {D}} p _ {(x, y)} \\left[ \\mathbb {I} \\left\\{F _ {\\mathrm {E}} (x) = y \\wedge F _ {\\mathrm {E}} (\\mathcal {A} (x)) = y _ {t} \\right\\} \\right]. \\\\ \\end{array}", + "image_path": "7278311629a29e51488df29264c3aa4b09f706bea0488b39049c75bbf4855c25.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 187, + 443, + 199 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 187, + 443, + 199 + ], + "spans": [ + { + "bbox": [ + 104, + 187, + 443, + 199 + ], + "type": "text", + "content": "Then, we interchange the order of summation, and so the above equation is equal to" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 205, + 348, + 249 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 205, + 348, + 249 + ], + "spans": [ + { + "bbox": [ + 107, + 205, + 348, + 249 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\left. 
\\sum_ {(x, y) \\in \\mathcal {D}} p _ {(x, y)} \\sum_ {y _ {t} \\in \\mathcal {Y}} \\left[ \\mathbb {I} \\left\\{F _ {\\mathrm {E}} (x) = y \\wedge F _ {\\mathrm {E}} (\\mathcal {A} (x)) = y _ {t} \\right\\} \\right] \\right. \\\\ = \\mathbb {E} _ {(x, y) \\sim \\mathcal {D}} \\Big [ \\sum_ {y _ {t} \\in \\mathcal {Y}} \\mathbb {I} \\big \\{F _ {\\mathrm {E}} (x) = y \\wedge F _ {\\mathrm {E}} (\\mathcal {A} (x)) = y _ {t} \\big \\} \\Big ]. \\\\ \\end{array}", + "image_path": "a18af01b3eeca59c332dd9940f3eba28b4c1d25461767f9f691dda73e9601756.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 254, + 504, + 291 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 254, + 504, + 291 + ], + "spans": [ + { + "bbox": [ + 104, + 254, + 504, + 291 + ], + "type": "text", + "content": "For each " + }, + { + "bbox": [ + 104, + 254, + 504, + 291 + ], + "type": "inline_equation", + "content": "(x,y)" + }, + { + "bbox": [ + 104, + 254, + 504, + 291 + ], + "type": "text", + "content": ", without loss of generality, let " + }, + { + "bbox": [ + 104, + 254, + 504, + 291 + ], + "type": "inline_equation", + "content": "F_{\\mathrm{E}}(\\mathcal{A}(x)) = y_0" + }, + { + "bbox": [ + 104, + 254, + 504, + 291 + ], + "type": "text", + "content": ". For " + }, + { + "bbox": [ + 104, + 254, + 504, + 291 + ], + "type": "inline_equation", + "content": "y_t \\neq y_0" + }, + { + "bbox": [ + 104, + 254, + 504, + 291 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 254, + 504, + 291 + ], + "type": "inline_equation", + "content": "\\mathbb{I}\\big\\{F_{\\mathrm{E}}(x) = y \\wedge F_{\\mathrm{E}}(\\mathcal{A}(x)) = y_t\\big\\} = 0" + }, + { + "bbox": [ + 104, + 254, + 504, + 291 + ], + "type": "text", + "content": ". For " + }, + { + "bbox": [ + 104, + 254, + 504, + 291 + ], + "type": "inline_equation", + "content": "y_t = y_0" + }, + { + "bbox": [ + 104, + 254, + 504, + 291 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 254, + 504, + 291 + ], + "type": "inline_equation", + "content": "\\mathbb{I}\\big\\{F_{\\mathrm{E}}(x) = y \\wedge F_{\\mathrm{E}}(\\mathcal{A}(x)) = y_t\\big\\} = \\mathbb{I}\\big\\{F_{\\mathrm{E}}(x) = y\\big\\}" + }, + { + "bbox": [ + 104, + 254, + 504, + 291 + ], + "type": "text", + "content": ". So the above equation is equal to" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 296, + 440, + 365 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 296, + 440, + 365 + ], + "spans": [ + { + "bbox": [ + 107, + 296, + 440, + 365 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathbb {E} _ {(x, y) \\sim \\mathcal {D}} \\left[ \\mathbb {I} \\left\\{F _ {\\mathrm {E}} (x) = y \\right\\} \\right] \\\\ = \\mathbb {E} _ {(x, y) \\sim \\mathcal {D}} \\left[ \\mathbb {I} \\left\\{F _ {\\mathrm {E}} (x) = y \\wedge \\left(F _ {\\mathrm {E}} (\\mathcal {A} (x)) \\neq y \\vee F _ {\\mathrm {E}} (\\mathcal {A} (x)) = y\\right) \\right\\} \\right] \\\\ = \\mathbb {E} _ {(x, y) \\sim \\mathcal {D}} \\left[ \\mathbb {I} \\left\\{F _ {\\mathrm {E}} (x) = y \\wedge F _ {\\mathrm {E}} (\\mathcal {A} (x)) \\neq y \\right\\} + \\mathbb {I} \\left\\{F _ {\\mathrm {E}} (x) = y \\wedge F _ {\\mathrm {E}} (\\mathcal {A} (x)) = y \\right\\} \\right]. 
\\\\ \\end{array}", + "image_path": "def729f10ddf4d1b1213e20dfa47f33c96362946fa22ab7f764e7629cf6cfe84.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 369, + 504, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 369, + 504, + 392 + ], + "spans": [ + { + "bbox": [ + 104, + 369, + 504, + 392 + ], + "type": "text", + "content": "We split " + }, + { + "bbox": [ + 104, + 369, + 504, + 392 + ], + "type": "inline_equation", + "content": "\\mathbb{I}(.)" + }, + { + "bbox": [ + 104, + 369, + 504, + 392 + ], + "type": "text", + "content": " because " + }, + { + "bbox": [ + 104, + 369, + 504, + 392 + ], + "type": "inline_equation", + "content": "F_{\\mathrm{E}}(\\mathcal{A}(x)) \\neq y" + }, + { + "bbox": [ + 104, + 369, + 504, + 392 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 369, + 504, + 392 + ], + "type": "inline_equation", + "content": "F_{\\mathrm{E}}(\\mathcal{A}(x)) = y" + }, + { + "bbox": [ + 104, + 369, + 504, + 392 + ], + "type": "text", + "content": " are mutually exclusive. Then, the above equation is equal to" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 398, + 326, + 434 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 398, + 326, + 434 + ], + "spans": [ + { + "bbox": [ + 107, + 398, + 326, + 434 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\operatorname {V r} \\left(F _ {\\mathrm {E}}\\right) + \\mathbb {E} _ {(x, y) \\sim \\mathcal {D}} \\left[ \\mathbb {I} \\left\\{F _ {\\mathrm {E}} (x) = y \\wedge F _ {\\mathrm {E}} (\\mathcal {A} (x)) = y \\right\\} \\right] \\\\ \\geq \\operatorname {V r} \\left(F _ {\\mathrm {E}}\\right) \\\\ \\end{array}", + "image_path": "9e50582296ac5e1274ea3b0ac4ce33cb903151fd53ddc65c010eb191e36fc990.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 438, + 394, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 438, + 394, + 453 + ], + "spans": [ + { + "bbox": [ + 104, + 438, + 394, + 453 + ], + "type": "text", + "content": "Overall, we obtain the inequality (6): " + }, + { + "bbox": [ + 104, + 438, + 394, + 453 + ], + "type": "inline_equation", + "content": "\\sum_{y_t\\in \\mathcal{Y}}\\mathrm{Vr}(F_{\\mathrm{E}},y_t)\\geq \\mathrm{Vr}(F_{\\mathrm{E}})" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 472, + 253, + 484 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 472, + 253, + 484 + ], + "spans": [ + { + "bbox": [ + 105, + 472, + 253, + 484 + ], + "type": "text", + "content": "B FREQUENCY SELECTION" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 495, + 506, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 495, + 506, + 562 + ], + "spans": [ + { + "bbox": [ + 104, + 495, + 506, + 562 + ], + "type": "text", + "content": "Figure 4 illustrates an example to show that, if we keep high-amplitude frequencies and remove low-amplitude ones, the image is changed slightly even with adding certain noise (i.e., we can still recognize the ground truth from the modified image). On the other hand, if we keep the low-amplitude frequencies only, the semantic information is almost missing. This observation suggests that high-amplitude frequency features are more strongly related to the semantic information of image." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 583, + 346, + 594 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 583, + 346, + 594 + ], + "spans": [ + { + "bbox": [ + 105, + 583, + 346, + 594 + ], + "type": "text", + "content": "C RANDOM NOISE BASED TRANSFORMATION" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "text", + "content": "Random noise based transformation: This approach substitutes the identified non-robust frequencies with Gaussian noise. For an " + }, + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "inline_equation", + "content": "N \\times N" + }, + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "text", + "content": " image, we take the non-robust frequencies based on the pre-specified threshold " + }, + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "text", + "content": ", and replace them with random vector for each sub-model in our experiment. In particular, to further increase the randomness, we perform this transformation for each epoch in the training stage. If we select the top " + }, + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "text", + "content": " non-robust frequencies, the overall dimensionality of the edited random feature should be " + }, + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "inline_equation", + "content": "s \\times E" + }, + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "text", + "content": " (we concatenate those " + }, + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "text", + "content": "-dimensional features together), where " + }, + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "text", + "content": " is the number of epochs. For example, if " + }, + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "inline_equation", + "content": "N = 32" + }, + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "inline_equation", + "content": "s = N^2 / 2" + }, + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "inline_equation", + "content": "E = 200" + }, + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "text", + "content": ", the overall dimensionality can be as large as " + }, + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "inline_equation", + "content": "10^5" + }, + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "text", + "content": ". 
Because these " + }, + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "text", + "content": " features are random and have high dimensions, they are very likely to be nearly orthogonal with each other (this phenomenon in high-dimensional geometry can be proved by the central limit theorem (Wegner, 2021)). As a consequence, they tend to yield diverse training results for the sub-models." + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 187, + 83, + 422, + 216 + ], + "blocks": [ + { + "bbox": [ + 187, + 83, + 422, + 216 + ], + "lines": [ + { + "bbox": [ + 187, + 83, + 422, + 216 + ], + "spans": [ + { + "bbox": [ + 187, + 83, + 422, + 216 + ], + "type": "image", + "image_path": "4f4ad8a9ae15fd8ff6ef622e7b947bd3b49e9657db4fb8f2ef987138577adde8.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 226, + 506, + 295 + ], + "lines": [ + { + "bbox": [ + 104, + 226, + 506, + 295 + ], + "spans": [ + { + "bbox": [ + 104, + 226, + 506, + 295 + ], + "type": "text", + "content": "Figure 4: The first and second rows are the figures by adding random noise to high-amplitude and low-amplitude frequencies, respectively. \"20% changed\" for the first row means we remove the 20% lowest-amplitudes frequencies, and add small noise to the remaining high-amplitude frequencies. \"20% changed\" for the second row means we remove the 20% highest-amplitude frequencies, and add small noise to the remaining low-amplitude frequencies. \"50% changed\" and \"80% changed\" follow the same procedure as \"20% changed\"." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 327, + 506, + 406 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 327, + 506, + 406 + ], + "spans": [ + { + "bbox": [ + 104, + 327, + 506, + 406 + ], + "type": "text", + "content": "The implementation details are as follows. Given an image " + }, + { + "bbox": [ + 104, + 327, + 506, + 406 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 327, + 506, + 406 + ], + "type": "text", + "content": ", we perform Fourier Transform on " + }, + { + "bbox": [ + 104, + 327, + 506, + 406 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 327, + 506, + 406 + ], + "type": "text", + "content": " and also on a generated Gaussian noise " + }, + { + "bbox": [ + 104, + 327, + 506, + 406 + ], + "type": "inline_equation", + "content": "n_0" + }, + { + "bbox": [ + 104, + 327, + 506, + 406 + ], + "type": "text", + "content": ". 
Then, we can obtain the low-amplitude frequencies and high-amplitude frequencies of " + }, + { + "bbox": [ + 104, + 327, + 506, + 406 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 327, + 506, + 406 + ], + "type": "text", + "content": " by setting an amplitude threshold. Next, we generate two masks " + }, + { + "bbox": [ + 104, + 327, + 506, + 406 + ], + "type": "inline_equation", + "content": "(M_1" + }, + { + "bbox": [ + 104, + 327, + 506, + 406 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 327, + 506, + 406 + ], + "type": "inline_equation", + "content": "M_2" + }, + { + "bbox": [ + 104, + 327, + 506, + 406 + ], + "type": "text", + "content": ") to select high-amplitude frequencies and low-amplitude frequencies. We add the low-amplitude frequencies of " + }, + { + "bbox": [ + 104, + 327, + 506, + 406 + ], + "type": "inline_equation", + "content": "n_0" + }, + { + "bbox": [ + 104, + 327, + 506, + 406 + ], + "type": "text", + "content": " (i.e., " + }, + { + "bbox": [ + 104, + 327, + 506, + 406 + ], + "type": "inline_equation", + "content": "M_2(n_0)" + }, + { + "bbox": [ + 104, + 327, + 506, + 406 + ], + "type": "text", + "content": ") to high-amplitude frequencies of " + }, + { + "bbox": [ + 104, + 327, + 506, + 406 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 327, + 506, + 406 + ], + "type": "text", + "content": " (i.e., " + }, + { + "bbox": [ + 104, + 327, + 506, + 406 + ], + "type": "inline_equation", + "content": "M_1(x)" + }, + { + "bbox": [ + 104, + 327, + 506, + 406 + ], + "type": "text", + "content": "), and obtain the transformation of " + }, + { + "bbox": [ + 104, + 327, + 506, + 406 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 327, + 506, + 406 + ], + "type": "text", + "content": " (denoted as " + }, + { + "bbox": [ + 104, + 327, + 506, + 406 + ], + "type": "inline_equation", + "content": "\\pi(x)" + }, + { + "bbox": [ + 104, + 327, + 506, + 406 + ], + "type": "text", + "content": "). Finally, we transform " + }, + { + "bbox": [ + 104, + 327, + 506, + 406 + ], + "type": "inline_equation", + "content": "\\pi(x)" + }, + { + "bbox": [ + 104, + 327, + 506, + 406 + ], + "type": "text", + "content": " to time domain by inverse Fourier transform and train the model with " + }, + { + "bbox": [ + 104, + 327, + 506, + 406 + ], + "type": "inline_equation", + "content": "\\pi(x)" + }, + { + "bbox": [ + 104, + 327, + 506, + 406 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 432, + 237, + 444 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 432, + 237, + 444 + ], + "spans": [ + { + "bbox": [ + 105, + 432, + 237, + 444 + ], + "type": "text", + "content": "D ALGORITHM OF FDT" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 463, + 504, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 463, + 504, + 486 + ], + "spans": [ + { + "bbox": [ + 104, + 463, + 504, + 486 + ], + "type": "text", + "content": "Algorithm 1 shows the overall framework of training an ensemble model with FDT. It illustrates that our data transformation is performed at each iteration." 
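+ }, + { + "bbox": [ + 104, + 463, + 504, + 486 + ], + "type": "text", + "content": "A minimal sketch of this per-epoch frequency-domain transformation, assuming the masks come from a single quantile-style threshold; the function name fdt_random and the default tau = 0.5 are illustrative (tau = 0.5 loosely mirrors the s = N^2/2 example), not the paper's exact implementation." + }, + { + "bbox": [ + 104, + 463, + 504, + 486 + ], + "type": "text", + "content": "import numpy as np\n\ndef fdt_random(x, tau=0.5):\n    # pi(x) = M1(x) + M2(n0): keep the high-amplitude frequencies of x\n    # and substitute the low-amplitude (non-robust) ones with the\n    # corresponding frequencies of a freshly sampled Gaussian noise n0.\n    X = np.fft.fft2(x)\n    N0 = np.fft.fft2(np.random.randn(*x.shape))  # Fourier transform of n0\n    amp = np.abs(X)\n    M1 = amp >= np.quantile(amp, tau)  # high-amplitude mask (M2 = ~M1)\n    pi_X = np.where(M1, X, N0)  # M1(X) + M2(N0)\n    return np.real(np.fft.ifft2(pi_X))  # back via inverse Fourier transform"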
+ } + ] + } + ], + "index": 5 + }, + { + "type": "code", + "bbox": [ + 106, + 521, + 436, + 610 + ], + "blocks": [ + { + "bbox": [ + 106, + 508, + 313, + 519 + ], + "lines": [ + { + "bbox": [ + 106, + 508, + 313, + 519 + ], + "spans": [ + { + "bbox": [ + 106, + 508, + 313, + 519 + ], + "type": "text", + "content": "Algorithm 1 Training ensemble model with FDT" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 106, + 521, + 436, + 610 + ], + "lines": [ + { + "bbox": [ + 106, + 521, + 436, + 610 + ], + "spans": [ + { + "bbox": [ + 106, + 521, + 436, + 610 + ], + "type": "text", + "content": "Input: dataset " + }, + { + "bbox": [ + 106, + 521, + 436, + 610 + ], + "type": "inline_equation", + "content": "\\mathcal{X}\\times \\mathcal{Y}" + }, + { + "bbox": [ + 106, + 521, + 436, + 610 + ], + "type": "text", + "content": " , the number of sub-models " + }, + { + "bbox": [ + 106, + 521, + 436, + 610 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 106, + 521, + 436, + 610 + ], + "type": "text", + "content": " , and the epoch number " + }, + { + "bbox": [ + 106, + 521, + 436, + 610 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 106, + 521, + 436, + 610 + ], + "type": "text", + "content": " Output: sub-model " + }, + { + "bbox": [ + 106, + 521, + 436, + 610 + ], + "type": "inline_equation", + "content": "\\beta_{1},\\beta_{2},\\dots ,\\beta_{M}" + }, + { + "bbox": [ + 106, + 521, + 436, + 610 + ], + "type": "text", + "content": " \nfor " + }, + { + "bbox": [ + 106, + 521, + 436, + 610 + ], + "type": "inline_equation", + "content": "i = 1" + }, + { + "bbox": [ + 106, + 521, + 436, + 610 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 106, + 521, + 436, + 610 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 106, + 521, + 436, + 610 + ], + "type": "text", + "content": " do Run Targeted-attack Transformation and obtain " + }, + { + "bbox": [ + 106, + 521, + 436, + 610 + ], + "type": "inline_equation", + "content": "P_{1},P_{2},\\dots ,P_{M}" + }, + { + "bbox": [ + 106, + 521, + 436, + 610 + ], + "type": "text", + "content": " . for " + }, + { + "bbox": [ + 106, + 521, + 436, + 610 + ], + "type": "inline_equation", + "content": "j = 1" + }, + { + "bbox": [ + 106, + 521, + 436, + 610 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 106, + 521, + 436, + 610 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 106, + 521, + 436, + 610 + ], + "type": "text", + "content": " do train " + }, + { + "bbox": [ + 106, + 521, + 436, + 610 + ], + "type": "inline_equation", + "content": "\\beta_{j}" + }, + { + "bbox": [ + 106, + 521, + 436, + 610 + ], + "type": "text", + "content": " on " + }, + { + "bbox": [ + 106, + 521, + 436, + 610 + ], + "type": "inline_equation", + "content": "P_{j}" + }, + { + "bbox": [ + 106, + 521, + 436, + 610 + ], + "type": "text", + "content": " \nend for \nend for" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "code_body" + } + ], + "index": 7, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 104, + 632, + 506, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 632, + 506, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 632, + 506, + 732 + ], + "type": "text", + "content": "Algorithm 2 shows the details of targeted-attack transformation method on the whole dataset. 
For each specific image " + }, + { + "bbox": [ + 104, + 632, + 506, + 732 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 632, + 506, + 732 + ], + "type": "text", + "content": ", we obtain the targeted class according to the allocation scheme mentioned in \"Stage (1)\". Then, we use targeted PGD attack to obtain the adversarial sample " + }, + { + "bbox": [ + 104, + 632, + 506, + 732 + ], + "type": "inline_equation", + "content": "x'" + }, + { + "bbox": [ + 104, + 632, + 506, + 732 + ], + "type": "text", + "content": ". After that, we perform Fourier Transform on " + }, + { + "bbox": [ + 104, + 632, + 506, + 732 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 632, + 506, + 732 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 632, + 506, + 732 + ], + "type": "inline_equation", + "content": "x'" + }, + { + "bbox": [ + 104, + 632, + 506, + 732 + ], + "type": "text", + "content": ", and we can obtain the low-amplitude frequencies and high-amplitude frequencies of " + }, + { + "bbox": [ + 104, + 632, + 506, + 732 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 632, + 506, + 732 + ], + "type": "text", + "content": " by setting an amplitude threshold. Next, we generate two masks " + }, + { + "bbox": [ + 104, + 632, + 506, + 732 + ], + "type": "inline_equation", + "content": "(M_1" + }, + { + "bbox": [ + 104, + 632, + 506, + 732 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 632, + 506, + 732 + ], + "type": "inline_equation", + "content": "M_2" + }, + { + "bbox": [ + 104, + 632, + 506, + 732 + ], + "type": "text", + "content": ") to select high-amplitude frequencies and low-amplitude frequencies. We add the low-amplitude frequencies of " + }, + { + "bbox": [ + 104, + 632, + 506, + 732 + ], + "type": "inline_equation", + "content": "x'" + }, + { + "bbox": [ + 104, + 632, + 506, + 732 + ], + "type": "text", + "content": " (i.e., " + }, + { + "bbox": [ + 104, + 632, + 506, + 732 + ], + "type": "inline_equation", + "content": "M_2(x')" + }, + { + "bbox": [ + 104, + 632, + 506, + 732 + ], + "type": "text", + "content": ") to high-amplitude frequencies of " + }, + { + "bbox": [ + 104, + 632, + 506, + 732 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 632, + 506, + 732 + ], + "type": "text", + "content": " (i.e., " + }, + { + "bbox": [ + 104, + 632, + 506, + 732 + ], + "type": "inline_equation", + "content": "M_1(x)" + }, + { + "bbox": [ + 104, + 632, + 506, + 732 + ], + "type": "text", + "content": "), and obtain the transformation of " + }, + { + "bbox": [ + 104, + 632, + 506, + 732 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 632, + 506, + 732 + ], + "type": "text", + "content": " (denoted as " + }, + { + "bbox": [ + 104, + 632, + 506, + 732 + ], + "type": "inline_equation", + "content": "\\pi(x)" + }, + { + "bbox": [ + 104, + 632, + 506, + 732 + ], + "type": "text", + "content": "). Finally, we transform " + }, + { + "bbox": [ + 104, + 632, + 506, + 732 + ], + "type": "inline_equation", + "content": "\\pi(x)" + }, + { + "bbox": [ + 104, + 632, + 506, + 732 + ], + "type": "text", + "content": " to time domain by inverse Fourier transform." 
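+ }, + { + "bbox": [ + 104, + 632, + 506, + 732 + ], + "type": "text", + "content": "A minimal sketch of this targeted-attack transformation, assuming the targeted adversarial example x_adv has already been generated (e.g., by targeted PGD on a surrogate model) and that the masks again come from a single quantile-style threshold tau; names and defaults are illustrative." + }, + { + "bbox": [ + 104, + 632, + 506, + 732 + ], + "type": "text", + "content": "import numpy as np\n\ndef fdt_target(x, x_adv, tau=0.5):\n    # pi(x) = M1(x) + M2(x'): the high-amplitude frequencies come from\n    # the clean image x, the low-amplitude ones from its targeted\n    # adversarial example x_adv.\n    X = np.fft.fft2(x)\n    X_adv = np.fft.fft2(x_adv)\n    amp = np.abs(X)\n    M1 = amp >= np.quantile(amp, tau)  # high-amplitude mask on x\n    pi_X = np.where(M1, X, X_adv)  # M1(X) + M2(X')\n    return np.real(np.fft.ifft2(pi_X))  # back via inverse Fourier transform"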
+ } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 106, + 95, + 504, + 273 + ], + "blocks": [ + { + "bbox": [ + 106, + 82, + 299, + 94 + ], + "lines": [ + { + "bbox": [ + 106, + 82, + 299, + 94 + ], + "spans": [ + { + "bbox": [ + 106, + 82, + 299, + 94 + ], + "type": "text", + "content": "Algorithm 2 Targeted-attack Transformation" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 106, + 95, + 504, + 273 + ], + "lines": [ + { + "bbox": [ + 106, + 95, + 504, + 273 + ], + "spans": [ + { + "bbox": [ + 106, + 95, + 504, + 273 + ], + "type": "text", + "content": "Input: dataset " + }, + { + "bbox": [ + 106, + 95, + 504, + 273 + ], + "type": "inline_equation", + "content": "P_{ori}" + }, + { + "bbox": [ + 106, + 95, + 504, + 273 + ], + "type": "text", + "content": " , number M, steps s, class number k \nOutput: Transformed data " + }, + { + "bbox": [ + 106, + 95, + 504, + 273 + ], + "type": "inline_equation", + "content": "P_{1},P_{2},\\dots ,P_{M}" + }, + { + "bbox": [ + 106, + 95, + 504, + 273 + ], + "type": "text", + "content": " \nDivide the dataset " + }, + { + "bbox": [ + 106, + 95, + 504, + 273 + ], + "type": "inline_equation", + "content": "P_{ori}" + }, + { + "bbox": [ + 106, + 95, + 504, + 273 + ], + "type": "text", + "content": " into k parts " + }, + { + "bbox": [ + 106, + 95, + 504, + 273 + ], + "type": "inline_equation", + "content": "\\{C_1,C_2,\\dots ,C_k\\}" + }, + { + "bbox": [ + 106, + 95, + 504, + 273 + ], + "type": "text", + "content": " according to labels \nRandomly partition the dataset " + }, + { + "bbox": [ + 106, + 95, + 504, + 273 + ], + "type": "inline_equation", + "content": "C_j" + }, + { + "bbox": [ + 106, + 95, + 504, + 273 + ], + "type": "text", + "content": " equally into disjoint " + }, + { + "bbox": [ + 106, + 95, + 504, + 273 + ], + "type": "inline_equation", + "content": "k - 1" + }, + { + "bbox": [ + 106, + 95, + 504, + 273 + ], + "type": "text", + "content": " parts " + }, + { + "bbox": [ + 106, + 95, + 504, + 273 + ], + "type": "inline_equation", + "content": "\\{C_{j,1},C_{j,2},\\dots ,C_{j,k - 1}\\}" + }, + { + "bbox": [ + 106, + 95, + 504, + 273 + ], + "type": "text", + "content": " \nInitialize " + }, + { + "bbox": [ + 106, + 95, + 504, + 273 + ], + "type": "inline_equation", + "content": "P_{1},P_{2},\\dots ,P_{M}" + }, + { + "bbox": [ + 106, + 95, + 504, + 273 + ], + "type": "text", + "content": " with empty set; \n" + }, + { + "bbox": [ + 106, + 95, + 504, + 273 + ], + "type": "inline_equation", + "content": "m\\gets 0" + }, + { + "bbox": [ + 106, + 95, + 504, + 273 + ], + "type": "text", + "content": " \nfor " + }, + { + "bbox": [ + 106, + 95, + 504, + 273 + ], + "type": "inline_equation", + "content": "j = 1" + }, + { + "bbox": [ + 106, + 95, + 504, + 273 + ], + "type": "text", + "content": " 
to k do\nfor " + }, + { + "bbox": [ + 106, + 95, + 504, + 273 + ], + "type": "inline_equation", + "content": "i = 1" + }, + { + "bbox": [ + 106, + 95, + 504, + 273 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 106, + 95, + 504, + 273 + ], + "type": "inline_equation", + "content": "k - 1" + }, + { + "bbox": [ + 106, + 95, + 504, + 273 + ], + "type": "text", + "content": " do\n" + }, + { + "bbox": [ + 106, + 95, + 504, + 273 + ], + "type": "inline_equation", + "content": "C_{j,i}^{\\prime}\\gets" + }, + { + "bbox": [ + 106, + 95, + 504, + 273 + ], + "type": "text", + "content": " calculate targeted attack example in " + }, + { + "bbox": [ + 106, + 95, + 504, + 273 + ], + "type": "inline_equation", + "content": "C_{j,i}" + }, + { + "bbox": [ + 106, + 95, + 504, + 273 + ], + "type": "text", + "content": " with label " + }, + { + "bbox": [ + 106, + 95, + 504, + 273 + ], + "type": "inline_equation", + "content": "i + j" + }, + { + "bbox": [ + 106, + 95, + 504, + 273 + ], + "type": "text", + "content": " mod " + }, + { + "bbox": [ + 106, + 95, + 504, + 273 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 106, + 95, + 504, + 273 + ], + "type": "text", + "content": ", and perform data transformation on each image\nfor " + }, + { + "bbox": [ + 106, + 95, + 504, + 273 + ], + "type": "inline_equation", + "content": "s = 1" + }, + { + "bbox": [ + 106, + 95, + 504, + 273 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 106, + 95, + 504, + 273 + ], + "type": "inline_equation", + "content": "\\lceil \\frac{M}{2}\\rceil +1" + }, + { + "bbox": [ + 106, + 95, + 504, + 273 + ], + "type": "text", + "content": " do\n" + }, + { + "bbox": [ + 106, + 95, + 504, + 273 + ], + "type": "inline_equation", + "content": "m\\gets m + 1" + }, + { + "bbox": [ + 106, + 95, + 504, + 273 + ], + "type": "text", + "content": " mod M\nAppend " + }, + { + "bbox": [ + 106, + 95, + 504, + 273 + ], + "type": "inline_equation", + "content": "C_{j,i}^{\\prime}" + }, + { + "bbox": [ + 106, + 95, + 504, + 273 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 106, + 95, + 504, + 273 + ], + "type": "inline_equation", + "content": "P_{m}" + }, + { + "bbox": [ + 106, + 95, + 504, + 273 + ], + "type": "text", + "content": "\nend for\nend for\nend for" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "code_body" + } + ], + "index": 2, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 105, + 297, + 193, + 309 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 297, + 193, + 309 + ], + "spans": [ + { + "bbox": [ + 105, + 297, + 193, + 309 + ], + "type": "text", + "content": "E IMPLEMENTATION DETAILS" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 325, + 506, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 325, + 506, + 425 + ], + "spans": [ + { + "bbox": [ + 104, + 325, + 506, + 425 + ], + "type": "text", + "content": "In this section, we provide more experimental details. In our work, we utilize the CIFAR-10 (Krizhevsky & Hinton, 2009), CIFAR-100 (Krizhevsky & Hinton, 2009), and Tiny-ImageNet-200 (Deng et al., 2009) datasets. In the testing process, the primary reason for selecting FGSM (Goodfellow et al., 2015), PGD (Madry et al., 2018), BIM, MIM (Dong et al., 2018), and C&W (Carlini & Wagner, 2017) as attack methods is to stay consistent with the baseline methods from the literature (Carlini et al., 2019). 
Further, we select AA (Croce & Hein, 2020) because it is also a popular attack method and is more powerful than those base methods. To reduce the computational complexity of targeted attacks, we leverage the transferability of adversarial examples and utilize a pre-trained simple network structure (VGG11 (Simonyan & Zisserman, 2015)) for targeted attacks." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "spans": [ + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "text", + "content": "Further, we introduce the implementation of \"FDT-random\", \"FDT-target\" and \"FDT-hybrid\" here. For \"FDT-random\", we perform Fourier Transform on " + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "text", + "content": " and also on a randomly sampled standard Gaussian noise " + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "inline_equation", + "content": "n_0" + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "text", + "content": ". Then, we can obtain the low-amplitude frequencies and high-amplitude frequencies of " + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "text", + "content": " by setting an amplitude threshold. Next, we generate two masks " + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "inline_equation", + "content": "(M_1" + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "inline_equation", + "content": "M_2" + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "text", + "content": ") to select high-amplitude frequencies and low-amplitude frequencies. We add the low-amplitude frequencies of " + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "inline_equation", + "content": "n_0" + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "text", + "content": " (i.e., " + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "inline_equation", + "content": "M_2(n_0)" + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "text", + "content": ") to the high-amplitude frequencies of " + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "text", + "content": " (i.e., " + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "inline_equation", + "content": "M_1(x)" + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "text", + "content": "), and obtain the transformation of " + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "text", + "content": " (denoted as " + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "inline_equation", + "content": "\\pi(x)" + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "text", + "content": "). 
Finally, we transform " + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "inline_equation", + "content": "\\pi(x)" + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "text", + "content": " to the time domain by inverse Fourier transform and train the model with " + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "inline_equation", + "content": "\\pi(x)" + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "text", + "content": ". For \"FDT-target\", we obtain the targeted class according to the allocation scheme mentioned in \"Stage (1)\". Then, we use a targeted PGD attack to obtain the adversarial sample " + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "inline_equation", + "content": "x'" + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "text", + "content": ". After that, we perform the same steps as with FDT-random (we substitute " + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "inline_equation", + "content": "n_0" + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "inline_equation", + "content": "x'" + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "text", + "content": "). For \"FDT-hybrid\", we set two frequency selection thresholds " + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "inline_equation", + "content": "\\tau_1" + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "inline_equation", + "content": "\\tau_2" + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "inline_equation", + "content": "\\tau_1 < \\tau_2" + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "text", + "content": "), and generate three masks to select the frequencies: " + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "inline_equation", + "content": "M_1" + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "text", + "content": " for the high-amplitude frequencies (amplitude " + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "inline_equation", + "content": "> \\tau_2" + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "text", + "content": "), " + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "inline_equation", + "content": "M_2" + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "text", + "content": " for the middle-amplitude frequencies (" + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "inline_equation", + "content": "\\tau_1 <" + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "text", + "content": " amplitude " + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "inline_equation", + "content": "< \\tau_2" + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "text", + "content": "), and " + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "inline_equation", + "content": "M_3" + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "text", + "content": " for the low-amplitude frequencies (amplitude " + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "inline_equation", + "content": "< \\tau_1" + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "text", + "content": "). 
Next, we combine " + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "inline_equation", + "content": "M_1(x), M_2(x')" + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "inline_equation", + "content": "M_3(n_0)" + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "text", + "content": " to obtain the transformation " + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "inline_equation", + "content": "\\pi(x)" + }, + { + "bbox": [ + 104, + 429, + 506, + 586 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 605, + 324, + 617 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 605, + 324, + 617 + ], + "spans": [ + { + "bbox": [ + 105, + 605, + 324, + 617 + ], + "type": "text", + "content": "F ADDITIONAL EXPERIMENTAL RESULTS" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "text", + "content": "In this section, we provide more experimental results. Firstly, we extend our experiments to SVHN, Tiny-ImageNet-200, and WideResNet-28-10 in Appendix F.1. We also conduct the ablation studies on weakness set allocation method, amplitude-based selection threshold and model architecture in Appendix F.1. Then, we evaluate the performance of FDT under black-box attacks on the CIFAR-10 and CIFAR-100 in Appendix F.2. Then we present the trade-off between clean accuracy and robust accuracy on the CIFAR-100 using FDT method in Appendix F.3. This trade-off sheds light on the effectiveness of FDT with changing the trade-off parameter. Additionally, in Appendix F.4, we compare the transferability across various sub-models with the baseline methods. Furthermore, we compare our method with more related methods in Appendix F.5." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 83, + 216, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 83, + 216, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 83, + 216, + 94 + ], + "type": "text", + "content": "F.1 ABLATION STUDIES" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 103, + 504, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 103, + 504, + 137 + ], + "spans": [ + { + "bbox": [ + 104, + 103, + 504, + 137 + ], + "type": "text", + "content": "In this section, we extend our experiments to additional datasets (SVHN, Tiny-ImageNet-200) and architecture (WideResNet-28-10). 
We also explore the ablation studies on weakness set allocation method, amplitude-based selection threshold and model architecture." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 142, + 506, + 198 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 142, + 506, + 198 + ], + "spans": [ + { + "bbox": [ + 104, + 142, + 506, + 198 + ], + "type": "text", + "content": "Table 3 presents the performance of ensemble methods trained with ResNet-20 on SVHN against several widely used white-box attacks. The experimental results demonstrate that all ensemble models achieve comparable levels of clean accuracy. Specifically, the FDT approach exhibits better robust accuracy than the other methods. These observations highlight the effectiveness of FDT in achieving favorable clean accuracy and robustness of ensemble models." + } + ] + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 156, + 261, + 455, + 414 + ], + "blocks": [ + { + "bbox": [ + 104, + 217, + 506, + 252 + ], + "lines": [ + { + "bbox": [ + 104, + 217, + 506, + 252 + ], + "spans": [ + { + "bbox": [ + 104, + 217, + 506, + 252 + ], + "type": "text", + "content": "Table 3: Robust Accuracy (%) of different ensemble methods against white-box attacks on SVHN. The " + }, + { + "bbox": [ + 104, + 217, + 506, + 252 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 104, + 217, + 506, + 252 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 217, + 506, + 252 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 104, + 217, + 506, + 252 + ], + "type": "text", + "content": " stand for the " + }, + { + "bbox": [ + 104, + 217, + 506, + 252 + ], + "type": "inline_equation", + "content": "l_{\\infty}" + }, + { + "bbox": [ + 104, + 217, + 506, + 252 + ], + "type": "text", + "content": " norm of the adversarial perturbation and the coefficient of C&W attack respectively. The last column is the ensemble model trained with FDT-hybrid." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 156, + 261, + 455, + 414 + ], + "lines": [ + { + "bbox": [ + 156, + 261, + 455, + 414 + ], + "spans": [ + { + "bbox": [ + 156, + 261, + 455, + 414 + ], + "type": "table", + "html": "
<table><tr><td>SVHN</td><td>ADP</td><td>GAL</td><td>DVERGE</td><td>TRS</td><td>FDT-hybrid</td></tr>
<tr><td>clean accuracy</td><td>96.83</td><td>94.66</td><td>96.28</td><td>94.52</td><td>96.73 ± 0.12</td></tr>
<tr><td>FGSM (ε = 0.01)</td><td>84.38</td><td>80.2</td><td>85.6</td><td>72.87</td><td>90.13 ± 0.09</td></tr>
<tr><td>FGSM (ε = 0.02)</td><td>78.08</td><td>41.5</td><td>81.4</td><td>53.9</td><td>86.78 ± 0.07</td></tr>
<tr><td>PGD (ε = 0.01)</td><td>51.01</td><td>50.1</td><td>53.31</td><td>54.43</td><td>59.42 ± 0.07</td></tr>
<tr><td>PGD (ε = 0.02)</td><td>17.74</td><td>8.24</td><td>17.42</td><td>18.86</td><td>22.74 ± 0.04</td></tr>
<tr><td>BIM (ε = 0.01)</td><td>54.38</td><td>47.73</td><td>52.08</td><td>53.71</td><td>57.91 ± 0.08</td></tr>
<tr><td>BIM (ε = 0.02)</td><td>21.26</td><td>8.1</td><td>14.58</td><td>18.05</td><td>20.23 ± 0.05</td></tr>
<tr><td>MIM (ε = 0.01)</td><td>61.24</td><td>51.96</td><td>58.51</td><td>56.32</td><td>62.14 ± 0.08</td></tr>
<tr><td>MIM (ε = 0.02)</td><td>24.84</td><td>5.14</td><td>23.22</td><td>21.95</td><td>25.37 ± 0.04</td></tr>
<tr><td>AA (ε = 0.01)</td><td>49.92</td><td>48.39</td><td>52.02</td><td>52.83</td><td>57.54 ± 0.09</td></tr>
<tr><td>AA (ε = 0.02)</td><td>16.13</td><td>6.90</td><td>16.95</td><td>17.48</td><td>20.12 ± 0.05</td></tr>
<tr><td>C&W (λ = 0.1)</td><td>55.81</td><td>49.94</td><td>66.82</td><td>52.74</td><td>72.14 ± 0.11</td></tr></table>
", + "image_path": "ea730086b6dc727654d80fe2f9d4344644e34be0554cfdd816145d2404b70897.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 427, + 506, + 473 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 427, + 506, + 473 + ], + "spans": [ + { + "bbox": [ + 104, + 427, + 506, + 473 + ], + "type": "text", + "content": "We also extend our experiment to the sub-models trained with WideResNet-28-10 on CIFAR-10. Table 4 shows the performance of the models facing various whitebox attacks. The results indicate that FDT maintains good performance even on more complex network structures. We also evaluated the robustness of an ensemble of eight sub-models, with the results presented in Table 5." + } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 170, + 536, + 440, + 689 + ], + "blocks": [ + { + "bbox": [ + 104, + 492, + 507, + 526 + ], + "lines": [ + { + "bbox": [ + 104, + 492, + 507, + 526 + ], + "spans": [ + { + "bbox": [ + 104, + 492, + 507, + 526 + ], + "type": "text", + "content": "Table 4: Robust Accuracy " + }, + { + "bbox": [ + 104, + 492, + 507, + 526 + ], + "type": "inline_equation", + "content": "(\\%)" + }, + { + "bbox": [ + 104, + 492, + 507, + 526 + ], + "type": "text", + "content": " of different ensemble methods against white-box attacks on CIFAR-10. The " + }, + { + "bbox": [ + 104, + 492, + 507, + 526 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 104, + 492, + 507, + 526 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 492, + 507, + 526 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 104, + 492, + 507, + 526 + ], + "type": "text", + "content": " stand for the " + }, + { + "bbox": [ + 104, + 492, + 507, + 526 + ], + "type": "inline_equation", + "content": "l_{\\infty}" + }, + { + "bbox": [ + 104, + 492, + 507, + 526 + ], + "type": "text", + "content": " norm of the adversarial perturbation and the coefficient of C&W attack respectively. The architecture of sub-model is WRN-28-10." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 170, + 536, + 440, + 689 + ], + "lines": [ + { + "bbox": [ + 170, + 536, + 440, + 689 + ], + "spans": [ + { + "bbox": [ + 170, + 536, + 440, + 689 + ], + "type": "table", + "html": "
<table><tr><td>CIFAR-10</td><td>ADP</td><td>GAL</td><td>DVERGE</td><td>FDT-hybrid</td></tr>
<tr><td>clean accuracy</td><td>92.99</td><td>82.14</td><td>94.32</td><td>94.18 ± 0.06</td></tr>
<tr><td>FGSM (ε = 0.01)</td><td>60.04</td><td>44.94</td><td>71.01</td><td>80.64 ± 0.05</td></tr>
<tr><td>FGSM (ε = 0.02)</td><td>51.69</td><td>36.83</td><td>50.43</td><td>60.09 ± 0.05</td></tr>
<tr><td>PGD (ε = 0.01)</td><td>11.09</td><td>22.10</td><td>44.25</td><td>64.64 ± 0.07</td></tr>
<tr><td>PGD (ε = 0.02)</td><td>2.54</td><td>5.06</td><td>13.27</td><td>26.0 ± 0.03</td></tr>
<tr><td>BIM (ε = 0.01)</td><td>15.81</td><td>22.62</td><td>46.53</td><td>67.36 ± 0.10</td></tr>
<tr><td>BIM (ε = 0.02)</td><td>4.50</td><td>5.43</td><td>17.38</td><td>32.36 ± 0.06</td></tr>
<tr><td>MIM (ε = 0.01)</td><td>18.18</td><td>25.97</td><td>44.21</td><td>64.36 ± 0.08</td></tr>
<tr><td>MIM (ε = 0.02)</td><td>4.72</td><td>7.81</td><td>12.83</td><td>25.64 ± 0.05</td></tr>
<tr><td>AA (ε = 0.01)</td><td>9.38</td><td>19.34</td><td>43.23</td><td>63.45 ± 0.08</td></tr>
<tr><td>AA (ε = 0.02)</td><td>1.17</td><td>3.93</td><td>12.49</td><td>25.23 ± 0.04</td></tr>
<tr><td>C&W (λ = 0.1)</td><td>37.81</td><td>19.05</td><td>46.32</td><td>47.23 ± 0.10</td></tr></table>
", + "image_path": "14d0117ad636af5d0c656f9cd150fadeed1db5e0a79e3e327248db4c1840eb39.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 709, + 507, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 507, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 507, + 734 + ], + "type": "text", + "content": "Table 6 is the result of ensemble methods trained with WideResNet-28-10 on Tiny-ImageNet-200. We test the robustness of different methods under widely used white-box attacks. Due to the high" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 232, + 132, + 376, + 285 + ], + "blocks": [ + { + "bbox": [ + 104, + 89, + 506, + 124 + ], + "lines": [ + { + "bbox": [ + 104, + 89, + 506, + 124 + ], + "spans": [ + { + "bbox": [ + 104, + 89, + 506, + 124 + ], + "type": "text", + "content": "Table 5: Robust Accuracy (\\%) of an ensemble of eight sub-models against white-box attacks on CIFAR-10. The " + }, + { + "bbox": [ + 104, + 89, + 506, + 124 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 104, + 89, + 506, + 124 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 89, + 506, + 124 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 104, + 89, + 506, + 124 + ], + "type": "text", + "content": " stand for the " + }, + { + "bbox": [ + 104, + 89, + 506, + 124 + ], + "type": "inline_equation", + "content": "l_{\\infty}" + }, + { + "bbox": [ + 104, + 89, + 506, + 124 + ], + "type": "text", + "content": " norm of the adversarial perturbation and the coefficient of C&W attack respectively. The architecture of sub-model is WRN-28-10." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 232, + 132, + 376, + 285 + ], + "lines": [ + { + "bbox": [ + 232, + 132, + 376, + 285 + ], + "spans": [ + { + "bbox": [ + 232, + 132, + 376, + 285 + ], + "type": "table", + "html": "
<table><tr><td>CIFAR-10</td><td>FDT-hybrid</td></tr>
<tr><td>clean accuracy</td><td>93.72 ± 0.11</td></tr>
<tr><td>FGSM (ε = 0.01)</td><td>86.31 ± 0.07</td></tr>
<tr><td>FGSM (ε = 0.02)</td><td>67.29 ± 0.06</td></tr>
<tr><td>PGD (ε = 0.01)</td><td>72.02 ± 0.07</td></tr>
<tr><td>PGD (ε = 0.02)</td><td>45.42 ± 0.05</td></tr>
<tr><td>BIM (ε = 0.01)</td><td>73.68 ± 0.10</td></tr>
<tr><td>BIM (ε = 0.02)</td><td>44.53 ± 0.06</td></tr>
<tr><td>MIM (ε = 0.01)</td><td>71.36 ± 0.06</td></tr>
<tr><td>MIM (ε = 0.02)</td><td>45.24 ± 0.06</td></tr>
<tr><td>AA (ε = 0.01)</td><td>70.45 ± 0.08</td></tr>
<tr><td>AA (ε = 0.02)</td><td>44.23 ± 0.07</td></tr>
<tr><td>C&W (λ = 0.1)</td><td>72.37 ± 0.11</td></tr></table>
", + "image_path": "229c236fc1f0db8d2b1319a5fcb120c20f1c96eba1c0f330a07fa65fe8a8d57e.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 313, + 504, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 313, + 504, + 348 + ], + "spans": [ + { + "bbox": [ + 104, + 313, + 504, + 348 + ], + "type": "text", + "content": "time complexity of the TRS, we do not compare with it here. The experimental results show that all ensemble models achieve comparable levels of clean accuracy while FDT-hybrid achieves better robust accuracy than other methods." + } + ] + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 164, + 407, + 444, + 558 + ], + "blocks": [ + { + "bbox": [ + 104, + 372, + 506, + 407 + ], + "lines": [ + { + "bbox": [ + 104, + 372, + 506, + 407 + ], + "spans": [ + { + "bbox": [ + 104, + 372, + 506, + 407 + ], + "type": "text", + "content": "Table 6: Robust Accuracy (%) of different ensemble methods against white-box attacks on TinyImageNet-200. The " + }, + { + "bbox": [ + 104, + 372, + 506, + 407 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 104, + 372, + 506, + 407 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 372, + 506, + 407 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 104, + 372, + 506, + 407 + ], + "type": "text", + "content": " stand for the " + }, + { + "bbox": [ + 104, + 372, + 506, + 407 + ], + "type": "inline_equation", + "content": "l_{\\infty}" + }, + { + "bbox": [ + 104, + 372, + 506, + 407 + ], + "type": "text", + "content": " norm of the adversarial perturbation and the coefficient of C&W attack respectively. The last column is the ensemble model trained with FDT-hybrid." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 164, + 407, + 444, + 558 + ], + "lines": [ + { + "bbox": [ + 164, + 407, + 444, + 558 + ], + "spans": [ + { + "bbox": [ + 164, + 407, + 444, + 558 + ], + "type": "table", + "html": "
<table><tr><td>Tiny-ImageNet-200</td><td>ADP</td><td>GAL</td><td>DVERGE</td><td>FDT-hybrid</td></tr>
<tr><td>clean accuracy</td><td>49.88</td><td>45.7</td><td>51.46</td><td>64.21 ± 0.06</td></tr>
<tr><td>FGSM (ε = 0.01)</td><td>10.46</td><td>1.24</td><td>22.82</td><td>21.73 ± 0.04</td></tr>
<tr><td>FGSM (ε = 0.02)</td><td>4.38</td><td>0.59</td><td>18.42</td><td>19.28 ± 0.04</td></tr>
<tr><td>PGD (ε = 0.01)</td><td>0.02</td><td>0.02</td><td>3.6</td><td>4.76 ± 0.02</td></tr>
<tr><td>PGD (ε = 0.02)</td><td>0.02</td><td>0.01</td><td>0.34</td><td>0.45 ± 0.01</td></tr>
<tr><td>BIM (ε = 0.01)</td><td>0.07</td><td>0.02</td><td>3.35</td><td>4.81 ± 0.03</td></tr>
<tr><td>BIM (ε = 0.02)</td><td>0.03</td><td>0.01</td><td>0.28</td><td>0.32 ± 0.00</td></tr>
<tr><td>MIM (ε = 0.01)</td><td>0.11</td><td>0.02</td><td>4.36</td><td>6.13 ± 0.03</td></tr>
<tr><td>MIM (ε = 0.02)</td><td>0.03</td><td>0.01</td><td>0.41</td><td>0.48 ± 0.00</td></tr>
<tr><td>AA (ε = 0.01)</td><td>0</td><td>0</td><td>0</td><td>2.66 ± 0.02</td></tr>
<tr><td>AA (ε = 0.02)</td><td>0</td><td>0</td><td>0</td><td>0.02 ± 0.00</td></tr>
<tr><td>C&W (λ = 0.01)</td><td>2.36</td><td>0.13</td><td>9.54</td><td>19.47 ± 0.06</td></tr></table>
", + "image_path": "f6d2c34f416926c50bd779fa548c7f14db9a4d7374cd62334fcf7aab8309d2c6.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 577, + 506, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 577, + 506, + 624 + ], + "spans": [ + { + "bbox": [ + 104, + 577, + 506, + 624 + ], + "type": "text", + "content": "Ablation study on model architectures. Table 7 presents the results across different model architectures, including ResNet20, ResNet50, WRN28-10, and WRN34-10. While larger models generally achieve higher clean and robust accuracy, the results suggest that our method consistently enhances robustness under various attack scenarios, demonstrating its applicability across diverse architectures." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 626, + 504, + 672 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 626, + 504, + 672 + ], + "spans": [ + { + "bbox": [ + 104, + 626, + 504, + 672 + ], + "type": "text", + "content": "Ablation study on allocation methods. Table 8 compares the performance of FDT-hybrid with different weakness set allocation methods on CIFAR-10. The results indicate that our proposed allocation method achieves better clean accuracy and robustness under various attack scenarios than randomly uniform allocation." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 677, + 505, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 505, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 505, + 734 + ], + "type": "text", + "content": "Ablation study on " + }, + { + "bbox": [ + 104, + 677, + 505, + 734 + ], + "type": "inline_equation", + "content": "\\tau_{1}" + }, + { + "bbox": [ + 104, + 677, + 505, + 734 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 677, + 505, + 734 + ], + "type": "inline_equation", + "content": "\\tau_{2}" + }, + { + "bbox": [ + 104, + 677, + 505, + 734 + ], + "type": "text", + "content": ". Table 9 presents the results of FDT-hybrid with various combinations of selection thresholds " + }, + { + "bbox": [ + 104, + 677, + 505, + 734 + ], + "type": "inline_equation", + "content": "\\tau_{1}" + }, + { + "bbox": [ + 104, + 677, + 505, + 734 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 677, + 505, + 734 + ], + "type": "inline_equation", + "content": "\\tau_{2}" + }, + { + "bbox": [ + 104, + 677, + 505, + 734 + ], + "type": "text", + "content": " on CIFAR-10. The experiments reveal the impact of different thresholds on both clean accuracy and robustness under adversarial attacks. As " + }, + { + "bbox": [ + 104, + 677, + 505, + 734 + ], + "type": "inline_equation", + "content": "\\tau_{2}" + }, + { + "bbox": [ + 104, + 677, + 505, + 734 + ], + "type": "text", + "content": " increases, robustness improves across all metrics, but clean accuracy decreases. For a fixed " + }, + { + "bbox": [ + 104, + 677, + 505, + 734 + ], + "type": "inline_equation", + "content": "\\tau_{2}" + }, + { + "bbox": [ + 104, + 677, + 505, + 734 + ], + "type": "text", + "content": ", increasing " + }, + { + "bbox": [ + 104, + 677, + 505, + 734 + ], + "type": "inline_equation", + "content": "\\tau_{1}" + }, + { + "bbox": [ + 104, + 677, + 505, + 734 + ], + "type": "text", + "content": " generally leads to a trade-off between clean accuracy and robustness. 
Setting " + }, + { + "bbox": [ + 104, + 677, + 505, + 734 + ], + "type": "inline_equation", + "content": "\\tau_{1} = 0.2" + }, + { + "bbox": [ + 104, + 677, + 505, + 734 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 677, + 505, + 734 + ], + "type": "inline_equation", + "content": "\\tau_{2} = 0.8" + }, + { + "bbox": [ + 104, + 677, + 505, + 734 + ], + "type": "text", + "content": " achieves a relatively" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "content": "balanced performance, maintaining both competitive clean accuracy and robust accuracy under various attacks." + } + ] + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 152, + 160, + 459, + 312 + ], + "blocks": [ + { + "bbox": [ + 104, + 126, + 506, + 159 + ], + "lines": [ + { + "bbox": [ + 104, + 126, + 506, + 159 + ], + "spans": [ + { + "bbox": [ + 104, + 126, + 506, + 159 + ], + "type": "text", + "content": "Table 7: Robust Accuracy " + }, + { + "bbox": [ + 104, + 126, + 506, + 159 + ], + "type": "inline_equation", + "content": "(\\%)" + }, + { + "bbox": [ + 104, + 126, + 506, + 159 + ], + "type": "text", + "content": " of different model architectures against white-box attacks on Cifar10. The " + }, + { + "bbox": [ + 104, + 126, + 506, + 159 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 104, + 126, + 506, + 159 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 126, + 506, + 159 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 104, + 126, + 506, + 159 + ], + "type": "text", + "content": " stand for the " + }, + { + "bbox": [ + 104, + 126, + 506, + 159 + ], + "type": "inline_equation", + "content": "l_{\\infty}" + }, + { + "bbox": [ + 104, + 126, + 506, + 159 + ], + "type": "text", + "content": " norm of the adversarial perturbation and the coefficient of C&W attack respectively." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 152, + 160, + 459, + 312 + ], + "lines": [ + { + "bbox": [ + 152, + 160, + 459, + 312 + ], + "spans": [ + { + "bbox": [ + 152, + 160, + 459, + 312 + ], + "type": "table", + "html": "
<table><tr><td>CIFAR-10</td><td>ResNet20</td><td>ResNet50</td><td>WRN28-10</td><td>WRN34-10</td></tr>
<tr><td>clean accuracy</td><td>90.02</td><td>93.23</td><td>94.18</td><td>94.63</td></tr>
<tr><td>FGSM (ε = 0.01)</td><td>72.24</td><td>76.65</td><td>80.64</td><td>81.04</td></tr>
<tr><td>FGSM (ε = 0.02)</td><td>58.04</td><td>58.59</td><td>60.09</td><td>60.92</td></tr>
<tr><td>PGD (ε = 0.01)</td><td>48.48</td><td>60.23</td><td>64.64</td><td>65.38</td></tr>
<tr><td>PGD (ε = 0.02)</td><td>20.01</td><td>24.35</td><td>26.00</td><td>27.42</td></tr>
<tr><td>BIM (ε = 0.01)</td><td>48.57</td><td>60.43</td><td>67.36</td><td>68.29</td></tr>
<tr><td>BIM (ε = 0.02)</td><td>16.63</td><td>23.57</td><td>32.36</td><td>33.86</td></tr>
<tr><td>MIM (ε = 0.01)</td><td>51.48</td><td>60.81</td><td>64.36</td><td>64.71</td></tr>
<tr><td>MIM (ε = 0.02)</td><td>20.09</td><td>24.54</td><td>25.64</td><td>26.42</td></tr>
<tr><td>AA (ε = 0.01)</td><td>51.56</td><td>60.48</td><td>63.45</td><td>64.01</td></tr>
<tr><td>AA (ε = 0.02)</td><td>19.42</td><td>24.21</td><td>25.23</td><td>26.39</td></tr>
<tr><td>C&W (λ = 0.01)</td><td>56.08</td><td>56.55</td><td>57.23</td><td>57.52</td></tr></table>
", + "image_path": "7e18fe84f6195a634b498bafd1f5b8551dba52b6460020cc8b879d3c9ab43d44.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 113, + 371, + 495, + 411 + ], + "blocks": [ + { + "bbox": [ + 104, + 348, + 506, + 370 + ], + "lines": [ + { + "bbox": [ + 104, + 348, + 506, + 370 + ], + "spans": [ + { + "bbox": [ + 104, + 348, + 506, + 370 + ], + "type": "text", + "content": "Table 8: Performance of FDT-hybrid with different weakness set allocation method on CIFAR-10. The other settings are consistent with those in Table 1." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 113, + 371, + 495, + 411 + ], + "lines": [ + { + "bbox": [ + 113, + 371, + 495, + 411 + ], + "spans": [ + { + "bbox": [ + 113, + 371, + 495, + 411 + ], + "type": "table", + "html": "
<table><tr><td>Allocation method</td><td>Clean accuracy</td><td>FGSM (ε = 0.02)</td><td>PGD (ε = 0.02)</td><td>AutoAttack (ε = 0.02)</td></tr>
<tr><td>Uniform Random</td><td>89.32</td><td>56.20</td><td>18.24</td><td>17.89</td></tr>
<tr><td>Ours</td><td>90.20</td><td>58.04</td><td>20.01</td><td>19.42</td></tr></table>
", + "image_path": "7a88a1aa78ff51829267b334b6794a941fd03d34e267f681384df3b676da0d0b.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 116, + 470, + 493, + 587 + ], + "blocks": [ + { + "bbox": [ + 104, + 447, + 504, + 470 + ], + "lines": [ + { + "bbox": [ + 104, + 447, + 504, + 470 + ], + "spans": [ + { + "bbox": [ + 104, + 447, + 504, + 470 + ], + "type": "text", + "content": "Table 9: Performance of FDT-hybrid with different selection thresholds " + }, + { + "bbox": [ + 104, + 447, + 504, + 470 + ], + "type": "inline_equation", + "content": "\\tau_{1}" + }, + { + "bbox": [ + 104, + 447, + 504, + 470 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 447, + 504, + 470 + ], + "type": "inline_equation", + "content": "\\tau_{2}" + }, + { + "bbox": [ + 104, + 447, + 504, + 470 + ], + "type": "text", + "content": " on CIFAR-10. The other settings are consistent with those in Table 1." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 116, + 470, + 493, + 587 + ], + "lines": [ + { + "bbox": [ + 116, + 470, + 493, + 587 + ], + "spans": [ + { + "bbox": [ + 116, + 470, + 493, + 587 + ], + "type": "table", + "html": "
ThresholdsClean accuracyFGSM (ε=0.02)PGD (ε=0.02)AutoAttack (ε=0.02)
τ1=0.2,τ2=0.791.0356.6217.7417.60
τ1=0.2,τ2=0.890.2058.0420.0119.42
τ1=0.2,τ2=0.989.4658.4820.1219.57
τ1=0.4,τ2=0.789.7556.8917.9317.82
τ1=0.4,τ2=0.889.0858.2120.0119.47
τ1=0.4,τ2=0.988.4458.5320.0919.61
τ1=0.6,τ2=0.789.6253.3515.2714.63
τ1=0.6,τ2=0.888.8455.3315.4215.24
τ1=0.6,τ2=0.988.1255.4615.8315.47
", + "image_path": "322f7bd9a01798dfc54a38e98d6770546d8cdddde92530762c95146418592f2d.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 621, + 279, + 632 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 621, + 279, + 632 + ], + "spans": [ + { + "bbox": [ + 105, + 621, + 279, + 632 + ], + "type": "text", + "content": "F.2 RESULTS FOR BLACK-BOX ATTACK" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 643, + 506, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 643, + 506, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 643, + 506, + 732 + ], + "type": "text", + "content": "In the black-box setting, the attacker's knowledge usually is limited to the original training dataset and has no information about the model. This setting represents a more practical attack scenario. The attacker can train a surrogate model to generate transferable adversarial examples and transfer them to the target ensemble model. We utilize a single ResNet-20 model as the surrogate model. Adversarial examples are generated on the surrogate model using the SPSA algorithm (Spall, 1992). Figure 5 shows the robust accuracy of ensemble models against black-box attacks under different degrees of perturbation. As we can see, FDT-hybrid ensemble training strategies outperform the other ensemble training strategy against black-box attacks both on CIFAR10 and CIFAR100." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 111, + 102, + 301, + 210 + ], + "blocks": [ + { + "bbox": [ + 111, + 102, + 301, + 210 + ], + "lines": [ + { + "bbox": [ + 111, + 102, + 301, + 210 + ], + "spans": [ + { + "bbox": [ + 111, + 102, + 301, + 210 + ], + "type": "image", + "image_path": "3a9bbed4c976150c6e83147e047a6e57ab5122f2f57653ad1f7a35b88ed612c5.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 198, + 215, + 210, + 224 + ], + "lines": [ + { + "bbox": [ + 198, + 215, + 210, + 224 + ], + "spans": [ + { + "bbox": [ + 198, + 215, + 210, + 224 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 306, + 101, + 497, + 210 + ], + "blocks": [ + { + "bbox": [ + 306, + 101, + 497, + 210 + ], + "lines": [ + { + "bbox": [ + 306, + 101, + 497, + 210 + ], + "spans": [ + { + "bbox": [ + 306, + 101, + 497, + 210 + ], + "type": "image", + "image_path": "41ef6eb8c4ed9db8cc8dfb81e794e29dc4f266be742cbaf143ed3a489bfd37bb.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 393, + 215, + 406, + 224 + ], + "lines": [ + { + "bbox": [ + 393, + 215, + 406, + 224 + ], + "spans": [ + { + "bbox": [ + 393, + 215, 
+ 406, + 224 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 238, + 504, + 261 + ], + "lines": [ + { + "bbox": [ + 104, + 238, + 504, + 261 + ], + "spans": [ + { + "bbox": [ + 104, + 238, + 504, + 261 + ], + "type": "text", + "content": "Figure 5: Robust Accuracy of different ensemble models against black-box attacks with different perturbation scales " + }, + { + "bbox": [ + 104, + 238, + 504, + 261 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 104, + 238, + 504, + 261 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 278, + 365, + 289 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 278, + 365, + 289 + ], + "spans": [ + { + "bbox": [ + 105, + 278, + 365, + 289 + ], + "type": "text", + "content": "F.3 TRADE-OFF BETWEEN CLEAN AND ROBUST ACCURACY" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 301, + 504, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 301, + 504, + 369 + ], + "spans": [ + { + "bbox": [ + 104, + 301, + 504, + 369 + ], + "type": "text", + "content": "In this section, we explore the trade-off between clean accuracy and robust accuracy by varying the frequency selection threshold " + }, + { + "bbox": [ + 104, + 301, + 504, + 369 + ], + "type": "inline_equation", + "content": "\\tau_{2}" + }, + { + "bbox": [ + 104, + 301, + 504, + 369 + ], + "type": "text", + "content": " (as mentioned in Section 4.2). We set " + }, + { + "bbox": [ + 104, + 301, + 504, + 369 + ], + "type": "inline_equation", + "content": "\\tau_{1}" + }, + { + "bbox": [ + 104, + 301, + 504, + 369 + ], + "type": "text", + "content": " to 0.1. To assess the adversarial robustness, we utilize the PGD attack under " + }, + { + "bbox": [ + 104, + 301, + 504, + 369 + ], + "type": "inline_equation", + "content": "l_{\\infty}" + }, + { + "bbox": [ + 104, + 301, + 504, + 369 + ], + "type": "text", + "content": " perturbations of size " + }, + { + "bbox": [ + 104, + 301, + 504, + 369 + ], + "type": "inline_equation", + "content": "\\epsilon = 0.01" + }, + { + "bbox": [ + 104, + 301, + 504, + 369 + ], + "type": "text", + "content": " as a benchmark. We train a set of ResNet-20 FDT-hybrid models on CIFAR-10 and CIFAR-100 with various frequency selection thresholds " + }, + { + "bbox": [ + 104, + 301, + 504, + 369 + ], + "type": "inline_equation", + "content": "\\tau_{2} \\in \\{0.4, 0.6, 0.8, 1.0, 1.2, 1.6\\}" + }, + { + "bbox": [ + 104, + 301, + 504, + 369 + ], + "type": "text", + "content": ". Figure 6 shows that the ensemble model has lower clean accuracy and higher robust accuracy as " + }, + { + "bbox": [ + 104, + 301, + 504, + 369 + ], + "type": "inline_equation", + "content": "\\tau_{2}" + }, + { + "bbox": [ + 104, + 301, + 504, + 369 + ], + "type": "text", + "content": " increases."
+ } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 110, + 407, + 299, + 529 + ], + "blocks": [ + { + "bbox": [ + 110, + 407, + 299, + 529 + ], + "lines": [ + { + "bbox": [ + 110, + 407, + 299, + 529 + ], + "spans": [ + { + "bbox": [ + 110, + 407, + 299, + 529 + ], + "type": "image", + "image_path": "96556ece08cecab0da72a8399ad072088c7c47234f3802bd3d1de1d7d1b1dad8.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 198, + 534, + 210, + 544 + ], + "lines": [ + { + "bbox": [ + 198, + 534, + 210, + 544 + ], + "spans": [ + { + "bbox": [ + 198, + 534, + 210, + 544 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 556, + 504, + 578 + ], + "lines": [ + { + "bbox": [ + 104, + 556, + 504, + 578 + ], + "spans": [ + { + "bbox": [ + 104, + 556, + 504, + 578 + ], + "type": "text", + "content": "Figure 6: (a) shows the trade-off on CIFAR-10, while (b) shows it on CIFAR-100. From left to right, we decrease the trade-off parameter " + }, + { + "bbox": [ + 104, + 556, + 504, + 578 + ], + "type": "inline_equation", + "content": "\\tau_{2}" + }, + { + "bbox": [ + 104, + 556, + 504, + 578 + ], + "type": "text", + "content": " for FDT." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 306, + 406, + 496, + 529 + ], + "blocks": [ + { + "bbox": [ + 306, + 406, + 496, + 529 + ], + "lines": [ + { + "bbox": [ + 306, + 406, + 496, + 529 + ], + "spans": [ + { + "bbox": [ + 306, + 406, + 496, + 529 + ], + "type": "image", + "image_path": "1840eb354f1c0d1bf3c15cbd742aca09b7c5a17ae284709ea962b17eb2e3a2b4.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 393, + 533, + 406, + 544 + ], + "lines": [ + { + "bbox": [ + 393, + 533, + 406, + 544 + ], + "spans": [ + { + "bbox": [ + 393, + 533, + 406, + 544 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 609, + 349, + 620 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 609, + 349, + 620 + ], + "spans": [ + { + "bbox": [ + 105, + 609, + 349, + 620 + ], + "type": "text", + "content": "F.4 TRANSFERABILITY ACROSS VARIOUS SUB-MODELS" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 632, + 506, + 731 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 632, + 506, + 731 + ], + "spans": [ + { + "bbox": [ + 104, + 632, + 506, + 731 + ], + "type": "text", + "content": "To further investigate the diversity between sub-models, we conduct an analysis by generating adversarial examples using one sub-model and evaluating their accuracy on other target sub-models. The transferability of these adversarial examples among sub-models is visualized in Figure 7, considering different ensemble training methods on the CIFAR10 dataset. We generate adversarial examples from the \"base model\" and test their accuracy on the \"target model\". The experimental results indicate that FDT exhibits comparable performance to DVERGE and TRS in reducing the transferability of adversarial samples across different sub-models. This demonstrates that FDT not only enhances the diversity of weaknesses within the dataset but also weakens the transferability of adversarial examples between sub-models."
+ } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 148, + 86, + 252, + 194 + ], + "blocks": [ + { + "bbox": [ + 148, + 86, + 252, + 194 + ], + "lines": [ + { + "bbox": [ + 148, + 86, + 252, + 194 + ], + "spans": [ + { + "bbox": [ + 148, + 86, + 252, + 194 + ], + "type": "image", + "image_path": "487827d0491099c648319eac4b498797d48312a2c70fa6583e223a6fb92e0575.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 192, + 200, + 204, + 209 + ], + "lines": [ + { + "bbox": [ + 192, + 200, + 204, + 209 + ], + "spans": [ + { + "bbox": [ + 192, + 200, + 204, + 209 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 254, + 86, + 358, + 194 + ], + "blocks": [ + { + "bbox": [ + 254, + 86, + 358, + 194 + ], + "lines": [ + { + "bbox": [ + 254, + 86, + 358, + 194 + ], + "spans": [ + { + "bbox": [ + 254, + 86, + 358, + 194 + ], + "type": "image", + "image_path": "57ddf9ef19e594195508f3ae1c10e54a8dbe8b1e70cc0fa19353407b75b3f5fb.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 298, + 200, + 309, + 209 + ], + "lines": [ + { + "bbox": [ + 298, + 200, + 309, + 209 + ], + "spans": [ + { + "bbox": [ + 298, + 200, + 309, + 209 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 359, + 86, + 463, + 194 + ], + "blocks": [ + { + "bbox": [ + 359, + 86, + 463, + 194 + ], + "lines": [ + { + "bbox": [ + 359, + 86, + 463, + 194 + ], + "spans": [ + { + "bbox": [ + 359, + 86, + 463, + 194 + ], + "type": "image", + "image_path": "e2d764b96904022fe7482aa65ff6f3445097fd2fb937afc6e30e43965fbcf8b6.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 403, + 200, + 414, + 209 + ], + "lines": [ + { + "bbox": [ + 403, + 200, + 414, + 209 + ], + "spans": [ + { + "bbox": [ + 403, + 200, + 414, + 209 + ], + "type": "text", + "content": "(c)" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 146, + 219, + 250, + 328 + ], + "blocks": [ + { + "bbox": [ + 146, + 219, + 250, + 328 + ], + "lines": [ + { + "bbox": [ + 146, + 219, + 250, + 328 + ], + "spans": [ + { + "bbox": [ + 146, + 219, + 250, + 328 + ], + "type": "image", + "image_path": "2ad9216a92efe39f3176dc41ec4a12ae77535776bdeea00c191a7bbbed2cc1c2.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 190, + 333, + 203, + 343 + ], + "lines": [ + { + "bbox": [ + 190, + 333, + 203, + 343 + ], + "spans": [ + { + "bbox": [ + 190, + 333, + 203, + 343 + ], + "type": "text",
"content": "(d)" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 253, + 220, + 356, + 327 + ], + "blocks": [ + { + "bbox": [ + 253, + 220, + 356, + 327 + ], + "lines": [ + { + "bbox": [ + 253, + 220, + 356, + 327 + ], + "spans": [ + { + "bbox": [ + 253, + 220, + 356, + 327 + ], + "type": "image", + "image_path": "509f5d101d695c95d266c4e7aed497ab6ccf09a4c35a5562d07b8530693c888f.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 297, + 333, + 308, + 343 + ], + "lines": [ + { + "bbox": [ + 297, + 333, + 308, + 343 + ], + "spans": [ + { + "bbox": [ + 297, + 333, + 308, + 343 + ], + "type": "text", + "content": "(e)" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 359, + 220, + 461, + 328 + ], + "blocks": [ + { + "bbox": [ + 359, + 220, + 461, + 328 + ], + "lines": [ + { + "bbox": [ + 359, + 220, + 461, + 328 + ], + "spans": [ + { + "bbox": [ + 359, + 220, + 461, + 328 + ], + "type": "image", + "image_path": "9a7aa25d9254e905c5f8bc033b282eaab1dd2a4c750012ce267b3c828efa9408.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 403, + 333, + 413, + 343 + ], + "lines": [ + { + "bbox": [ + 403, + 333, + 413, + 343 + ], + "spans": [ + { + "bbox": [ + 403, + 333, + 413, + 343 + ], + "type": "text", + "content": "(f)" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 356, + 504, + 390 + ], + "lines": [ + { + "bbox": [ + 104, + 356, + 504, + 390 + ], + "spans": [ + { + "bbox": [ + 104, + 356, + 504, + 390 + ], + "type": "text", + "content": "Figure 7: Pair-wise adversarial transferability between sub-models against PGD attack with " + }, + { + "bbox": [ + 104, + 356, + 504, + 390 + ], + "type": "inline_equation", + "content": "\\epsilon = 0.02" + }, + { + "bbox": [ + 104, + 356, + 504, + 390 + ], + "type": "text", + "content": " on CIFAR-10. The value represents the success rate of adversarial examples generated by the base model in attacking the target model." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 400, + 308, + 411 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 400, + 308, + 411 + ], + "spans": [ + { + "bbox": [ + 105, + 400, + 308, + 411 + ], + "type": "text", + "content": "F.5 COMPARISON WITH ADVERSARIAL TRAINING" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 421, + 506, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 421, + 506, + 609 + ], + "spans": [ + { + "bbox": [ + 104, + 421, + 506, + 609 + ], + "type": "text", + "content": "We use targeted attacks in our data transformation, which differs significantly from adversarial training. First, we employ a simple pre-trained network (VGG11 in our experiment) to compute adversarial examples, thereby accelerating the training process. Second, we utilize only the low-amplitude part of the adversarial examples for data transformation, which helps maintain the model's clean accuracy. 
We compare our method with several popular approaches (Wang et al., 2023b; Rade & Moosavi-Dezfooli, 2021; Xu et al., 2023) on CIFAR-10 using AutoAttack under " + }, + { + "bbox": [ + 104, + 421, + 506, + 609 + ], + "type": "inline_equation", + "content": "l_{\\infty}" + }, + { + "bbox": [ + 104, + 421, + 506, + 609 + ], + "type": "text", + "content": " perturbations (" + }, + { + "bbox": [ + 104, + 421, + 506, + 609 + ], + "type": "inline_equation", + "content": "\\epsilon = 8 / 255" + }, + { + "bbox": [ + 104, + 421, + 506, + 609 + ], + "type": "text", + "content": "). Wang et al. (2023b) generated training datasets using a diffusion model, followed by adversarial training on these datasets. For fairness, we compare our method with the version proposed by Wang et al. (2023b) that uses 50k generated images. Rade & Moosavi-Dezfooli (2021) used \"helper examples\" to aid adversarial training. Xu et al. (2023) proposed Dynamics-Aware Robust Training, which encourages the decision boundary to adjust in a way that prioritizes increasing smaller margins. We use WideResNet-28-10 as the sub-model and ensemble eight sub-models without using generated data. The results in Table 10 indicate that, although the robustness of our method is not the highest, it maintains clean accuracy with almost no decline. Moreover, our method does not require additional generated data or adversarial training, and even though ensembling is required, the training efficiency remains relatively high. This suggests a potential way to enhance robustness while minimizing the decrease in clean accuracy." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 613, + 506, + 702 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 613, + 506, + 702 + ], + "spans": [ + { + "bbox": [ + 104, + 613, + 506, + 702 + ], + "type": "text", + "content": "To further illustrate our method's advantage, we conduct additional experiments to compare the \"robustness-clean accuracy\" trade-off curves of our method and AT under different settings. Fig. 8 compares the trade-off curve obtained by HAT (Rade & Moosavi-Dezfooli, 2021) with that of FDT-hybrid. 
For HAT, we fix " + }, + { + "bbox": [ + 104, + 613, + 506, + 702 + ], + "type": "inline_equation", + "content": "\\gamma = 0.25" + }, + { + "bbox": [ + 104, + 613, + 506, + 702 + ], + "type": "text", + "content": " and vary " + }, + { + "bbox": [ + 104, + 613, + 506, + 702 + ], + "type": "inline_equation", + "content": "\\beta \\in \\{0.1, 0.5, 2.5, 3.0, 4.0, 5.0\\}" + }, + { + "bbox": [ + 104, + 613, + 506, + 702 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 104, + 613, + 506, + 702 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 104, + 613, + 506, + 702 + ], + "type": "text", + "content": " is the coefficient of the robustness loss, and higher " + }, + { + "bbox": [ + 104, + 613, + 506, + 702 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 104, + 613, + 506, + 702 + ], + "type": "text", + "content": " indicates higher robust accuracy); for FDT-hybrid, we fix " + }, + { + "bbox": [ + 104, + 613, + 506, + 702 + ], + "type": "inline_equation", + "content": "\\tau_{1} = 0.2" + }, + { + "bbox": [ + 104, + 613, + 506, + 702 + ], + "type": "text", + "content": " and vary " + }, + { + "bbox": [ + 104, + 613, + 506, + 702 + ], + "type": "inline_equation", + "content": "\\tau_{2} \\in \\{0.5, 0.7, 0.9, 1.1, 1.3, 1.5\\}" + }, + { + "bbox": [ + 104, + 613, + 506, + 702 + ], + "type": "text", + "content": ". We observe that HAT's robustness declines rapidly when the " + }, + { + "bbox": [ + 104, + 613, + 506, + 702 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 104, + 613, + 506, + 702 + ], + "type": "text", + "content": " parameter is small (i.e., as clean accuracy increases). This result shows the significant advantage of our method when a clean accuracy above " + }, + { + "bbox": [ + 104, + 613, + 506, + 702 + ], + "type": "inline_equation", + "content": "90\\%" + }, + { + "bbox": [ + 104, + 613, + 506, + 702 + ], + "type": "text", + "content": " is required."
+ } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 211, + 200, + 383, + 316 + ], + "blocks": [ + { + "bbox": [ + 211, + 190, + 400, + 200 + ], + "lines": [ + { + "bbox": [ + 211, + 190, + 400, + 200 + ], + "spans": [ + { + "bbox": [ + 211, + 190, + 400, + 200 + ], + "type": "text", + "content": "Trade-off between clean accuracy and robust accuracy" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 211, + 200, + 383, + 316 + ], + "lines": [ + { + "bbox": [ + 211, + 200, + 383, + 316 + ], + "spans": [ + { + "bbox": [ + 211, + 200, + 383, + 316 + ], + "type": "image", + "image_path": "6373b177569abf83ef1d4ee46c795b8921d8725a1ce03bdf96e33c8e603fc9a8.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 330, + 506, + 354 + ], + "lines": [ + { + "bbox": [ + 104, + 330, + 506, + 354 + ], + "spans": [ + { + "bbox": [ + 104, + 330, + 506, + 354 + ], + "type": "text", + "content": "Figure 8: It shows the trade-off curves on CIFAR-10. From left to right, we decrease the trade-off parameter " + }, + { + "bbox": [ + 104, + 330, + 506, + 354 + ], + "type": "inline_equation", + "content": "\\tau_{2}" + }, + { + "bbox": [ + 104, + 330, + 506, + 354 + ], + "type": "text", + "content": " for FDT, and decrease the trade-off parameter " + }, + { + "bbox": [ + 104, + 330, + 506, + 354 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 104, + 330, + 506, + 354 + ], + "type": "text", + "content": " for HAT." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 156, + 578, + 455, + 641 + ], + "blocks": [ + { + "bbox": [ + 104, + 545, + 504, + 569 + ], + "lines": [ + { + "bbox": [ + 104, + 545, + 504, + 569 + ], + "spans": [ + { + "bbox": [ + 104, + 545, + 504, + 569 + ], + "type": "text", + "content": "Table 10: Clean accuracy and robust accuracy (\\%) of different methods against AutoAttack under " + }, + { + "bbox": [ + 104, + 545, + 504, + 569 + ], + "type": "inline_equation", + "content": "l_{\\infty}" + }, + { + "bbox": [ + 104, + 545, + 504, + 569 + ], + "type": "text", + "content": " perturbations (" + }, + { + "bbox": [ + 104, + 545, + 504, + 569 + ], + "type": "inline_equation", + "content": "\\epsilon = 8/255" + }, + { + "bbox": [ + 104, + 545, + 504, + 569 + ], + "type": "text", + "content": ") on CIFAR-10." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 156, + 578, + 455, + 641 + ], + "lines": [ + { + "bbox": [ + 156, + 578, + 455, + 641 + ], + "spans": [ + { + "bbox": [ + 156, + 578, + 455, + 641 + ], + "type": "table", + "html": "
CIFAR-10clean accuracyrobust accuracy
(Wang et al., 2023b)86.1555.71
(Rade & Moosavi-Dezfooli, 2021)84.9049.08
(Xu et al., 2023)85.5554.69
OURS (FDT-hybrid)93.7234.61
", + "image_path": "2c93b3a471c98b16baae6a344fd0b7bf0b48024ec8c1e2dca6182ec63da46113.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file